index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
997,200 | 1c1979f073a67eed276775b6a7905601bf7802c7 | #11-12 doesn't have many files
#importing is still very messy
#try and get graduation rates for each school
import pandas as pd
import numpy as np
files = {}
def year_creator(data):
    """Populate *data* with one ''-valued slot per school-year key.

    Keys are the two-character year labels '97'-'99', '00'-'09'
    (zero-padded) and '10'-'12', inserted in that order.
    """
    for year in range(97, 100):      # '97', '98', '99'
        data[str(year)] = ''
    data['00'] = ''                  # re-assigned below; harmless
    for year in range(10):           # '00' .. '09'
        data['0' + str(year)] = ''
    for year in range(10, 13):       # '10', '11', '12'
        data[str(year)] = ''
# Pre-populate `files` with an empty slot for every year label '97'-'12'.
year_creator(files)
def academic_importer_early(finish_year):
    """Load the pre-2003-format campus-level TEA files for one year.

    Parameters:
        finish_year -- two-character year label (e.g. '99', '00').

    Returns:
        dict of DataFrames read as str: 'eduA'..'eduE' (TAAS results),
        'staff_info', 'stud_info', 'ref', 'fin' and 'attend'.
    """
    # One base path instead of repeating the full prefix on every read.
    base = ('/Users/vincentcarse/Desktop/Thesis/Texas_Education/'
            'Formatted_Data/Campus_Academic_Performance/' + finish_year + '/')
    file = {}
    for letter in ['A', 'B', 'C', 'D', 'E']:
        file['edu' + letter] = pd.read_csv(base + 'CAMPTAS' + letter + '.csv', dtype=str)
    # The original used a `for ... else` here; with no `break` the else
    # always ran, so these reads are unconditional.
    file['staff_info'] = pd.read_csv(base + 'CAMPSTAF.csv', dtype=str)
    file['stud_info'] = pd.read_csv(base + 'CAMPSTUD.csv', dtype=str)
    file['ref'] = pd.read_csv(base + 'CAMPREF.csv', dtype=str)
    file['fin'] = pd.read_csv(base + 'CAMPFIN.csv', dtype=str)
    file['attend'] = pd.read_csv(base + 'CAMPOTHR.csv', dtype=str)
    if finish_year in ['00', '01', '02']:
        # For these years completion data lives in CAMPCOMP; it replaces
        # the CAMPOTHR frame loaded above.
        file['attend'] = pd.read_csv(base + 'CAMPCOMP.csv', dtype=str)
    return file
def academic_importer_late(finish_year):
    """Load the 2003+-format campus-level TEA files for one year.

    Parameters:
        finish_year -- two-character year label ('03' .. '12').

    Returns:
        dict of DataFrames read as str: 'edu<N>' TAKS files (the range
        of N varies by year), 'staff_info', 'stud_info', 'ref', 'SAT',
        'attend', 'fin' and 'other'.
    """
    base = ('/Users/vincentcarse/Desktop/Thesis/Texas_Education/'
            'Formatted_Data/Campus_Academic_Performance/' + finish_year + '/')
    file = {}
    # The set of published ctaks files differs by year.
    if finish_year in ['03', '04', '05', '06', '07', '08', '09', '10']:
        edu_numbers = range(1, 14)
    elif finish_year in ['11']:
        edu_numbers = range(2, 14)
    elif finish_year in ['12']:
        edu_numbers = range(1, 5)
    else:
        edu_numbers = range(0)
    for n in edu_numbers:
        file['edu' + str(n)] = pd.read_csv(base + 'ctaks' + str(n) + '.csv', dtype=str)
    file['staff_info'] = pd.read_csv(base + 'cstaf.csv', dtype=str)
    file['stud_info'] = pd.read_csv(base + 'cstud.csv', dtype=str)
    file['ref'] = pd.read_csv(base + 'cref.csv', dtype=str)
    # Some years ship college-admissions data as ccadcomp, others as ccad.
    try:
        file['SAT'] = pd.read_csv(base + 'ccadcomp.csv', dtype=str)
    except FileNotFoundError:
        file['SAT'] = pd.read_csv(base + 'ccad.csv', dtype=str)
    # Completion data: ccomp when present, otherwise the combined ccadcomp.
    try:
        file['attend'] = pd.read_csv(base + 'ccomp.csv', dtype=str)
    except FileNotFoundError:
        file['attend'] = pd.read_csv(base + 'ccadcomp.csv', dtype=str)
    file['fin'] = pd.read_csv(base + 'cfin.csv', dtype=str)
    file['other'] = pd.read_csv(base + 'cothr.csv', dtype=str)
    return file
def joint_aca_importer(finish_year):
    """Dispatch to the early- or late-format importer for *finish_year*."""
    early_years = ['97', '98', '99', '00', '01', '02']
    if finish_year in early_years:
        return academic_importer_early(finish_year)
    return academic_importer_late(finish_year)
# Load every year's raw files into `files`, keyed by the year label.
for i in files:
    files.update({i:joint_aca_importer(i)})
def column_extractor(data, code, name):
    """Return column *code* of frame *data* as a Series named *name*,
    re-indexed by the frame's 'CAMPUS' column values."""
    extracted = data[code].copy().rename(name)
    extracted.index = data['CAMPUS'].values
    return extracted
def joint_extractor(data, codes, name):
    """Extract a column as *name*, trying codes[0] first and falling
    back to codes[1] when the first is absent from *data*."""
    primary, fallback = codes[0], codes[1]
    try:
        return column_extractor(data, primary, name)
    except KeyError:
        return column_extractor(data, fallback, name)
def info(i, data):
    """Assemble the per-campus 'info' table for year key *i* and store
    it in data[i]['info'].

    Pulls enrolment/demographic counts and staff statistics, completion
    columns (reported for the previous school year), and campus
    reference columns, then concatenates everything into one DataFrame
    indexed by campus number.
    """
    # Completion columns are lagged one school year.
    lag_year = i[0] + str(int(i[1]) - 1)
    if i == '00':
        lag_year = '99'
    if i == '10':
        lag_year = '09'
    storage = []  # the original declared this twice; once is enough
    # (column code, output name) pairs; the fallback code is identical to
    # the primary here, mirroring the original joint_extractor calls.
    student_cols = [
        ('CPETG09C', 'gr9_stu_count'),
        ('CPETG10C', 'gr10_stu_count'),
        ('CPETG11C', 'gr11_stu_count'),
        ('CPETG12C', 'gr12_stu_count'),
        ('CPETBLAC', 'black_stu_count'),
        ('CPETWHIC', 'white_stu_count'),
        ('CPETHISC', 'his_stu_count'),
        ('CPETECOC', 'all_stud_count'),
        ('CPETGIFC', 'gifted_stu_count'),
        ('CPETSPEC', 'spec_ed_stu_count'),
        ('CPETECOC', 'econ_dis_stu_count'),
    ]
    for code, name in student_cols:
        storage.append(joint_extractor(data[i]['stud_info'], [code, code], name))
    staff_cols = [
        ('CPSTTOSA', 'teacher_avg_salary'),
        ('CPSTEXPA', 'teacher_experience'),
        ('CPSTTENA', 'exp_w_dist'),
    ]
    for code, name in staff_cols:
        storage.append(joint_extractor(data[i]['staff_info'], [code, code], name))
    if i in ['97', '98', '99']:
        print('not relevant')  # completion data not available pre-2000
    else:
        storage.append(joint_extractor(data[i]['attend'], ['CANC4' + lag_year + 'R', 'canc4' + lag_year + 'r'], 'completion_rate'))
        storage.append(joint_extractor(data[i]['attend'], ['CAEC4' + lag_year + 'R', 'caec4' + lag_year + 'r'], 'recieved_GED'))
        storage.append(joint_extractor(data[i]['attend'], ['CAGC4' + lag_year + 'R', 'cagc4' + lag_year + 'r'], 'graduated'))
    # Campus reference columns (available for every year).
    storage.append(joint_extractor(data[i]['ref'], ['DISTNAME', 'CPFEOPRK'], 'dist_name'))
    storage.append(joint_extractor(data[i]['ref'], ['COUNTY', 'CPFEOPRK'], 'county_num'))
    storage.append(joint_extractor(data[i]['ref'], ['CFLCHART', 'CPFEOPRK'], 'charter'))
    storage.append(joint_extractor(data[i]['ref'], ['GRDSPAN', 'CPFEOPRK'], 'grade_span'))
    # Class-size column codes changed between years; skip years where
    # neither spelling exists.
    try:
        storage.append(joint_extractor(data[i]['staff_info'], ['CPCTENGA', 'CPETENGA'], 'eng_class_size'))
        storage.append(joint_extractor(data[i]['staff_info'], ['CPCTMATA', 'CPETMATA'], 'math_class_size'))
        storage.append(joint_extractor(data[i]['staff_info'], ['CPCTSCIA', 'CPETSCIA'], 'sci_class_size'))
    except KeyError:
        print(i)
    file = pd.concat(storage, axis=1, join='outer', sort=True)
    data[i]['info'] = file
# Attach the assembled 'info' table to every year.
for i in files:
    info(i,files)
def finance(i, data):
    """Assemble per-pupil spending columns for year key *i* and store
    the result in data[i]['finance']."""
    columns = [
        (['CPFEOPRK', 'CPFEAOPRK'], 'per_pupil_exp'),
        (['CPFEINRK', 'CPFEAINSK'], 'per_pupil_instruction'),
        (['CPFEADSK', 'CPFEAADIK'], 'per_pupil_leadership'),
    ]
    extracted = [joint_extractor(data[i]['fin'], codes, name)
                 for codes, name in columns]
    data[i]['finance'] = pd.concat(extracted, axis=1, join='outer', sort=True)
# Attach the per-pupil finance table to every year.
for i in files:
    finance(i,files)
###\/###
def performance_early(i, data):
    """Build the SAT/ACT 'performance' table for a pre-2003 year key *i*.

    Raises KeyError when the early-format columns are absent, which
    joint_performance uses to fall through to performance_later.
    """
    # College-admissions columns report the prior school year.
    lag_year = i[0] + str(int(i[1])-1)
    if i == '00':
        lag_year = '99'
    storage = []
    # NOTE(review): the fallback codes here (CPFEAOPRK etc.) look like
    # finance column names, which may be unintentional -- confirm against
    # the TEA file layouts before relying on the fallback path.
    storage.append(joint_extractor(data[i]['attend'],['CA0CS'+lag_year+'R','CPFEAOPRK'],'sat'))
    storage.append(joint_extractor(data[i]['attend'],['CA0CA'+lag_year+'R','CPFEAINSK'],'act'))
    storage.append(joint_extractor(data[i]['attend'],['CA0CT'+lag_year+'R','CPFEAADIK'],'act_pct'))
    file = pd.concat(storage,axis=1,join = 'outer', sort= True)
    data[i]['performance'] = file
def performance_later(i, data):
    """Build the 'performance' table for a 2003+ year key *i*: grade-9
    TAKS pass rates plus lagged SAT/ACT columns from the 'SAT' file.

    Column codes changed letters across years, hence the try/except
    ladders; joint_extractor additionally tries a fallback code itself.
    """
    # SAT/ACT columns report the prior school year.
    lag_year = i[0] + str(int(i[1])-1)
    if i == '10':
        lag_year = '09'
    storage = []
    try:
        storage.append(joint_extractor(data[i]['edu6'],['CA009PA'+i+'R','CA009TA'+i+'R'],'gr9_all_tests'))
    except KeyError:
        storage.append(joint_extractor(data[i]['edu6'],['CA009RA'+i+'R','CA009QA'+i+'R'],'gr9_all_tests'))
    try:
        storage.append(joint_extractor(data[i]['edu6'],['CA009PM'+i+'R','CA009TM'+i+'R'],'gr9_maths'))
    except KeyError:
        storage.append(joint_extractor(data[i]['edu6'],['CA009RM'+i+'R','CA009QM'+i+'R'],'gr9_maths'))
    try:
        storage.append(joint_extractor(data[i]['edu6'],['CA009PR'+i+'R','CA009TR'+i+'R'],'gr9_reading'))
    except KeyError:
        storage.append(joint_extractor(data[i]['edu6'],['CA009RR'+i+'R','CA009QR'+i+'R'],'gr9_reading'))
    # NOTE(review): the fallback code for the three SAT/ACT columns is a
    # reading-test code (CA009TR...), which looks unintentional -- confirm.
    storage.append(joint_extractor(data[i]['SAT'],['CA0CS'+lag_year+'R','CA009TR'+i+'R'],'sat'))
    storage.append(joint_extractor(data[i]['SAT'],['CA0CA'+lag_year+'R','CA009TR'+i+'R'],'act'))
    storage.append(joint_extractor(data[i]['SAT'],['CA0CT'+lag_year+'R','CA009TR'+i+'R'],'sat/act_pct'))
    file = pd.concat(storage,axis=1,join = 'outer', sort= True)
    data[i]['performance'] = file
def joint_performance(finish_year, data):
    """Populate data[finish_year]['performance'].

    Tries the early (pre-2003) file layout first; a KeyError from the
    early extractor means the late layout applies instead.
    """
    try:
        performance_early(finish_year, data)
    except KeyError:
        performance_later(finish_year, data)
# Attach the 'performance' table; the print acts as a progress marker.
for i in files:
    joint_performance(i,files)
    print(i)
# Write each year's info table out for the regression stage.
for i in files:
    for j in ['info']:
        files[i][j].to_csv('/Users/vincentcarse/Desktop/Thesis/Texas_Education/Regression/campus_reg/'+j+i+'.csv')
|
997,201 | 99a2a90ce95f40187f28596398127929647416ea | import pandas as pd
from pandas import DataFrame
import datetime
import pandas.io.data
import matplotlib.pyplot as plt
"""
sp500 = pd.io.data.get_data_yahoo('%5EGSPC',
start = datetime.datetime(2000, 10, 1),
end = datetime.datetime(2014, 6, 11))
sp500.to_csv('sp500_ohlc.csv')
"""
# Load the saved S&P 500 OHLC data indexed by date and derive a few
# columns, then plot prices alongside a 100-day moving average.
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates = True)
df['H-L'] = df['High'] - df.Low
# pd.rolling_mean() was removed in pandas 0.23; Series.rolling().mean()
# is the supported equivalent.
df['100MA'] = df['Close'].rolling(window=100).mean()
df['Difference'] = df['Close'].diff()
df[['Close', 'High', 'Low', 'Open', '100MA']].plot()
plt.show()
|
997,202 | 2218207110dd460ff22a7df0329c62981f3a6243 | for i in range(1,10):
if i % 2 ==0: continue # "if" expression ":" suite
for j in range(1,i+1):print(j, '*', i, '=', i * j, sep='', end='\t') #这个for循环相当于第一个if语句隐藏的 "else" ":" suite
if j==i:print(end="\n") #这个if语句项也相当于第一个for语句的suite
print(end="\n")
# while statement: "while" expression ":" suite ["else" ":" suite]
# A while statement re-executes its suite for as long as the expression
# stays true. Below: a nested loop printing a multiplication table.
i=1
while i < 10:  # everything indented below is the outer while's suite
    j = 1  # from here down is the body of the outer loop
    while j < i + 1 :
        print(j, '*', i, '=', i * j, sep='', end='\t')
        j += 1
    print()
    i += 1
n = 1000  # condition-driven loop: Fibonacci numbers below n
a, b = 0, 1
while a < n:  # the expression is re-tested before every pass
    print(a, end=' ')
    a, b = b, a+b  # with an else clause, it would run once the test fails
print()  # print() inside the body emits one number per line; outside, the row stays horizontal
|
997,203 | 66a0c9c79280ba756537a6361323af6840d96668 | import argparse
def parse_args(argv=None):
    """Parse command-line options for the cell-board program.

    Parameters:
        argv -- optional list of argument strings to parse; None (the
                default, matching the original no-argument call style)
                makes argparse read sys.argv[1:]. The parameter also
                makes the function unit-testable.

    Returns:
        argparse.Namespace with attributes `file`, `rows` and `columns`.
    """
    text = 'You can read a file with an argument -f or 2 numbers with arguments -m and -n'
    parser = argparse.ArgumentParser(description=text)
    parser.add_argument("-f", "--file", help="file with the cell board")
    parser.add_argument("-m", "--rows", help="number of rows", type=int)
    parser.add_argument("-n", "--columns", help="number of columns", type=int)
    args = parser.parse_args(argv)
    if args.file:
        print("This is the file you are using %s" % args.file)
    if args.rows:
        print("This is the number of rows %s" % args.rows)
    if args.columns:
        print("This is the number of columns %s" % args.columns)
    return args
|
997,204 | 9d2a331ff55520fe2952fcf7ae4cf0657b86b7b8 | # David O'Brien, 2018-02-19
# Sum all the even numbers from 1 to 100
# Sum the even numbers from 1 to 100. `total` avoids shadowing the
# builtin sum(); range(0, 101, 2) walks the evens directly instead of
# the original manual while-loop counter.
total = sum(range(0, 101, 2))
print ("The sum of the even numbers from 1 to 100 is:", total)
997,205 | b9f3a394fd7db503da604001fc5be7a5eef119a6 | from . import loss as losslayer
from . import utils
from . import model
import myutils
import torch
import torch.nn as nn
from tqdm import tqdm
import math
import numpy as np
def train_hqsnet(model, optimizer, dataloaders, num_epochs, device, w_coeff, tv_coeff, mask, filename, strategy, log_interval=1):
    """Train and validate an HQS-Net model, checkpointing every epoch.

    Parameters:
        model        -- network taking (zero-filled recon, k-space) inputs.
        optimizer    -- torch optimizer for the model parameters.
        dataloaders  -- dict with 'train' and 'val' loaders yielding (y, gt).
        num_epochs   -- number of epochs to run.
        device       -- torch device for the tensors.
        w_coeff, tv_coeff -- accepted for interface compatibility; not read
                        here (presumably consumed inside losslayer -- TODO confirm).
        mask         -- undersampling mask forwarded to the loss.
        filename     -- checkpoint path for myutils.io.save_checkpoint.
        strategy     -- 'unsup' rescales (y, zf) before the forward pass;
                        also forwarded to the loss.
        log_interval -- forwarded to save_checkpoint.

    Returns:
        (model, loss_list) where loss_list holds per-epoch training losses.
    """
    import sys  # was missing in the original; needed for the NaN bail-out

    loss_list = []
    val_loss_list = []
    best_val_loss = 1e10
    best_epoch = 0
    for epoch in range(1, num_epochs+1):
        for phase in ['train', 'val']:
            if phase == 'train':
                print('Train %d/%d, strategy=%s' % (epoch, num_epochs, strategy))
                model.train()
            elif phase == 'val':
                print('Validate %d/%d, strategy=%s' % (epoch, num_epochs, strategy))
                model.eval()
            epoch_loss = 0
            epoch_samples = 0
            for batch_idx, (y, gt) in tqdm(enumerate(dataloaders[phase]), total=len(dataloaders[phase])):
                y = y.float().to(device)
                gt = gt.float().to(device)
                optimizer.zero_grad()
                # Gradients are only tracked during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    zf = utils.ifft(y)
                    if strategy == 'unsup':
                        y, zf = utils.scale(y, zf)
                    x_hat = model(zf, y)
                    loss = losslayer.get_loss(x_hat, gt, y, mask, device, strategy, batch_idx, epoch, phase, len(dataloaders[phase]))
                    if phase == 'train' and loss is not None:
                        loss.backward()
                        optimizer.step()
                    if loss is not None and math.isnan(loss):
                        # Original called sys.exit without importing sys,
                        # so this path raised NameError instead of exiting.
                        sys.exit('found nan at epoch ' + str(epoch))
                if loss is not None:
                    epoch_loss += loss.data.cpu().numpy()
                    epoch_samples += len(y)
            epoch_loss /= epoch_samples
            if phase == 'train':
                train_epoch_loss = epoch_loss
                loss_list.append(epoch_loss)      # was declared but never populated
            if phase == 'val':
                val_epoch_loss = epoch_loss
                val_loss_list.append(epoch_loss)  # ditto
                if val_epoch_loss < best_val_loss:
                    best_val_loss = val_epoch_loss
                    best_epoch = epoch
                print('Best loss: %s, Epoch: %s' % (best_val_loss, best_epoch))
        # Optionally save checkpoints here, e.g.:
        myutils.io.save_checkpoint(epoch, model.state_dict(), optimizer.state_dict(), train_epoch_loss, val_epoch_loss, filename, log_interval)
    return model, loss_list
def test_hqsnet(trained_model, xdata, strategy, device):
    """Run inference over *xdata* one sample at a time.

    Returns a squeezed numpy array of the model's reconstructions.
    """
    reconstructions = []
    for idx in range(len(xdata)):
        y_sample = torch.as_tensor(xdata[idx:idx + 1]).to(device).float()
        zf_sample = utils.ifft(y_sample)
        if strategy == 'unsup':
            y_sample, zf_sample = utils.scale(y_sample, zf_sample)
        prediction = trained_model(zf_sample, y_sample)
        reconstructions.append(prediction.cpu().detach().numpy())
    return np.array(reconstructions).squeeze()
|
997,206 | 9a999514d7a0a90eb94b57eca43167b9e705b7ad | import io
from django.core.files.base import ContentFile
from django.test import Client, TestCase
from django.urls import reverse
from PIL import Image
from users.models import User
from recipes.models import Recipe
class TestUser(TestCase):
    """Tests for profile pages and recipe-creation access control."""

    def setUp(self):
        """Create one registered user plus authenticated/anonymous clients."""
        self.auth_client = Client()
        self.nonauth_client = Client()
        self.user = User.objects.create_user(
            'user1', 'user1@test.com', '12345'
        )
        self.user.save()
        self.auth_client.force_login(self.user)
        # A username that does not exist, for 404 checks.
        self.user_not_found = 'user2'

    def get_image(self):
        """Return a 500x500 black JPEG wrapped in a ContentFile for uploads."""
        buffer = io.BytesIO()
        img = Image.new('RGB', (500, 500), (0, 0, 0))
        img.save(buffer, format='jpeg')
        buffer.seek(0)
        image = ContentFile(buffer.read(), name='test.jpeg')
        return image

    def test_profile_page(self):
        """An authenticated user can open an existing user's profile page."""
        response = self.auth_client.get(
            reverse('profile', kwargs={'username': self.user})
        )
        assert response.status_code == 200, (
            'Пользователь не может перейти на страницу зарег. пользователя'
        )
        assert response.context['username'] == self.user.username, (
            'Username из url не совпадает с тем что на странице'
        )

    def test_error_404(self):
        """Requesting a nonexistent user's profile returns 404."""
        response = self.auth_client.get(
            reverse('profile', kwargs={'username': self.user_not_found})
        )
        assert response.status_code == 404, (
            'При доступе к страницы несуществующего пользователя не '
            'возвращается ошибка 404'
        )

    def test_auth_client_create_recipe_post(self):
        """POST from an authenticated user creates exactly one recipe."""
        self.auth_client.post(
            reverse('create_recipe'),
            data={
                'title': 'title1',
                'tag': 'Завтрак',
                'duration': 1,
                'text': 'text1',
                'image': self.get_image()
            }
        )
        recipes = Recipe.objects.all()
        assert 'text1' in [recipe.text for recipe in recipes], (
            'Текс созданного рецепта не совпадает с тем что в БД (POST)'
        )
        assert recipes.count() == 1, (
            'Рецепт не был создан зарегестрированным пользователем (POST)'
        )

    def test_auth_client_create_recipe_get(self):
        """GET with recipe data must not create a recipe."""
        self.auth_client.get(
            reverse('create_recipe'),
            data={
                'title': 'title1',
                'duration': 1,
                'text': 'text1',
                'image': self.get_image()
            }
        )
        recipes = Recipe.objects.all()
        assert recipes.count() == 0, (
            'Рецепт был создан зарегестрированным пользователем (GET)'
        )

    def test_nonauth_client_create_recipe_post(self):
        """Anonymous POST creates nothing and is redirected (302)."""
        self.nonauth_client.post(
            reverse('create_recipe'),
            data={
                'title': 'title1',
                'duration': 1,
                'text': 'text1',
                'image': self.get_image()
            }
        )
        recipes = Recipe.objects.all()
        assert recipes.count() == 0, (
            'Рецепт был создан незарегестрированным пользователем (POST)'
        )
        response = self.nonauth_client.get(reverse('create_recipe'))
        assert response.status_code == 302, (
            'Незарегестрированный пользователь при попытке создать рецепт '
            'не был перенаправлен (POST)'
        )

    def test_nonauth_client_create_recipe_get(self):
        """Anonymous GET creates nothing and is redirected (302)."""
        self.nonauth_client.get(
            reverse('create_recipe'),
            data={
                'title': 'title1',
                'tag': 'Завтрак',
                'duration': 1,
                'text': 'text1',
                'image': self.get_image()
            }
        )
        recipes = Recipe.objects.all()
        assert recipes.count() == 0, (
            'Рецепт был создан незарегестрированным пользователем (GET)'
        )
        response = self.nonauth_client.get(reverse('create_recipe'))
        assert response.status_code == 302, (
            'Незарегестрированный пользователь при попытке создать рецепт '
            'не был перенаправлен (GET)'
        )
|
997,207 | 0d23becc9aaabdb6714f9d91f2f42e11c016cc70 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 10:56:41 2021
@author: Brian
"""
'''
本題目標是希望將chars中重複出現(次數大於2)的字母以字母+次數的形式進行壓縮(ex:["a","a","a"] - > ["a","3"])
而只連續出現一次的字母不動。解題思路如下:
(a) 定義一個找出與起始點不同的第一個字母的函數(findFirstFalseElement),利用該函數找到下一個起始位置
(b) 直接將該位置減去起始位置即為起始字母連續且重複出現的次數,將該次數轉成字元後取代重複出現的字母,
並將多餘位置移除,最後更新起始點直到末端即可
*** 如果chars的末端也是連續出現的字母的一部分時,我們直接把下一個不等於起始字母的位置設成len(chars),
如此即可不影響主程式中計算重複次數的邏輯
'''
class Solution:
    """In-place run-length compression of a character list.

    Runs of length >= 2 are replaced by the character followed by the
    digits of the run length (e.g. ['a','a','a'] -> ['a','3']); single
    characters are left untouched. compress() mutates its argument and
    prints the result.
    """

    def findFirstFalseElement(self, start, chars):
        """Return the index of the first element after *start* that
        differs from chars[start], or len(chars) when the run reaches
        the end of the list."""
        size = len(chars)
        for position in range(start, size):
            if chars[position] != chars[start]:
                return position
        return size

    def compress(self, chars):
        """Compress *chars* in place and print the mutated list."""
        start = 0
        while start < len(chars):
            run_end = self.findFirstFalseElement(start, chars)
            run_length = run_end - start
            if run_length >= 2:
                # Splice the decimal digits of the run length right after
                # the run's first character, then drop the leftovers.
                digits = list(str(run_length))
                end = start + 1 + len(digits)
                chars[start + 1:end] = digits
                del chars[end:run_end]
                start = end
            else:
                start = run_end
        print(chars)
# Driver: run compress over several example inputs, including runs of
# length >= 10 that need multi-digit counts.
test = Solution()
chars = [["a","a","b","b","c","c","c"],
["a"],
["a","b","b","b","b","b","b","b","b","b","b","b","b"],
["a","a","a","b","b","a","a"],
["p","p","p","p","m","m","b","b","b","b","b","u","u","r","r","u","n","n","n","n","n","n","n","n","n","n","n","u","u","u","u","a","a","u","u","r","r","r","s","s","a","a","y","y","y","g","g","g","g","g"]]
for s in chars:
    test.compress(s)
|
997,208 | 4928b0dbe6f73fca125a5ba8d3efd087de1aa651 | user=input("year ")
# ------------ check leap year --------------------
# NOTE(review): Python 2 code (print statements). `user` is read with
# input() above, which under Python 2 evaluates the typed text to an int.
# The first branch effectively matches multiples of 400; the elif matches
# years divisible by 4 but not by 100 -- together the Gregorian rule.
if user%4==0 and user%100==0 and user%400==0:
    print "leap year ",user
elif user%4==0 and user%100!=0:
    print '{0} {1}'.format(user, 'leap year hai ')
else:
    print '{0} {1}'.format(user, 'leap year nahi hai')
# ------------------- 3 previous leap years -----------------
# Walk backwards from user-1 and print the first three leap years found.
year=user-1
add=0
print "3 pechee k leap year "
while year>0:
    if add==3:
        break
    if year%4 ==0 and year %100==0 and year%400==0:
        print year
        add+=1
    elif year%4==0 and year%100!=0:
        print year
        add+=1
    year-=1
# -------------------- 3 next leap years ---------------------
# Walk forwards from user+1 and print the next three leap years.
print "3 aage k leap year "
add1=0
var=0
year2=user+1
# NOTE(review): `var` is never updated, so this loop terminates only via
# the add1==3 break -- confirm that is intended.
while var<year2:
    if add1==3:
        break
    if year2%4 ==0 and year2 %100==0 and year2%400==0:
        print year2
        add1+=1
    elif year2%4==0 and year2%100!=0:
        print year2
        add1+=1
    year2+=1
|
997,209 | 369bf984a1b3496c998a73601378d4eafc378b55 | import random
import csv
def training_generate():
numberOfFeatures = 4
numberOfClasses = 3
sample =""
i = 0;
while (i<50):
j =0
while(j<4):
n = random.random()
n = float("{0:.2f}".format(n))
sample += str(n)
sample += ","
j+=1
n = random.randint(0,numberOfClasses-1)
sample += str(n)
sample += "\n"
i+=1
print(sample)
allSamples = str(numberOfFeatures) + ",\n" + str(numberOfClasses) + ",\n" + sample
# write the result into a CSV file
with open('archTraining_1.csv', mode='w') as file:
file.write(allSamples)
def testing_generate():
numberOfFeatures = 4
numberOfClasses = 3
sample = ""
i = 0;
while (i < 13):
j = 0
while (j < 4):
n = random.random()
n = float("{0:.2f}".format(n))
sample += str(n)
sample += ","
j += 1
n = random.randint(0, numberOfClasses-1)
sample += str(n)
sample += "\n"
i += 1
print(sample)
# write the result into a CSV file
with open('archTesting_1.csv', mode='w') as file:
file.write(sample)
training_generate()
testing_generate() |
997,210 | ad6ab4c0cbd7d21c9a8482f920f104793442b71a | import os, time, csv
import subprocess
import pyautogui
from datetime import datetime
dir = '' #Add a valid path pointing to zoom, this will be used by os.startfile to open zoom
# This takes too long that is why just hard code the path in dir
# start = "C:\\Users\\"
# for dirpath, dirnames, filenames in os.walk(start):
# for filename in filenames:
# if filename == "Zoom":
# filename = os.path.join(dirpath, filename)
# print(filename)
# print(dirpath)
# dir = os.path.join(dir, 'Zoom')
def join_meeting(meeting_id, meeting_pwd):
    """Open the Zoom client and join a meeting via GUI automation.

    Drives the Zoom UI with pyautogui: launches the app, clicks the join
    button (located by screenshot matching against the locators/ images),
    types the meeting id and password, and confirms. The sleeps give the
    UI time to render, so the step order and delays matter.

    Parameters:
        meeting_id  -- meeting number typed into the join dialog.
        meeting_pwd -- meeting passcode typed into the password prompt.
    """
    print('Opening Zoom')
    os.startfile(dir)
    # os.system(dir)
    # subprocess.Popen([dir])
    # subprocess.call([dir])
    print('Zoom opened successfully')
    # locateCenterOnScreen finds the button image on screen; moveTo hovers
    # over it so the click lands once Zoom has finished loading.
    time.sleep(10)
    print('\nFinding Join Button')
    join_btn = pyautogui.locateCenterOnScreen('locators/join_meeting.png')
    print('Located join button at', join_btn)
    pyautogui.moveTo(join_btn)
    pyautogui.click()
    print('Clicked on join button')
    time.sleep(3)
    print('\nTyping Meeting ID')
    pyautogui.write(meeting_id)
    print('Typed Meeting ID')
    print('\nFinding join button')
    join_meeting_btn = pyautogui.locateCenterOnScreen('locators/join.png')
    print('Located join button at', join_meeting_btn)
    pyautogui.click(join_meeting_btn)
    print('Clicked on join button')
    time.sleep(3)
    print('\nTyping meeting Password')
    pyautogui.write(meeting_pwd)
    print('Typed meeting Password')
    time.sleep(3)
    pyautogui.press('enter')
    print('\nPressed Enter')
    time.sleep(3)
    print('\nMeeting joined successfuly')
def leave_meeting():
    """Close the active Zoom window with Alt+F4 and log the action."""
    pyautogui.hotkey('alt', 'f4')
    time.sleep(1)
    print('\nMeeting Left Successfully')
# Load today's meeting schedule from meetings.csv into a list of dicts.
# Expected columns: start_time, end_time, id, pwd; first row is a header.
meetings = []
with open('meetings.csv', mode='r') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        if line_count == 0:
            # Header row: just report the column names.
            line_count += 1
            print(f'Column names are {", ".join(row)}')
        else:
            meeting_val = {}
            meeting_val['start_time'] = row[0]
            meeting_val['end_time'] = row[1]
            meeting_val['id'] = row[2]
            meeting_val['pwd'] = row[3]
            meetings.append(meeting_val)
            print(row[0], row[1], row[2], row[3])
            line_count += 1
# Main scheduler loop: each pass compares the current HH:MM against every
# meeting's start/end time and joins/leaves accordingly; exits once all
# scheduled meetings have been attended.
in_meeting = False
total_meetings = len(meetings)
start_meeting_counter = 0
end_meeting_counter = 0
while True:
    now = datetime.now().strftime('%H:%M')
    for val in meetings:
        # Guard on `not in_meeting` so we don't re-launch join_meeting on
        # every loop pass during the whole start minute (the original had
        # no guard and re-joined repeatedly).
        if now in str(val['start_time']) and not in_meeting:
            join_meeting(val['id'], val['pwd'])
            print('\nMeeting will end at:', val['end_time'])
            start_meeting_counter += 1
            in_meeting = True
        if now in str(val['end_time']) and in_meeting:
            leave_meeting()
            next_meeting = meetings.index(val) + 1
            if next_meeting < len(meetings):
                print('\nNext meeting will start at:', meetings[next_meeting]['start_time'])
            end_meeting_counter += 1
            in_meeting = False
    if start_meeting_counter == end_meeting_counter == total_meetings:
        print('\nAll meetings completed for Today!')
        print('Have a nice Day!')
        break
    time.sleep(1)  # avoid a tight busy-wait between clock checks
997,211 | a156cf22323d1e7a3a86d86893bb4d3c6fd61499 | __author__ = 'zoulida'
import pandas as pd
import numpy as np
from sklearn import datasets,decomposition,manifold
def loadData():
    """Load the HFT feature matrix and regression target from disk.

    Returns:
        (DataFrame, Series) -- X with the bookkeeping columns ('index',
        'realY', 'predictY') dropped, and the 'realY' target column.
    """
    #data_path = '../small_HFT1.csv'
    data_path = '/volume/HFT_XY_unselected.csv'
    csv_data = pd.read_csv(data_path)  # read the training data
    # (the original had a bare `csv_data` expression statement here,
    # which did nothing and was removed)
    return csv_data.drop(['index', 'realY', 'predictY'], axis=1), csv_data['realY']
def transform_PCA(*data):
    """Fit a 20-component PCA on X and return the projected matrix.

    *data* is (X, Y); Y is accepted for call symmetry with SVR_train but
    is not used by the projection. The explained-variance ratios are
    printed as a diagnostic.
    """
    X, Y = data
    reducer = decomposition.PCA(n_components=20)
    #pca=decomposition.IncrementalPCA(n_components=None)  # batch variant for data too big for memory
    reducer.fit(X)
    print("explained variance ratio:%s" % str(reducer.explained_variance_ratio_))
    return reducer.transform(X)
def SVR_train(*data):
    """Benchmark a battery of sklearn regressors on (X, Y) with 2-fold CV.

    For each model: fit on one fold, predict the other, print the score,
    R^2 and wall-clock time, then plot predictions against the truth.
    Nothing is returned; all output is prints and matplotlib windows.
    """
    X, Y = data
    #### 3.1 Decision tree regression ####
    from sklearn import tree
    model_DecisionTreeRegressor = tree.DecisionTreeRegressor()
    #### 3.2 Linear regression ####
    from sklearn import linear_model
    model_LinearRegression = linear_model.LinearRegression()
    #### 3.3 SVM regression ####
    from sklearn import svm
    model_SVR = svm.SVR()
    model_SVR2 = svm.SVR(kernel='rbf', C=100, gamma=0.1)
    #### 3.4 KNN regression ####
    from sklearn import neighbors
    model_KNeighborsRegressor = neighbors.KNeighborsRegressor()
    #### 3.5 Random forest regression ####
    from sklearn import ensemble
    model_RandomForestRegressor = ensemble.RandomForestRegressor(n_estimators=20)  # 20 trees
    #### 3.6 AdaBoost regression ####
    from sklearn import ensemble
    model_AdaBoostRegressor = ensemble.AdaBoostRegressor(n_estimators=50)  # 50 trees
    #### 3.7 GBRT (gradient-boosted tree) regression ####
    from sklearn import ensemble
    model_GradientBoostingRegressor = ensemble.GradientBoostingRegressor(n_estimators=100)  # 100 trees
    #### 3.8 Bagging regression ####
    from sklearn.ensemble import BaggingRegressor
    model_BaggingRegressor = BaggingRegressor()
    #### 3.9 ExtraTree (extremely randomized tree) regression ####
    from sklearn.tree import ExtraTreeRegressor
    model_ExtraTreeRegressor = ExtraTreeRegressor()
    # Create the (parametrised) models
    # print("Hit Rates/Confusion Matrices:\n")
    models = [
        (
            "model_DecisionTreeRegressor", model_DecisionTreeRegressor
        ),
        (
            "model_LinearRegression", model_LinearRegression
        ),
        (
            "model_SVR", model_SVR2#model_SVR
        ),
        (
            "model_KNeighborsRegressor", model_KNeighborsRegressor
        ),
        (
            "model_RandomForestRegressor", model_RandomForestRegressor
        ),
        (
            "model_AdaBoostRegressor", model_AdaBoostRegressor
        ),
        (
            "model_GradientBoostingRegressor", model_GradientBoostingRegressor
        ),
        (
            "model_BaggingRegressor", model_BaggingRegressor
        ),
        (
            "model_ExtraTreeRegressor", model_ExtraTreeRegressor
        )
    ]
    for m in models:
        #X = X.reset_index(drop=True)
        #print(X)
        # y = y.reset_index(drop=True)
        # print(y)
        from sklearn.model_selection import KFold
        kf = KFold(n_splits=2, shuffle=False)
        for train_index, test_index in kf.split(X):
            # print(train_index, test_index)
            # print(X.loc[[0,1,2]])
            # X_train/y_train are this fold's training set; X_test/y_test
            # act as the validation set.
            X_train, X_test, y_train, y_test = X[train_index], X[test_index], Y[train_index], Y[
                test_index]
            #print(X_test, y_test)
            #print(X_train, y_train)
            print('======================================')
            import datetime
            starttime = datetime.datetime.now()
            print("正在训练%s模型:" % m[0])
            m[1].fit(X_train, y_train)
            # Make an array of predictions on the test set
            pred = m[1].predict(X_test)
            # Output the hit-rate and the confusion matrix for each model
            score = m[1].score(X_test, y_test)
            print("%s:\n%0.3f" % (m[0], m[1].score(X_test, y_test)))
            # print("%s\n" % confusion_matrix(y_test, pred, labels=[-1.0, 1.0]))#labels=["ant", "bird", "cat"]
            from sklearn.metrics import r2_score
            r2 = r2_score(y_test, pred)
            print('r2: ', r2)
            endtime = datetime.datetime.now()
            print('%s训练,预测耗费时间,单位秒:'%m[0], (endtime - starttime).seconds)
            #result = m[1].predict(X_test)
            import matplotlib.pyplot as plt
            plt.figure()
            plt.plot(np.arange(len(pred)), y_test, 'go-', label='true value')
            plt.plot(np.arange(len(pred)), pred, 'ro-', label='predict value')
            plt.title('score: %f' % score)
            plt.legend()
            plt.show()
# Script entry point: load the data, project onto 20 PCA components,
# then benchmark the regressor suite.
if __name__=="__main__":
    X, Y = loadData()
    #print(X, Y)
    X_t = transform_PCA(X, Y)
    SVR_train(X_t, Y)
997,212 | f94fadedac67a4acbd9fa7644fab30f496be03c8 | from urllib.parse import quote
from pandas_profiling.report.presentation.core import HTML, Table, Sequence, Warnings
def get_dataset_overview(summary):
    """Render the dataset 'Overview' grid: table statistics next to the
    variable-type counts.

    Only summary["table"] is read; the 'fmt' entries name the formatter
    applied when the Table is rendered.
    """
    dataset_info = Table(
        [
            {
                "name": "Total Number of Records",
                "value": summary["table"]["n"],
                "fmt": "fmt_numeric",
            },
            {
                "name": "Total Number of Columns",
                "value": summary["table"]["n_var"],
                "fmt": "fmt_numeric",
            },
            {
                "name": "Missing row cells",
                "value": summary["table"]["n_cells_missing"],
                "fmt": "fmt_numeric",
            },
            {
                "name": "Missing row cells (%)",
                "value": summary["table"]["p_cells_missing"],
                "fmt": "fmt_percent",
            },
            {
                "name": "Duplicate rows",
                "value": summary["table"]["n_duplicates"],
                "fmt": "fmt_numeric",
            },
            {
                "name": "Duplicate rows (%)",
                "value": summary["table"]["p_duplicates"],
                "fmt": "fmt_percent",
            },
        ],
        name="Table statistics",
    )
    dataset_types = Table(
        [
            {"name": type_name, "value": count, "fmt": "fmt_numeric"}
            for type_name, count in summary["table"]["types"].items()
        ],
        name="Variable types",
    )
    return Sequence(
        [dataset_info, dataset_types],
        anchor_id="dataset_overview",
        name="Overview",
        sequence_type="grid",
    )
def get_dataset_warnings(warnings, count):
    """Wrap the collected warnings in a Warnings renderable whose title
    carries the warning count."""
    title = f"Analysis Summary ({count})"
    return Warnings(warnings=warnings, name=title, anchor_id="Analysis")
def get_dataset_reproduction(summary, date_start, date_end):
    """Build the 'Run Statistics' table (analysis start/finish times).

    `summary` is accepted for interface compatibility; the package
    version/config fields it carries were read into unused locals in the
    original (a dead `quote()` call) and are not rendered here.
    """
    return Table(
        [
            {"name": "Analysis started", "value": date_start, "fmt": "fmt"},
            {"name": "Analysis finished", "value": date_end, "fmt": "fmt"},
        ],
        name="Run Statistics",
        anchor_id="run_statistics",
    )
|
997,213 | 23feaf0565c4148ca3f7c7b2dc407ebabe77b97e | from django.db import models
class Job(models.Model):
    # One employment entry on the resume; its bullet points live in
    # the related JobDetail rows.
    title = models.CharField(max_length=255)
    company = models.CharField(max_length=255)
    city = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    start_date = models.DateField('Date started')
    end_date = models.DateField('Date ended')

    def __str__(self):
        """Human-readable representation: the company name."""
        return self.company
class JobDetail(models.Model):
    # A single bullet point of detail text attached to a Job; deleted
    # along with its Job (CASCADE).
    job = models.ForeignKey(Job, on_delete=models.CASCADE)
    text = models.TextField('Detail text')
class School(models.Model):
    # An education entry: institution, degree earned, and dates attended.
    school_name = models.CharField('school name', max_length=255)
    degree = models.CharField(max_length=255)
    major = models.CharField(max_length=255)
    city = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    # NOTE(review): max_digits=2 / decimal_places=1 caps gpa at 9.9 with a
    # single decimal of precision -- confirm that precision is intended.
    gpa = models.DecimalField(max_digits=2, decimal_places=1)
    start_date = models.DateField('Date started')
    end_date = models.DateField('Date ended')

    def __str__(self):
        """Human-readable representation: the school name."""
        return self.school_name
class Skill(models.Model):
    # A named skill with a free-form category string.
    name = models.CharField(max_length=255)
    # NOTE(review): the field name `type` shadows the builtin; renaming
    # would require a migration, so it is documented rather than changed.
    type = models.CharField(max_length=255)

    def __str__(self):
        """Human-readable representation: the skill name."""
        return self.name
class Project(models.Model):
    # A portfolio project linked to the Skills (technologies) it used.
    name = models.CharField(max_length=255)
    technology = models.ManyToManyField(Skill)
    link = models.URLField()
    description = models.TextField()

    def __str__(self):
        """Human-readable representation: the project name."""
        return self.name
|
997,214 | 9ae7063c5f9537d6ed2e5459380cec873b6e4f43 | import asyncio
import json
from collections import OrderedDict
from django.http import Http404
from rest_framework.response import Response
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from aiohttp import ClientSession
from core.models import Customer
from core.models import FavoriteList
from api.serializers import CustomerSerializer
from api.serializers import FavoriteListSerializer
from api import utils
class CustomerViewSet(viewsets.ViewSet):
"""
API endpoint that allows customers to be viewed or edited.
"""
permission_classes = (IsAuthenticated, )
queryset = Customer.objects.all()
serializer_class = CustomerSerializer
responses = []
    def list(self, request):
        """Serialize every customer with the reduced (name, email, url)
        field set and return them in one response."""
        fields = ('name', 'email', 'url')
        customers = Customer.objects.all()
        serializer = CustomerSerializer(customers, many=True, context={'request': request}, fields=fields)
        return Response(serializer.data)
def create(self, request):
product_id = request.data.get('product_id')
if product_id:
product_id_status = utils.check_product_id(request.data.get('product_id'))
# using false to make it explicit
if product_id_status is False:
return Response(
{"product_id": "This product does not exist."},
status.HTTP_400_BAD_REQUEST
)
fields = ('name', 'email', 'url')
serializer = CustomerSerializer(data=request.data, context={'request': request}, fields=fields)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get_object(self, pk):
try:
return Customer.objects.get(pk=pk)
except Customer.DoesNotExist:
raise Http404
def get_data_response(self, serializer_data):
"""Method that returns the serialized data that will be returned by the API."""
all_product_ids = [p_id["product_id"] for p_id in serializer_data.get("favorites")]
async def run(product_ids):
"""Function that create and the run asynchronous tasks."""
url = "http://challenge-api.luizalabs.com/api/product/{}"
tasks = []
# Fetch all responses within one Client session,
# keep connection alive for all requests.
async with ClientSession() as session:
for product_id in product_ids:
task = asyncio.ensure_future(utils.fetch(url.format(product_id), session))
tasks.append(task)
self.responses = await asyncio.gather(*tasks)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
future = asyncio.ensure_future(run(all_product_ids))
loop.run_until_complete(future)
needed_fields = ('id', 'title', 'image', 'price', 'reviewScore')
def get_product_values(response):
"""Function that returns the fields that matters."""
json_response = json.loads(response)
product_fields = OrderedDict([(field, json_response.get(field)) for field in needed_fields])
return product_fields
all_products = [get_product_values(response) for response in self.responses]
# Iteration through the products returned to insert the API url into the json
data_response = serializer_data
def add_url(p_id, url):
"""Function that adds the url and the product_id to the response dictionary."""
for product in all_products:
if product['id'] == p_id:
product['url'] = url
product['product_id'] = p_id
product.move_to_end('product_id', last=False)
urls_ids = [(favorite['product_id'], favorite['url']) for favorite in data_response['favorites']]
for url_id in urls_ids:
add_url(*url_id)
# Removing 'id' to make more sense because I am using the id as product_id
for product in all_products:
del product['id']
# Replacing favorites list to the return of the API
data_response['favorites'] = all_products
return data_response
def retrieve(self, request, pk, format=None):
fields = ('name', 'email', 'favorites')
customer = self.get_object(pk)
serializer = CustomerSerializer(customer, context={'request': request}, fields=fields)
data_response = self.get_data_response(serializer_data=serializer.data)
return Response(data_response)
def partial_update(self, request, pk, format=None):
favorites = request.data.get('favorites')
product_id_statuses = [utils.check_product_id(product_status.get("product_id")) for product_status in favorites]
if not any(product_id_statuses):
return Response(
{"product_id": "This product does not exist."},
status.HTTP_400_BAD_REQUEST
)
fields = ('name', 'email', 'favorites')
customer = self.get_object(pk)
serializer = CustomerSerializer(
customer,
data=request.data,
context={'request': request},
fields=fields
)
if serializer.is_valid():
serializer.save()
data_response = self.get_data_response(serializer_data=serializer.data)
return Response(data_response)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, pk, format=None):
customer = self.get_object(pk)
customer.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class FavoriteListViewSet(viewsets.ViewSet):
    """
    API endpoint that allows products to be viewed or edited.
    """
    permission_classes = (IsAuthenticated, )
    queryset = FavoriteList.objects.all()
    serializer_class = FavoriteListSerializer

    def list(self, request):
        """Return every favorites entry."""
        products = FavoriteList.objects.all()
        serializer = FavoriteListSerializer(
            products,
            many=True,
            context={'request': request}
        )
        return Response(serializer.data)

    def create(self, request):
        """Add a product to a favorites list after checking it exists."""
        product_id_status = utils.check_product_id(request.data.get('product_id'))
        # using False to make the "does not exist" case explicit
        if product_id_status is False:
            return Response(
                {"product_id": "This product does not exist."},
                status.HTTP_400_BAD_REQUEST
            )
        serializer = FavoriteListSerializer(
            data=request.data,
            context={'request': request}
        )
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def get_object(self, pk):
        """Fetch a favorites entry by primary key or raise 404."""
        try:
            return FavoriteList.objects.get(pk=pk)
        except FavoriteList.DoesNotExist:
            raise Http404

    def retrieve(self, request, pk, format=None):
        """Return a single favorites entry."""
        # Removed a dead local `fields` tuple that was never passed to the
        # serializer (the serializer receives no field restriction here).
        product = self.get_object(pk)
        serializer = FavoriteListSerializer(
            product,
            context={'request': request}
        )
        return Response(serializer.data)

    def destroy(self, request, pk, format=None):
        """Remove a favorites entry."""
        product = self.get_object(pk)
        product.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
997,215 | 7f99395b8263906023c2c8fdaee50e181395cbca | # Solution 1, Top-Down
class Solution:
    def minimumTotal(self, triangle: List[List[int]]) -> int:
        """Top-down DP over the triangle.

        `best[col]` holds the minimum path sum reaching that column of the
        level processed so far; the answer is the minimum over the last level.
        """
        best = [0]
        for level in triangle:
            nxt = list(level)
            last = len(level) - 1
            for col, val in enumerate(level):
                if col == 0:
                    nxt[col] = val + best[0]
                elif col == last:
                    nxt[col] = val + best[col - 1]
                else:
                    nxt[col] = val + min(best[col - 1], best[col])
            best = nxt
        return min(best)
# Solution 2, Bottom-up, less special case
class Solution:
    def minimumTotal(self, triangle: List[List[int]]) -> int:
        """Bottom-up DP: fold the triangle upward.

        Starting from a copy of the last row, each pass replaces the
        accumulator with "cell value + min of the two children below";
        the apex then holds the minimum path sum.
        """
        if not triangle:
            return 0
        acc = list(triangle[-1])
        for level in reversed(triangle[:-1]):
            acc = [val + min(acc[i], acc[i + 1]) for i, val in enumerate(level)]
        return acc[0]
997,216 | 81e0e6e7b22193ac7a347b9ca93beb6590e6bc59 | import sys
def solve(_s,_sz):
    """Sum, over every '1' in the string, its 1-based rank among the '1's.

    The original loop added the running count of ones at each '1', i.e.
    1 + 2 + ... + k for k ones — the k-th triangular number, computed here
    in closed form. `_sz` (the declared length) is unused, kept for the
    caller's sake.
    """
    ones = _s.count('1')
    # Triangular number: identical to accumulating the running rank per '1'.
    return ones * (ones + 1) // 2
# Python 2 driver (raw_input / print statement): for each of T test cases,
# read the string length (unused by solve) and the binary string, then print
# the computed count.
T = int(raw_input())
while T > 0:
    _sz = int(raw_input())  # declared string length
    _s = raw_input()  # the binary string itself
    ans = solve(_s,_sz)
    print ans
    T = T - 1
|
997,217 | 823afe660ad54964496198491f12c08a9b80885e | class PreprocessedCommandException(Exception):
def __init__(self, message):
    # Forward the message to the Exception base (Python 2 style super call).
    super(PreprocessedCommandException, self).__init__(message)
class VHBandNotIncludedException(Exception):
    """Raised when the VH band is missing from the input (presumably the
    VH polarization of a SAR product — confirm against callers)."""
    def __init__(self, message):
        super(VHBandNotIncludedException, self).__init__(message)
class VVBandNotIncludedException(Exception):
    """Raised when the VV band is missing from the input (presumably the
    VV polarization of a SAR product — confirm against callers)."""
    def __init__(self, message):
        super(VVBandNotIncludedException, self).__init__(message)
class OrbitNotIncludedException(Exception):
    """Raised when orbit information is not included in the input."""
    def __init__(self, message):
        super(OrbitNotIncludedException, self).__init__(message)
class ZipException(Exception):
    """Raised on a failure while handling a zip archive."""
    def __init__(self, message):
        super(ZipException, self).__init__(message)
class FilenameNotFoundException(Exception):
    """Raised when an expected filename cannot be located."""
    def __init__(self, message):
        super(FilenameNotFoundException, self).__init__(message)
|
997,218 | 60166d33f4ed577385e7dff7b057cc92a9866287 | # zyxwvutsrqponmlkjihgfedcba
# 54321098765432109876543210
# 01234567890123456789012345
# abcdefghijklmnopqrstuvwxyz
# Teaching script: demonstrates forward/backward slicing on a fixed alphabet.
letters = "abcdefghijklmnopqrstuvwxyz"
backwards = letters[25:0:-1]
# NOTE: the stop index 0 is excluded, so this yields "zyx...b" WITHOUT 'a'.
print(backwards)
# Omitting the stop index includes index 0, so 'a' is kept this time.
backwards = letters[25::-1]
print(backwards)
# The idiomatic full reversal.
backwards = letters[::-1]
print(backwards)
print()
print()
# create a slice that produces the characters qpo
print(letters[16:13:-1])
# slice the string to produce edcba
print(letters[4::-1])
# slice the string to produce the last 8 characters, in reverse order
print(letters[25:17:-1])
# or
print(letters[:-9:-1])
print()
# get last 4 letters
print(letters[-4:])
print(letters[-1:])  # last letter, as a one-character string
print(letters[:1])   # first letter, as a one-character string
print(letters[0])    # first letter by index
997,219 | 8c8dec64786bc74265d7d60a36dac85997cac60c | #
# main_widget.py <Peter.Bienstman@UGent.be>
#
from mnemosyne.libmnemosyne.ui_component import UiComponent
class MainWidget(UiComponent):
    """Describes the interface that the main widget needs to implement
    in order to be used by the main controller.

    Concrete UI toolkits subclass this; methods that are `pass` here are
    optional hooks, methods that raise NotImplementedError are mandatory.
    """
    component_type = "main_widget"
    instantiate = UiComponent.IMMEDIATELY
    def activate(self):
        """Optional hook called when the widget becomes active."""
        pass
    def set_window_title(self, text):
        """Optional: set the top-level window title to `text`."""
        pass
    def show_information(self, text):
        # Fallback implementation just prints; toolkits show a dialog.
        print(text)
    def show_question(self, text, option0, option1, option2=""):
        """Returns 0, 1 or 2."""
        raise NotImplementedError
    def show_error(self, text):
        # Fallback implementation just prints; toolkits show a dialog.
        print(text)
    def default_font_size(self):
        """Default font size in points used when none is configured."""
        return 12
    def get_filename_to_open(self, path, filter, caption=""):
        """Mandatory: show an open-file dialog and return the chosen path."""
        raise NotImplementedError
    def get_filename_to_save(self, path, filter, caption=""):
        """Should ask for confirmation on overwrite."""
        raise NotImplementedError
    def set_status_bar_message(self, text):
        """Optional: display `text` in the status bar."""
        pass
    def set_progress_text(self, text):
        """Resets all the attributes of the progress bar if one is still open,
        and displays 'text'.
        """
        print(text)
    def set_progress_range(self, maximum):
        """Progress bar runs from 0 to 'maximum. If 'maximum' is zero, this is
        just a busy dialog. Should be the default for set_progress_text.
        """
        pass
    def set_progress_update_interval(self, update_interval):
        """Sometimes updating the progress bar for a single step takes longer
        than doing the actual processing. In this case, it is useful to set
        'update_interval' and the progress bar will only be updated every
        'update_interval' steps.
        """
        pass
    def increase_progress(self, value):
        """Increase the progress by 'value'."""
        pass
    def set_progress_value(self, value):
        """If 'value' is maximum or beyond, the dialog closes."""
        pass
    def close_progress(self):
        """Convenience function for closing a busy dialog."""
        pass
    def enable_edit_current_card(self, is_enabled):
        """Optional: toggle the 'edit current card' UI action."""
        pass
    def enable_delete_current_card(self, is_enabled):
        """Optional: toggle the 'delete current card' UI action."""
        pass
    def enable_browse_cards(self, is_enabled):
        """Optional: toggle the 'browse cards' UI action."""
        pass
|
997,220 | bf8058d6e8f1e1813341cde76abb8e640b06b3d2 | # -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import os
import re
import scrapy
from urlparse import urljoin
import common
class Spider(scrapy.Spider):
    """Scrapy spider for council bills on kcc.gov.tw (an ASP.NET WebForms
    site): selects the current election term, expands every bill-type tab to
    its full page size, and yields one item per bill with its profile data.
    Python 2 codebase (uses u'' literals and `urlparse`)."""
    name = "bills"
    handle_httpstatus_list = [302]
    allowed_domains = ["kcc.gov.tw"]
    start_urls = ["http://www.kcc.gov.tw",]
    download_delay = 0.5
    # The county abbreviation is taken from this file's parent directory name.
    county_abbr = os.path.dirname(os.path.realpath(__file__)).split('/')[-1]
    election_year = common.election_year(county_abbr)
    # Election year -> Chinese ordinal for the council term used in page text.
    ads = {'2010': u'一', '2014': u'二', '2018': u'三'}
    ad = ads[election_year]
    def parse(self, response):
        # Follow the "assembly bills" link from the landing page.
        return response.follow(response.xpath(u'//a[re:test(., "^大會提案$")]/@href').extract_first(), callback=self.parse_query)
    def parse_query(self, response):
        # WebForms postback: pick this term's session option and fire the
        # __EVENTTARGET extracted from the query link's postback script.
        payload = {
            'ctl00$ContentPlaceHolder1$uscPeriodSessionMeeting$ddlSession': response.xpath(u'//select[@name="ctl00$ContentPlaceHolder1$uscPeriodSessionMeeting$ddlSession"]/option[re:test(., "%s屆")]/@value' % self.ad).extract_first(),
            'ctl00$ContentPlaceHolder1$uscPeriodSessionMeeting$ddlMeeting': '',
            '__EVENTTARGET': re.search('_PostBackOptions\("([^"]*)', response.css('#ContentPlaceHolder1_LinkButton1::attr(href)').extract_first()).group(1)
        }
        yield scrapy.FormRequest.from_response(response, formdata=payload, callback=self.parse_type, dont_filter=True, dont_click=True, headers=common.headers(self.county_abbr))
    def parse_type(self, response):
        # One tab per bill type; resubmit each tab with its total row count
        # as the page size so a single page holds every bill of that type.
        tabs = response.xpath('//div[@id="tabs"]/ul/li/a')
        for i, tab in enumerate(tabs, 1):
            type, count = tab.xpath('text()').extract()
            count = re.sub('\D', '', count)
            if count:
                payload = {"ctl00$ContentPlaceHolder1$DataPager%d$ctl02$txtPageSize" % i: count}
                # The first tab's Go button has no numeric suffix.
                if i != 1:
                    payload["ctl00$ContentPlaceHolder1$btnGo%d" % i] = " Go "
                else:
                    payload["ctl00$ContentPlaceHolder1$btnGo"] = " Go "
                yield scrapy.FormRequest.from_response(response, formdata=payload, callback=self.parse_tab, dont_filter=True, meta={'type': tab.xpath('text()').extract_first().strip(), 'tab_id': 'tabs-%d' % i})
    def parse_tab(self, response):
        # Rows with more than one cell are bills; follow each detail page.
        trs = response.xpath('//div[@id="%s"]/div/table/tr[count(td)>1]' % response.meta['tab_id'])
        for tr in trs:
            item = {}
            item['election_year'] = self.election_year
            item['type'] = response.meta['type']
            item['last_action'] = tr.xpath('td[6]/text()').extract_first()
            # The detail URL lives in the row's onclick handler.
            link = tr.xpath('td[@onclick]/@onclick').re(u"\.href='([^']+)'")[0]
            yield response.follow(link, callback=self.parse_profile, meta={'dont_redirect': True, 'item': item})
    def parse_profile(self, response):
        # Extract the bill's labelled fields, proposers/co-signers (titles
        # stripped), and the per-reading motions into the final item.
        item = response.meta['item']
        item['id'] = '-'.join(re.findall(u'=([^&]*)', response.url))
        for key, label in [('category', u'類別'), ('abstract', u'案由'), ('description', u'說明'), ('methods', u'辦法'), ('remark', u'備註'), ]:
            content = response.xpath(u'string((//td[re:test(., "%s")]/following-sibling::td)[1])' % label).extract_first()
            if content:
                item[key] = content.strip()
        item['proposed_by'] = re.split(u'\s|、', re.sub(u'(副?議長|議員)', '', u'、'.join([x.strip() for x in response.xpath(u'(//td[re:test(., "提案(人|單位)")]/following-sibling::td)[1]/text()').extract()])))
        item['petitioned_by'] = re.split(u'\s|、', re.sub(u'(副?議長|議員)', '', u'、'.join([x.strip() for x in (response.xpath(u'(//td[re:test(., "連署人")]/following-sibling::td)[1]/text()').extract() or [])])))
        item['motions'] = []
        for motion in [u'一讀', u'委員會審查意見', u'二讀決議', u'三讀決議', ]:
            # ROC (Minguo) dates are converted to AD by the shared helper.
            date = common.ROC2AD(''.join(response.xpath(u'(//td[re:test(., "%s")]/following-sibling::td)[1]/span/text()' % motion).extract()))
            resolution = ''.join([x.strip() for x in response.xpath(u'(//td[re:test(., "%s")]/following-sibling::td)[1]/text()' % motion).extract()])
            if date or resolution:
                item['motions'].append(dict(zip(['motion', 'resolution', 'date'], [motion, resolution, date])))
        item['links'] = [
            {
                'url': response.url,
                'note': 'original'
            }
        ]
        return item
|
997,221 | 4874c8fe9f2eed5ae5afbf1b9258a52740627e58 | list_url = 'https://hacker-news.firebaseio.com/v0/.json?print=pretty'
# Hacker News Firebase API endpoint for a single item.
# NOTE(review): both URLs look like templates with the list name / item id
# missing before ".json" — presumably the caller splices it in; confirm.
item_url = 'https://hacker-news.firebaseio.com/v0/item/.json?print=pretty'
categories = ['askstories', 'showstories', 'newstories', 'jobstories'] # categories
default_category = "newstories"
result_directory_name = "results"
log_file_name = "hn_parser.log"
report_file_name = "report.csv"
# Only stories on/after this date are kept (ISO format).
from_date = "2017-11-19"
# Minimum score threshold, kept as a string.
score = "1"
|
997,222 | f906e7fb731a8f7988f49fb4017ae6d88ec2a74b | from scuba_app.secrets import SECRET, POSTGRES_URI
class Config():
    """Base Flask configuration shared by all environments."""
    SECRET_KEY = SECRET
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    REDIS_HOST = 'localhost'
    REDIS_PORT = '6379'
    # Mail settings are not configured yet; fill in before enabling email.
    #MAIL_SERVER = ''
    #MAIL_USERNAME = ''
    #MAIL_PASSWORD = ''
    #MAIL_PORT =
    #MAIL_USE_SSL =
class DevConfig(Config):
    """Development configuration: debug on, local Postgres."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = POSTGRES_URI
class ProdConfig(Config):
    """Production configuration.

    NOTE(review): the database URI is empty — it must be filled in before
    this config is usable in production.
    """
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = ''
|
997,223 | 9c3310a0cbd8ea0ce5cfc10db2eac6ff8c0647a8 | """
4Sum II
Given four lists A, B, C, D of integer values, compute how many tuples (i, j, k, l) there are such that A[i] + B[j] + C[k] + D[l] is zero.
To make problem a bit easier, all A, B, C, D have same length of N where 0 ≤ N ≤ 500. All integers are in the range of -228 to 228 - 1 and the result is guaranteed to be at most 231 - 1.
Example:
Input:
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
Output:
2
Explanation:
The two tuples are:
1. (0, 0, 0, 1) -> A[0] + B[0] + C[0] + D[1] = 1 + (-2) + (-1) + 2 = 0
2. (1, 1, 0, 0) -> A[1] + B[1] + C[0] + D[0] = 2 + (-1) + (-1) + 0 = 0
"""
from collections import defaultdict
from typing import List
class Solution:
    def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
        """Count tuples (i, j, k, l) with A[i] + B[j] + C[k] + D[l] == 0.

        Meet-in-the-middle: build a frequency table of all A+B pair sums,
        then for each C+D pair sum look up the count of its negation.
        A and C are deduplicated first so repeated values are weighted by
        their multiplicity instead of recomputed. O(n^2) time and space.

        Replaces hand-rolled if/else dict counting with `defaultdict`
        (already imported at module level) and drops the commented-out
        slower variant that used to live here.
        """
        if not A:
            return 0
        # Multiplicity of each distinct value in A.
        freq_a = defaultdict(int)
        for a in A:
            freq_a[a] += 1
        # sums_ab[s] = number of (i, j) pairs with A[i] + B[j] == s.
        sums_ab = defaultdict(int)
        for a, multiplicity in freq_a.items():
            for b in B:
                sums_ab[a + b] += multiplicity
        # Multiplicity of each distinct value in C.
        freq_c = defaultdict(int)
        for c in C:
            freq_c[c] += 1
        total = 0
        for c, multiplicity in freq_c.items():
            for d in D:
                # An A+B pair summing to -(c + d) completes a zero-sum tuple.
                total += multiplicity * sums_ab.get(-(c + d), 0)
        return total
# Main Call
# Example from the problem statement; expected output is 2.
A = [1, 2]
B = [-2, -1]
C = [-1, 2]
D = [0, 2]
solution = Solution()
print(solution.fourSumCount(A, B, C, D))
|
997,224 | 91661c15cb5a1b405ccc45cf4f57995d459f4404 | # coding=utf-8
# created by WangZhe on 2014/12/23
# Absolute Windows paths to the contest data; adjust for other machines.
log_path = 'G:/Program/python/contest/taobao/log/log'
train_log_path = 'G:/Program/python/contest/taobao/log/train_log'
label_file_path = 'G:/Program/python/contest/taobao/feature/uid_term3_score1.txt'
temp_path = 'G:/Program/python/contest/taobao/temp/'
feature_path = 'G:/Program/python/contest/taobao/feature/'
# Label assigned when no explicit label is available.
default_label = '0'
# Number of decimal places kept when writing scores.
score_precision = 3
997,225 | a616f3f63971d2f8118e24172b00454c8568dd0a | import sys
import numpy as np
class Kinematics:
    """Position/velocity holder used to demonstrate a numpy in-place-add
    dtype bug. `update_shows_bug` is kept intentionally broken for the demo;
    do not "fix" it."""
    def __init__(self, *, initial_position: np.ndarray, initial_velocity: np.ndarray):
        self._position = initial_position  # meters
        self._velocity = initial_velocity  # meters per second
    @property
    def position(self) -> np.ndarray:
        return self._position
    @position.setter
    def position(self, val: np.ndarray):
        self._position = val
    @property
    def velocity(self) -> np.ndarray:
        return self._velocity
    @velocity.setter
    def velocity(self, val: np.ndarray):
        self._velocity = val
    def update_shows_bug(self, *, delta_t: float):
        # Tries to combine the getter and setter for self.position
        # with the += operator, which will not work.
        # Will cause this error:
        # Exception has occurred: _UFuncOutputCastingError
        # Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind'
        self.position += self.velocity * delta_t
    def update_fixes_bug(self, *, delta_t: float):
        # Fixes the bug exibited in the 'update_shows_bug' method.
        # Building a new array (float result of velocity * dt + position)
        # avoids the in-place float64 -> int64 cast entirely.
        self._position = self.velocity * delta_t + self.position
def main(argv):
    """Demonstrate the in-place `+=` dtype bug and its fix on two identical
    Kinematics objects, then verify the fixed update's result."""
    elapsed = 2.0  # seconds between updates
    # Initial state: position in meters, velocity in meters per second.
    x0, y0 = 1000, 2000
    xdot0, ydot0 = 20, 30
    buggy = Kinematics(
        initial_position=np.array([x0, y0]), initial_velocity=np.array([xdot0, ydot0])
    )  # m and m/s
    fixed = Kinematics(
        initial_position=np.array([x0, y0]), initial_velocity=np.array([xdot0, ydot0])
    )  # m and m/s
    # Expected position after 2 s is rate * time + initial position:
    #   x: 20 m/s * 2 s + 1000 m = 1040 m
    #   y: 30 m/s * 2 s + 2000 m = 2060 m
    xf, yf = 1040, 2060  # meters
    # Uncommenting the next lines triggers _UFuncOutputCastingError:
    # buggy.update_shows_bug(delta_t=elapsed)
    # assert buggy.position[0] == xf
    # assert buggy.position[1] == yf
    fixed.update_fixes_bug(delta_t=elapsed)
    final = fixed.position
    assert final[0] == xf  # meters, succeeds
    assert final[1] == yf  # meters, succeeds
    print("Finished.")
# Run the demo only when executed as a script.
if __name__ == "__main__":
    main(sys.argv[1:])
|
997,226 | 7bfca4f73235405f373453260f5795466a7d2e94 | # Generated by Django 3.2.4 on 2021-07-17 05:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes LogisticDetail.DC_Date to a
    # nullable/blank CharField(max_length=50). Do not hand-edit the
    # operations below.
    dependencies = [
        ('SMS_web_app', '0014_alter_logisticdetail_dc_date'),
    ]
    operations = [
        migrations.AlterField(
            model_name='logisticdetail',
            name='DC_Date',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
|
997,227 | 80e0fa84edb126365c43a00600c422b1cc574dcf | """
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import glob
import csv
import re
def evaluateMappings(result_files, start_point, print_count, output_filename):
    """Aggregate per-topology mapping results into one CSV.

    For each input file named .../rdf-<A>-rd-<B>-tb-<C>.csv: skip the first
    `start_point` rows, sum column 5 (moves) and column 3 (std) over the
    remaining rows, and append one "A,B,C,moves,std" line to the output file.
    Files whose names do not match the pattern are reported and skipped.

    Fixes: the unmatched-filename branch now `continue`s (it previously fell
    through and crashed on `m.group(1)`); the output file is closed via
    `with`; `next()`/`print()` forms work on both Python 2 and 3.
    """
    output = ""
    for result_file in result_files:
        m = re.match(r'.*/rdf-(.*)-rd-(.*)-tb-(.*)\.csv', result_file)
        if not m:
            print("Cannot parse " + result_file)
            # Bug fix: skip unparseable names instead of falling through to
            # m.group(1) below, which raised AttributeError on None.
            continue
        with open(result_file, 'r') as result:
            result_reader = csv.reader(result, delimiter=',')
            skip = 0
            while skip < start_point:
                next(result_reader)  # works on Python 2 and 3
                skip += 1
            moves = 0
            std = 0
            c = 0
            for row in result_reader:
                moves += int(row[5])
                std += float(row[3])
                c += 1
            if print_count:
                print(c)
            output += m.group(1) + "," + m.group(2) + "," + m.group(3) + "," + str(moves) + "," + str(std) + "\n"
    # `with` guarantees the output file is flushed and closed.
    with open(output_filename, 'w') as f:
        f.write(output)
def main():
    """CLI entry point: glob the result CSVs and aggregate them."""
    # parse the commandline arguments
    parser = argparse.ArgumentParser(description='Evaluate mapping files for topologies from Blobstore')
    parser.add_argument("-t", dest='result_path', type=str, required=True, help='path for the result files')
    parser.add_argument("-o", dest='output_filename', type=str, required=True, help='output file name')
    parser.add_argument("-s", dest='start_point', type=int, required=False, default=1, help='starting point for the calculation')
    parser.add_argument("-c", dest='print_count', action="store_true", required=False, default=False, help='print count')
    args = parser.parse_args()
    # read topology files (sorted so the output order is deterministic)
    result_files = glob.glob(args.result_path + "/*.csv")
    evaluateMappings(sorted(result_files), args.start_point, args.print_count, args.output_filename)
if __name__ == '__main__':
main()
|
997,228 | 8e3c5e381e8041c00968e07d238b83b8f2479f95 | import hashlib
# Target prefix: the first 6 hex characters of the MD5 digest to search for.
strs = '35eb09'
def md5(s):
    """Return the hex MD5 digest of str(s) encoded as UTF-8."""
    digest = hashlib.md5(str(s).encode('utf-8'))
    return digest.hexdigest()
def main():
    # Brute-force search: find the first 8-digit integer whose MD5 digest
    # starts with the target prefix `strs`, print it and terminate.
    # NOTE(review): this scans up to 90 million hashes — expect it to run
    # for a while; `exit(0)` relies on the site builtins (sys.exit would be
    # the script-safe choice).
    for i in range(10000000,100000000):
        a = md5(i)
        if a[0:6] == strs:
            print(i)
            exit(0)
if __name__ == '__main__':
main()
|
997,229 | 6fa8b90ced904f8470fd202a8bbb60764a5794b1 | import json
import logging
import uuid
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.core.mail import EmailMultiAlternatives
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.permissions import AllowAny,IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.utils import timezone
# import from app
from accounts.forms import LoginForm
from accounts.models import AppStudent, User, PasswordResetRequest
from accounts.serializers import UserSerializer
# import from project
from FitnessApp.utils import SUCCESS_DICT
logger = logging.getLogger(__name__)
# Create your views here.
User = get_user_model()
class Register(APIView):
    """Open registration endpoint: creates a user, creates an AppStudent
    profile for role 'user', optionally stores an uploaded profile image,
    and returns a fresh auth token.

    NOTE: request.DATA / request.FILES is the pre-DRF-3 request API.
    """
    permission_classes = (AllowAny, )
    def post(self, request, format=None):
        user = None
        email = request.DATA.get('email', None)
        if email:
            # Reject duplicate emails up front.
            userlist = list(User.objects.filter(email=email))
            if len(userlist) > 0:
                user = userlist[0]
        if user:
            return Response({'message': 'User with this email already exists:'}, status=status.HTTP_400_BAD_REQUEST)
        serializer = UserSerializer(data=request.DATA)
        if serializer.is_valid():
            serializer.save()
            if serializer.data['user_role'] == 'user':
                try:
                    # subscription = UserSubscription.objects.create()
                    AppStudent.objects.create(app_user_id=serializer.data['id'])
                except Exception as ex:
                    return Response({'success': False, 'detail': _('Student not created.')},
                                    status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                logger.debug('New subscription created for user %s. %s', serializer.data['email'], serializer.data['id'])
            token, created = Token.objects.get_or_create(user_id=serializer.data['id'])
            if created:
                token.save()
            try:
                # Attach the uploaded profile image, if any, and re-serialize.
                if request.FILES:
                    edited_user = User.objects.get(id=serializer.data['id'])
                    edited_user.profile_image = request.FILES['profile_image']
                    edited_user.save()
                    serializer = UserSerializer(edited_user)
            except Exception as ex:
                return Response({'success': False,'detail': _('Image not uploaded.')},
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            logger.debug('New Token created for user %s. %s', serializer.data['email'], token.key)
            return Response({'success':True, 'token': token.key,'user':serializer.data}, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TokenLogin(APIView):
    """Email/password login; returns the user's auth token on success.

    NOTE(review): failed credentials return HTTP 200 with success=False,
    not 401 — presumably for the mobile client's benefit; confirm.
    """
    permission_classes = (AllowAny, )
    def post(self, request, format=None):
        form = LoginForm(request.DATA)
        if form.is_valid():
            email = form.cleaned_data["email"]
            password = form.cleaned_data["password"]
            login_user = authenticate(username=email, password=password)
            if login_user is not None:
                if login_user.is_active:
                    serializer = UserSerializer(login_user)
                    token, created = Token.objects.get_or_create(user=login_user)
                    logger.debug("login_user object: %s, token: %s", login_user.email, token.key)
                    return Response({'success': True, 'token': token.key, 'user': serializer.data},
                                    status=status.HTTP_200_OK)
                else:
                    return Response({'success': False, "message":
                                     "Your account is not active, Please contact administrator"}, status=status.HTTP_403_FORBIDDEN)
            else:
                logger.info('email %s attempt failed for login', email)
                return Response({'token': None, 'message': 'Invalid email or password', 'success': False},
                                status=status.HTTP_200_OK)
        else:
            # Form validation failed: return the field errors.
            payload = {
                'errors': [(k, v[0]) for k, v in form.errors.items()]
            }
            logger.debug('Invalid data. %s', payload)
            return Response(json.dumps(payload), status=status.HTTP_400_BAD_REQUEST)
class AccountInformation(APIView):
    """Return the authenticated user's own profile."""
    permission_classes = (IsAuthenticated, )
    def get(self, request):
        login_user = request.user
        if login_user.is_active:
            serializer = UserSerializer(login_user)
            logger.debug("account information object: %s", login_user.email)
            return Response({'success': True, 'user': serializer.data},
                            status=status.HTTP_200_OK)
        else:
            return Response({'success': False, "message":
                             "Your account is not active, Please contact administrator"}, status=status.HTTP_403_FORBIDDEN)
class ChangePassword(APIView):
    """Change the authenticated user's password after verifying the old one."""
    permission_classes = (IsAuthenticated, )
    def post(self, request):
        me = request.user
        data = request.DATA
        old_password = data['old_password']
        new_password = data['new_password']
        if me.check_password(old_password):
            me.set_password(new_password)
            me.save()
            return Response(SUCCESS_DICT, status=status.HTTP_200_OK)
        else:
            # NOTE(review): the mismatch case answers HTTP 200 + success=False.
            return Response({"message": "Your old password does not match our records. Please verify and try again",
                             'success': False}, status=status.HTTP_200_OK)
class ForgetPasswordEmail(APIView):
    """Generate a one-time password-reset code for the given email and
    send it to the user."""
    permission_classes = (AllowAny, )
    def post(self, request):
        data = request.DATA
        user = None
        email = data.get('email', None)
        if email:
            userlist = list(User.objects.filter(email=email))
            if len(userlist) > 0:
                user = userlist[0]
        if not user:
            return Response({"message": "No user with this email exists in the system",'success': False},
                            status=status.HTTP_200_OK)
        else:
            #check for existing request for current user
            existing_requests = PasswordResetRequest.objects.filter(user=user)
            if existing_requests:
                existing_requests.delete() #if existing request exists delete it
            #generate a new user request here
            reset_request = PasswordResetRequest()
            reset_request.user = user
            reset_request.hash = my_random_string()
            reset_request.save()
            to = user.email
            # The reset code itself is sent as the mail body.
            msg = EmailMultiAlternatives("Password Reset", reset_request.hash, settings.DEFAULT_FROM_EMAIL, [to])
            msg.send()
            return Response({"message": "Kindly check your email for code.",'success': True}, status=status.HTTP_200_OK)
class ResetPassword(APIView):
    """Set a new password given a valid, previously emailed reset code."""
    permission_classes = (AllowAny, )
    def post(self, request):
        data = request.DATA
        if not data['password']:
            return Response({"message": "Password Field is required", 'success': False}, status=status.HTTP_200_OK)
        try:
            reset_object = PasswordResetRequest.objects.get(hash=data['reset_code'])
            user = reset_object.user
            user.set_password(data['password'])
            user.save()
            # One-time use: discard the consumed reset request.
            reset_object.delete()
            return Response(SUCCESS_DICT, status=status.HTTP_200_OK)
        except Exception as ex:
            # Unknown/expired code (or any lookup failure) → generic error.
            return Response({"message": "Invalid Code", 'success':False}, status=status.HTTP_200_OK)
class DeleteAccount(APIView):
    """Soft-delete: deactivate the authenticated user's account (the row
    is kept, only is_active is cleared)."""
    permission_classes = (IsAuthenticated, )
    def post(self, request):
        user = self.request.user
        user.is_active = False
        user.save()
        return Response({"success": True, 'message': "Your account has been deactivated"})
# we putting this function here to resolve circular import with utils present in FitnessApp
def my_random_string(string_length=7):
    """Return a random uppercase string of length `string_length` that is not
    already in use as a PasswordResetRequest hash.

    Fixes over the previous revision: the loop condition tested a `flag`
    variable that was never updated and ended in an unreachable `break`;
    this version loops until a free value is found. `uuid4().hex` is the
    dashless hex form, so slicing+upper yields the same characters as the
    old str/upper/replace sequence.
    """
    while True:
        candidate = uuid.uuid4().hex[:string_length].upper()
        # Retry on the (unlikely) collision with an existing reset request.
        duplicate_check = PasswordResetRequest.objects.filter(hash=candidate)
        if not duplicate_check:
            return candidate
class ParseInstallation(APIView):
    """Store / fetch the Parse push-notification installation id on the
    authenticated user's AppStudent profile."""
    permission_classes = (IsAuthenticated, )
    def post(self, request):
        data = request.DATA
        if not data['parse_installation_id']:
            return Response({"message": "Parse installation id is required", 'success': False}, status=status.HTTP_200_OK)
        try:
            user = self.request.user
            student = AppStudent.objects.get(app_user=user)
            student.parse_installation_id = data['parse_installation_id']
            student.save()
            return Response(SUCCESS_DICT, status=status.HTTP_200_OK)
        except Exception as ex:
            return Response({"message": "Error saving parse installation id", 'success':False}, status=status.HTTP_200_OK)
    def get(self, request):
        login_user = request.user
        if login_user.is_active:
            try:
                student = AppStudent.objects.get(app_user=login_user)
                logger.debug("Parse installation id: %s", login_user.email)
                # Conditional expression: 200 with the id when present,
                # otherwise 200 with success=False.
                return Response({'success': True, 'parse_installation_id': student.parse_installation_id},
                                status=status.HTTP_200_OK) if student.parse_installation_id else Response({'success': False,
                                'message': 'You don not have parse subscription.'}, status=status.HTTP_200_OK)
            except Exception as ex:
                return Response({"message": "Error getting parse installation id", 'success':False}, status=status.HTTP_200_OK)
        else:
            return Response({'success': False, "message":
                             "Your account is not active, Please contact administrator"}, status=status.HTTP_403_FORBIDDEN)
class AppleSubscription(APIView):
    """Store / fetch the authenticated user's Apple in-app subscription id
    on their AppStudent profile."""
    permission_classes = (IsAuthenticated, )
    def post(self, request):
        data = request.DATA
        if not data['apple_subscription_id']:
            return Response({"message": "Apple subscription id is required", 'success': False}, status=status.HTTP_200_OK)
        try:
            user = self.request.user
            student = AppStudent.objects.get(app_user=user)
            student.apple_subscription_id = data['apple_subscription_id']
            student.apple_subscription_created_date = timezone.now()
            # NOTE(review): magic value — presumably the "Apple" entry of the
            # model's subscription choices; confirm against AppStudent.
            student.subscription_choices = 2
            student.save()
            return Response(SUCCESS_DICT, status=status.HTTP_200_OK)
        except Exception as ex:
            return Response({"message": "Error saving Apple subscription id", 'success':False}, status=status.HTTP_200_OK)
    def get(self, request):
        login_user = request.user
        if login_user.is_active:
            try:
                student = AppStudent.objects.get(app_user=login_user)
                logger.debug("Apple subscription id: %s", login_user.email)
                # 200 with the id + created date when present, otherwise
                # 200 with success=False.
                return Response({'success': True, 'apple_subscription_id': student.apple_subscription_id,
                                 'apple_subscription_created_date': student.apple_subscription_created_date},
                                status=status.HTTP_200_OK) if student.apple_subscription_id else Response({'success': False,
                                'message': 'You don not have Apple subscription.'}, status=status.HTTP_200_OK)
            except Exception as ex:
                return Response({"message": "Error getting Apple subscription id", 'success':False}, status=status.HTTP_200_OK)
        else:
            return Response({'success': False, "message":
                             "Your account is not active, Please contact administrator"}, status=status.HTTP_403_FORBIDDEN)
|
997,230 | a013ec674b030a17efe63ec3514e68781fed383b | from env import *
from . import PaxosOracle
import networkx as nx
class LSPaxosOracleControl (LSController):
def __init__ (self, name, ctx, address):
    """Set up controller state and register with the Paxos oracle."""
    super(LSPaxosOracleControl, self).__init__(name, ctx, address)
    self.hosts = set()  # HostTrait switches seen so far
    self.controllers = set([self.name])  # known controller names (incl. self)
    self.oracle = PaxosOracle()
    self.oracle.RegisterController(self)
    self.update_messages = {}  # reason -> count of rule updates sent
    self.link_version = {}  # link -> highest event version applied
    self.reason = None
    self.GetSwitchInformation()
def PacketIn(self, pkt, src, switch, source, packet):
    """Data-plane packet-in events are ignored; forwarding is driven purely
    by topology events."""
    pass
def currentLeader (self, switch):
for c in sorted(list(self.controllers)):
if c not in self.graph:
self.graph.add_node(c)
for c in sorted(list(self.controllers)):
if nx.has_path(self.graph, c, switch):
return c #Find the first connected controller
def ComputeAndUpdatePaths (self):
sp = nx.shortest_paths.all_pairs_shortest_path(self.graph)
for host in self.hosts:
for h2 in self.hosts:
if h2 == host:
continue
if h2.name in sp[host.name]:
p = SourceDestinationPacket(host.address, h2.address)
path = zip(sp[host.name][h2.name], \
sp[host.name][h2.name][1:])
for (a, b) in path[1:]:
link = self.graph[a][b]['link']
if self.currentLeader(a) == self.name:
self.update_messages[self.reason] = self.update_messages.get(self.reason, 0) + 1
self.UpdateRules(a, [(p.pack(), link)])
def UpdateMembers (self, switch):
self.graph.add_node(switch.name)
if isinstance(switch, HostTrait):
self.hosts.add(switch)
if isinstance(switch, ControllerTrait):
self.controllers.add(switch.name)
def NotifySwitchUp (self, pkt, src, switch):
self.UpdateMembers(switch)
self.oracle.InformOracleEvent(self, (src, switch, ControlPacket.NotifySwitchUp))
def NotifyLinkUp (self, pkt, version, src, switch, link):
self.UpdateMembers(switch)
self.oracle.InformOracleEvent(self, (version, src, switch, link, ControlPacket.NotifyLinkUp))
def NotifyLinkDown (self, pkt, version, src, switch, link):
self.UpdateMembers(switch)
self.oracle.InformOracleEvent(self, (version, src, switch, link, ControlPacket.NotifyLinkDown))
def processSwitchUp (self, src, switch):
self.UpdateMembers(switch)
def processLinkUp (self, version, src, switch, link):
if self.link_version.get(version, 0) >= version:
return
self.link_version[link] = version
self.UpdateMembers(switch)
self.addLink(link)
#assert(switch.name in self.graph)
def processLinkDown (self, version, src, switch, link):
if self.link_version.get(version, 0) >= version:
return
self.link_version[link] = version
self.UpdateMembers(switch)
self.removeLink(link)
#assert(switch.name in self.graph)
def NotifySwitchInformation (self, pkt, src, switch, version_links):
for (v, l) in version_links:
if self.link_version.get(l, 0) < v:
self.oracle.InformOracleEvent(self, (v, src, switch, l, ControlPacket.NotifyLinkUp))
def NotifyOracleDecision (self, log):
self.reason = "NotifyOracleDecision"
# Just process all to get us to a good state
self.graph.clear()
self.hosts.clear()
self.controllers.clear()
self.link_version = {}
self.controllers.add(self.name)
for prop in sorted(log.keys()):
entry = log[prop]
if entry[-1] == ControlPacket.NotifyLinkUp:
self.processLinkUp(*entry[:-1])
elif entry[-1] == ControlPacket.NotifyLinkDown:
self.processLinkDown(*entry[:-1])
elif entry[-1] == ControlPacket.NotifySwitchUp:
self.processSwitchUp(*entry[:-1])
else:
print "Unknown entry entry"
self.ComputeAndUpdatePaths()
self.reason = None
|
997,231 | 110264d03316d9ed753fbc8b6637c46eccee5184 | import numpy as np
from utils import distance
# the node class used to form a graph for performing A*
class Node:
    """Graph node used while running A* over a coordinate graph."""

    def __init__(self, coord_xy, end_xy, graph):
        # Hashable identity used to look this node up in `graph`.
        self.key = tuple(coord_xy)
        self.coord_xy = coord_xy
        # Straight-line estimate from here to the goal.
        self.heuristic = distance(self.coord_xy, end_xy)
        # Best known cost from the start; unvisited nodes begin at infinity.
        self.shortest_dist = np.inf
        self.prev_node = None
        self.total_cost = 0
        # Neighbours as recorded in the adjacency mapping.
        self.connections = graph[self.key]

    def __lt__(self, other):
        # Priority-queue ordering: the cheaper node wins.
        return self.total_cost < other.total_cost

    def update_total_cost(self):
        # f = g + h in A* terms.
        self.total_cost = self.shortest_dist + self.heuristic
|
997,232 | 3129204c70b762d75fe810f44a03390f4e715435 | '''
Created: 17.04.2018
@author: davidgraf
description: main application to run
parameter:
'''
# ---------------------
# Konfiguration
#DATA_DIR = "C:/Temp/DCASE2017_development_set"
# 'SVM' or 'DecisionTree' or 'RandomForest' or 'GaussianProcess' or 'AdaBoost' or 'NeuroNet' or 'NaiveBayes'
CLASSIFIER = 'NaiveBayes'
# for sampling 0.1 means only 10%
SAMPLERATE = 0.01
# ----------------------
# imports
from iodata.readData import readFold
from learning.classification import trainModel, testModel
from processing.featureEvaluation import featureClassCoerr
from processing.featureScaling import featureScale
import time
# read training data (features + labels) from fold 1, subsampled to SAMPLERATE
traindata_feature, traindata_labels = readFold('fold1', 'train', SAMPLERATE)
# read test data
testdata_feature, testdata_labels = readFold('fold1', 'evaluate', SAMPLERATE)
# preprocessing (feature scaling, feature evaluation, feature selection)
# featureClassCoerr(featureMatrixTrain,labelsTrain,range(0,60))
# scale train data and keep the fitted scaler so the test set reuses it
featureMatrixTrain, scaler = featureScale(traindata_feature[:])
labelsTrain = traindata_labels
# scale test data according train values (never fit the scaler on test data)
featureMatrixTest = scaler.transform(testdata_feature)
labelsTest = testdata_labels
# data analysis
# ... analysis(data)
timeStart = time.time()
# training: returns the fitted model plus its mean cross-validation score
model, meanCrossVal = trainModel(featureMatrixTrain, labelsTrain, CLASSIFIER)
timeStartPredict=time.time();
# testing
accuracy, precision, recall, f1 = testModel(model, featureMatrixTest, labelsTest)
# Python 2 print statements: wall-clock timings for the fit and predict phases
print "Training time (sec.)",(timeStartPredict-timeStart)
print "Prediction time (sec.)",(time.time()-timeStartPredict)
|
997,233 | 461e59a249f0168af00862ad5cdeda559abb6e17 | # =============================================================================
# import lib
# =============================================================================
from __future__ import print_function
import matplotlib.pyplot as plt
import math
import numpy as np
import matplotlib.image as mpimg
import torch
import torch.optim
from models.GDD_denoising import gdd
from utils.sr_utils import *
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
dtype = torch.cuda.FloatTensor
# =============================================================================
# load data
# =============================================================================
path_to_data = 'data/flash_noflash/'
n_im = 2
guide_np = mpimg.imread(path_to_data + 'im_flash.jpg')
input_np = mpimg.imread(path_to_data + 'im_noflash.jpg')
guide_np = guide_np.astype(np.float32) / 255
input_np = input_np.astype(np.float32) / 255
# =============================================================================
# show input and guidance images
# =============================================================================
figsize = 10
fig = plt.figure(figsize=(figsize,figsize))
plt.imshow(input_np)
fig = plt.figure(figsize=(figsize,figsize))
plt.imshow(guide_np)
# =============================================================================
# Set parameters and net
# =============================================================================
input_depth = input_np.shape[2]
method = '2D'
pad = 'reflection'
OPT_OVER = 'net'
show_every = 1000 #500
save_every = 1000
num_c = 32
LR = 0.01#try 0.01 0.001 0.0001
OPTIMIZER = 'adam'
num_iter = 1001#try 12000, 8000
reg_noise_std = 0.01 # try 0 0.03 0.05 0.08
mse_history = np.zeros(num_iter)
thresh_v = 0.01#0.000005, 0.00001
n_layer = 5
# layer size for each depth
im_layer_size = []
w,h = guide_np.shape[0], guide_np.shape[1]
for i in range(n_layer):
im_layer_size.append([w,h])
w, h = math.ceil(w/2), math.ceil(h/2)
net_param_fin = []
# =============================================================================
# Set net
# =============================================================================
# set input
net_input = get_noise(input_depth, method, ( math.ceil(guide_np.shape[0]/(2**n_layer)), math.ceil(guide_np.shape[1]/(2**n_layer))) ).type(dtype).detach()
# number of channels
input_depth = net_input.shape[1]
# define network structure
net = gdd(input_depth, input_np.shape[2],
num_channels_down = num_c,
num_channels_up = num_c,
num_channels_skip = num_c,
filter_size_up = 3, filter_size_down = 3, filter_skip_size=1,
upsample_mode='bilinear', # downsample_mode='avg',
need1x1_up=False,
need_sigmoid=True, need_bias=True, pad=pad, act_fun='LeakyReLU', im_layer_size = im_layer_size).type(dtype)
# define MSE loss
mse = torch.nn.MSELoss().type(dtype)
# convert numpy to torch
input_torch = torch.from_numpy(input_np.transpose(2,0,1)).type(dtype)
input_torch = input_torch[None, :].cuda()
guide_np_t = guide_np.transpose(2,0,1)
msi_torch = torch.from_numpy(guide_np_t[None, :]).type(dtype)
# =============================================================================
# Define closure and optimize
# =============================================================================
mse_last = 0#1000
last_net = [None] * num_iter
mse_history = [None] * num_iter
back_p = 0
repeat = 0
def closure(ind_iter):
    """One optimization step with loss-spike detection and rollback.

    Relies on module-level state: `i` (true iteration counter — `ind_iter`
    itself is unused), `net_input`, `mse_last` (recent loss baseline),
    `last_net` (parameter snapshots), `back_p` (iteration to rewind to,
    0 = none) and `repeat` (replay countdown after a rollback).
    """
    global i, net_input, mse_last, last_net, back_p, repeat
    # Re-perturb the saved input with fresh noise every step (regularization).
    if reg_noise_std > 0:
        net_input = net_input_saved + (noise.normal_() * reg_noise_std)
    out = net(msi_torch, net_input)
    total_loss = mse(out, input_torch)
    mse_i = total_loss.data.cpu().numpy()
    total_loss.backward()
    # Log
    if i % 100 == 0:
        print ('Iteration %05d MSE_gap %.7f' % (i, (mse_i - mse_last)))
    # Track the loss function: when the loss jumps more than thresh_v above
    # the recent baseline, restore parameters from a snapshot taken ~10
    # iterations earlier and replay from there.
    if (mse_i - mse_last) > thresh_v and i > 1000:
        print('increase in the loss at the pixel of %05d.' % (i))
        print('MSE_gap %.7f' % (mse_i - mse_last))
        if back_p == 0:
            back_p = i-10#1
            repeat = 150
        for new_param, net_param in zip(last_net[back_p], net.parameters()):
            net_param.data.copy_(new_param)
        # Zero loss so the optimizer step right after does not move the
        # freshly restored parameters.
        return total_loss*0
    else:
        if back_p > 0:
            # First clean step after a rollback: rewind the counter.
            i = back_p
            back_p = 0
        elif repeat > 0:
            # Replay window after a rollback: no snapshots yet.
            repeat -= 1
            return total_loss
        else:
            # Normal step: snapshot parameters, free the one 51 steps back.
            last_net[i] = [x.detach().cpu() for x in net.parameters()]
            last_net[i-51] = None
            #last_net[i-101] = None
            if i>40:
                # Baseline = mean loss over the trailing window [i-40, i-20).
                mse_last = np.mean(mse_history[i-40:i-20])
        # History
        mse_history[i] = mse_i
        i += 1
        if i % show_every == 0:
            # Periodic visual check: guidance, input, and current output.
            out = out.detach().cpu().squeeze().numpy().transpose(1,2,0)
            f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(15,15))
            ax1.imshow(guide_np, cmap='gray')
            ax2.imshow(input_np)
            ax3.imshow(out)
            plt.show()
    return total_loss
# =============================================================================
# Optimization
# =============================================================================
# Keep a clean copy of the input; `closure` re-adds fresh noise to it each step.
net_input_saved = net_input.detach().clone()
noise = net_input.detach().clone()
i = 0
p = get_params(OPT_OVER, net, net_input)
print('Starting optimization with ADAM')
optimizer = torch.optim.Adam(p, lr=LR)
for j in range(num_iter):
    optimizer.zero_grad()
    total_loss = closure(j)
    optimizer.step()
# save a final output and network parameters for each image
out = net(msi_torch, net_input)
out_np = out.detach().cpu().squeeze().numpy().transpose(1,2,0)
net_input_np = net_input.detach().cpu().squeeze().numpy().transpose(1,2,0)
net_param_fin.append(list(net.parameters()))
np.save("result/flash_noflash/out", out_np)
np.save("result/flash_noflash/rand_input.npy", net_input_np)
np.savez("result/flash_noflash/param.npz", np.array(net_param_fin))
|
997,234 | 0f51f5261295f82a24887cc28cd17a642830e8df | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class Model(models.Model):
    # Optional link to a user; related_name='+' suppresses the reverse
    # accessor on User.  NOTE(review): no on_delete — relies on the implicit
    # pre-Django-2.0 default; confirm the target Django version.
    foreign_key = models.ForeignKey('auth.User', null=True, blank=True, related_name='+')
    # Many-to-many to users, likewise without a reverse accessor.
    many_to_many = models.ManyToManyField('auth.User', blank=True, related_name='+')
|
997,235 | 1c2fb3f158067c67afc1cdeb2cac7e14464b4cc8 | from django.contrib import admin
from eLearn.models import Register
# Register your models here.
# Expose the Register model in the Django admin interface.
admin.site.register(Register)
|
997,236 | f6c12fcd5b85fca6f79ceb7da2b66589466b6a6d | def square_of_7():
print("I am before return")
return 7**2
print("I am after return") # wont print as the return stmt exits the function
result = square_of_7()
print(result)  # prints 49; the "after return" line in the function never ran
997,237 | 740acd30bc5fa2b5bbb5bb8033418b9414e17e4a | # Generated by Django 2.0.6 on 2018-06-20 20:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Collapse ServiceInstance/ServiceLineItem into ServiceRecord.

    Deletes both old models and puts cost, date, employee and service
    directly on ServiceRecord.
    """
    # Run outside a single transaction — TODO confirm why (typically needed
    # for sqlite table rebuilds or backend-specific constraints).
    atomic=False
    dependencies = [
        ('catalog', '0020_auto_20180620_1442'),
    ]
    operations = [
        migrations.DeleteModel(
            name='ServiceInstance',
        ),
        migrations.RemoveField(
            model_name='servicelineitem',
            name='employee',
        ),
        migrations.RemoveField(
            model_name='servicelineitem',
            name='lawn_mower',
        ),
        migrations.RemoveField(
            model_name='servicelineitem',
            name='service',
        ),
        migrations.AddField(
            model_name='servicerecord',
            name='cost',
            field=models.CharField(default=1, help_text='actual charge for service', max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='servicerecord',
            name='date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='servicerecord',
            name='employee',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Employee'),
        ),
        migrations.AddField(
            model_name='servicerecord',
            name='service',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.ServiceType'),
        ),
        migrations.DeleteModel(
            name='ServiceLineItem',
        ),
    ]
|
997,238 | 6f885bb89cb68a2468f6c3b2f99ecea003e3ed22 | from utils import *
#I implemented water. Water can spread in all cardinal directions but up.
#If it hasn't spread it will slow down its check frequency to spare the CPU.
#It will also spread through gold and ropes without changing the level.
#It will also slow down player movement!
class Water(object):
    """A single water tile that spreads down and sideways through the level.

    Each instance schedules itself on the event queue; after a successful
    spread it re-enqueues quickly, otherwise it polls slowly to spare the CPU
    (see the module comment above).
    """
    WATER_DELAY_SLOW=TIME_STEP*50    # re-check interval when nothing spread
    WATER_DELAY_QUICK=TIME_STEP*10   # re-check interval right after spreading
    def __init__(self,x,y,window,level,Q):
        self._x=x
        self._y=y
        self._window=window
        self._level=level
        self._queue=Q
        # Schedule the first spread attempt.
        Q.enqueue(Water.WATER_DELAY_QUICK,self)
    def check_left(self):
        """True when the tile to the left is empty."""
        return self._level.is_empty(self._x-1,self._y)
    def check_right(self):
        """True when the tile to the right is empty."""
        return self._level.is_empty(self._x+1,self._y)
    def check_down(self):
        """True when the tile below is empty."""
        return self._level.is_empty(self._x,self._y+1)
    def check_down_for_permeable_not_water_or_ladder(self):
        """True when the tile below can be flowed into (not water/ladder)."""
        return self.permeable_but_not_water_or_ladder(self._x,self._y+1)
    def permeable_but_not_water_or_ladder(self,x,y):
        """True when (x, y) is permeable but neither water nor a ladder."""
        return self._level.is_permeable(x,y) and not(self._level.is_water(x,y) or self._level.is_ladder(x,y))
    def spread(self):
        """Try to spread one tile; return True when a new water tile was made.

        Flows downward first; only when the tile below is impermeable does it
        spill left and right.  Tile id 5 is water — TODO confirm against the
        level's tile table.
        """
        just_spread=False
        if self.check_down_for_permeable_not_water_or_ladder():
            if self.check_down():
                self._level.create_tile(5,index(self._x,self._y+1),self._window)
                Water(self._x,self._y+1,self._window,self._level,self._queue)
                just_spread=True
        elif not(self._level.is_permeable(self._x,self._y+1)):
            # Blocked below: spill sideways into any empty neighbours.
            if self.check_right():
                self._level.create_tile(5,index(self._x+1,self._y),self._window)
                Water(self._x+1,self._y,self._window,self._level,self._queue)
                just_spread=True
            if self.check_left():
                self._level.create_tile(5,index(self._x-1,self._y),self._window)
                Water(self._x-1,self._y,self._window,self._level,self._queue)
                just_spread=True
        return just_spread
    def event(self,queue):
        """Queue callback: attempt to spread, then re-enqueue at the right rate."""
        just_spread=self.spread()
        if just_spread:
            delay=Water.WATER_DELAY_QUICK
        else:
            delay=Water.WATER_DELAY_SLOW
        queue.enqueue(delay,self)
|
997,239 | 08fb3947ba932ccb452c8f34a106d217067d0907 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Tuple
import numpy as np
import pytest
from aicsimageio import exceptions
from aicsimageio.readers.default_reader import DefaultReader
from ..conftest import get_resource_full_path, host
from ..image_container_test_utils import run_image_file_checks
@host
@pytest.mark.parametrize(
    "filename, set_scene, expected_shape, expected_dims_order",
    [
        ("example.bmp", "Image:0", (480, 640, 4), "YXS"),
        ("example.png", "Image:0", (800, 537, 4), "YXS"),
        ("example.jpg", "Image:0", (452, 400, 3), "YXS"),
        ("example.gif", "Image:0", (72, 268, 268, 4), "TYXS"),
        (
            "example_invalid_frame_count.mp4",
            "Image:0",
            (55, 1080, 1920, 3),
            "TYXS",
        ),
        (
            "example_valid_frame_count.mp4",
            "Image:0",
            (72, 272, 272, 3),
            "TYXS",
        ),
        # Unsupported extensions must raise, not silently return garbage.
        pytest.param(
            "example.txt",
            None,
            None,
            None,
            marks=pytest.mark.raises(exception=exceptions.UnsupportedFileFormatError),
        ),
        # DefaultReader files have a single scene, so "Image:1" is invalid.
        pytest.param(
            "example.png",
            "Image:1",
            None,
            None,
            marks=pytest.mark.raises(exception=IndexError),
        ),
    ],
)
def test_default_reader(
    filename: str,
    host: str,
    set_scene: str,
    expected_shape: Tuple[int, ...],
    expected_dims_order: str,
) -> None:
    """Round-trip each sample file through DefaultReader and verify shape,
    dims order, dtype and scene metadata."""
    # Construct full filepath
    uri = get_resource_full_path(filename, host)
    # Run checks
    run_image_file_checks(
        ImageContainer=DefaultReader,
        image=uri,
        set_scene=set_scene,
        expected_scenes=("Image:0",),
        expected_current_scene="Image:0",
        expected_shape=expected_shape,
        expected_dtype=np.dtype(np.uint8),
        expected_dims_order=expected_dims_order,
        expected_channel_names=None,
        expected_physical_pixel_sizes=(None, None, None),
        expected_metadata_type=dict,
    )
def test_ffmpeg_header_fail() -> None:
    """A remote file whose header ffmpeg cannot read must raise IOError."""
    with pytest.raises(IOError):
        # Big Buck Bunny
        DefaultReader("https://archive.org/embed/archive-video-files/test.mp4")
|
997,240 | a427c28a3d4fa69bd94067bcd47fd316004b3932 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 11 02:54:09 2016
@author: Fran Callejas
"""
'''
--------------------------
Francesca Callejas
ffc2108
12/10/2016
weighted_knn
The purpose of this code is to find the type of flower that the closest
neighbors of a point are. I added a count to see how many in the list of
neighbors were from each type of flower. I then verififeid the flower type with
an if statement. This time, instead of importing majority vote, I imported
weighted_majority_vote to calculate a weighted KNN.
--------------------------
'''
#Your code here
import create_data
import integerize_labels
import split
import find_k_nearest_neighbors as fknn
import weighted_majority_vote as wmv
def weighted_knn(train_data, test_data, k):
    """Predict a label for every row of test_data via distance-weighted KNN.

    Parameters: train_data — labelled reference rows; test_data — rows to
    classify; k — neighbour count.  Returns the list of predicted labels,
    one per test row.
    """
    weighted_predicted_labels = []
    for row in test_data:
        # Neighbours must come from the TRAINING set; the original searched
        # test_data, which left train_data unused and let every row match
        # itself as its own nearest neighbour.
        neighbors = fknn.find_k_nearest_neighbors(row, train_data, k)
        # Weighted majority vote over the k neighbours predicts the flower.
        weighted_predicted_labels.append(wmv.weighted_majority_vote(row, \
            neighbors))
    return weighted_predicted_labels
# Load the iris data, map string labels to integers, split, and classify.
data = create_data.create_data("iris.data.txt")
(integerized_data,x) = integerize_labels.integerize_labels(data)
(train_data, test_data) = split.split(integerized_data)
weighted_knn(train_data, test_data, 4)  # NOTE(review): return value is discarded — presumably should be printed or scored
|
997,241 | a2983e059605064f714ca0cc0b74336003f4c3ef | def angrmain():
import angr
import claripy
FILE_NAME = 'filegr.elf'
IN_FILE_NAME = 'home/vladkuznetsov/Vl/Projects/Reverse/HW-08/12/hehuha.txt'
FIND = ()
BAN = ()
NUMBER_SIZE = 8
CHAR_SIZE = 8
proj = angr.Project('./' + FILE_NAME)
input_size_min = 32
input_size_max = 32
for input_size in range(input_size_min, input_size_max + 1):
print("test: " + str(input_size))
argv = claripy.BVS("argv", input_size * CHAR_SIZE)
file = angr.SimFile(IN_FILE_NAME, content=argv)
initial_state = proj.factory.entry_state(args=['./' + FILE_NAME, IN_FILE_NAME])
sm = proj.factory.simulation_manager(initial_state)
# import IPython
# IPython.embed()
sm.explore()
for end in sm.deadended:
out = end.posix.dumps(1)
if str(out).startswith("b'Succ"):
print("FOUND!")
s = end.solver.eval(argv, cast_to=bytes).decode('utf-8')
print(s)
return s
else:
print(out)
print("BAN!!!")
return "BAN!!!"
def do_better(found, argv):
    """Render the first found state's concrete value for `argv` as a bytes repr."""
    return str(found[0].solver.eval(argv, cast_to=bytes))
if __name__ == '__main__':
print(angrmain())
|
997,242 | d294218181210846ba37973214f594adbf36f68b | class Solution(object):
def numWays(self, steps, arrLen):
dp = [[None for _ in range(arrLen + 1)] for _ in range(steps + 1)]
dp[0][0] = 1
for i in range(1, steps + 1):
for j in range(arrLen + 1):
if j == 0:
left = 0
if dp[i - 1][j + 1]:
right = dp[i - 1][j + 1]
else:
right = 0
if dp[i - 1][j]:
middle = dp[i - 1][j]
else:
middle = 0
elif j == arrLen:
right = 0
if dp[i - 1][j - 1]:
left = dp[i - 1][j - 1]
else:
left = 0
if dp[i - 1][j]:
middle = dp[i - 1][j]
else:
middle = 0
else:
if dp[i - 1][j - 1]:
left = dp[i - 1][j - 1]
else:
left = 0
if dp[i - 1][j + 1]:
right = dp[i - 1][j + 1]
else:
right = 0
if dp[i - 1][j]:
middle = dp[i - 1][j]
else:
middle = 0
dp[i][j] = left + middle + right
print dp
return dp[-1][0] if dp[-1][0] else -1
# Python 2 style smoke-test prints for a few (steps, arrLen) pairs.
test = Solution()
print test.numWays(3, 2)
print test.numWays(2, 4)
print test.numWays(4, 2)
997,243 | 810386a9982631104d7242ab6feddf1e770d3a09 | from random import randint
from math import sqrt
GENERATION_SIZE = 10
BRANCHING_FACTOR = 4
def heuristic(board, gridsize=4, blocksize=2):
    """Count sudoku rule violations, tallied once per cell per clash.

    For every cell, counts equal values elsewhere in its column of rows,
    elsewhere in its row, and in its block (block cells sharing the cell's
    row or column are skipped, since the first two passes cover them).
    """
    conflicts = 0
    for r in range(gridsize):
        for c in range(gridsize):
            cell = board[r][c]
            # Same value at another row of this column.
            conflicts += sum(1 for rr in range(gridsize)
                             if rr != r and board[rr][c] == cell)
            # Same value at another column of this row.
            conflicts += sum(1 for cc in range(gridsize)
                             if cc != c and board[r][cc] == cell)
            # Same value elsewhere in the blocksize x blocksize block.
            block_col = c // blocksize
            block_row = r // blocksize
            for dr in range(blocksize):
                for dc in range(blocksize):
                    rr = blocksize * block_row + dr
                    cc = blocksize * block_col + dc
                    if not (cc == c or rr == r) and board[rr][cc] == cell:
                        conflicts += 1
    return conflicts
def deepcopy_board(board):
    """Return a two-level copy of `board`: new outer list, new row lists."""
    return [row[:] for row in board]
def generate_successor(board, size, fixed):
    """Return a copy of `board` with two non-fixed cells of one random row swapped.

    `fixed` is a set of (row, col) clue coordinates that must not move.

    Fix: the original relied on Python 2 `map`/`filter` returning lists;
    under Python 3 they are lazy iterators, so `len()`, indexing and `del`
    all failed.
    """
    # For every row, the column indices that are not fixed clues.
    choices = [[col for col in range(size) if (r, col) not in fixed]
               for r in range(size)]
    row = randint(0, size - 1)
    first = randint(0, len(choices[row]) - 1)
    col_a = choices[row][first]
    del choices[row][first]
    second = randint(0, len(choices[row]) - 1)
    col_b = choices[row][second]
    # Two-level copy (inlined from deepcopy_board to keep this self-contained).
    successor = [r[:] for r in board]
    successor[row][col_b], successor[row][col_a] = \
        successor[row][col_a], successor[row][col_b]
    return successor
def generate_board(original_board, size, fixed):
    """Randomly fill every non-fixed cell so each row holds a permutation
    of 1..size (fixed clues keep their values).

    Fix: the original indexed and deleted from Python 2 `filter` results,
    which are lazy iterators under Python 3.
    """
    # Two-level copy of the puzzle (inlined to keep the function standalone).
    board = [list(row) for row in original_board]
    # Values 1..size still missing from each row (clues already present).
    remaining = [[v for v in range(1, size + 1) if v not in row]
                 for row in original_board]
    for r in range(size):
        for c in range(size):
            if (r, c) not in fixed:
                pick = randint(0, len(remaining[r]) - 1)
                board[r][c] = remaining[r].pop(pick)
    return board
def solver(original_board=None, size=4):
    """Solve a sudoku via beam-style local search.

    Keeps a population of GENERATION_SIZE candidate boards, repeatedly
    branches each one BRANCHING_FACTOR times by swapping two non-fixed cells
    (generate_successor), and keeps the lowest-heuristic boards until one
    reaches zero collisions.  May run indefinitely on hard puzzles.

    Fix: the default 4x4 puzzle was a mutable default argument (shared across
    calls); it now lives in the body behind a None sentinel.
    """
    if original_board is None:
        original_board = [
            [1, 0, 3, 0],
            [0, 0, 0, 0],
            [2, 0, 4, 0],
            [0, 0, 0, 0],
        ]
    # Cells whose value is given by the puzzle and must never change.
    fixed_values = set()
    for i in range(size):
        for j in range(size):
            if original_board[i][j] != 0:
                fixed_values.add((i, j))
    solved = False
    solution = None
    block = int(sqrt(size))
    # Initial population of random completions, paired with their scores.
    boards = [generate_board(original_board, size, fixed_values)
              for _ in range(GENERATION_SIZE)]
    boards = [(heuristic(b, gridsize=size, blocksize=block), b) for b in boards]
    while not solved:
        # Keep only the GENERATION_SIZE best-scoring (lowest) boards.
        boards.sort(key=lambda scored: scored[0])
        boards = boards[:GENERATION_SIZE]
        if boards[0][0] == 0:
            # Zero collisions: puzzle solved.
            solved = True
            solution = boards[0][1]
        else:
            # Branch every survivor and rescore (py2/3-compatible prints).
            successors = []
            for score, b in boards:
                print("Heuristic score: %d" % score)
                print("\n".join([""] + [str(row) for row in b]))
                for _ in range(BRANCHING_FACTOR):
                    successors.append(generate_successor(b, size, fixed_values))
            for s in successors:
                boards.append((heuristic(s, gridsize=size, blocksize=block), s))
    return solution
if __name__ == "__main__":
print reduce(lambda accumulator, x: accumulator + "\n" + str(x), solver(), "")
|
997,244 | 5c3ecd0cad18420b27399ada850e44a8a0d39d0d | import tkinter as tk
import platform
import os
# from PIL import Image, ImageTk
CELL_SIZE = 32 # the pixel for a single square for play board
from Manual_play_window import *
ROOT_DIR = "."
class Start_window:
    """Launcher window offering the three game modes as buttons."""

    def __init__(self, master):
        self.master = master
        self.frame = tk.Frame(self.master)
        # macOS buttons need an explicit highlight colour to be styled;
        # other platforms take the defaults (dedupes the old if/else that
        # repeated all three Button constructors).
        extra = {'highlightbackground': '#3E4149'} if platform.system() == "Darwin" else {}
        self.button1 = tk.Button(self.frame, text='Manual Play', width=25, command=self.new_window1, **extra)
        self.button2 = tk.Button(self.frame, text='1 robot test', width=25, command=self.new_window2, **extra)
        self.button3 = tk.Button(self.frame, text='8 robots battle', width=25, command=self.new_window3, **extra)
        self.button1.pack()
        self.button2.pack()
        self.button3.pack()
        self.frame.pack()

    def new_window1(self):  # manual play button
        """Open the manual-play window in a new toplevel."""
        self.temp_new = tk.Toplevel(self.master)
        self.app = Manual_play_window(self.temp_new)

    def new_window2(self):  # 1 robot test button
        """Open the single-robot test window in a new toplevel."""
        self.temp_new = tk.Toplevel(self.master)
        self.app = One_robot_window(self.temp_new)

    def new_window3(self):  # 8 robot battle button
        """Open the eight-robot battle window in a new toplevel."""
        self.temp_new = tk.Toplevel(self.master)
        self.app = Robot_battle_window(self.temp_new)

    def close_windows(self):
        """Destroy the owning window (fixed: was misspelled `destory`)."""
        self.master.destroy()
# class Manual_play_window:
# def __init__(self, master):
# self.master = master
# self.frame = tk.Frame(self.master)
# self.widgets()
#
# self.rows = 10
# self.columns = 10
# self.size = CELL_SIZE
# self.color1 = 'white'
# self.color2 = 'grey'
# self.pieces = {}
#
# canvas_width = self.columns * self.size
# canvas_height = (self.rows) * self.size
#
# self.canvas = tk.Canvas(self.master, borderwidth=0, highlightthickness=0,
# width=canvas_width, height=canvas_height, background="bisque")
# self.canvas.pack(side="top", fill="both", expand=True, padx=2, pady=2)
#
# # this binding will cause a refresh if the user interactively
# # changes the window size
# self.canvas.bind("<Configure>", self.refresh)
# self.frame.pack()
#
# # menubar = tk.Menu(self.master)
# # menu_setting = tk.Menu(menubar, tearoff=0)
# # menubar.add_cascade(label="setting", menu=menu_setting)
# # menu_setting.add_command(label="new game", command=self.menu_action)
# # menu_setting.add_command(label="load game", command=self.menu_action)
# # menu_setting.add_command(label="restart game", command=self.menu_action)
# # menu_setting.add_separator()
# # menu_setting.add_command(label="Quit", command = self.master.destory)
# #
# # self.master.config(menu=menubar)
# # img_right = ImageTk.PhotoImage(Image.open(os.path.join(ROOT_DIR,"img_src","btn_right.png")).convert("RGB").resize((20,20)))
# # btn_right = tk.Button(self.master, image = img_right, command =self.move_right())
# # btn_right = tk.Button(self.master, image = tk.PhotoImage(file = r"./img_src/btn_right.png"), command =self.move_right())
#
# btn_right = tk.Button(self.master, text="test", command =self.move_right()).pack()
# # btn_right.place(x=50, y=self.rows * self.size+100)
#
# # btn_test = tk.Button(self.master, text= "test", command = self.move_right()).place(x=50, y=self.rows * self.size+100)
#
# def widgets(self):
# menubar = tk.Menu(root)
# menubar.add_command(label="File")
# # menubar.add_command(label="Quit", command=root.quit())
#
# root.config(menu=menubar)
#
# def menu_action(self):
# print("hit menu")
#
# def move_right(self):
# print("hit right")
# # credit to https://stackoverflow.com/questions/4954395/create-board-game-like-grid-in-python
# def addpiece(self, name, image, row=0, column=0):
# '''Add a piece to the playing board'''
# self.canvas.create_image(0, 0, image=image, tags=(name, "piece"), anchor="c")
# self.placepiece(name, row, column)
#
# def placepiece(self, name, row, column):
# '''Place a piece at the given row/column'''
# self.pieces[name] = (row, column)
# x0 = (column * self.size) + int(self.size / 2)
# y0 = (row * self.size) + int(self.size / 2)
# self.canvas.coords(name, x0, y0)
#
# def refresh(self, event):
# '''Redraw the board, possibly in response to window being resized'''
# xsize = int((event.width - 1) / self.columns)
# ysize = int((event.height - 1) / self.rows)
# self.size = min(xsize, ysize)
# self.canvas.delete("square")
# color = self.color2
# for row in range(self.rows):
# color = self.color1 if color == self.color2 else self.color2
# for col in range(self.columns):
# x1 = (col * self.size)
# y1 = (row * self.size)
# x2 = x1 + self.size
# y2 = y1 + self.size
# self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill=color, tags="square")
# color = self.color1 if color == self.color2 else self.color2
# for name in self.pieces:
# self.placepiece(name, self.pieces[name][0], self.pieces[name][1])
# self.canvas.tag_raise("piece")
# self.canvas.tag_lower("square")
#
# def update_board(self):
# return NotImplementedError
#
# def close_windows(self):
# self.master.destory()
class One_robot_window:
    """Placeholder for the single-robot test window (not implemented yet)."""

    def __init__(self, master):
        # Raise, don't return: returning a non-None value from __init__ is a
        # TypeError at instantiation, which masked the real signal.
        raise NotImplementedError("One_robot_window is not implemented yet")

    def close_windows(self):
        self.master.destroy()  # fixed: was misspelled `destory`
class Robot_battle_window:
    """Placeholder for the eight-robot battle window (not implemented yet)."""

    def __init__(self, master):
        # Raise, don't return: returning a non-None value from __init__ is a
        # TypeError at instantiation, which masked the real signal.
        raise NotImplementedError("Robot_battle_window is not implemented yet")

    def close_windows(self):
        self.master.destroy()  # fixed: was misspelled `destory`
if __name__ == "__main__":
    root = tk.Tk()
    # NOTE(review): launches Manual_play_window directly; the Start_window
    # launcher is left commented out — confirm which entry point is intended.
    app = Manual_play_window(root)
    # app = Start_window(root)
    root.mainloop()
|
997,245 | c7fd85cf2f8b4ad104240377b361e4765abc86c0 | #!/usr/bin/env python
# encoding: utf-8
"""
A functional wrapper for UCSF Chimera & PLIP
"""
import sys
from cStringIO import StringIO
class Mock(object):
    """Stand-in object that absorbs construction, calls, attribute and item
    access, so modules importing a mocked package keep working."""

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        if name in ('__file__', '__path__'):
            return '.',
        if name == '__all__':
            return []
        if name[0] == name[0].upper():
            # Capitalised names look like classes: hand back an empty type
            # registered under this module's name.
            stub = type(name, (), {})
            stub.__module__ = __name__
            return stub
        return Mock()

    def __getitem__(self, *args, **kwargs):
        return

    def __setitem__(self, *args, **kwargs):
        return
# Patch unneeded PLIP dependencies: PLIP imports pymol at module load, which
# is unavailable inside Chimera, so install a Mock in sys.modules first.
MOCK_MODULES = ('pymol',)
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import chimera
from plip.modules.preparation import PDBComplex
from plip.modules.chimeraplip import ChimeraVisualizer
from plip.modules.plipremote import VisualizerData
from plip.modules.report import StructureReport
from plip.modules import config as plip_config
plip_config.PLUGIN_MODE = True
def export_temporary_pdbstream(molecule):
    """Write `molecule` to an in-memory PDB stream, rewound for reading."""
    temp = StringIO()
    chimera.pdbWrite([molecule], molecule.openState.xform, temp)
    temp.seek(0)
    return temp
def analyze_with_plip(pdb):
    """Run the PLIP interaction analysis on a PDB string; return the complex."""
    pdbcomplex = PDBComplex()
    pdbcomplex.load_pdb(pdb, as_string=True)
    pdbcomplex.analyze()
    # PLIP expects a source filename; we loaded from a string, so fake one.
    pdbcomplex.sourcefiles['filename'] = '/dev/null'
    return pdbcomplex
def patch_molecule(molecule):
    """Clone `molecule` into a 'PLIP-<id>'-named copy and hide the original.

    Returns (pdb_stream_of_original, renamed_copy).
    """
    # Create copies of original models in Chimera &
    # Patch molecule names to work with PLIP
    stream = export_temporary_pdbstream(molecule)
    pdb = chimera.PDBio()
    molcopy, _ = pdb.readPDBstream(stream, '{}.pdb'.format(molecule.name), 0)
    chimera.openModels.add(molcopy, sameAs=molecule)
    # readPDBstream yields a list; we expect exactly one model in it.
    molcopy, = molcopy
    molcopy.name = 'PLIP-{}'.format(molecule.id)
    molecule.display = False
    return stream, molcopy
def depict_analysis(pdbcomplex, molecule):
    """Render every PLIP interaction set onto `molecule` inside Chimera.

    Returns ({interaction: visualizer}, StructureReport for the complex).
    """
    # Export analysis back to Chimera
    interactions = {}
    for interaction in pdbcomplex.interaction_sets:
        view_data = VisualizerData(pdbcomplex, interaction)
        viewer = ChimeraVisualizer(view_data, chimera, molecule.id)
        interactions[interaction] = viewer
        # Draw every interaction category the visualizer supports.
        for method in ('cationpi', 'halogen', 'hbonds', 'hydrophobic',
                       'metal', 'sbridges', 'stacking', 'wbridges'):
            getattr(viewer, 'show_' + method)()
    report = StructureReport(pdbcomplex)
    return interactions, report
def do(molecules):
    """Analyze exactly one not-yet-analyzed molecule with PLIP and depict it.

    Molecules that already carry a `plip_copy` are skipped; raises ValueError
    unless exactly one candidate remains.  Returns depict_analysis()'s result.
    """
    molecules = [m for m in molecules if not getattr(m, 'plip_copy', None)]
    if len(molecules) != 1:
        raise ValueError('Only one model can be analyzed at the same time.')
    molecule = molecules[0]
    stream, patched_molecule = patch_molecule(molecule)
    # Remember the copy so undo() can restore the original later.
    molecule.plip_copy = patched_molecule
    analyzed = analyze_with_plip(stream.getvalue())
    stream.close()
    return depict_analysis(analyzed, patched_molecule)
def undo():
    """Remove all PLIP artifacts: pseudobond groups, copies, hidden flags."""
    pbnames = ['Water Bridges', 'Salt Bridges', 'Hydrophobic Interactions',
               'HalogenBonds', 'pi-Stacking', 'Hydrogen Bonds',
               'Metal Coordination', 'Cation-Pi']
    for m in chimera.openModels.list(modelTypes=[chimera.Molecule]):
        manager = m.pseudoBondMgr()
        for group in manager.pseudoBondGroups:
            # NOTE(review): rsplit('-')[0] keeps only the text before the first
            # '-', so hyphenated names like 'Cation-Pi' can never match --
            # verify the actual category format.
            if group.category.rsplit('-')[0] in pbnames:
                manager.deletePseudoBondGroup(group)
        if hasattr(m, 'plip_copy'):
            # Restore the visibility of the original molecule.
            m.display = True
            delattr(m, 'plip_copy')
        if m.name.startswith('PLIP-'):
            m.destroy()
    chimera.viewer.updateCB(chimera.viewer)
if __name__ == '__main__':
    # BUG FIX: do() requires the list of molecules to analyze; calling it with
    # no arguments raised TypeError. Analyze all open Chimera molecules.
    do(chimera.openModels.list(modelTypes=[chimera.Molecule]))
|
997,246 | d566778a40c009c821f0b29de65b4efeedfbcf43 | # 둘 중 하나가 기준 값 보다 순위가 높아야 합격
T = int(input())
for tc in range(1, T+1):
    # A candidate is hired only if their interview rank beats every
    # previously seen (better-paper-score) candidate's interview rank.
    N = int(input())
    rank = [[] for _ in range(N)]
    for i in range(N):
        rank[i] = list(map(int, input().split()))
    # Sort ascending by written-exam rank (first column).
    rank.sort(key=lambda x: x[0])
    cnt = 1
    base = rank[0][1]  # best interview rank so far; later ranks must be smaller to pass
    for i in range(1, N):
        if rank[i][1] < base:
            cnt += 1
            base = rank[i][1]
    print(cnt)
|
997,247 | e1dda3983f6c5b4134ce3c4fcf7e7fe9b2316fa8 | import torch
from torch.utils.data import DataLoader, TensorDataset
import tensorflow as tf
from sklearn.model_selection import train_test_split
import math
import unicodedata
import re
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from tqdm import tqdm
import sys
from collections import defaultdict
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation (Hendrycks & Gimpel).

    BUG FIX: the inner constant must be sqrt(2/pi) (~0.798), not sqrt(pi/2)
    (~1.253), to match the standard GELU approximation used by BERT/PyTorch.
    """
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x.pow(3))))
def count_parameters(model):
    """Number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
################################
def create_padding_mask(x, pad_tok=0.):
    """Float mask: 1.0 where *x* equals the padding token, else 0.0."""
    return x.eq(pad_tok).float()
def create_look_ahead_mask(x):
    """Strictly-upper-triangular mask of ones, shaped like *x* (future positions)."""
    ones = torch.ones_like(x)
    return torch.triu(ones, diagonal=1).float()
def create_masks(input, target, pad_tok=0.):
    """Build the three attention masks for an encoder-decoder step.

    Returns (enc_padding_mask, combined_mask, dec_padding_mask) where the
    combined mask is the element-wise max of the target padding mask and the
    look-ahead mask.
    """
    enc_padding_mask = create_padding_mask(input, pad_tok)
    dec_padding_mask = create_padding_mask(input, pad_tok)
    target_padding = create_padding_mask(target, pad_tok)
    combined_mask = torch.max(target_padding, create_look_ahead_mask(target))
    return enc_padding_mask, combined_mask, dec_padding_mask
###############################
# Tokens
class Vocab:
    """Word-level vocabulary with <pad>/<unk>/<sos>/<eos> special tokens.

    Unknown words map to index 1 (<unk>) via the defaultdict factory. Note
    that merely looking up a missing word in ``vocab`` inserts it with
    value 1 (defaultdict semantics), which to_sequence relies on.
    """

    def __init__(self, max_vocab_size=10000):
        specials = {'<pad>': 0, '<unk>': 1, '<sos>': 2, '<eos>': 3}
        self.vocab = defaultdict(lambda: 1, specials)
        self.index_word = defaultdict(lambda: '<unk>',
                                      {i: w for w, i in specials.items()})
        self.PAD_token = 0
        self.UNK_token = 1
        self.SOS_token = 2
        self.EOS_token = 3
        self.num_tokens = 4
        self.max_vocab_size = max_vocab_size

    def build_vocab(self, data):
        """Assign indices to unseen words until max_vocab_size is reached."""
        if self.num_tokens == self.max_vocab_size:
            print('max token length acheived')
            return
        for sentence in data:
            for word in sentence.split():
                if word in self.vocab:
                    continue
                self.vocab[word] = self.num_tokens
                self.index_word[self.num_tokens] = word
                self.num_tokens += 1
                if self.num_tokens == self.max_vocab_size:
                    print('limit reached')
                    return

    def to_sequence(self, data, pad=True):
        """Encode sentences to LongTensors; optionally pad to a rectangle."""
        encoded = [
            torch.tensor([self.vocab[word] for word in sentence.split()]).long()
            for sentence in data
        ]
        if not pad:
            return encoded
        return torch.nn.utils.rnn.pad_sequence(
            encoded, batch_first=True, padding_value=self.PAD_token)

    def is_special(self, tok, ignore=False):
        """True when *ignore* is set and *tok* is <sos>, <eos>, or <pad>."""
        return ignore and tok in (self.SOS_token, self.EOS_token, self.PAD_token)

    def to_string(self, tensor, remove_special=False):
        """Decode index tensors back to space-joined strings."""
        sentences = []
        for row in tensor:
            words = [self.index_word[idx.item()] for idx in row
                     if not self.is_special(idx.item(), remove_special)]
            sentences.append(" ".join(words))
        return sentences

    def __len__(self):
        return self.num_tokens
def unicode_to_ascii(s):
    """Strip combining marks after NFD normalization (e.g. 'café' -> 'cafe')."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed
                   if unicodedata.category(ch) != 'Mn')
# change to spacy?
def preprocess_sentence(w):
    """Lowercase, ASCII-fold, pad punctuation with spaces, wrap in <sos>/<eos>."""
    w = unicode_to_ascii(w.lower().strip())
    # Separate punctuation from words: "he is a boy." -> "he is a boy ."
    # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
    w = re.sub(r"([?.!,¿])", r" \1 ", w)
    # Collapse the runs of spaces introduced above.
    w = re.sub(r'[" "]+', " ", w)
    # Replace everything with a space except a-z, A-Z, 0-9, $, -, ".", "?", "!", ","
    w = re.sub(r"[^a-zA-Z0-9$\-?.!,¿]+", " ", w)
    w = w.rstrip().strip()
    # Start/end tokens tell the model where to begin and stop predicting.
    return '<sos> ' + w + ' <eos>'
def make_minibatch(src, tgt, max_size=32):
    """Group (src, tgt) tensor pairs of identical lengths into batches.

    Pairs are sorted by (len(src), len(tgt)); consecutive pairs with the same
    shape are stacked along a new leading batch dimension, up to *max_size*
    rows per batch. Returns a list of (src_batch, tgt_batch) tuples.
    """
    pairs = sorted(zip(src, tgt), key=lambda p: (len(p[0]), len(p[1])))
    batches = []
    first_s, first_t = pairs[0]
    batch = (first_s.unsqueeze(0), first_t.unsqueeze(0))
    batch_shape = (first_s.size(0), first_t.size(0))
    for s, t in pairs[1:]:
        shape = (s.size(0), t.size(0))
        if shape == batch_shape and batch[0].size(0) < max_size:
            # Same lengths and room left: stack onto the current batch.
            batch = (torch.cat((batch[0], s.unsqueeze(0)), dim=0),
                     torch.cat((batch[1], t.unsqueeze(0)), dim=0))
        else:
            batches.append(batch)
            batch_shape = shape
            batch = (s.unsqueeze(0), t.unsqueeze(0))
    batches.append(batch)
    return batches
def make_table_set(train_text, test_text, max_vocab_size=10000, pad=False):
    """Preprocess table-structured text and fit source/target vocabularies.

    NOTE(review): this function looks unfinished -- it builds the vocabs but
    never encodes or returns anything; interface kept (returns None).

    BUG FIX: the vocab-building comprehensions were written as
    ``[sent for sent in table for table in src_train]``, which raises
    NameError because ``table`` is evaluated before it is bound; the loop
    order is corrected below.
    """
    src_train = [[preprocess_sentence(row) for row in table] for table in train_text[0]]
    src_test = [[preprocess_sentence(row) for row in table] for table in test_text[0]]
    tgt_train = [[preprocess_sentence(row) for row in table] for table in train_text[1]]
    tgt_test = [[preprocess_sentence(row) for row in table] for table in test_text[1]]
    src_vocab, tgt_vocab = Vocab(max_vocab_size), Vocab(max_vocab_size)
    src_vocab.build_vocab([sent for table in src_train for sent in table])
    tgt_vocab.build_vocab([sent for table in tgt_train for sent in table])
def make_dataset(train_text, test_text, train_batch_size=32, test_batch_size=64, max_vocab_size=10000, pad=False):
    """Tokenize parallel corpora and wrap them in loaders.

    *train_text*/*test_text* are (source_sentences, target_sentences) pairs.
    Returns (src_vocab, tgt_vocab, train_loader, test_loader); the loaders
    are DataLoaders of rectangular padded tensors when *pad* is true,
    otherwise lists of same-shape minibatches from make_minibatch().
    """
    src_train = [preprocess_sentence(t) for t in train_text[0]]
    src_test = [preprocess_sentence(t) for t in test_text[0]]
    tgt_train = [preprocess_sentence(t) for t in train_text[1]]
    tgt_test = [preprocess_sentence(t) for t in test_text[1]]
    # src_train, src_test, tgt_train, tgt_test = train_test_split(source_text, target_text, test_size=test_size)
    src_vocab, tgt_vocab = Vocab(max_vocab_size), Vocab(max_vocab_size)
    # Vocabularies are fitted on the training split only.
    src_vocab.build_vocab(src_train); tgt_vocab.build_vocab(tgt_train)
    src_train, src_test = src_vocab.to_sequence(src_train, pad), src_vocab.to_sequence(src_test, pad)
    tgt_train, tgt_test = tgt_vocab.to_sequence(tgt_train, pad), tgt_vocab.to_sequence(tgt_test, pad)
    if pad:
        train_loader = DataLoader(TensorDataset(src_train, tgt_train), batch_size=train_batch_size, shuffle=True)
        test_loader = DataLoader(TensorDataset(src_test, tgt_test), batch_size=test_batch_size, shuffle=True)
    else:
        train_loader = make_minibatch(src_train, tgt_train, train_batch_size)
        test_loader = make_minibatch(src_test, tgt_test, test_batch_size)
    return src_vocab, tgt_vocab, train_loader, test_loader
##########################
def train_binary(model, iterator, labels, optimizer, criterion, clip=1, pad_tok=0):
    """One training epoch for a binary classifier over (src, tgt) batches.

    *labels* must iterate in lockstep with *iterator*. Returns the mean loss
    per batch.
    """
    model.train()
    epoch_loss = 0
    for i, ((src, tgt), label) in enumerate(tqdm(zip(iterator, labels), file=sys.stdout)):
        optimizer.zero_grad()
        # src.shape = (batch_size, src_seq_len)
        # tgt.shape = (batch_size, tgt_seq_len)
        # NOTE(review): the masks below are computed but never passed to the
        # model -- confirm whether model(src) should receive them.
        src_mask = create_padding_mask(src, pad_tok)
        src_mask, look_ahead_mask, dec_padding_mask = create_masks(src, tgt, pad_tok)
        output = model(src)
        loss = criterion(output, label.unsqueeze(-1))
        loss.backward()
        # Gradient clipping guards against exploding gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
def train(model, iterator, optimizer, criterion, clip=1, pad_tok=0):
    """One seq2seq training epoch with teacher forcing.

    Dispatches on ``model.type`` ('rnn' | 'conv' | 'transformer') because the
    three architectures take different mask arguments. In every branch the
    leading <sos> column is dropped from the target before the loss.
    Returns the mean loss per batch.
    """
    model.train()
    epoch_loss = 0
    for i, (src, tgt) in enumerate(tqdm(iterator, file=sys.stdout)):
        optimizer.zero_grad()
        # src.shape = (batch_size, src_seq_len)
        # tgt.shape = (batch_size, tgt_seq_len)
        src_mask = create_padding_mask(src, pad_tok)
        src_mask, look_ahead_mask, dec_padding_mask = create_masks(src, tgt, pad_tok)
        if model.type == 'rnn':
            output, _ = model(src, tgt, src_mask=src_mask)
            # output.shape == (batch_size, tgt_seq_len, tgt_vocab_size)
            # output = output[:, 1:, :]
            tgt = tgt[:, 1:]
            # loss = criterion(output, tgt)
        elif model.type == 'conv':
            output, _ = model(src, tgt)
            # print(output.size())
            # print(tgt.size(), tgt[:,1:].size())
            tgt = tgt[:, 1:]
        elif model.type == 'transformer':
            output, _ = model(src, tgt, src_mask, look_ahead_mask, dec_padding_mask)
            tgt = tgt[:, 1:]
        loss = criterion(output, tgt)
        loss.backward()
        # Gradient clipping guards against exploding gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
def evaluate(model, iterator, criterion, pad_tok=0):
    """One evaluation pass without gradients.

    rnn/conv models decode freely (target=None, i.e. teacher forcing off);
    the transformer still receives the target sequence. Returns the mean
    loss per batch.
    """
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, (src, tgt) in enumerate(tqdm(iterator, file=sys.stdout)):
            # src.shape = (batch_size, src_seq_len)
            # tgt.shape = (batch_size, tgt_seq_len)
            src_mask = create_padding_mask(src, pad_tok)
            src_mask, look_ahead_mask, dec_padding_mask = create_masks(src, tgt, pad_tok)
            if model.type == 'rnn':
                output, attention = model(src, None, src_mask)  # turn off teacher forcing
                # output.shape == (batch_size, max_length, tgt_vocab_size)
                # print(output)
                # output = output[:, 1:, :]
                tgt = tgt[:, 1:]
            elif model.type == 'conv':
                output, attention = model(src, None)  # turn off teacher forcing
                tgt = tgt[:, 1:]
            elif model.type == 'transformer':
                output, _ = model(src, tgt, src_mask, look_ahead_mask, dec_padding_mask)
                tgt = tgt[:, 1:]
            loss = criterion(output, tgt)  # masked loss automatically slices for you
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
def translate(sentence, model, src_vocab, tgt_vocab, pad_tok=0):
    """Greedy-decode *sentence* with *model*; return (translation, attention).

    Accepts a single string or a list of strings (only the first result is
    returned). When the model yields a tensor attention map, it is cropped to
    the generated-token and source-token counts.
    """
    with torch.no_grad():
        model.eval()
        if type(sentence) == str:
            sentence = [sentence]
        tokenized_sentence = [preprocess_sentence(sent) for sent in sentence]
        tensor = src_vocab.to_sequence(tokenized_sentence)
        tokenized_sent = src_vocab.to_string(tensor, remove_special=True)[0]
        mask = create_padding_mask(tensor, pad_tok)
        print(tensor)
        translation_tensor_logits, attention = model(tensor, None, mask)
        # argmax over the vocabulary dimension == greedy decoding.
        translation_tensor = torch.argmax(translation_tensor_logits, dim=-1)
        print(translation_tensor)
        translation = tgt_vocab.to_string(translation_tensor, remove_special=True)[0]
        if attention is not None and not isinstance(attention, list):
            attention = attention.detach().squeeze(0)[:len(translation.split()), :len(tokenized_sent.split())]
        return translation, attention
def plot_attention(attention, sentence, predicted_sentence):
    """Render an attention matrix: source words on x, predicted words on y."""
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    ax.matshow(attention.detach().squeeze(0), cmap='viridis')
    fontdict = {'fontsize': 14}
    # A leading '' aligns labels with matshow's tick offsets.
    ax.set_xticklabels([''] + sentence.split(), fontdict=fontdict, rotation=90)
    ax.set_yticklabels([''] + predicted_sentence.split(), fontdict=fontdict)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
def load_data(lang1='en', lang2='de'):
    """Read parallel train/val corpora from ./data.

    Returns ((src_train, tgt_train), (src_test, tgt_test)), one stripped
    line per list entry.

    BUG FIX: the four files were opened without ever being closed; a context
    manager now guarantees the handles are released.
    """
    def _read_lines(path):
        # One stripped line per entry; file closed deterministically.
        with open(path) as fh:
            return [line.rstrip('\n') for line in fh]

    train = './data/train.'
    val = './data/val.'
    src_train = _read_lines(f"{train}{lang1}")
    tgt_train = _read_lines(f"{train}{lang2}")
    src_test = _read_lines(f"{val}{lang1}")
    tgt_test = _read_lines(f"{val}{lang2}")
    return (src_train, tgt_train), (src_test, tgt_test)
def load_summary(N=2000):
    """Read the first *N* (article, title) pairs from the summarization dump."""
    article_path = './data/sumdata/train/train.article.txt'
    title_path = './data/sumdata/train/train.title.txt'
    with open(article_path) as fh:
        articles = [next(fh).rstrip('\n') for _ in range(N)]
    with open(title_path) as fh:
        titles = [next(fh).rstrip('\n') for _ in range(N)]
    return articles, titles
|
997,248 | 09f0d8a939e3bbf53e0bb908079764e6595417da | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 27 10:20:51 2014
@author: Maria
"""
from astropy.io import fits
import os
####Take out hard coding of key words and make them arguments#######
##########
# DESCRIPTION
# Starts at base_path and it will call functions that create a text catalog.
# PARAMETERS
# base_path - where you want the program to start running
# images - the fits files in the current path
# list - calls the function that will write the text file
# RETURNS
# nothing
##########
def main():
    """Walk the image directory, select FITS files, and write a header catalog.

    FIXES: raw string prevents backslash-escape problems in the Windows path
    on Python 3; print is called as a function (valid on Python 2 and 3).
    The path is still hard-coded -- consider making it an argument.
    """
    base_path = r'C:\Users\Maria\Physics_Research\mar28_14\Renamed'
    os.chdir(base_path)
    print(os.getcwd())
    files = os.listdir(base_path)
    images = sort(files)
    file(images, "Check_header.txt")
##########
# DESCRIPTION
# Sorts the images in the current folder so that you're left with fits files only
# PARAMETERS
# images - list of fits files in the current working directory
# RETURNS
# images
##########
def sort(files):
    """Return only the FITS images (.fits/.fit/.fts) from *files*.

    Note: keeps its original (builtin-shadowing) name so callers still work.
    """
    fits_extensions = ('.fits', '.fit', '.fts')
    images = []
    for name in files:
        ext = os.path.splitext(name)[1]
        if ext in fits_extensions:
            images.append(name)
    return images
##########
# DESCRIPTION
# Creates and writes into a file called catalog. It puts the iformation into
# the text file.
# PARAMETERS
# images - list of fits image names
# filter - list of objects observed
# exp - list of exposure times
# time - list of times when the images were taken
# air - list of air mass
# foc - list of focuses
# filenum- list of filenumbers
# RETURNS
# nothing
##########
def file(images, filename):
    """Write a comma-separated catalog of header fields for *images*.

    Columns: file number, OBJECT, FILTER, EXPTIME, DATE-OBS. The name shadows
    the builtin ``file``; kept unchanged for caller compatibility (``filter``
    and ``time`` locals likewise shadow builtins).

    FIX: output is written through a context manager so the handle is closed
    even if a write raises.
    """
    filenum = filenum_build(images)
    filter = color_build(images)
    exp = exp_List_build(images)
    foc = Focus_build(images)
    time = date_build(images)
    # Recreate the catalog from scratch on every run.
    if os.path.exists(filename):
        os.remove(filename)
    with open(filename, "w") as info:
        info.write("%10s,%25s,%10s,%15s,%30s\n" % ("Filename", "Object", "Filter", "Exposure Time", "Time Taken"))
        # writes information from lists into the file, one row per image
        for i in range(len(images)):
            info.write('%10s,' % filenum[i])
            info.write('%25s,' % filter[i])
            info.write('%10s,' % foc[i])
            info.write('%15s,' % exp[i])
            info.write('%30s,' % time[i])
            info.write('\n')
##########
# DESCRIPTION
# builds a list called filnum that contains filenumbers
# PARAMETERS
# filnum - list of filenumbers from filename()
# RETURNS
# filnum - retruns list to file()
##########
def filenum_build(images):
    """Collect the 4-character file number of every image name."""
    return [filename(image) for image in images]
##########
# DESCRIPTION
# loops through the filename and picks out the 4 character filenumber
# PARAMETERS
# f - constructs the filenumber
# RETURNS
# f - returns the filenumber back to filenum_build
##########
def filename(x):
    """Return characters 6-9 of *x* (the embedded 4-digit file number).

    BUG FIX: the original wrapped the scan in a redundant outer loop over the
    characters of *x* and returned during its first iteration; the net effect
    was exactly ``x[6:10]``, which is used directly here. (For an empty
    string this now returns '' instead of None.)
    """
    return x[6:10]
"""def extract(x):
f =""
for i in range(len(x)):
if i > 5 and i <10:
f = f + x[i]
return f"""
##########
# DESCRIPTION
# Takes each image and adds exp_Time's return value to the list exp.
# PARAMETERS
# images - list of fits files
# exp - a list of exposure times
# RETURNS
# list of exposure times
##########
def exp_List_build(images):
    """Collect the EXPTIME header value of every image."""
    return [exp_Time(image) for image in images]
##########
# DESCRIPTION
# Opens a fits header, takes the exposure time and returns it.
# PARAMETERS
# x - input file
# hdulist - image information
# head - the header
# t - the exposure time
# RETURN
# exposure time
##########
def exp_Time(x):
    """Return the EXPTIME header value of FITS file *x*, or "None" if absent.

    FIXES: the handle is closed via try/finally (it previously leaked if
    header access raised), and membership uses ``in head`` instead of
    materializing ``head.keys()``.
    """
    hdulist = fits.open(x)
    try:
        head = hdulist[0].header
        if 'EXPTIME' in head:
            return head['EXPTIME']
        return "None"
    finally:
        hdulist.close()
##########
# DESCRIPTION
# Takes each image and adds date_Time's return value to the list time.
# PARAMETERS
# images - list of fits files
# time - a list of times when the images were taken
# RETURNS
# list of times/dates
##########
def date_build(images):
    """Collect the DATE-OBS header value of every image."""
    return [date_Time(image) for image in images]
##########
# DESCRIPTION
# Opens a fits header, takes the time the image was taken and returns it.
# PARAMETERS
# x - input file
# hdulist - opens the image information
# head - the header
# d - the time when the image was taken
# RETURN
# time an image was taken
##########
def date_Time(x):
    """Return the DATE-OBS header value of FITS file *x*, or "None" if absent.

    FIXES: handle closed via try/finally; membership via ``in head``.
    """
    hdulist = fits.open(x)
    try:
        head = hdulist[0].header
        if 'DATE-OBS' in head:
            return head['DATE-OBS']
        return "None"
    finally:
        hdulist.close()
##########
# DESCRIPTION
# Takes each image and adds color's return value to the list filter.
# PARAMETERS
# images - list of fits files
# filter - a list of objects observed
# RETURNS
# filter
##########
def color_build(images):
    """Collect the OBJECT header value of every image (shadows builtin 'filter' at call sites)."""
    return [color(image) for image in images]
##########
# DESCRIPTION
# Opens a fits header, takes the object and returns it.
# PARAMETERS
# x - input file
# hdulist - image information
# head - the header
# f - the object
# RETURN
# object observed and through what filter if there was one
##########
def color(x):
    """Return the OBJECT header value of FITS file *x*, or "None" if absent.

    FIXES: handle closed via try/finally; membership via ``in head``.
    """
    hdulist = fits.open(x)
    try:
        head = hdulist[0].header
        if 'OBJECT' in head:
            return head['OBJECT']
        return "None"
    finally:
        hdulist.close()
##########
# DESCRIPTION
# builds a list called foc that contains the focus string from the header
# PARAMETERS
# foc - list of focus strings
# RETURNS
# foc - retruns list to file()
##########
def Focus_build(images):
    """Collect the FILTER header value of every image."""
    return [focus(image) for image in images]
##########
# DESCRIPTION
# Opens a fits header, takes the focus and returns it.
# PARAMETERS
# x - input file
# hdulist - image information
# head - the header
# fo - the focus
# RETURN
# fo - the focus returns to focus_build()
##########
def focus(x):
    """Return the FILTER header value of FITS file *x*, or "None" if absent.

    FIXES: handle closed via try/finally; membership via ``in head``.
    """
    hdulist = fits.open(x)
    try:
        head = hdulist[0].header
        if 'FILTER' in head:
            return head['FILTER']
        return "None"
    finally:
        hdulist.close()
if __name__=='__main__': main()
|
997,249 | b7a060df2a15e7603634b08af88ae2388204a09e | #!/usr/bin/env python
"""
Usage:
cloudmesh-indycar-deploy.py --info
cloudmesh-indycar-deploy.py --run [WORKFLOW] [--dashboard] [--stormui] [--ui] [--keep_history]
cloudmesh-indycar-deploy.py --step [--dashboard] [--stormui] [--keep_history]
cloudmesh-indycar-deploy.py --dashboard [--keep_history]
cloudmesh-indycar-deploy.py --stormui [--keep_history]
cloudmesh-indycar-deploy.py --kill [--keep_history]
cloudmesh-indycar-deploy.py --menu [--keep_history]
cloudmesh-indycar-deploy.py --token [--keep_history]
cloudmesh-indycar-deploy.py --mqtt [--keep_history]
cloudmesh-indycar-deploy.py --about
Deploys the indycar runtime environment on an ubuntu 20.04 system with the
help of cloudmesh-kubeman
Arguments:
FILE optional input file
CORRECTION correction angle, needs FILE, --left or --right to be present
Options:
-h --help
--info info command
--run run the default deploy workflow (till the bug)
--step run the default deploy workflow step by step
Description:
cloudmesh-indycar-deploy.py --info
gets information about the running services
cloudmesh-indycar-deploy.py --kill
kills all services
cloudmesh-indycar-deploy.py --run [--dashboard] [--stormui]
runs the workflow without interruption till the error occurs
If --dashboard and --storm are not specified neither GUI is started.
This helps on systems with commandline options only.
cloudmesh-indycar-deploy.py --step [--dashboard] [--stormui]
        runs the workflow while asking in each major step if one wants to continue.
This helps to check for log files at a particular place in the workflow.
If the workflow is not continued it is interrupted.
cloudmesh-indycar-deploy.py --dashboard
starts the kubernetes dashboard. Minikube must have been setup before
cloudmesh-indycar-deploy.py --stormui
starts the storm gui. All of storm must be set up before.
Examples:
cloudmesh-indycar-deploy.py --run --dashboard --stormui
runs the workflow without interruptions including the k8 and storm dashboards
cloudmesh-indycar-deploy.py --step --dashboard --stormui
runs the workflow with continuation questions including the k8 and storm dashboards
cloudmesh-indycar-deploy.py --menu
        allows the selection of a particular step in the workflow
less $INDYCAR/history.txt
Benchmark:
AMD5950
+----------------------+----------+---------+
| Name | Status | Time |
|----------------------+----------+---------|
| kill | ok | 17.134 |
| download_data | ok | 0 |
| setup_minikube | ok | 20.844 |
| setup_k8 | ok | 12.507 |
| setup_zookeeper | ok | 7.405 |
| setup_nimbus | ok | 8.462 |
| setup_storm_ui | ok | 4.312 |
| open_stopm_ui | ok | 173.242 |
| start_storm_workers | ok | 3.213 |
| install_htm_java | ok | 52.482 |
| setup_mqtt | ok | 11.591 |
| start_storm_topology | ok | 29.605 |
+----------------------+----------+---------+
EPY via vnc
+----------------------+----------+---------+
| Name | Status | Time |
|----------------------+----------+---------|
| kill | ok | 19.352 |
| download_data | ok | 0 |
| setup_minikube | ok | 31.828 |
| setup_k8 | ok | 12.775 |
| setup_zookeeper | ok | 60.753 |
| setup_nimbus | ok | 93.771 |
| setup_storm_ui | ok | 4.366 |
| open_stopm_ui | ok | 270.364 |
| start_storm_workers | ok | 3.213 |
| install_htm_java | ok | 183.767 |
| setup_mqtt | ok | 122.997 |
| start_storm_topology | ok | 52.876 |
| minikube_setup_sh | ok | 37.129 |
| start_socket_server | ok | 113.281 |
+----------------------+----------+---------+
Credits:
This script is authored by Gregor von Laszewski, any work conducted with it must cite the following:
This work is using cloudmesh/kubemanager developed by Gregor von Laszewski. Cube manager is available on GitHub at
\cite{github-las-kubemanager}.
@misc{github-las-cubemanager,
author={Gregor von Laszewski},
title={Cloudmesh Kubemanager},
url={TBD},
howpublished={GitHub, PyPi},
year=2022,
month=feb
}
Text entry for citation in other then LaTeX documents:
Gregor von Laszewski, Cloudmesh Kubemanager, published on GitHub, URL:TBD, Feb. 2022.
"""
import os
import time
from signal import signal, SIGINT
from docopt import docopt
from cloudmesh.common.Shell import Shell
from cloudmesh.common.StopWatch import StopWatch
from cloudmesh.common.console import Console
from cloudmesh.common.sudo import Sudo
from cloudmesh.common.util import readfile
from cloudmesh.common.util import writefile
from cloudmesh.common.util import yn_choice
from cloudmesh.kubeman.kubeman import Kubeman
LICENSE = \
"""
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright 2022 Gregor von Laszewski, University of Virginia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Credits:
This script is authored by Gregor von Laszewski, any work
conducted with it must cite the following:
This work is using cloudmesh/kubemanager developed by
Gregor von Laszewski. Cube manager is available on GitHub
\cite{github-las-kubemanager}.
@misc{github-las-cubemanager,
author={Gregor von Laszewski},
title={Cloudmesh Kubemanager},
url={TBD},
howpublished={GitHub, PyPi},
year=2022,
month=feb
}
Text entry for citation in other then LaTeX documents:
This work is using cloudmesh/kubemanager developed
by Gregor von Laszewski. Cube manager is available
on GitHub [1].
[1] Gregor von Laszewski, Cloudmesh Kubemanager,
published on GitHub, URL:TBD, Feb. 2022.
"""
commands = {}
kubeman = Kubeman()
# cloudmesh/kubemanager
screen = os.get_terminal_size()
# cloudmesh/kubemanager
def exit_handler(signal_received, frame):
    """SIGINT handler: record the interrupt in StopWatch and exit cleanly."""
    # Handle any cleanup here
    StopWatch.start("exit")
    print('SIGINT or CTRL-C detected. Exiting gracefully')
    StopWatch.stop("exit")
    exit(0)
# this is anow in cloudmesh common Shell
def rename(newname):
    """Decorator factory: set the decorated function's __name__ to *newname*."""
    def decorator(func):
        func.__name__ = newname
        return func
    return decorator
# cloudmesh/kubemanager
def benchmark(func):
    """Decorator: time *func* with StopWatch under the function's own name.

    BUG FIX: the wrapper now propagates the wrapped function's return value;
    it previously always returned None.
    """
    @rename(func.__name__)
    def wrapper(*args, **kwargs):
        StopWatch.start(func.__name__)
        result = func(*args, **kwargs)
        StopWatch.stop(func.__name__)
        return result
    return wrapper
@benchmark
def kill_indy_services():
    """Find the process bound to port 8001 and kill the indycar services."""
    pid = kubeman.find_pid("8001")
    kubeman.kill_services(pid=pid)
HOME = os.environ["INDYCAR"] = os.getcwd()
CONTAINERIZE = f"{HOME}/containerize"
STORM = f"{HOME}/containerize/storm"
STREAMING = f"{HOME}/streaming"
DATA = f"{HOME}/data"
DASHBOARD = f"{HOME}/dashboard"
# def execute(commands, sleep_time=1, driver=Shell.run):
@benchmark
def get_code(home="/tmp"):
    """Clone the IndyCar repository into <home>/indycar."""
    kubeman.banner("get_code")
    script = kubeman.clean_script(f"""
    mkdir -p {home}/indycar
    cd {home}/indycar; git clone https://github.com/DSC-SPIDAL/IndyCar.git
    """)
    kubeman.execute(script)
@benchmark
def install_htm_java():
    """Install maven if needed, then build htm.java support into ~/.m2.

    The first ``mvn install`` is expected to fail before the htm.java
    dependencies are copied in; its error is deliberately ignored.

    BUG FIX: the bare ``except:`` (which also swallows KeyboardInterrupt and
    SystemExit) is narrowed to ``except Exception``.
    """
    kubeman.banner("install_htm_java")
    if Shell.which("mvn") == "":
        kubeman.execute("sudo apt install -y maven", driver=os.system)
    script = \
        f"""
        rm -rf ~/.m2
        cd {STREAMING}; mvn install
        """
    print(script)
    try:
        kubeman.execute(script, driver=os.system)
    except Exception:
        pass  # ignore error: first build fails until the libs are staged
    script = \
        f"""
        rm -rf {STREAMING}/htm.java-examples
        cd {STREAMING}; git clone https://github.com/numenta/htm.java-examples.git
        cp -r {STREAMING}/htm.java-examples/libs/algorithmfoundry ~/.m2/repository
        cd {STREAMING}; mvn clean install
        """
    print(script)
    kubeman.execute(script, driver=os.system)
@benchmark
def install_streaming(directory="/tmp"):
    """Build the streaming module with maven.

    NOTE(review): the *directory* parameter is unused -- the build always
    runs in {HOME}/streaming.
    """
    kubeman.banner("install_streaming")
    script = kubeman.clean_script(f"""
    cd {HOME}/streaming; mvn clean install
    """
    )
    print(script)
    kubeman.execute(script, driver=os.system)
@benchmark
def download_data(id="1GMOyNnIOnq-P_TAR7iKtR7l-FraY8B76",
                  filename="./data/eRPGenerator_TGMLP_20170528_Indianapolis500_Race.log"):
    """Ensure the race telemetry log's directory exists.

    The actual Google-Drive download (wget) is commented out upstream, so the
    data must currently be provided out of band; only the directory is
    created when the file is missing.

    FIX: removed the unused FILEID/FILENAME locals and the stray trailing
    backslash that glued the FILENAME assignment to the commented-out wget
    command, plus the dead commented code itself.
    """
    if not os.path.exists(filename):
        directory = os.path.dirname(filename)
        kubeman.execute(f"mkdir -p {directory}", driver=os.system)
        # NOTE(review): nothing is downloaded here -- confirm the data is
        # staged manually, or restore the Google Drive fetch.
    else:
        print("data already downloaded")
# cloudmesh/kubemanager
@benchmark
def setup_minikube(memory=10000, cpus=8, sleep_time=0):
    """Recreate the minikube cluster with the given memory (MB) and cpu count.

    BUG FIX: ``minikube start driver=docker`` passed an invalid positional
    argument; the flag form is ``--driver=docker``.
    """
    kubeman.banner("setup_minikube")
    # NOTE(review): the x8 multiplier turns the 10000 default into 80000 MB --
    # confirm this is intentional.
    memory = memory * 8
    script = f"""
    minikube delete
    minikube config set memory {memory}
    minikube config set cpus {cpus}
    minikube start --driver=docker
    """
    kubeman.execute(script, driver=os.system)
    time.sleep(sleep_time)
@benchmark
def setup_zookeeper():
    """Deploy zookeeper and its service, then wait for the pod."""
    kubeman.banner("setup_zookeeper")
    script = \
        f"""
        kubectl create -f {STORM}/zookeeper.json
        kubectl create -f {STORM}/zookeeper-service.json
        """
    kubeman.execute(script, driver=os.system)
    # time.sleep(30)
    kubeman.wait_for_pod("zookeeper")
@benchmark
def setup_nimbus():
    """Deploy the Storm nimbus master and its service, then wait for the pod."""
    kubeman.banner("setup_nimbus")
    script = \
        f"""
        kubectl create -f {STORM}/storm-nimbus.json
        kubectl create -f {STORM}/storm-nimbus-service.json
        """
    kubeman.execute(script, driver=os.system)
    kubeman.wait_for_pod("nimbus")
@benchmark
def setup_storm_ui():
    """Deploy the Storm UI and its service, then wait for the pod."""
    kubeman.banner("setup_storm_ui")
    script = \
        f"""
        kubectl create -f {STORM}/storm-ui.json
        kubectl create -f {STORM}/storm-ui-service.json
        """
    kubeman.execute(script, driver=os.system)
    kubeman.wait_for_pod("storm-ui")
def storm_port():
    """Return the storm-ui NodePort parsed from ``kubectl get services``."""
    r = kubeman.Shell_run("kubectl get services").splitlines()
    # PORT(S) column looks like '8080:31234/TCP'; take the NodePort part.
    r = Shell.find_lines_with(r, "storm-ui")[0].split()[4].split(":")[1].replace("/TCP", "")
    return r
@benchmark
def open_storm_ui():
    """Block until the Storm UI answers over HTTP (and open it if requested).

    FIX: removed the unused ``port``/``ip`` locals -- wait_for_storm_ui()
    computes its own.
    """
    kubeman.banner("open_storm_ui")
    wait_for_storm_ui()
def wait_for_storm_ui():
    """Poll the Storm UI over HTTP until it serves its index page.

    When the module-level ``stormui`` flag is set, the page is then opened in
    a browser. NOTE(review): ``stormui`` is not defined in this chunk --
    presumably set by the CLI entry point; verify.
    """
    print("Probe storm-ui: ")
    found = False
    port = storm_port()
    ip = kubeman.get_minikube_ip()
    while not found:
        try:
            r = Shell.run(f"curl http://{ip}:{port}/index.html")
            found = "Storm Flux YAML Viewer" in r
        except:
            pass
        time.sleep(1)
        print(".", end="", flush=True)
    print(" ok")
    if stormui:
        kubeman.execute(f"gopen http://{ip}:{port}/index.html", driver=os.system)
@benchmark
def start_storm_workers():
    """Deploy the Storm worker replication controller, then wait for the pod."""
    kubeman.banner("setup_storm_workers")
    script = \
        f"""
        kubectl create -f {STORM}/storm-worker-controller.json
        """
    kubeman.execute(script, driver=os.system)
    kubeman.wait_for_pod("storm-worker-controller")
@benchmark
def start_storm_service():
    """Create the Storm worker service (no pod wait available; brief sleep)."""
    kubeman.banner("start_storm_service")
    script = \
        f"""
        kubectl create -f {STORM}/storm-worker-service.json
        """
    kubeman.execute(script, driver=os.system)
    # wait_for("storm-worker-service")
    time.sleep(2)
@benchmark
def setup_mqtt():
    """Deploy the ActiveMQ Apollo broker and wait until it is running."""
    kubeman.banner("setup_mqtt")
    script = \
        f"""
        kubectl create -f {CONTAINERIZE}/activemq-apollo.json
        kubectl create -f {CONTAINERIZE}/activemq-apollo-service.json
        """
    kubeman.execute(script, driver=os.system)
    kubeman.wait_for_pod("activemq-apollo")
    # NOTE(review): mqtt_running() is not defined anywhere in this file --
    # confirm it exists elsewhere, otherwise this loop raises NameError.
    while not mqtt_running():
        time.sleep(1)
    # BUG add another wait till mqtt is running
def mqtt_port():
    """Return the activemq-apollo service port from ``kubectl get services``."""
    r = kubeman.Shell_run("kubectl get services").splitlines()
    # PORT(S) column looks like '61680:31234/TCP'; take the cluster port part.
    r = Shell.find_lines_with(r, "activemq-apollo")[0].split()[4].split(":")[0]
    return r
@benchmark
def open_mqtt():
    """Open the MQTT admin page and forward the broker port to localhost."""
    kubeman.banner("open_mqtt")
    port = mqtt_port()
    kubeman.execute(f"gopen http://localhost:{port}", driver=os.system)
    # Forward the broker's MQTT port so local clients can connect.
    kubeman.execute("kubectl port-forward activemq-apollo 61680:61680", os.system)
@benchmark
def start_storm_topology():
    """Build the topology jar and copy it into the minikube NFS share."""
    kubeman.banner("start_storm_topology")
    ip = kubeman.get_minikube_ip()
    key = Shell.run("minikube ssh-key").strip()
    jar = "target/Indycar500-33-HTMBaseline-1.0-SNAPSHOT.jar"
    script = \
        f"""
        cd {STREAMING}; mvn clean install
        #cd {STREAMING}; scp -i {key} {jar} docker@$(minikube ip):/nfs/indycar/data/
        cd {STREAMING}; scp -i {key} {jar} docker@{ip}:/nfs/indycar/data/
        """
    print(script)
    kubeman.execute(script, driver=os.system)
@benchmark
def minikube_setup_sh():
    """Stage the race log, LSTM models, and tensorflow JNI libs in minikube."""
    kubeman.banner("minikube_setup_sh")
    LOGFILE = f"{DATA}/eRPGenerator_TGMLP_20170528_Indianapolis500_Race.log"
    ip = kubeman.get_minikube_ip()
    key = Shell.run("minikube ssh-key").strip()
    libtensorflow = "libtensorflow_jni-cpu-linux-x86_64-1.14.0.tar.gz"
    # Download the tensorflow JNI bundle only once.
    if not os.path.exists(libtensorflow):
        kubeman.execute(f"wget https://storage.googleapis.com/tensorflow/libtensorflow/{libtensorflow}")
    script = f"""
    minikube ssh "sudo chmod -R 777 /nfs/indycar"
    minikube ssh "mkdir /nfs/indycar/datalogs"
    minikube ssh "mkdir /nfs/indycar/config/lib/"
    # copy log file into minikube
    # change the path of the log file accordingly.
    scp -i {key} {LOGFILE} docker@{ip}:/nfs/indycar/datalogs/
    # copy LSTM model files into minikube
    scp -i {key} -r models docker@{ip}:/nfs/indycar/config/
    # Following link is for Linux CPU only. For other platforms, check https://www.tensorflow.org/install/lang_java
    mkdir -p tf-lib
    tar -xzvf libtensorflow_jni-cpu-linux-x86_64-1.14.0.tar.gz -C tf-lib
    scp -i {key} tf-lib/* docker@{ip}:/nfs/indycar/config/lib/
    """
    kubeman.execute(script, driver=os.system)
    # wait for something?
@benchmark
def start_socket_server():
    """Deploy the indycar socket server and wait for its pod."""
    kubeman.banner("start_socket_server")
    script = \
        f"""
        cd {CONTAINERIZE}; kubectl create -f socket-server.yaml
        """
    kubeman.execute(script, driver=os.system)
    kubeman.wait_for_pod("indycar-socketserver")
def setup_jupyter_service():
    """Deploy the jupyter notebook pod, repeatedly fixing NFS permissions.

    The pod initially enters CrashLoopBackOff until /nfs/indycar is writable,
    hence the interleaved chmod calls and the two-stage wait.
    """
    kubeman.banner("setup_jupyter_service")
    permission_script = \
        f'minikube ssh "sudo chmod -R 777 /nfs/indycar"'
    jupyter_script = \
        f"cd {CONTAINERIZE}; kubectl create -f storm/jupyter.yaml"
    kubeman.execute(permission_script, driver=os.system)
    kubeman.execute(jupyter_script, driver=os.system)
    kubeman.execute(permission_script, driver=os.system)
    kubeman.wait_for_pod("jupyter-notebook", "CrashLoopBackOff")
    kubeman.execute(permission_script, driver=os.system)
    kubeman.wait_for_pod("jupyter-notebook", "Running")
    time.sleep(2)
def notebook_port():
    """Return the NodePort of the jupyter-notebook service as a string."""
    service_lines = kubeman.Shell_run("kubectl get services").splitlines()
    line = Shell.find_lines_with(service_lines, "jupyter-notebook")[0]
    ports_column = line.split()[4]  # e.g. "8888:30123/TCP"
    return ports_column.split(":")[1].replace("/TCP", "")
@benchmark
def show_notebook():
    """Open the jupyter notebook service URL in the browser (via gopen)."""
    kubeman.banner("show_notebook")
    port = notebook_port()
    ip = kubeman.Shell_run("minikube ip").strip()
    kubeman.execute(f"cd {CONTAINERIZE}; gopen http://{ip}:{port}", driver=os.system)
def is_note_book_done_yn():
    """Pause until the user confirms the notebook has been run to completion."""
    # NOTE(review): the yn_choice return value is ignored — the prompt acts only as a pause
    yn_choice("Please run the jupyter notebook now and continue after it completed")
def wait_for_notebook_done():
    """Poll minikube's NFS mount until the notebook writes its done-marker file.

    Prints a dot every second while waiting; returns once
    /nfs/indycar/notebooks/car-notebook-done.txt exists.
    """
    # BUGFIX: corrected the user-facing typo "noetbook" -> "notebook"
    Console.blue("Please load the jupyter notebook 'car-notebook.ipynb' and run it.")
    done = False
    while not done:
        print(".", end="", flush=True)
        # the notebook is expected to create this marker file when finished
        content = Shell.run("minikube ssh ls /nfs/indycar/notebooks/car-notebook-done.txt")
        # print(content)
        done = "No such file or directory" not in content
        time.sleep(1)
    print()
@benchmark
def create_notebook():
    """Generate notebooks from the *-in templates (substituting the auth TOKEN) and copy them into minikube."""
    kubeman.banner("create_notebook")
    # port = notebook_port()
    # ip = kubeman.Shell_run("minikube ip").strip()
    token = kubeman.get_token()
    print(token)
    for file in [
        # f"{CONTAINERIZE}/car-notebook-in.py",
        f"{CONTAINERIZE}/car-notebook-in.ipynb",
        f"{CONTAINERIZE}/car-multi-notebook-in.ipynb"
    ]:
        # substitute the dashboard token into the template notebook
        content = readfile(file)
        content = content.replace("TOKEN", token)
        kubeman.hline()
        print(content)
        kubeman.hline()
        # "foo-in.ipynb" -> "foo.ipynb"
        out = file.replace("-in", "")
        writefile(out, content)
        kubeman.banner(out)
        destination = out.replace(f"{CONTAINERIZE}/", "")
        kubeman.execute("sync")
        kubeman.execute(f"cat {out}")
        # chmod before and after the copy so the notebook pod can read/write the files
        kubeman.execute(f'minikube ssh "sudo chmod -R 777 /nfs"')
        kubeman.execute(f"minikube cp {out} /nfs/indycar/notebooks/{destination}")
        kubeman.execute(f'minikube ssh "sudo chmod -R 777 /nfs"')
    # the API notebook is copied as-is (no token substitution)
    kubeman.execute("minikube cp containerize/IndyCar-API.ipynb /nfs/indycar/notebooks/IndyCar-API.ipynb")
    kubeman.execute(f'minikube ssh "sudo chmod -R 777 /nfs"')
def socketserver_port():
    """Return the NodePort of the indycar-socketserver service as a string."""
    service_lines = kubeman.Shell_run("kubectl get services").splitlines()
    line = Shell.find_lines_with(service_lines, "indycar-socketserver")[0]
    ports_column = line.split()[4]  # e.g. "8080:31234/TCP"
    return ports_column.split(":")[1].replace("/TCP", "")
def install_sass():
    """Install npm and the dart-sass compiler on the host (required to build the dashboard)."""
    # scheck if the socket service_2017 is up and running
    # BUGFIX: removed the stray `nscript = \` chained assignment that created an
    # unused alias of the script string.
    script = \
        f"""
sudo apt install aptitude
sudo aptitude install npm -y
which npm
sudo npm install -g npm
sudo npm audit fix --force
sudo npm install -g sass
sudo npm install -g npm
sudo npm audit fix --force
which npm
npm -v
which sass
sass --version
"""
    kubeman.execute(script, driver=os.system)
    # make sure we have
    # sass --version
    # 1.49.8 compiled with dart2js 2.16.1
    # /usr/bin/sass
def creae_index_js():
    """Create dashboard src/index.js from index-in.js, substituting the minikube IP and socket-server port.

    NOTE(review): the function name is misspelled ('creae') but is referenced by
    that name in the step lists, so it must not be renamed in isolation.
    """
    port = socketserver_port()
    ip = kubeman.get_minikube_ip()
    content = readfile(f"{DASHBOARD}/src/index-in.js")
    content = content.replace("MINIKUBEIP", ip).replace("SOCKETSERVERPORT", port)
    writefile(f"{DASHBOARD}/src/index.js", content)
    kubeman.execute("sync", driver=os.system)
    kubeman.execute(f"cat {DASHBOARD}/src/index.js", driver=os.system)
def show_dashboard():
    """Compile the dashboard's sass sources, start npm, and open the race dashboard in the browser."""
    # kubeman.execute(f"cd {DASHBOARD}; sass --watch src:src", driver=os.system)
    kubeman.execute(f"cd {DASHBOARD}; sass src src", driver=os.system)
    kubeman.execute(f"cd {DASHBOARD}; npm start", driver=os.system)  # why is this needed?
    # yn_choice("continue to race dashboard")
    kubeman.execute(f"cd {DASHBOARD}; gopen http://localhost:3000", driver=os.system)
# cloudmesh/kubemanager
def _continue(msg=""):
global step
if step:
kubeman.banner(msg)
print(screen.columns * "-")
print()
if yn_choice(f"CONTINUE: {msg}?"):
return
else:
if yn_choice(f"I ask yo a final time! CONTINUE: {msg}?"):
return
kubeman.hline()
print()
raise RuntimeError("Workflow interrupted")
print(screen.columns * "-")
print()
# cloudmesh/kubemanager
def execute_step(s, interactive=False):
    """Run a single workflow step callable; in interactive mode, confirm with the user first."""
    if interactive:
        _continue(s.__name__)
    s()
# cloudmesh/kubemanager
def execute_steps(steps, interactive=False):
    """Run a sequence of workflow steps, printing a banner for each.

    NOTE(review): unpacks entries as (callable, name) pairs — unlike `workflow`,
    which iterates plain callables. Confirm callers pass pairs here.
    """
    for s, name in steps:
        kubeman.banner(name)
        execute_step(s, interactive)
def wait_for_storm_job():
    """Block until the storm-job-indycar pod reports the Completed state."""
    kubeman.wait_for_pod("storm-job-indycar-", state="Completed")
def restart_socketserver():
    """Delete the indycar-socketserver pod so kubernetes recreates it; return the pod listing lines."""
    pod_lines = kubeman.Shell_run("kubectl get pod").splitlines()
    pod_name = Shell.find_lines_with(pod_lines, "indycar-socketserver")[0].split()[0]
    kubeman.execute(commands=f"kubectl delete pod {pod_name}", driver=os.system)
    return pod_lines
def open_k8_dashboard():
    """Open the kubernetes dashboard, honoring the global --dashboard/--ui flag."""
    global dashboard
    kubeman.open_k8_dashboard(display=dashboard)
# Ordered steps for the full end-to-end deployment workflow
# (data download -> k8s -> storm -> notebook -> dashboard).
all_steps = [
    kill_indy_services,
    download_data,
    setup_minikube,
    kubeman.setup_k8,
    open_k8_dashboard,
    setup_zookeeper,
    setup_nimbus,
    setup_storm_ui,
    open_storm_ui,
    start_storm_workers,
    start_storm_service,  ##??
    setup_mqtt,
    install_htm_java,
    start_storm_topology,
    minikube_setup_sh,
    start_socket_server,
    setup_jupyter_service,
    create_notebook,
    show_notebook,
    # is_note_book_done_yn(),
    wait_for_notebook_done,
    wait_for_storm_job,
    # storm-job-indycar-22-addefefd-39e8-4077-a03a-140fdb582e7a 0/1 Completed 0 6m8s
    # check for completed
    # do this in the notebook -> car is in the notebook
    install_sass,
    creae_index_js,
    # find the right pod and simply delete it ;-)
    # kubectl delete pod indycar-socketserver-2017-85db4cd775-fhcxj
    # restart_socketserver,
    show_dashboard
]

# Notebook-only workflow: stops after the user confirms the notebook ran;
# dashboard-related steps are commented out.
notebook_steps = [
    kill_indy_services,
    download_data,
    setup_minikube,
    kubeman.setup_k8,
    kubeman.open_k8_dashboard,
    setup_zookeeper,
    setup_nimbus,
    setup_storm_ui,
    open_storm_ui,
    start_storm_workers,
    start_storm_service,  ##??
    setup_mqtt,
    install_htm_java,
    start_storm_topology,
    minikube_setup_sh,
    start_socket_server,
    setup_jupyter_service,
    create_notebook,
    show_notebook,
    is_note_book_done_yn
    # wait_for_notebook_done,
    # wait_for_storm_job,
    ## storm-job-indycar-22-addefefd-39e8-4077-a03a-140fdb582e7a 0/1 Completed 0 6m8s
    ## check for completed
    ## do this in the notebook -> car is in the notebook
    # install_sass,
    # creae_index_js,
    ## find the right pod and simply delete it ;-)
    ## kubectl delete pod indycar-socketserver-2017-85db4cd775-fhcxj
    ## restart_socketserver,
    # show_dashboard
]
# cloudmesh/kubemanager
def workflow(steps=None):
    """Run the given step callables in order (default: all_steps), with optional per-step confirmation.

    Prints a benchmark summary on success; on failure prints the exception and
    a summary without system info.
    """
    print(HOME)
    print(CONTAINERIZE)
    print(STREAMING)
    print(DATA)
    Sudo.password()
    steps = steps or all_steps
    try:
        for step in steps:
            # in --step mode this prompts before each step
            _continue(step.__name__)
            step()
        StopWatch.benchmark(sysinfo=True, attributes="short", csv=False, total=True)
    except Exception as e:
        print(e)
        StopWatch.benchmark(sysinfo=False, attributes="short", csv=False, total=True)
def zookeeper_running():
    """Return True if the zookeeper pod's log shows it has started."""
    try:
        r = kubeman.Shell_run("kubectl logs zookeeper").strip()
        return "ZooKeeper audit is disabled." in r
    # BUGFIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # narrowed to Exception (kubectl failure / pod missing -> not running).
    except Exception:
        return False
def mqtt_running():
    """Return True if the activemq-apollo pod's log shows its admin interface is up."""
    try:
        r = kubeman.Shell_run("kubectl logs activemq-apollo").strip()
        return "Administration interface available at: http://127.0.0.1:" in r
    # BUGFIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # narrowed to Exception (kubectl failure / pod missing -> not running).
    except Exception:
        return False
def deploy_info():
    """Print a deployment status summary: service health, minikube IP, pods, services, ports, token."""
    print("Zookeeper running:", zookeeper_running())
    print("MQTT running:", mqtt_running())
    try:
        ip = kubeman.Shell_run(f"minikube ip")
        print("IP: ", ip)
    except:
        pass
    pods = kubeman.Shell_run(f"kubectl get pods")
    print("PODS")
    print(pods)
    services = kubeman.Shell_run(f"kubectl get services")
    print("SERVICES")
    print(services)
    print("PORTS")
    # each port probe is best-effort; a missing service just skips its line
    try:
        print("8001 pid:", kubeman.find_pid("8001"))
    except:
        pass
    try:
        print("storm-ui port:", storm_port())
    except:
        pass
    try:
        print("notebook port:", notebook_port())
    except:
        pass
    print()
    print("TOKEN")
    # print the kubernetes-dashboard admin-user token
    kubeman.os_system(
        "kubectl -n kubernetes-dashboard describe secret "
        "$(kubectl -n kubernetes-dashboard get secret "
        "| grep admin-user | awk '{print $1}')")
    print()
if __name__ == '__main__':
    # parse the docopt usage string at the top of the file
    arguments = docopt(__doc__)
    # print(arguments)
    signal(SIGINT, exit_handler)
    # NOTE(review): `global` at module level is a no-op; kept as in the original
    global step
    step = arguments["--step"]
    info = arguments["--info"]
    run = arguments["--run"]
    clean = arguments["--kill"]
    steps = arguments["WORKFLOW"] or "all"
    global dashboard
    dashboard = arguments["--dashboard"] or arguments["--ui"]
    global stormui
    stormui = arguments["--stormui"] or arguments["--ui"]
    if step or run:
        # select which workflow to run by name
        if steps.lower() in ["all", "a"]:
            kubeman.banner("ALL STEPS")
            workflow(steps=all_steps)
        elif steps.lower() in ["j", "n", "jupyter", "notebook"]:
            kubeman.banner("NOTEBOOK STEPS")
            workflow(steps=notebook_steps)
        else:
            Console.error(f'arguments["WORKFLOW"] does not exist')
    elif dashboard:
        kubeman.open_k8_dashboard()
    elif stormui:
        open_storm_ui()
    elif clean:
        kill_indy_services()
    elif info:
        deploy_info()
    elif arguments["--menu"]:
        # interactive menu over all steps, with both UIs enabled
        Sudo.password()
        dashboard = True
        stormui = True
        kubeman.menu(all_steps)
    elif arguments["--token"]:
        kubeman.get_token()
    elif arguments["--mqtt"]:
        open_mqtt()
    elif arguments["--about"]:
        print(LICENSE)
    else:
        Console.error("Usage issue")
|
997,250 | 90191e062ea4b826f478632040d887217ff2f152 | from odoo import models, fields, api
class PurchaseOrderHSCodeLine(models.Model):
    """Extend purchase order lines with a free-text HS (Harmonized System) code field."""
    _inherit = 'purchase.order.line'

    # stored free-text HS code, defaults to the empty string
    HSCode = fields.Text(string='HS Code', store=True, default="")
|
997,251 | dc54090d11078d3c9b5b7818fa903dcdfa2eff4d | from xml.etree import ElementTree as ET
import os
from collections import defaultdict
NEWSPAPERS = ["ANJO", "BDPO", "BLMY", "BNER", "BNWL", "BRPT", "CHPN", "CHTR", "CHTT", "CNMR", "CTCR", "CWPR", "DNLN", "DYMR", "ERLN", "EXLN", "FRJO", "GCLN", "GLAD", "GNDL", "GWHD", "HLPA", "HPTE", "IPJO", "IPNW", "JOJL", "LEMR", "LINP", "LNDH", "LVMR", "MCLN", "MRTM", "NECT", "NREC", "NRLR", "NRSR", "NRWC", "ODFW", "OPTE", "PMGU", "PMGZ", "PNCH", "RDNP", "SNSR", "TEFP", "WMCF"]
pageWord_regex = r"""^<pageWord coord="[0-9]+\,[0-9]+\,[0-9]+\,[0-9]+">[^<]*</pageWord>"""
class BLNewspaper(object):
    """Cursor-style accessor for a British Library newspaper archive on disk.

    Files live under <root>/<PAPER>/<YEAR>/ and are named
    <prefix>_<PAPER>_<YEAR>_<MONTH>_<DAY>-<PAGE>.xml; supplement/volume pages
    use an underscore and a marker instead: ..._<DAY>_<S|V>-<PAGE>.xml.
    The current newspaper/year/month/day/page selection ("cursor") is kept in
    self._data and updated by most accessors.
    """

    def __init__(self, newspaper="ANJO", load_available_years=True, paper_root_dir="."):
        if newspaper not in NEWSPAPERS:
            raise Exception("{0} is not a newspaper code in the archive".format(newspaper))
        self.newspaper = newspaper
        self.clear_date()
        self._archive = paper_root_dir
        self._data = self._fingerprint(newspaper=newspaper)
        if load_available_years:
            self._load_available_years()

    def _fingerprint(self, **kw):
        # merge keyword overrides onto the current cursor state
        return {k: kw.get(k, self._data.get(k)) for k in ['newspaper', 'year', 'month', 'day', 'page']}

    def cursor(self):
        """Print the current cursor (newspaper/year/month/day/page)."""
        data = self._fingerprint()
        print("Newspaper: '{0}'\nYear: '{1}'\nMonth: '{2}'\nDay: '{3}'\nPage: '{4}'".format(*[data.get(k, "") for k in ['newspaper', 'year', 'month', 'day', 'page']]))

    def decode_filename(self, filename):
        """Split an archive filename into [newspaper, year, month, day, page, supplement]."""
        s = ""
        fn = filename.split("_")
        d = fn[4].split("-")[0]
        suffix = fn[-1].split("-")
        page = suffix[-1].split(".")[0]  # "aio_s_dj...oa_sjod-0001.xml" ==> "0001"
        # a supplement/volume marker shows up as an extra element before the page
        if d != suffix[0]:
            s = suffix[0]
        return [fn[1], fn[2], fn[3], d, page, s]

    def encode_filename(self, prefix="WO1", **kw):
        """Build the full path for the cursor plus overrides.

        Supplement/volume pages (page starting with 's'/'v') join day and page
        with '_' instead of '-'.
        """
        p = self._fingerprint(**kw)
        if kw.get('page', '').lower().startswith("s") or kw.get('page', '').lower().startswith("v"):
            return os.path.join(self._archive, p['newspaper'], p['year'], "{5}_{0}_{1}_{2}_{3}_{4}.xml".format(p['newspaper'], p['year'], p['month'], p['day'], p['page'], prefix))
        return os.path.join(self._archive, p['newspaper'], p['year'], "{5}_{0}_{1}_{2}_{3}-{4}.xml".format(p['newspaper'], p['year'], p['month'], p['day'], p['page'], prefix))

    def update_cursor(self, **kw):
        """Update cursor fields; pass clear=True to reset the date fields first."""
        if 'clear' in kw and kw['clear']:
            self.clear_date()
        self._data = self._fingerprint(**kw)

    def clear_date(self):
        """Reset the cursor to this newspaper with empty date/page, clearing all caches."""
        self._data = {'newspaper': self.newspaper, 'year': '', 'month': '', 'day': '', 'page': ''}
        self.years = set()
        self.months = defaultdict(set)
        self.days = defaultdict(set)
        self.pages = defaultdict(set)

    def _newspaper_path(self):
        return os.path.join(self._archive, self.newspaper)

    def _year_path(self, **kw):
        p = self._fingerprint(**kw)
        return os.path.join(self._archive, p['newspaper'], p['year'])

    def _load_available_years(self):
        # year directories are 4-character names under the newspaper directory
        self.years = set([year for year in os.listdir(self._newspaper_path()) if len(year) == 4])

    def _refresh_yearlist(self):
        """Rescan the cursor's year directory and rebuild the month/day/page caches."""
        self.years = set()
        self.months = defaultdict(set)
        self.days = defaultdict(set)
        self.pages = defaultdict(set)
        for fn in os.listdir(self._year_path()):
            newspaper, year, month, day, page, supplement = self.decode_filename(fn)
            self.years.add(year)
            self.months[year].add(month)
            self.days[year + month].add(day)
            # supplement pages are cached as "<marker>-<page>"
            if supplement != "":
                self.pages[year + month + day].add(supplement + "-" + page)
            else:
                self.pages[year + month + day].add(page)

    def get_months(self, year=None):
        """Return the set of available months for a year, refreshing the cache if needed."""
        if year:
            self.update_cursor(year=year)
        if year not in self.months:
            self._refresh_yearlist()
        return self.months[year]

    def get_days(self, **kw):
        """Return the set of available days for the cursor's year/month."""
        if kw:
            self.update_cursor(**kw)
        if kw.get("year", "") not in self.months:
            self._refresh_yearlist()
        year = self._data['year']
        month = self._data['month']
        return self.days[year + month]

    def get_pages(self, **kw):
        """Return the set of available pages for the cursor's year/month/day."""
        if kw:
            self.update_cursor(**kw)
        if kw.get("year", "") not in self.months:
            self._refresh_yearlist()
        year = self._data['year']
        month = self._data['month']
        day = self._data['day']
        return self.pages[year + month + day]

    def get_page_doc(self, **kw):
        """Parse the XML file at the cursor (plus overrides) and return the ElementTree root."""
        fp = self.encode_filename(**kw)
        assert (os.path.isfile(fp))
        self.update_cursor(**kw)
        text = []
        with open(fp, "r") as fl:
            r = fl.read()
            doc = ET.fromstring(r)
        return doc

    def get_page_text(self, **kw):
        """Return all pageWord texts on the page joined into one space-separated string."""
        doc = self.get_page_doc(**kw)
        text = []
        for word in doc.findall("BL_page/pageText/pageWord"):
            text.append(word.text)
        return u" ".join([x for x in text if x])

    def get_article_text(self, **kw):
        """Return all articleWord texts joined into one space-separated string."""
        doc = self.get_page_doc(**kw)
        text = []
        for word in doc.findall("BL_article/image_metadata/articleImage/articleText/articleWord"):
            text.append(word.text)
        return u" ".join([x for x in text if x])

    def get_article_metadata(self, **kw):
        """Return a dict (tag -> text) of the title/issue/article metadata elements."""
        doc = self.get_page_doc(**kw)
        md = {}
        for x in ["BL_article/title_metadata", "BL_article/issue_metadata",
                  "BL_article/article_metadata/dc_metadata"]:
            for el in doc.findall(x):
                md[el.tag] = el.text
        return md
if __name__ == "__main__":
a = BLNewspaper()
a.get_months(year = "1870")
a.get_days(year = "1870", month = "06")
a.get_pages(year = "1870", month = "06", day = "01")
assert(os.path.isfile(a.encode_filename(year="1870", month="05", day="04", page="0004")))
assert(os.path.isfile(a.encode_filename(year="1870", month="05", day="04", page="S-0001")))
c = BLNewspaper("BNER")
assert(os.path.isfile(c.encode_filename(year="1858", month="01", day="06", page="V-0001")))
b = BLNewspaper("BRPT")
doc = b.get_page_text(year="1839", month="01", day="01", page="0003")
b.cursor()
print("'doc' holds the text") |
997,252 | 17a7e3956b4f0856ce3a67fca4e375f19855064a | #import utils
from utils import find_max

# demo: find the largest element using the helper imported from the local utils module
numbers = [10, 3, 6, 2, 5, 8]
# max = utils.find_max(numbers)
maximum = find_max(numbers)
print(maximum)
# print(maximum(number))
|
997,253 | 3210371ffe75665b59f784a2dbb546b27b5562fd | ###########################################
# Let's Have Some Fun
# File Name: 647.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Fri Apr 26 14:14:10 2019
###########################################
#coding=utf-8
#!/usr/bin/python
# 647. Palindromic Substrings
class Solution:
    def countSubstrings(self, s: str) -> int:
        """Return the number of palindromic substrings of s (each start/end pair counts once)."""
        n = len(s)
        if n == 0:
            return 0
        # is_pal[a][b] == 1 iff s[a..b] (inclusive) is a palindrome
        is_pal = [[0] * n for _ in range(n)]
        total = 0
        # grow substrings by length: a span is a palindrome when its end
        # characters match and the interior (if any) is itself a palindrome
        for length in range(1, n + 1):
            for a in range(n - length + 1):
                b = a + length - 1
                if s[a] == s[b] and (length < 3 or is_pal[a + 1][b - 1]):
                    is_pal[a][b] = 1
                    total += 1
        return total
|
997,254 | 998702e0cbfc76c630142855a72912e96c298241 | from gi.repository import Gtk, Gdk, GLib
class Monitor(object):
    """Plain-data snapshot of a display monitor's geometry and identity.

    Built from either a Gdk.Monitor (from_monitor) or the older
    Gdk.Screen API (from_screen).
    """

    def __init__(self):
        pass

    @classmethod
    def from_monitor(cls, mon):
        """Create a snapshot from a Gdk.Monitor-like object."""
        res = cls()
        geometry = mon.get_geometry()
        res.height_mm = mon.get_height_mm()
        res.width_mm = mon.get_width_mm()
        res.manufacturer = mon.get_manufacturer()
        res.model = mon.get_model()
        res.scale = mon.get_scale_factor()
        # application-space (logical) geometry
        res.app_x = geometry.x
        res.app_y = geometry.y
        res.app_width = geometry.width
        res.app_height = geometry.height
        # XXX: This is probably still wrong for 1.5 factor scaling!
        res.width = geometry.width * res.scale
        res.height = geometry.height * res.scale
        res.hash = hash(mon)
        return res

    @classmethod
    def from_screen(cls, screen, mon):
        """Create a snapshot from a Gdk.Screen and a monitor index."""
        res = cls()
        geometry = screen.get_monitor_geometry(mon)
        res.height_mm = screen.get_monitor_height_mm(mon)
        res.width_mm = screen.get_monitor_width_mm(mon)
        # the Screen API exposes no manufacturer/model; fall back to the plug name
        res.manufacturer = 'UNKNOWN'
        res.model = 'Plug: ' + screen.get_monitor_plug_name(mon)
        res.scale = screen.get_monitor_scale_factor(mon)
        res.app_x = geometry.x
        res.app_y = geometry.y
        res.app_width = geometry.width
        res.app_height = geometry.height
        # XXX: This is probably still wrong for 1.5 factor scaling!
        res.width = geometry.width * res.scale
        res.height = geometry.height * res.scale
        # Use the plug as a unique identifier
        res.hash = screen.get_monitor_plug_name(mon)
        return res

    def __repr__(self):
        return 'Monitor(%s, %s, %i, %i, scale=%i)' % (self.manufacturer, self.model, self.app_width, self.app_height, self.scale)
|
997,255 | b8d5a489220b6407f81d0eeab3c84c5cc5d692cb | #display output
print("first python post")
|
997,256 | 39b54740206e6e7c82a6ab85652b898ddc0027c0 | import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import os
import time
import random
def send_img_email(Num_Emails: int, Sender_Email: str, Sender_Pass: str, Target_Email: str, Img_Subject: str, Joke_File: str, Img_File: str):
    """Send Num_Emails HTML emails via Gmail SSL, each with a random joke and a random image URL.

    Args:
        Num_Emails: number of emails to send (a 2 s pause precedes each one).
        Sender_Email / Sender_Pass: Gmail credentials used for the SMTP login.
        Target_Email: recipient address.
        Img_Subject: subject line, also interpolated into the body text.
        Joke_File: text file containing one joke per line.
        Img_File: text file containing one image URL per line.
    """
    def random_line(file):
        # pick a uniformly random line; strip the trailing newline so URLs stay valid
        with open(file, "r") as f:
            lines = f.readlines()
        return random.choice(lines).strip()

    for i in range(Num_Emails):
        time.sleep(2)
        message = MIMEMultipart("alternative")
        message["Subject"] = Img_Subject
        message["From"] = Sender_Email
        message["To"] = Target_Email
        Random_Joke = random_line(Joke_File)
        img_scr = random_line(Img_File)
        text = " "  # non-empty plain-text part so multipart/alternative renders in text-only clients
        # BUGFIX: the <img> tag was unterminated and its src unquoted
        # (`<img src={url}</body>`), so the image never rendered; the URL is now
        # quoted and the tag closed.
        html = f"<html><body><p>Here\'s a random joke scraped from the interwebs:</p><br><p>{Random_Joke}</p><br><p>I know you like {Img_Subject} so here's some pic's of them</p><img src=\"{img_scr}\"></body></html>"
        part1 = MIMEText(text, "plain")
        part2 = MIMEText(html, "html")
        message.attach(part1)
        message.attach(part2)
        context = ssl.create_default_context()
        with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
            server.login(Sender_Email, Sender_Pass)
            server.sendmail(
                Sender_Email, Target_Email, message.as_string()
            )
        print(f"Email #{i + 1} sent")
|
997,257 | 77f21be9bd1ac907d6323e1462a3fc21aedccb75 | from pyam import IamDataFrame
import pytest
from numpy.testing import assert_array_equal
@pytest.mark.parametrize(
    "axis, exp",
    (["scenario", [0.5, 0.5, 1]], [["model", "scenario"], [1, 1, 1]]),
)
def test_debiasing_count(test_pd_df, axis, exp):
    """Check computing bias weights counting the number of scenarios by scenario name"""
    # modify the default test data to have three distinct scenarios
    test_pd_df.loc[1, "model"] = "model_b"
    df = IamDataFrame(test_pd_df)
    df.compute.bias(method="count", name="bias", axis=axis)
    # expected weight = 1 / number of rows sharing the same value(s) along `axis`
    assert_array_equal(df["bias"].values, exp)
def test_debiasing_unknown_method(test_df_year):
    """Check that an unknown bias-computation method raises a ValueError"""
    msg = "Unknown method foo for computing bias weights!"
    with pytest.raises(ValueError, match=msg):
        test_df_year.compute.bias(method="foo", name="bias", axis="scenario")
|
997,258 | 6b6ec007e77381c153571637c4b56c6b7f78020a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2023 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Envelopes generation speed for Behaviour act test."""
import itertools
import os
import struct
import sys
import time
from typing import Any, List, Tuple, Union, cast
import click
from aea.configurations.base import ConnectionConfig
from aea.identity.base import Identity
from aea.protocols.base import Message
from aea.registries.resources import Resources
from aea.runner import AEARunner
from aea.skills.base import Handler
from benchmark.checks.utils import get_mem_usage_in_mb # noqa: I100
from benchmark.checks.utils import (
make_agent,
make_envelope,
make_skill,
multi_run,
number_of_runs_deco,
output_format_deco,
print_results,
wait_for_condition,
)
from packages.fetchai.connections.local.connection import ( # noqa: E402 # pylint: disable=C0413
LocalNode,
OEFLocalConnection,
)
from packages.fetchai.protocols.default.message import DefaultMessage
ROOT_PATH = os.path.join(os.path.abspath(__file__), "..", "..")
sys.path.append(ROOT_PATH)
class TestHandler(Handler):
    """Dummy handler that echoes DefaultMessages and accumulates RTT/latency statistics."""

    SUPPORTED_PROTOCOL = DefaultMessage.protocol_id

    def setup(self) -> None:
        """Noop setup."""
        # counters live here (not __init__) because the framework calls setup()
        self.count: int = 0  # pylint: disable=attribute-defined-outside-init
        self.rtt_total_time: float = (  # pylint: disable=attribute-defined-outside-init
            0.0
        )
        self.rtt_count: int = 0  # pylint: disable=attribute-defined-outside-init
        self.latency_total_time: float = (  # pylint: disable=attribute-defined-outside-init
            0.0
        )
        self.latency_count: int = 0  # pylint: disable=attribute-defined-outside-init

    def teardown(self) -> None:
        """Noop teardown."""

    def handle(self, message: Message) -> None:
        """Handle incoming message: update statistics and reply with an envelope."""
        self.count += 1
        if message.dialogue_reference[0] != "":
            # content carries two packed doubles: RTT origin and last-hop timestamps
            rtt_ts, latency_ts = struct.unpack("dd", message.content)  # type: ignore
            if message.dialogue_reference[0] == self.context.agent_address:
                # the message originated here: a full round trip completed
                self.rtt_total_time += time.time() - rtt_ts
                self.rtt_count += 1
            self.latency_total_time += time.time() - latency_ts
            self.latency_count += 1
        if message.dialogue_reference[0] in ["", self.context.agent_address]:
            # create new
            response_msg = DefaultMessage(
                dialogue_reference=(self.context.agent_address, ""),
                message_id=1,
                target=0,
                performative=DefaultMessage.Performative.BYTES,
                content=struct.pack("dd", time.time(), time.time()),
            )
        else:
            # update ttfb copy rtt
            response_msg = DefaultMessage(
                dialogue_reference=message.dialogue_reference,
                message_id=1,
                target=0,
                performative=DefaultMessage.Performative.BYTES,
                content=struct.pack("dd", rtt_ts, time.time()),  # type: ignore
            )
        self.context.outbox.put(make_envelope(message.to, message.sender, response_msg))
def run(
    duration: int,
    runtime_mode: str,
    runner_mode: str,
    start_messages: int,
    num_of_agents: int,
) -> List[Tuple[str, Union[int, float]]]:
    """Test multiagent message exchange.

    Spins up `num_of_agents` local agents exchanging envelopes for `duration`
    seconds and returns (metric name, value) pairs: message count, rate,
    memory usage, mean RTT and mean latency.
    """
    # pylint: disable=import-outside-toplevel,unused-import
    # import manually due to some lazy imports in decision_maker
    import aea.decision_maker.default  # noqa: F401

    local_node = LocalNode()
    local_node.start()
    agents = []
    skills = []
    for i in range(num_of_agents):
        # one local connection + TestHandler skill per agent
        resources = Resources()
        agent_name = f"agent{i}"
        public_key = f"public_key{i}"
        identity = Identity(agent_name, address=agent_name, public_key=public_key)
        connection = OEFLocalConnection(
            local_node,
            configuration=ConnectionConfig(
                connection_id=OEFLocalConnection.connection_id,
            ),
            identity=identity,
            data_dir="tmp",
        )
        resources.add_connection(connection)
        agent = make_agent(
            agent_name=agent_name,
            runtime_mode=runtime_mode,
            resources=resources,
            identity=identity,
        )
        skill = make_skill(agent, handlers={"test": TestHandler})
        agent.resources.add_skill(skill)
        agents.append(agent)
        skills.append(skill)
    runner = AEARunner(agents, runner_mode)
    runner.start(threaded=True)
    for agent in agents:
        wait_for_condition(
            (  # pylint: disable=unnecessary-direct-lambda-call
                lambda agnt: lambda: agnt.is_running
            )(agent),
            timeout=5,
        )
    wait_for_condition(lambda: runner.is_running, timeout=5)
    time.sleep(1)
    # prepopulate each ordered agent pair with start_messages envelopes
    for agent1, agent2 in itertools.permutations(agents, 2):
        env = make_envelope(agent1.identity.address, agent2.identity.address)
        for _ in range(int(start_messages)):
            agent1.outbox.put(env)
    time.sleep(duration)
    mem_usage = get_mem_usage_in_mb()
    local_node.stop()
    runner.stop(timeout=5)
    total_messages = sum(
        cast(TestHandler, skill.handlers["test"]).count for skill in skills
    )
    rate = total_messages / duration
    rtt_total_time = sum(
        cast(TestHandler, skill.handlers["test"]).rtt_total_time for skill in skills
    )
    rtt_count = sum(
        cast(TestHandler, skill.handlers["test"]).rtt_count for skill in skills
    )
    # avoid division by zero when no round trips completed
    if rtt_count == 0:
        rtt_count = -1
    latency_total_time = sum(
        cast(TestHandler, skill.handlers["test"]).latency_total_time for skill in skills
    )
    latency_count = sum(
        cast(TestHandler, skill.handlers["test"]).latency_count for skill in skills
    )
    if latency_count == 0:
        latency_count = -1
    return [
        ("Total Messages handled", total_messages),
        ("Messages rate(envelopes/second)", rate),
        ("Mem usage(Mb)", mem_usage),
        ("RTT (ms)", rtt_total_time / rtt_count),
        ("Latency (ms)", latency_total_time / latency_count),
    ]
@click.command()
@click.option("--duration", default=1, help="Run time in seconds.")
@click.option(
    "--runtime_mode", default="async", help="Runtime mode: async or threaded."
)
@click.option("--runner_mode", default="async", help="Runtime mode: async or threaded.")
@click.option(
    "--start_messages", default=100, help="Amount of messages to prepopulate."
)
@click.option("--num_of_agents", default=2, help="Amount of agents to run.")
@number_of_runs_deco
@output_format_deco
def main(
    duration: int,
    runtime_mode: str,
    runner_mode: str,
    start_messages: int,
    num_of_agents: int,
    number_of_runs: int,
    output_format: str,
) -> Any:
    """Run test."""
    parameters = {
        "Duration(seconds)": duration,
        "Runtime mode": runtime_mode,
        "Runner mode": runner_mode,
        "Start messages": start_messages,
        "Number of agents": num_of_agents,
        "Number of runs": number_of_runs,
    }

    def result_fn() -> List[Tuple[str, Any, Any, Any]]:
        # run the benchmark number_of_runs times and aggregate the results
        return multi_run(
            int(number_of_runs),
            run,
            (duration, runtime_mode, runner_mode, start_messages, num_of_agents),
        )

    return print_results(output_format, parameters, result_fn)
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
|
997,259 | c7e274f8ecc26f3d512be4cb78da25f7e5e003e9 | from random import random
from time import perf_counter

# Monte Carlo estimate of pi: sample DARTS points in the unit square and count
# the fraction that lands inside the quarter circle of radius 1.
DARTS = 10000*10000
hits = 0.0
start = perf_counter()
for i in range(DARTS):
    x, y = random(), random()
    dist = pow(x**2+y**2, 0.5)
    if dist < 1.0:
        hits += 1
# (quarter circle area) / (square area) = pi/4, hence the factor 4
pi = 4*hits/DARTS
print('圆周率是:{:.6f}'.format(pi))
print('运行时间:{:.3f}'.format(perf_counter()-start))
|
997,260 | b77f2998308d381f69f943d025ae8c422aeb06e1 | """The registration module provides classes for image registration.
See Also:
- `ITK Registration <https://itk.org/Doxygen/html/RegistrationPage.html>`_
- `ITK Software Guide Registration <https://itk.org/ITKSoftwareGuide/html/Book2/ITKSoftwareGuide-Book2ch3.html>`_
"""
import abc
import enum
import os
import typing
import SimpleITK as sitk
import pymia.filtering.filter as pymia_fltr
class RegistrationType(enum.Enum):
    """Represents the registration transformation type."""

    AFFINE = 1  # maps to sitk.AffineTransform
    SIMILARITY = 2  # maps to sitk.Similarity3DTransform / Similarity2DTransform
    RIGID = 3  # maps to sitk.VersorRigid3DTransform / Euler2DTransform
    BSPLINE = 4  # maps to a deformable B-spline transform
class RegistrationCallback(abc.ABC):

    def __init__(self) -> None:
        """Represents the abstract handler for the registration callbacks."""
        self.registration_method = None
        self.fixed_image = None
        self.moving_image = None
        self.transform = None

    def set_params(self, registration_method: sitk.ImageRegistrationMethod,
                   fixed_image: sitk.Image,
                   moving_image: sitk.Image,
                   transform: sitk.Transform):
        """Sets the parameters that might be used during the callbacks

        Also registers the four callback methods on the corresponding
        SimpleITK registration events.

        Args:
            registration_method (sitk.ImageRegistrationMethod): The registration method.
            fixed_image (sitk.Image): The fixed image.
            moving_image (sitk.Image): The moving image.
            transform (sitk.Transform): The transformation.
        """
        self.registration_method = registration_method
        self.fixed_image = fixed_image
        self.moving_image = moving_image
        self.transform = transform
        # link the callback functions to the events
        self.registration_method.AddCommand(sitk.sitkStartEvent, self.registration_started)
        self.registration_method.AddCommand(sitk.sitkEndEvent, self.registration_ended)
        self.registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent,
                                            self.registration_resolution_changed)
        self.registration_method.AddCommand(sitk.sitkIterationEvent, self.registration_iteration_ended)

    def registration_ended(self):
        """Callback for the EndEvent."""
        pass

    def registration_started(self):
        """Callback for the StartEvent."""
        pass

    def registration_resolution_changed(self):
        """Callback for the MultiResolutionIterationEvent."""
        pass

    def registration_iteration_ended(self):
        """Callback for the IterationEvent."""
        pass
class MultiModalRegistrationParams(pymia_fltr.FilterParams):

    def __init__(self, fixed_image: sitk.Image, fixed_image_mask: sitk.Image = None,
                 callbacks: typing.List[RegistrationCallback] = None):
        """Represents parameters for the multi-modal rigid registration used by the :class:`.MultiModalRegistration` filter.

        Args:
            fixed_image (sitk.Image): The fixed image for the registration.
            fixed_image_mask (sitk.Image): A mask for the fixed image to limit the registration.
            callbacks (typing.List[RegistrationCallback]): Callbacks hooked onto the registration's
                events (e.g. to plot progress). Note that this increases the computational time.
        """
        self.fixed_image = fixed_image
        self.fixed_image_mask = fixed_image_mask
        self.callbacks = callbacks
class MultiModalRegistration(pymia_fltr.Filter):
def __init__(self,
registration_type: RegistrationType = RegistrationType.RIGID,
number_of_histogram_bins: int = 200,
learning_rate: float = 1.0,
step_size: float = 0.001,
number_of_iterations: int = 200,
relaxation_factor: float = 0.5,
shrink_factors: typing.List[int] = (2, 1, 1),
smoothing_sigmas: typing.List[float] = (2, 1, 0),
sampling_percentage: float = 0.2,
sampling_seed: int = sitk.sitkWallClock,
resampling_interpolator=sitk.sitkBSpline):
"""Represents a multi-modal image registration filter.
The filter estimates a 3-dimensional rigid or affine transformation between images of different modalities using
- Mutual information similarity metric
- Linear interpolation
- Gradient descent optimization
Args:
registration_type (RegistrationType): The type of the registration ('rigid' or 'affine').
number_of_histogram_bins (int): The number of histogram bins.
learning_rate (float): The optimizer's learning rate.
step_size (float): The optimizer's step size. Each step in the optimizer is at least this large.
number_of_iterations (int): The maximum number of optimization iterations.
relaxation_factor (float): The relaxation factor to penalize abrupt changes during optimization.
shrink_factors (typing.List[int]): The shrink factors at each shrinking level (from high to low).
smoothing_sigmas (typing.List[int]): The Gaussian sigmas for smoothing at each shrinking level (in physical units).
sampling_percentage (float): Fraction of voxel of the fixed image that will be used for registration (0, 1].
Typical values range from 0.01 (1 %) for low detail images to 0.2 (20 %) for high detail images.
The higher the fraction, the higher the computational time.
sampling_seed: The seed for reproducible behavior.
resampling_interpolator: Interpolation to be applied while resampling the image by the determined
transformation.
Examples:
The following example shows the usage of the MultiModalRegistration class.
>>> fixed_image = sitk.ReadImage('/path/to/image/fixed.mha')
>>> moving_image = sitk.ReadImage('/path/to/image/moving.mha')
>>> registration = MultiModalRegistration() # specify parameters to your needs
>>> parameters = MultiModalRegistrationParams(fixed_image)
>>> registered_image = registration.execute(moving_image, parameters)
"""
super().__init__()
if len(shrink_factors) != len(smoothing_sigmas):
raise ValueError("shrink_factors and smoothing_sigmas need to be same length")
self.registration_type = registration_type
self.number_of_histogram_bins = number_of_histogram_bins
self.learning_rate = learning_rate
self.step_size = step_size
self.number_of_iterations = number_of_iterations
self.relaxation_factor = relaxation_factor
self.shrink_factors = shrink_factors
self.smoothing_sigmas = smoothing_sigmas
self.sampling_percentage = sampling_percentage
self.sampling_seed = sampling_seed
self.resampling_interpolator = resampling_interpolator
registration = sitk.ImageRegistrationMethod()
# similarity metric
# will compare how well the two images match each other
# registration.SetMetricAsJointHistogramMutualInformation(self.number_of_histogram_bins, 1.5)
registration.SetMetricAsMattesMutualInformation(self.number_of_histogram_bins)
registration.SetMetricSamplingStrategy(registration.RANDOM)
registration.SetMetricSamplingPercentage(self.sampling_percentage, self.sampling_seed)
# An image gradient calculator based on ImageFunction is used instead of image gradient filters
# set to True uses GradientRecursiveGaussianImageFilter
# set to False uses CentralDifferenceImageFunction
# see also https://itk.org/Doxygen/html/classitk_1_1ImageToImageMetricv4.html
registration.SetMetricUseFixedImageGradientFilter(False)
registration.SetMetricUseMovingImageGradientFilter(False)
# interpolator
# will evaluate the intensities of the moving image at non-rigid positions
registration.SetInterpolator(sitk.sitkLinear)
# optimizer
# is required to explore the parameter space of the transform in search of optimal values of the metric
if self.registration_type == RegistrationType.BSPLINE:
registration.SetOptimizerAsLBFGSB()
else:
registration.SetOptimizerAsRegularStepGradientDescent(learningRate=self.learning_rate,
minStep=self.step_size,
numberOfIterations=self.number_of_iterations,
relaxationFactor=self.relaxation_factor,
gradientMagnitudeTolerance=1e-4,
estimateLearningRate=registration.EachIteration,
maximumStepSizeInPhysicalUnits=0.0)
registration.SetOptimizerScalesFromPhysicalShift()
# setup for the multi-resolution framework
registration.SetShrinkFactorsPerLevel(self.shrink_factors)
registration.SetSmoothingSigmasPerLevel(self.smoothing_sigmas)
registration.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
self.registration = registration
self.transform = None
def execute(self, image: sitk.Image, params: MultiModalRegistrationParams = None) -> sitk.Image:
    """Executes a multi-modal rigid registration.
    Args:
        image (sitk.Image): The moving image to register.
        params (MultiModalRegistrationParams): The parameters, which contain the fixed image.
    Returns:
        sitk.Image: The registered image, resampled onto the fixed image's grid.
    Raises:
        ValueError: If ``params`` is None or the image is not 2-D or 3-D.
    """
    if params is None:
        raise ValueError("params is not defined")
    dimension = image.GetDimension()
    if dimension not in (2, 3):
        raise ValueError('Image dimension {} is not among the accepted (2, 3)'.format(dimension))
    # set a transform that is applied to the moving image to initialize the registration
    if self.registration_type == RegistrationType.BSPLINE:
        # B-spline grid is hard-coded to 10 control points per dimension
        transform_domain_mesh_size = [10] * image.GetDimension()
        initial_transform = sitk.BSplineTransformInitializer(params.fixed_image, transform_domain_mesh_size)
    else:
        if self.registration_type == RegistrationType.RIGID:
            transform_type = sitk.VersorRigid3DTransform() if dimension == 3 else sitk.Euler2DTransform()
        elif self.registration_type == RegistrationType.AFFINE:
            transform_type = sitk.AffineTransform(dimension)
        elif self.registration_type == RegistrationType.SIMILARITY:
            transform_type = sitk.Similarity3DTransform() if dimension == 3 else sitk.Similarity2DTransform()
        else:
            raise ValueError('not supported registration_type')
        # initialize by aligning the geometric centers of the two images
        initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(params.fixed_image,
                                                                        image.GetPixelIDValue()),
                                                              image,
                                                              transform_type,
                                                              sitk.CenteredTransformInitializerFilter.GEOMETRY)
    self.registration.SetInitialTransform(initial_transform, inPlace=True)
    if params.fixed_image_mask:
        self.registration.SetMetricFixedMask(params.fixed_image_mask)
    if params.callbacks is not None:
        for callback in params.callbacks:
            callback.set_params(self.registration, params.fixed_image, image, initial_transform)
    # registration runs on float32 casts; the resulting transform is kept on self for reuse
    self.transform = self.registration.Execute(sitk.Cast(params.fixed_image, sitk.sitkFloat32),
                                               sitk.Cast(image, sitk.sitkFloat32))
    if self.verbose:
        print('MultiModalRegistration:\n Final metric value: {0}'.format(self.registration.GetMetricValue()))
        print(' Optimizer\'s stopping condition, {0}'.format(
            self.registration.GetOptimizerStopConditionDescription()))
    elif self.number_of_iterations == self.registration.GetOptimizerIteration():
        # NOTE(review): because of the elif, this non-convergence warning only prints
        # when verbose is False -- confirm that is intended
        print('MultiModalRegistration: Optimizer terminated at number of iterations and did not converge!')
    return sitk.Resample(image, params.fixed_image, self.transform, self.resampling_interpolator, 0.0,
                         image.GetPixelIDValue())
def __str__(self):
    """Gets a nicely printable string representation.
    Returns:
        str: The string representation.
    """
    parts = ['MultiModalRegistration:',
             ' registration_type: {}'.format(self.registration_type),
             ' number_of_histogram_bins: {}'.format(self.number_of_histogram_bins),
             ' learning_rate: {}'.format(self.learning_rate),
             ' step_size: {}'.format(self.step_size),
             ' number_of_iterations: {}'.format(self.number_of_iterations),
             ' relaxation_factor: {}'.format(self.relaxation_factor),
             ' shrink_factors: {}'.format(self.shrink_factors),
             ' smoothing_sigmas: {}'.format(self.smoothing_sigmas),
             ' sampling_percentage: {}'.format(self.sampling_percentage),
             ' resampling_interpolator: {}'.format(self.resampling_interpolator)]
    # the original literal ends with a newline; keep it
    return '\n'.join(parts) + '\n'
class PlotOnResolutionChangeCallback(RegistrationCallback):
    """Registration plotter that snapshots the moving image on each resolution
    change and once more when the registration ends."""

    def __init__(self, plot_dir: str, file_name_prefix: str = '') -> None:
        """Represents a plotter for registrations.

        Args:
            plot_dir (str): Path to the directory where to save the plots.
            file_name_prefix (str): The file name prefix for the plots.
        """
        super().__init__()
        self.plot_dir = plot_dir
        self.file_name_prefix = file_name_prefix
        self.resolution = 0

    def registration_started(self):
        """Callback for the StartEvent."""
        self.resolution = 0

    def registration_resolution_changed(self):
        """Callback for the MultiResolutionIterationEvent."""
        self._write_image('res{}'.format(self.resolution))
        self.resolution += 1

    def registration_iteration_ended(self):
        """Callback for the IterationEvent (intentionally a no-op)."""

    def registration_ended(self):
        """Callback for the EndEvent."""
        self._write_image('end')

    def _write_image(self, file_name_suffix: str):
        """Resamples the moving image with the current transform and writes it as MHA."""
        target = os.path.join(self.plot_dir, '{}_{}.mha'.format(self.file_name_prefix, file_name_suffix))
        resampled = sitk.Resample(self.moving_image, self.fixed_image, self.transform,
                                  sitk.sitkLinear, 0.0, self.moving_image.GetPixelIDValue())
        sitk.WriteImage(resampled, target)
|
997,261 | e7622a9cddaeddff961c526b65f33e4cf446e21b | # -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import time
from collections import defaultdict
from sklearn.metrics import mean_squared_error
from gensim.models import word2vec
from keras.models import Sequential,load_model,Model
from keras.layers import Dense, Activation, Dropout, Embedding,BatchNormalization,Bidirectional,Conv1D,GlobalMaxPooling1D,Input,Lambda,TimeDistributed,Convolution1D
from keras.layers import LSTM,concatenate
from keras.callbacks import EarlyStopping,ModelCheckpoint
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras import backend as K
from keras.models import load_model
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import log_loss
t1=time.time()
###################################################################################################################################
#get embedding
# Parse the pretrained embedding file, one "<token> <v1> <v2> ..." record per line,
# into a dict keyed by the lower-cased token.
emb_dic={}
with open("../input/word_embed.txt") as f:
    word_emb=f.readlines()
word_emb=word_emb  # NOTE(review): no-op self-assignment, likely leftover from a slicing experiment
print(len(word_emb))
for w in word_emb:
    w=w.replace("\n","")
    content=w.split(" ")
    emb_dic[content[0].lower()]=np.array(content[1:])
MAX_SEQUENCE_LENGTH = 20
MAX_NB_WORDS = 50000
EMBEDDING_DIM = len(content)-1  # NOTE(review): relies on `content` leaking from the last loop iteration
DROPOUT = 0.1
###################################################################################################################################
#get data
# Each row of train/test references two question ids (q1, q2); the question table
# supplies the word (w*) and character (c*) sequences, joined in twice (once per side).
train = pd.read_csv('../input/train.csv')#[:10000]
test = pd.read_csv('../input/test.csv')#[:10000]
ques=pd.read_csv('../input/question.csv')
ques.columns=["q1","w1","c1"]
train=train.merge(ques,on="q1",how="left")
test=test.merge(ques,on="q1",how="left")
# rename the lookup columns and join again for the second question of each pair
ques.columns=["q2","w2","c2"]
train=train.merge(ques,on="q2",how="left")
test=test.merge(ques,on="q2",how="left")
#############################################################################################################################
#MAGIC_FEATURE
# Quora-style "magic" leak features: question frequency and neighbour-set intersection,
# built over the union of train and test pairs.
train_df = pd.read_csv("../input/train.csv")#[:10000]
test_df = pd.read_csv("../input/test.csv")#[:10000]
test_df["label"]=-1
data = pd.concat([train_df[['q1', 'q2']], \
        test_df[['q1', 'q2']]], axis=0).reset_index(drop='index')
# q_dict maps every question to the set of questions it was ever paired with.
q_dict = defaultdict(set)
for i in range(data.shape[0]):
    q_dict[data.q1[i]].add(data.q2[i])
    q_dict[data.q2[i]].add(data.q1[i])
def q1_freq(row):
    """Number of distinct questions ever paired with this row's q1."""
    return len(q_dict[row['q1']])
def q2_freq(row):
    """Number of distinct questions ever paired with this row's q2."""
    return len(q_dict[row['q2']])
def q1_q2_intersect(row):
    """Size of the common-neighbour set of the row's two questions."""
    # q_dict values are already sets, so set-intersection works directly
    return len(q_dict[row['q1']] & q_dict[row['q2']])
# Materialize the three leak features and z-score them jointly over train+test.
# NOTE(review): apply(..., raw=True) passes plain ndarrays in modern pandas, which would
# break the row['q1'] indexing used by the feature functions -- confirm the pinned pandas.
train_df['q1_q2_intersect'] = train_df.apply(q1_q2_intersect, axis=1, raw=True)
train_df['q1_freq'] = train_df.apply(q1_freq, axis=1, raw=True)
train_df['q2_freq'] = train_df.apply(q2_freq, axis=1, raw=True)
test_df['q1_q2_intersect'] = test_df.apply(q1_q2_intersect, axis=1, raw=True)
test_df['q1_freq'] = test_df.apply(q1_freq, axis=1, raw=True)
test_df['q2_freq'] = test_df.apply(q2_freq, axis=1, raw=True)
leaks = train_df[['q1_q2_intersect', 'q1_freq', 'q2_freq']]
test_leaks = test_df[['q1_q2_intersect', 'q1_freq', 'q2_freq']]
ss = StandardScaler()
ss.fit(np.vstack((leaks, test_leaks)))
leaks = ss.transform(leaks)
test_leaks = ss.transform(test_leaks)
#############################################################################################################################
#process data
# One tokenizer fitted over every word sequence of both questions in train+test.
# NOTE(review): `nb_words` is the Keras 1.x spelling (renamed num_words in Keras 2) -- confirm version.
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS,)
tokenizer.fit_on_texts(list(train["w1"])+list(test["w1"])+list(train["w2"])+list(test["w2"]))
column="w1"
sequences_all = tokenizer.texts_to_sequences(list(train[column]))
sequences_test = tokenizer.texts_to_sequences(list(test[column]))
X_train_1 = pad_sequences(sequences_all, maxlen=MAX_SEQUENCE_LENGTH,padding='post')
X_test_1 = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH,padding='post')
column="w2"
sequences_all = tokenizer.texts_to_sequences(list(train[column]))
sequences_test = tokenizer.texts_to_sequences(list(test[column]))
X_train_2 = pad_sequences(sequences_all, maxlen=MAX_SEQUENCE_LENGTH,padding='post')
X_test_2 = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH,padding='post')
word_index = tokenizer.word_index
# +1 row because tokenizer word indices start at 1 (index 0 is used for padding)
nb_words = min(MAX_NB_WORDS, len(word_index))+1
print(nb_words)
# Fill the embedding matrix; ss counts vocabulary words that have a pretrained vector.
# NOTE(review): if len(word_index) > MAX_NB_WORDS, word_embedding_matrix[i] below can
# index past nb_words -- confirm the vocabulary never exceeds the cap.
ss=0
word_embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))
print(len(word_index.items()))
for word, i in word_index.items():
    if word in emb_dic.keys():
        ss+=1
        word_embedding_matrix[i] = emb_dic[word]
    else:
        pass
print(ss)
print(word_embedding_matrix)
y=train["label"]
print(y.value_counts())
###################################################################################################################################
# Build the model
from keras import *
from keras.layers import *
from keras.activations import softmax
from keras.models import Model
from keras.optimizers import Nadam, Adam
from keras.regularizers import l2
import keras.backend as K
from sklearn.cross_validation import StratifiedKFold,KFold
def unchanged_shape(input_shape):
    """Identity output-shape function for Keras Lambda layers."""
    return input_shape
def substract(input_1, input_2):
    """Element-wise subtraction, expressed as input_1 + (-input_2)."""
    negated = Lambda(lambda x: -x, output_shape=unchanged_shape)(input_2)
    return Add()([input_1, negated])
def submult(input_1, input_2):
    """Concatenate the element-wise difference and product of the two inputs."""
    product = Multiply()([input_1, input_2])
    difference = substract(input_1, input_2)
    return Concatenate()([difference, product])
def apply_multiple(input_, layers):
    """Apply each layer to the input and concatenate the results.

    Raises:
        ValueError: If fewer than two layers are supplied.
    """
    if len(layers) <= 1:
        raise ValueError('Layers list should contain more than 1 layer')
    outputs = [layer(input_) for layer in layers]
    return Concatenate()(outputs)
def time_distributed(input_, layers):
    """Apply a list of layers in TimeDistributed mode.

    Each layer is wrapped in ``TimeDistributed`` and applied sequentially.

    Returns:
        The final node after all wrapped layers are applied (the input itself
        when ``layers`` is empty). The original built this value but lacked a
        ``return`` statement, so the result was always discarded.
    """
    node_ = input_
    for layer_ in layers:
        node_ = TimeDistributed(layer_)(node_)
    return node_
def soft_attention_alignment(input_1, input_2):
    """Align the two sequence representations with soft (dot-product) attention.

    Returns each sequence re-expressed as an attention-weighted combination of
    the other sequence's timesteps.
    """
    # raw alignment scores between every timestep pair of the two sequences
    attention = Dot(axes=-1)([input_1, input_2])
    # normalise over each axis separately to obtain the two attention distributions
    w_att_1 = Lambda(lambda x: softmax(x, axis=1),
                     output_shape=unchanged_shape)(attention)
    w_att_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2),
                                     output_shape=unchanged_shape)(attention))
    # attention-weighted sums: each side aligned against the other
    in1_aligned = Dot(axes=1)([w_att_1, input_1])
    in2_aligned = Dot(axes=1)([w_att_2, input_2])
    return in1_aligned, in2_aligned
def build_model():
    """Assemble the siamese BiLSTM + soft-attention question-matching network.

    Inputs: two padded word-id sequences (length 20) plus the 3 leak features;
    output: sigmoid probability that the two questions are duplicates.
    """
    emb_layer = Embedding(nb_words, EMBEDDING_DIM, weights=[word_embedding_matrix],
                          input_length=MAX_SEQUENCE_LENGTH, trainable=True)
    # Define inputs
    seq1 = Input(shape=(20,))
    seq2 = Input(shape=(20,))
    # Run inputs through embedding
    emb1 = emb_layer(seq1)
    emb2 = emb_layer(seq2)
    # Shared encoders: both questions pass through the same BiLSTM stack.
    lstm_layer = Bidirectional(LSTM(300, dropout=0.15, recurrent_dropout=0.15, return_sequences=True))
    lstm_layer2 = Bidirectional(LSTM(300, dropout=0.15, recurrent_dropout=0.15))
    que_1 = lstm_layer(emb1)
    ans_1 = lstm_layer(emb2)
    que = lstm_layer2(que_1)
    ans = lstm_layer2(ans_1)
    # Attention
    q1_aligned, q2_aligned = soft_attention_alignment(que_1, ans_1)
    # Compose (ESIM-style enrichment of each side with the other's aligned view)
    q1_combined = Concatenate()([que_1, q2_aligned, submult(que_1, q2_aligned)])
    # Bug fix: the first element was que_1; the q2 branch must start from ans_1
    # to mirror q1_combined's structure.
    q2_combined = Concatenate()([ans_1, q1_aligned, submult(ans_1, q1_aligned)])
    q1_rep = apply_multiple(q1_combined, [GlobalAvgPool1D(), GlobalMaxPool1D()])
    q2_rep = apply_multiple(q2_combined, [GlobalAvgPool1D(), GlobalMaxPool1D()])
    # Symmetric interaction features between the pooled sentence vectors.
    mul = layers.multiply([que, ans])
    sub = layers.subtract([que, ans])
    diff = Lambda(lambda x: K.abs(x[0] - x[1]))([que, ans])
    add = layers.add([que, ans])  # NOTE(review): built but never fed into `merge`
    # Leak features get a small dense projection before being merged in.
    leaks_input = Input(shape=(3,))
    leaks_dense = Dense(150, activation='relu')(leaks_input)
    merge = concatenate([mul, sub, diff, q1_rep, q2_rep, leaks_dense])
    # Classifier head: two dense blocks with dropout + batch norm.
    x = Dropout(0.5)(merge)
    x = BatchNormalization()(x)
    x = Dense(600, activation='elu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Dense(600, activation='elu')(x)
    x = BatchNormalization()(x)
    pred = Dense(1, activation='sigmoid')(x)
    model = Model(inputs=[seq1, seq2, leaks_input], outputs=pred)
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
# 5-fold out-of-fold training: te_pred collects OOF predictions on train,
# test_pred accumulates the 5 folds' test predictions (averaged below).
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20 -- confirm version.
skf=StratifiedKFold(y,n_folds=5,shuffle=True,random_state=1024)
#skf=KFold(y.shape[0],n_folds=5,shuffle=True,random_state=1024)
te_pred=np.zeros(X_train_1.shape[0])
test_pred=np.zeros((X_test_1.shape[0],1))
cnt=0  # NOTE(review): never used
score=0
for idx_train, idx_val in skf:
    X_train_1_tr=X_train_1[idx_train]
    X_train_1_te=X_train_1[idx_val]
    X_train_2_tr=X_train_2[idx_train]
    X_train_2_te=X_train_2[idx_val]
    leaks_tr=leaks[idx_train]
    leaks_te=leaks[idx_val]
    y_tr=y[idx_train]
    y_te=y[idx_val]
    model = build_model()
    # early stopping on val loss; best weights checkpointed and restored before predicting
    early_stop = EarlyStopping(patience=2)
    check_point = ModelCheckpoint('paipaidai.hdf5', monitor="val_loss", mode="min", save_best_only=True, verbose=1)
    history = model.fit([X_train_1_tr,X_train_2_tr,leaks_tr], y_tr, batch_size = 1024, epochs = 10,validation_data=([X_train_1_te,X_train_2_te,leaks_te], y_te),callbacks=[early_stop,check_point])
    model.load_weights('paipaidai.hdf5')
    preds_te = model.predict([X_train_1_te,X_train_2_te,leaks_te])
    te_pred[idx_val] = preds_te[:, 0]
    #print(y_te.shape)
    #print(preds_te.shape)
    #print("!!!##########################!!!score_test:",log_loss(y_te,preds_te))
    #score+=log_loss(y_te,preds_te)
    preds = model.predict([X_test_1,X_test_2,test_leaks])
    test_pred+=preds
    #break
#score/=5
# overall CV score computed from the out-of-fold predictions
score=log_loss(y,te_pred)
print(score)
name="plantsgo_%s"%str(round(score,6))
print(score)
# write the OOF (train) and averaged test predictions as meta-features for stacking
t_p = pd.DataFrame()
t_p[name]=te_pred
t_p.to_csv("../meta_features/%s_train.csv"%name,index=False)
test_pred/=5
sub = pd.DataFrame()
sub[name]=test_pred[:,0]
sub.to_csv("../meta_features/%s_test.csv"%name,index=False)
|
997,262 | 6e784a114dda21a70c87cc9136ac444bbae26c17 | import os
import tweepy
from pprint import pprint
import json
# fetch the secrets from our virtual environment variables
# (raises KeyError at startup if any of the four variables is missing)
CONSUMER_KEY = os.environ['TWITTER_CONSUMER_KEY']
CONSUMER_SECRET = os.environ['TWITTER_CONSUMER_SECRET']
ACCESS_TOKEN = os.environ['TWITTER_ACCESS_TOKEN']
ACCESS_SECRET = os.environ['TWITTER_ACCESS_SECRET']
# authenticate to the service we're accessing
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
# create the connection
# NOTE(review): wait_on_rate_limit_notify was removed in tweepy 4.x -- confirm the pinned version
api = tweepy.API(auth, wait_on_rate_limit=True,
                 wait_on_rate_limit_notify=True)
# try:
# api.verify_credentials()
# print("Authentication OK")
# except:
# print("Error during authentication")
# define a handle to inspect for quicker reference
# handle = 'rakyll' # for example purposes; prop any handle you want!
# user = api.get_user(handle)
# #num_friends = user.friends_count
# print(user.name)
# print(num_friends)
# for tweet in tweepy.Cursor(api.user_timeline).items(20):
# # Process a single status
# print(tweet.text)
#api.update_status("Hello Tweepy")
# timeline = api.home_timeline()
# for tweet in timeline:
# print(f"{tweet.user.name} said {tweet.text}")
'''timeline2 = api.user_timeline(screen_name='Kaushik0106')
for tweet in timeline2:
print(f"{tweet.id} said {tweet.text}")'''
# destroy_id = [1148844793704910849, 1146514958932463616, 1145943875934195712]
# for i in destroy_id:
# api.destroy_status(id=i)
# user = api.get_user("Kaushik0106")
#
# print("User details:")
# print(user.name)
# print(user.description)
# print(user.location)
# print("Last 20 Followers:")
# for follower in user.followers():
# print(follower.name)
'''tweets = api.home_timeline(count=1)
tweet = tweets[0]
print(f"Liking tweet {tweet.id} of {tweet.author.name}")
api.create_favorite(tweet.id)'''
# for tweet in api.search(q="Python", lang="en", rpp=10):
# print(f"{tweet.user.name}:{tweet.text}")
# trend_list = api.trends_available()
# pprint(trend_list)
# trends_result = api.trends_place(1)
# for trend in trends_result[0]["trends"]:
# pprint(trend["name"])
# for status in tweepy.Cursor(api.user_timeline, id='rakyll').items(1):
# print(status)
# Fetch one page of @rakyll's followers and dump their raw JSON payloads to disk.
# NOTE(review): api.followers was renamed get_followers in tweepy 4.x -- confirm the pinned version
followers = api.followers(screen_name='rakyll')
followers_list = []
for i in followers:
    # pprint(tweet._json) # uncomment to see the tweet data
    followers_list.append(i._json)
with open('rakyll_follower.json', 'w') as f:
    json.dump(followers_list, f)
|
997,263 | 3ed42da3ecb0241859fd8ff028b95cd38044db4c | x=[2, 5, 3]
asd=sum(x)
print(asd)
|
997,264 | 42a2bef495a254377d3430c6679f1f4ef7d4f2bf | def numberEight():
def mainFunction(a,b):
def subFunction(c,d):
return c+d
x = subFunction(a,b)
return x
result = mainFunction(5,10)
print(result)
|
997,265 | f76bbae1fffeed49a5654098370c846e04b66a21 | import unittest
from flowsample import baz
class TestBaz(unittest.TestCase):
    """Unit tests for ``flowsample.baz``."""

    def test_baz_returns_baz(self):
        """baz() returns the literal string 'baz'."""
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
        self.assertEqual(baz.baz(), 'baz')
|
997,266 | 0f6cb1c87bda05fbb840f9d2b5160d8a34dee702 | import codecs
import arff, os
#from keras.models import model_from_json
import numpy as np
#import tensorflow as tf
import scipy.io as sio
import scipy
import pickle
import pandas as pd
from PIL import Image
from scipy.io import arff as arff_v2
import tensorflow as tf
from keras.models import model_from_json
def loadArffAsArray(arff_file, columns2delete=None):
    """Load an ARFF file and return its data section as a float numpy array.

    Args:
        arff_file: Path to the ARFF file.
        columns2delete: Optional list of column indices to drop from every row.

    Returns:
        numpy.ndarray: Float array of the (optionally column-filtered) data.
    """
    # Sorted-descending copy: deletions never shift later indices, and the
    # caller's list is not mutated (the old mutable [] default was shared and
    # sorted in place).
    columns2delete = sorted(columns2delete or [], reverse=True)
    # context manager closes the handle (the original leaked it)
    with codecs.open(arff_file, 'rb', 'utf-8') as file_:
        arff_content = arff.load(file_)
    arff_data = arff_content['data']
    if columns2delete:
        for row in arff_data:
            for col in columns2delete:
                del row[col]
    return np.asarray(arff_data, dtype=float)
def loadArffHeadersAsArray(arff_file, columns2delete=None):
    """Load an ARFF file and return its attribute (column) names.

    Args:
        arff_file: Path to the ARFF file.
        columns2delete: Optional list of column indices to drop.

    Returns:
        list[str]: Attribute names with the requested columns removed.
    """
    # Delete in descending order so earlier deletions don't shift the remaining
    # indices (the original deleted in the given order, which was wrong for
    # unsorted index lists); sorted() also avoids mutating the caller's list.
    columns2delete = sorted(columns2delete or [], reverse=True)
    with codecs.open(arff_file, 'rb', 'utf-8') as file_:
        arff_content = arff.load(file_)
    header_names = [attribute[0] for attribute in arff_content['attributes']]
    for col in columns2delete:
        del header_names[col]
    return header_names
def loadCompleteArff(arff_file, columns2delete=None, stringAttr=False):
    """Load an ARFF file and return data, header attributes, relation name and flag.

    Args:
        arff_file: Path to the ARFF file.
        columns2delete: Optional list of column indices to drop from data and header.
        stringAttr: When True, keep string attributes instead of casting the whole
            data section to a float array.

    Returns:
        tuple: (data, attributes, relation, stringAttr).
    """
    # Sorted-descending copy: keeps deletion indices valid and avoids mutating the
    # caller's list (the old mutable [] default was shared and sorted in place).
    columns2delete = sorted(columns2delete or [], reverse=True)
    with open(arff_file, 'r') as file_:
        arff_content = arff.load(file_)
    arff_data = arff_content['data']
    arff_header = arff_content['attributes']
    arff_relation = arff_content['relation']
    # data
    if not stringAttr:
        arff_data_array = np.asarray(arff_data, dtype=float)
        for col in columns2delete:
            arff_data_array = np.delete(arff_data_array, col, axis=1)
    else:
        # NOTE(review): only the first row is column-filtered here ("first index has
        # to be unique" per the original comment) -- confirm this is really intended.
        for col in columns2delete:
            del arff_data[0][col]
        arff_data_array = arff_data
        if not isinstance(arff_data_array[0][0], str):
            arff_data_array = np.asarray(arff_data_array, dtype=float)
    # headers
    for col in columns2delete:
        del arff_header[col]
    return arff_data_array, arff_header, arff_relation, stringAttr
def change_string_columns_by_numbers(arff_data_array, column_number):
    """Replace one column's values with a running row counter (0, 1, 2, ...).

    Mutates the input rows in place, then returns the whole table as a float
    numpy array.
    """
    for row_index, row in enumerate(arff_data_array):
        row[column_number] = row_index
    return np.asarray(arff_data_array, dtype=float)
def loadCompleteArff_v2(arff_file, columns2delete=None):
    """Load an ARFF file via scipy and return (data, header names, relation name).

    Args:
        arff_file: Path to the ARFF file.
        columns2delete: Optional list of column indices to drop.

    Returns:
        tuple: (float32 ndarray, attribute names, relation) on success, or
        (empty data, "", "") when the file contains no rows.
    """
    # Sorted-descending copy: header deletions below must run high-to-low, and the
    # caller's list (or a shared mutable default) must never be mutated in place.
    columns2delete = sorted(columns2delete or [], reverse=True)
    data, meta = arff_v2.loadarff(arff_file)
    print("arff_loaded")
    # NOTE(review): _attrnames is a private scipy attribute -- confirm it still
    # exists in the pinned scipy version (newer releases refactored MetaData).
    arff_header = meta._attrnames
    arff_relation = meta.name
    if len(data) == 0:
        return data, "", ""
    arff_data_array = np.asarray(data.tolist(), dtype=np.float32)
    arff_data_array = np.delete(arff_data_array, columns2delete, axis=1)
    for col in columns2delete:
        del arff_header[col]
    return arff_data_array, arff_header, arff_relation
def check_data_not_empty(arff_file):
    """Debug helper: print a marker message when the given value is truthy."""
    if arff_file:
        print("tenemos file")
def loadArffAsDataset(arff_file, columns2delete):
    """Load an ARFF file and return the full parsed dictionary.

    Args:
        arff_file: Path to the ARFF file.
        columns2delete: Accepted but ignored; kept for signature compatibility
            with the other loaders in this module.

    Returns:
        dict: The complete parsed ARFF structure (data, attributes, relation, ...).
    """
    # context manager closes the handle (the original leaked it); arff.load
    # parses the whole file eagerly, so closing afterwards is safe
    with codecs.open(arff_file, 'rb', 'utf-8') as file_:
        parsed = arff.load(file_)
    return parsed
def loadImageAsArray(path_img):
    """Open the image at ``path_img`` and return its pixel data as a numpy array."""
    return np.asarray(load_image(path_img))
def load_image(path_img):
    """Return a PIL image handle for the given path (data is loaded lazily)."""
    return Image.open(path_img)
def loadMatFiles(path_mat):
    """Load a MATLAB ``.mat`` file and return its variables as a dict."""
    return sio.loadmat(path_mat)
def loadCsv(filePath, delim, force=False, header=-1):
    """Read a CSV file into a pandas DataFrame.

    Args:
        filePath: Path of the CSV file.
        delim: Field separator.
        force: Unused; kept for backward compatibility.
        header: 0 to take column names from the first row. The legacy default
            -1 means "no header row" and is mapped to ``header=None``, because
            modern pandas rejects negative header values.

    Returns:
        pandas.DataFrame: The file contents (ISO-8859-1 decoded).
    """
    if header == -1:
        header = None  # old pandas accepted -1 as "no header"; new pandas raises
    return pd.read_csv(filePath, sep=delim, header=header, low_memory=True, encoding="ISO-8859-1")
def loadMatrixfromPICKLE(filePath, name):
    """Load a pickled matrix stored as ``<filePath><name>.p``.

    Args:
        filePath: Directory prefix (must already end with a path separator).
        name: Base file name, without the ``.p`` extension.

    Returns:
        The unpickled object.
    """
    filePath = filePath + str(name) + ".p"
    # the 'with' statement already closes the file; the original additionally
    # called f.close() inside the with-block, which was redundant
    with open(filePath, 'rb') as f:
        return pickle.load(f)
def load_model_sklearn(pathOfModel):
    """Unpickle and return a (scikit-learn) model object from disk.

    Args:
        pathOfModel: Path of the pickle file.

    Returns:
        The unpickled model object.
    """
    # the original passed open(...) directly to pickle.load and never closed
    # the file handle; the context manager guarantees release
    with open(pathOfModel, 'rb') as f:
        return pickle.load(f)
def loadMatrixFromMat(matPath):
    """Read a MATLAB ``.mat`` file and return its variable dict."""
    return scipy.io.loadmat(matPath)
def loadtxt_as_list(txtPath):
    """Return the lines of a text file as a list, without newline characters."""
    with open(txtPath, 'r') as handle:
        return handle.read().splitlines()
def load_npy(npy_path):
    """Load and return the array stored in a ``.npy`` file."""
    loaded = np.load(npy_path)
    return loaded
def load_summaries_tensorboard(path_summary):
    """Print every summary tag (and its scalar value, when present) from a TF event file."""
    # NOTE(review): tf.train.summary_iterator is a TF1-era API -- confirm the
    # installed TensorFlow still exposes it
    for evt in tf.train.summary_iterator(path_summary):
        for val in evt.summary.value:
            print(val.tag)
            if val.HasField('simple_value'):
                print(str(val.simple_value))
def analyse_results_tensorboard_xval(initial_path, tag_to_save, num_folds=10):
    """Aggregate one scalar tag over the train/val/test event files of a cross-validation run.

    For every partition and fold this scans the TensorBoard event files under
    ``<initial_path><part>/fold<k>/``, tracking the maximum and last value of
    ``tag_to_save``, then writes the across-fold means to ``<initial_path><tag_to_save>.csv``.

    Returns:
        numpy.ndarray: The per-fold maxima for the validation partition.
    """
    # initial_path = "/home/cristinalunaj/Downloads/20190311-173353/"
    # tag_to_save = "accuracy_1" # loss_function or accuracy_1
    # num_folds = 10
    output_file = initial_path + tag_to_save + ".csv"
    # Auxiliar variables: per-fold running max and most-recent value, per partition
    max_train = np.zeros(num_folds)
    max_val = np.zeros(num_folds)
    max_test = np.zeros(num_folds)
    last_train = np.zeros(num_folds)
    last_val = np.zeros(num_folds)
    last_test = np.zeros(num_folds)
    for part in ["train", "val", "test"]:
        for fold in range(num_folds):
            path_file = "".join([initial_path, part, "/fold", str(fold), "/"])
            if(not os.path.exists(path_file)):
                continue
            files = os.listdir(path_file)
            for file in files:
                if ("events" in file):
                    path_to_events_file = path_file + file
                    try:
                        for e in tf.train.summary_iterator(path_to_events_file):
                            for v in e.summary.value:
                                if v.tag == tag_to_save:
                                    print(v.simple_value)
                                    new_value = v.simple_value
                                    if (part == "train"):
                                        if (max_train[fold] < new_value):
                                            max_train[fold] = new_value
                                        last_train[fold] = new_value
                                    elif (part == "val"):
                                        if (max_val[fold] < new_value):
                                            max_val[fold] = new_value
                                        last_val[fold] = new_value
                                    elif (part == "test"):
                                        if (max_test[fold] < new_value):
                                            max_test[fold] = new_value
                                        last_test[fold] = new_value
                    # NOTE(review): bare except silently skips unreadable/corrupt event
                    # files -- confirm this best-effort behaviour is intended
                    except:
                        continue
    # save average results in same folder
    with open(output_file, "w") as f:
        # headers
        # NOTE(review): there is no comma between "...test" and "last_...", so the
        # header row has one field fewer than the values row below
        f.write(
            "max_" + tag_to_save + "train,max_" + tag_to_save + "val,max_" + tag_to_save + "test" + "last_" + tag_to_save + "train,last_" + tag_to_save + "val,last_" + tag_to_save + "test\n")
        # values
        f.write(str(np.mean(max_train)) + "," + str(np.mean(max_val)) + "," + str(
            np.mean(max_test)) + "," + str(
            np.mean(last_train)) + "," + str(np.mean(last_val)) + "," + str(np.mean(last_test)) + "\n")
    print("Written summary data into " + output_file)
    return max_val
def loadModelKeras(weights_path, json_path):
    """Rebuild a Keras model from an architecture JSON file plus an HDF5 weights file.

    Args:
        weights_path: Path of the HDF5 file written by ``model.save_weights``.
        json_path: Path of the JSON file holding the serialized architecture.

    Returns:
        The reconstructed Keras model with weights loaded.
    """
    # read the architecture; the context manager replaces the explicit close()
    with open(json_path, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(weights_path)
    print("Loaded model from disk")
    return loaded_model
# initial_path = "/home/cris/PycharmProjects/InterSpeech19/data/results/LSTM_with_embeddings/lstm_variable_logs/split_xval_embeddings/"
# listFolders = os.listdir(initial_path)
# tag_to_save = "accuracy_1"
# for fold in listFolders:
# path = os.path.join(initial_path, fold)
# if('20190318' in fold):
# analyse_results_tensorboard_xval(path+"/", tag_to_save, num_folds=10)
# path_summaries = "data/results/LSTM_with_embeddings/lstm_variable_logs/split_xval_embeddings/20190311-173353/val/fold0/events.out.tfevents.1552322090.cris-X550VXK"
# load_summaries_tensorboard(path_summaries)
# #
# """
# Función que carga un modelo tensorflow en el programa
# filename: path del modelo a cargar (con id)
# return:
# session: sesión de del modelo tf
# tf_test_dataset: variable input para test del modelo
# test_prediction: variable output para test del modelo
# """
# def loadModel(filename):
# session = tf.Session()
# filenamemeta = filename + '.meta'
# new_saver = tf.train.import_meta_graph(filenamemeta)
# new_saver.restore(session, filename)
# tf_train_dataset = session.graph.get_tensor_by_name("trainds_input:0")
# tf_train_labels = session.graph.get_tensor_by_name("trainlb_input:0")
# # tf_valid_dataset = session.graph.get_tensor_by_name("valid_input:0")
# tf_test_dataset = session.graph.get_tensor_by_name("test_input:0")
# train_prediction = session.graph.get_tensor_by_name("train_output:0")
# # valid_prediction = session.graph.get_tensor_by_name("valid_output:0")
# test_prediction = session.graph.get_tensor_by_name("test_output:0")
# weights_SGD_1 = session.graph.get_tensor_by_name("weights_SGD_1:0")
# weights_SGD_2 = session.graph.get_tensor_by_name("weights_SGD_2:0")
# biases_SGD_1 = session.graph.get_tensor_by_name("biases_SGD_1:0")
# biases_SGD_2 = session.graph.get_tensor_by_name("biases_SGD_2:0")
#
# return session, tf_train_dataset, tf_train_labels, tf_test_dataset, train_prediction, test_prediction, weights_SGD_1, weights_SGD_2, biases_SGD_1, biases_SGD_2
#
# dataMat = loadMatFiles('/home/cris/Documentos/becas-DIE/AUDIO/datasets/RECOLA_dataset/avec_2016/ratings_gold_standard/gs_delayed_0seg.mat')
# dataMat['gold_standard'][0,0]
# print('Hola')
# print(dataMat) |
997,267 | 6dd7506f089140eb9700a726ecc51685d8b884c5 | # coding=utf-8
import datetime
import json
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http.response import HttpResponseRedirect, HttpResponse
from django.utils.translation import ugettext as _
from django.views.generic import View
from accounts.views import LoginRequiredMixin, ProfileAwareView
from core.models import Work
from payments.banks import bank_codes
from payments.models import Item, Purchase, PurchaseStatus, PaymentMethod
from payments.processor import PaymentProcessor
class CreatePayment(LoginRequiredMixin, View):
    """Creates a pending Purchase for the requested works and hands off to PayPal."""
    # Fixture primary keys: the "pending" PurchaseStatus row and the PayPal PaymentMethod row.
    PENDING_ID = 1
    PAYPAL_METHOD_ID = 1
    def get(self, request):
        """Build the purchase and its items, create a PayPal payment and redirect to it.

        Query string: repeated ``work_id`` parameters select the works to buy.
        """
        work_ids = request.GET.getlist('work_id', [])
        works = Work.objects.filter(id__in=work_ids)
        user = request.user
        for work in works:
            if work.is_owned_by(user):
                # FIXME: this validation is temporary; once we have a cart there should be a
                # FIXME: purchase-confirmation page where these checks run, letting the user
                # FIXME: edit the cart before finalizing the purchase.
                messages.error(request, _('You own this comic book!'))
                return HttpResponseRedirect(reverse('payment.error'))
        pending_status = PurchaseStatus.objects.get(pk=self.PENDING_ID)
        # NOTE(review): datetime.now() is timezone-naive -- confirm against Django's USE_TZ setting
        purchase = Purchase(date=datetime.datetime.now(), buyer=user, status=pending_status)
        purchase.save()
        # one Item per work; total accumulated and stored on the purchase afterwards
        item_list = []
        total_price = 0
        for work in works:
            work_price = work.price
            total_price += work_price
            item_list.append(Item(work=work, price=work_price, purchase=purchase, taxes=0))
        Item.objects.bulk_create(item_list)
        purchase.total = total_price
        purchase.save()
        processor = PaymentProcessor()
        payment_method = PaymentMethod.objects.get(pk=self.PAYPAL_METHOD_ID)
        return_url = request.build_absolute_uri(reverse('payments.paypal.execute'))
        cancel_url = request.build_absolute_uri(reverse('core.index'))
        payment = processor.create_payment(purchase, payment_method, request=request, return_url=return_url, cancel_url=cancel_url)
        # stash ids in the session so the execute/thanks views can recover them
        request.session['payment_id'] = payment.code
        request.session['work_ids'] = work_ids
        return processor.execute_payment(payment)
class PaymentDoesNotExist(LoginRequiredMixin, ProfileAwareView):
    """Renders the 'payment not found' page for logged-in users."""
    template_name = 'no_payments.html'
class PaymentErrorView(LoginRequiredMixin, ProfileAwareView):
    """Renders the payment-error page (presumably the 'payment.error' route)."""
    template_name = 'payment_error.html'
class PaymentThanks(LoginRequiredMixin, ProfileAwareView):
    """Renders the post-purchase thank-you page."""
    template_name = 'thanks.html'
    def get(self, request, *args, **kwargs):
        # Pass the purchased work ids (stored in the session by CreatePayment) to the template.
        # NOTE(review): raises KeyError when 'work_ids' is absent from the session -- confirm intended.
        kwargs['work_ids'] = request.session['work_ids']
        return super(PaymentThanks, self).get(request, *args, **kwargs)
class BankCodeProvider(LoginRequiredMixin, View):
    """
    Provides all bank codes as JSON
    """
    def get(self, *args, **kwargs):
        """Return every known bank as ``[{"id": <code>, "text": "<code>-<name>"}, ...]``."""
        # Materialize the mapping into a list: under Python 3, map() returns an
        # iterator that json.dumps cannot serialize (the original worked only on
        # Python 2, where map() returned a list).
        banks = [{'id': bank['code'], 'text': "{0}-{1}".format(bank['code'], bank['name'])}
                 for bank in bank_codes]
        data = json.dumps(banks)
        return HttpResponse(data, content_type='application/json')
997,268 | 584f9e577fd23c3881aaa738c4447b48d36d32f9 | from requests.auth import HTTPBasicAuth
import unittest
from unittest.mock import MagicMock, patch, PropertyMock
from lib.jira_api_call import JiraApiCall
from lib.api_call import RequestTypes
from lib.exceptions import JiraEmailNotSetException, JiraApiTokenNotSetException, JiraHostnameNotSetException
class TestJiraApiCall(unittest.TestCase):
def setUp(self) -> None:
    """Patch ``lib.variable.Variable.value`` for the duration of each test."""
    # Individual tests drive configuration lookups by assigning
    # self.m_var_value.return_value; addCleanup undoes the patch afterwards.
    var_value_patch = patch("lib.variable.Variable.value", new_callable=PropertyMock)
    self.m_var_value = var_value_patch.start()
    self.addCleanup(var_value_patch.stop)
def test_init_no_data(self):
    """JiraApiCall.__init__.no_data"""
    # Without a payload, the constructor must default data to None.
    api_call = JiraApiCall(RequestTypes.GET, "sample")
    self.assertEqual(api_call.type, RequestTypes.GET)
    self.assertEqual(api_call.url, "sample")
    self.assertIsNone(api_call.data)
def test_init_with_data(self):
    """JiraApiCall.__init__.with_data"""
    # A supplied payload must be stored unchanged.
    api_call = JiraApiCall(RequestTypes.POST, "sample", data={"name": "John"})
    self.assertEqual(api_call.type, RequestTypes.POST)
    self.assertEqual(api_call.url, "sample")
    self.assertEqual(api_call.data, {"name": "John"})
def test_jira_email(self):
    """JiraApiCall.jira_email"""
    # jira_email is read through the patched Variable.value (see setUp).
    api_call = JiraApiCall(RequestTypes.GET, "sample")
    self.m_var_value.return_value = "test@mycf.co"
    self.assertEqual(api_call.jira_email, "test@mycf.co")
def test_jira_token(self):
    """JiraApiCall.jira_token"""
    api_call = JiraApiCall(RequestTypes.GET, "sample")
    self.m_var_value.return_value = "abc123"
    # Bug fix: the original asserted jira_email, so jira_token was never exercised.
    self.assertEqual(api_call.jira_token, "abc123")
def test_jira_hostname(self):
    """JiraApiCall.jira_hostname"""
    api_call = JiraApiCall(RequestTypes.GET, "sample")
    self.m_var_value.return_value = "mediayoucanfeel.atlassian.net"
    # Bug fix: the original asserted jira_email, so jira_hostname was never exercised.
    self.assertEqual(api_call.jira_hostname, "mediayoucanfeel.atlassian.net")
@patch("lib.jira_api_call.JiraApiCall.jira_hostname", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_token", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_email", new_callable=PropertyMock)
def test_validate_environment_valid(self, m_jira_email, m_jira_token, m_jira_hostname):
    """JiraApiCall.validate_environment.valid"""
    # patch decorators apply bottom-up: m_jira_email matches the innermost decorator.
    # All three settings present: validate_environment must read each and not raise.
    api_call = JiraApiCall(RequestTypes.GET, "sample")
    m_jira_email.return_value = "test@mycf.co"
    m_jira_token.return_value = "abc123"
    m_jira_hostname.return_value = "mycf.atlassian.net"
    api_call.validate_environment()
    m_jira_email.assert_called()
    m_jira_token.assert_called()
    m_jira_hostname.assert_called()
@patch("lib.jira_api_call.JiraApiCall.jira_hostname", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_token", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_email", new_callable=PropertyMock)
def test_validate_environment_no_email(self, m_jira_email, m_jira_token, m_jira_hostname):
    """JiraApiCall.validate_environment.no_email"""
    # Missing email must raise before token/hostname are ever read.
    api_call = JiraApiCall(RequestTypes.GET, "sample")
    m_jira_email.return_value = None
    m_jira_token.return_value = "abc123"
    m_jira_hostname.return_value = "mycf.atlassian.net"
    self.assertRaises(JiraEmailNotSetException, api_call.validate_environment)
    m_jira_email.assert_called()
    # NOTE(review): assumes validate_environment checks email first -- confirm ordering.
    m_jira_token.assert_not_called()
    m_jira_hostname.assert_not_called()
@patch("lib.jira_api_call.JiraApiCall.jira_hostname", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_token", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_email", new_callable=PropertyMock)
def test_validate_environment_no_token(self, m_jira_email, m_jira_token, m_jira_hostname):
    """JiraApiCall.validate_environment.no_token"""
    # Missing token must raise after email was read but before hostname is read.
    api_call = JiraApiCall(RequestTypes.GET, "sample")
    m_jira_email.return_value = "test@mycf.co"
    m_jira_token.return_value = None
    m_jira_hostname.return_value = "mycf.atlassian.net"
    self.assertRaises(JiraApiTokenNotSetException, api_call.validate_environment)
    m_jira_email.assert_called()
    m_jira_token.assert_called()
    # NOTE(review): assumes the email -> token -> hostname check order -- confirm.
    m_jira_hostname.assert_not_called()
@patch("lib.jira_api_call.JiraApiCall.jira_hostname", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_token", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_email", new_callable=PropertyMock)
def test_validate_environment_no_hostname(self, m_jira_email, m_jira_token, m_jira_hostname):
"""JiraApiCall.validate_environment.no_hostname"""
api_call = JiraApiCall(RequestTypes.GET, "sample")
m_jira_email.return_value = "test@mycf.co"
m_jira_token.return_value = "abc123"
m_jira_hostname.return_value = None
self.assertRaises(JiraHostnameNotSetException, api_call.validate_environment)
m_jira_email.assert_called()
m_jira_token.assert_called()
m_jira_hostname.assert_called()
@patch("lib.jira_api_call.JiraApiCall.jira_hostname", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_token", new_callable=PropertyMock)
@patch("lib.jira_api_call.JiraApiCall.jira_email", new_callable=PropertyMock)
@patch("requests.get")
@patch("lib.jira_api_call.JiraApiCall.validate_environment")
def test_exec(self, m_val_env, m_get, m_jira_email, m_jira_token, m_jira_hostname):
"""JiraApiCall.exec"""
api_call = JiraApiCall(RequestTypes.GET, "sample")
m_val_env.return_value = None
m_response = MagicMock()
m_get.return_value = m_response
m_jira_email.return_value = "test@mycf.co"
m_jira_token.return_value = "abc123"
m_jira_hostname.return_value = "mycf.atl.net/"
auth = HTTPBasicAuth("test@mycf.co", "abc123")
response = api_call.exec()
m_get.assert_called_with("mycf.atl.net/sample", headers={"X-Atlassian-Token": "no-check"}, auth=auth, json=None)
self.assertEqual(response, m_response)
if __name__ == '__main__':
    # Emit JUnit-style XML into test-reports/ so CI systems can parse results.
    import xmlrunner
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
|
997,269 | 694d4e62a0bc27815c86a85a0ca487d28aa13ddd | import numpy as np
import netCDF4
import os
import sys
import subprocess
import pyroms
from pyroms_toolbox import jday2date
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
# draw line around map projection limb.
# color background of map projection region.
# missing values over land will show up this color.
# plot sst, then ice with pcolor
# add a title.
#year = int(sys.argv[1])
#lst_year = [year]
lst_file = []
#for year in lst_year:
#    year = np.str(year)
#lst = subprocess.getoutput('ls clima/*.nc')
# Collect the NetCDF output files to plot (currently one hard-coded file).
lst = subprocess.getoutput('ls 19800104.ocean_daily_old.nc')
lst = lst.split()
lst_file = lst_file + lst
#grd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('prog.nc')
# Static grid geometry file providing cell-centre lat/lon coordinates.
grd = netCDF4.Dataset('sea_ice_geometry.nc', "r")
clat = grd.variables["geolat"][:]
clon = grd.variables["geolon"][:]
# North-polar stereographic projection covering the Arctic sector of interest.
m = Basemap(projection='stere', lat_0=90, lon_0=180, llcrnrlon=-210,
            llcrnrlat=40, urcrnrlon=-50, urcrnrlat=50, resolution='h')
#m = Basemap(llcrnrlon=-121., llcrnrlat=17., urcrnrlon=-125.0, urcrnrlat=53.0,\
#            rsphere=(6378137.00,6356752.3142),\
#            resolution='h', projection='lcc',\
#            lat_0=30., lat_1=40.0, lon_0=-78.)
# Project lon/lat to map x/y once; the grid is static across all frames.
x, y = m(clon, clat)
# Contour levels (salinity, presumably psu — confirm units in the model
# output) and colormap shared by every frame.
levels = np.arange(32.5, 35.5, 0.05)
cmap = plt.cm.get_cmap("plasma_r")
for file in lst_file:
    print("Plotting "+file)
    nc = netCDF4.Dataset(file, "r")
    time = nc.variables["time"][:]
    ntim = len(time)
#    for it in range(10):
    # Render every 30th time record as one movie frame.
    for it in range(0,ntim,30):
        fig = plt.figure(figsize=(4,9))
        ax = fig.add_subplot(111)
        ax.set_aspect('equal')
#        ax.axis(xmin=-300,xmax=300)
#        m.drawmapboundary(fill_color='0.3')
        m.drawcoastlines()
        # NOTE(review): the field read is "sss" (sea-surface salinity)
        # although the local name says ssh — confirm the intended variable.
        ssh = nc.variables["sss"][it,:,:]
        time = nc.variables["time"][it]
        cs = m.contourf(x, y, ssh, levels=levels, cmap=cmap, extend='both')
#        csa = m.contour(x, y, ssh, levels=levels, linewidths=(0.5,))
#        cs = plt.contourf(clon, clat, ssh, levels=levels, cmap=cmap, extend='both')
        plt.title('Surface salt')
#        csa = plt.contour(clon, clat, ssh, levels=levels, linewidths=(0.5,))
        # Horizontal colorbar placed in its own axes along the bottom edge.
        cbaxes = fig.add_axes([0.1, 0.05, 0.8, 0.02])
        plt.colorbar(orientation='horizontal', cax=cbaxes)
        print('printing frame:', it)
        # Zero-padded frame number so movie encoders sort frames correctly.
        fig.savefig('movie_sss/sss_%(number)04d.png'%{'number': it})
        plt.close()
    nc.close()
|
997,270 | adf9a1c272c6644e456b6ef9d3b4a30d25e7f244 | from django.shortcuts import render, redirect
from django.urls import reverse
from .forms import UserForm
from django.contrib.auth import login, authenticate
# Create your views here.
def signup(request):
    """Register a new user via UserForm and log them in on success.

    On a valid POST the new user is saved, logged in, and redirected to
    the polls home page; otherwise the registration form is (re)rendered.
    """
    if request.method == 'POST':
        form = UserForm(request.POST)
        if form.is_valid():
            user = form.save()
            # BUG FIX: the module-level name ``login`` is rebound by the
            # ``login`` view defined below, so calling login(request, user)
            # here invoked that view with the wrong signature. Import the
            # auth framework's login under an alias instead.
            from django.contrib.auth import login as auth_login
            auth_login(request, user)
            return redirect('polls:home')
    else:
        form = UserForm()
    return render(request,'registration/reg_index.html',{'form':form})
def login(request):
    """Authenticate a user from POSTed credentials and start a session.

    Renders the login template on GET; on POST, authenticates and
    redirects to the polls home page, or reports a failure.
    """
    if request.method == "POST":
        username = request.POST.get('username', False)
        password = request.POST.get('password', False)
        user = authenticate(request, username=username, password=password)
        if user is not None:
            # BUG FIX: calling login(request, user) here recursed into this
            # very view (the function shadows django.contrib.auth.login);
            # use the auth framework's login under a local alias.
            from django.contrib.auth import login as auth_login
            auth_login(request, user)
            # BUG FIX: the redirect was created but never returned.
            return redirect('polls:home')
        else:
            # BUG FIX: a Django view must return an HttpResponse, not a
            # bare string (which raised at runtime).
            from django.http import HttpResponse
            return HttpResponse("You have provided wrong infromation")
    return render(request, 'registration/login.html', {})
|
997,271 | ba569827bf4ad3cf24b7097a06eab9bceec2d403 | from django.urls import path
from server.views import IndexView, PredictView
# URL routes for the prediction server.
# NOTE(review): IndexView/PredictView are passed without .as_view(); this is
# only valid if they are function-based views despite the class-like names —
# confirm against server.views.
urlpatterns = [
    path("", IndexView, name="home"),
    path("predict", PredictView, name="predict")
]
|
997,272 | 66ab9f8d76c1cea21ae9951145681abb0f7e03b2 | # /usr/bin/evn python
# -*-coding:utf-8 -*-
# Author : XK
import xlrd
import os
class SheetTypeError(Exception):
    """Raised when the sheet selector is neither an int index nor a str name.

    BUG FIX: this class previously did not inherit from Exception, so
    ``raise SheetTypeError(...)`` itself failed with
    ``TypeError: exceptions must derive from BaseException``.
    """
    pass
class ExcelReader:
    """Read an Excel worksheet into a list of row dictionaries.

    The first row of the selected sheet is treated as the header; each
    subsequent row becomes a dict keyed by those header values. Results
    are cached in ``self._data`` after the first read.
    """
    def __init__(self,excel_file,sheet_by):
        # Check that the workbook file exists before storing any state.
        if os.path.exists(excel_file):
            self.excel_file = excel_file
            # Sheet selector: an int index or a str sheet name.
            self.sheet_by = sheet_by
            self._data = list()
        else:
            raise FileNotFoundError("文件不存在,请确认")
    def data(self):
        # Lazy read: only open the workbook on first access.
        if not self._data:
            workbook = xlrd.open_workbook(self.excel_file)
            if type(self.sheet_by) not in [str,int]:
                raise SheetTypeError('sheet类型不正确,请检查')
            elif isinstance(self.sheet_by,int):
                sheet = workbook.sheet_by_index(self.sheet_by)
            elif isinstance(self.sheet_by,str):
                sheet = workbook.sheet_by_name(self.sheet_by)
            # First row supplies the dictionary keys for every data row.
            title = sheet.row_values(0)
            # data = []
            for r in range(1,sheet.nrows):
                row_value = sheet.row_values(r)
                self._data.append(dict(zip(title,row_value)))
        return self._data
if __name__ =="__main__":
reader =ExcelReader("../data/data.xls","TestCases")
print(reader.data()) |
997,273 | fe47b11b8c9140a09f1130760c6aa0f88c6f9372 | from django.conf.urls import url
from .views import *
# NOTE(review): these url() patterns are unanchored regexes — Django matches
# them anywhere at the start-search of the path, so e.g. 'get' also matches
# paths merely containing "get". Consider anchoring (r'^get$') if exact
# matching is intended.
urlpatterns = [
    url('get', get),
    url('by_name', by_name),
    url('by_id', by_id),
    url('check', check),
    url('delete', delete),
    url('create/', create),
]
|
997,274 | 018bb73b334b0671c0c19471720fed10cc47893d | import datetime
import json
import logging
import re
import pandas as pd
from covid19_scrapers.census import get_aa_pop_stats
from covid19_scrapers.utils.http import get_cached_url
from covid19_scrapers.utils.parse import raw_string_to_int
from covid19_scrapers.utils.misc import to_percentage
from covid19_scrapers.utils.html import url_to_soup
from covid19_scrapers.scraper import ScraperBase
_logger = logging.getLogger(__name__)
class CaliforniaLosAngeles(ScraperBase):
    """Los Angeles publishes demographic breakdowns of COVID-19 cases and
    deaths on a county web page, but the summary data and update date
    are loaded dynamically in a script.
    We scrape this data from the script, and the demographic
    breakdowns from the main page's HTML.
    """
    # Script containing the dynamically loaded totals and update date.
    JS_URL = 'http://publichealth.lacounty.gov/media/Coronavirus/js/casecounter.js'
    # HTML page carrying the per-race breakdown tables.
    DATA_URL = 'http://publichealth.lacounty.gov/media/Coronavirus/locations.htm'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def name(self):
        """Human-readable scraper name used in reports."""
        return 'California - Los Angeles'

    @classmethod
    def is_beta(cls):
        # Beta unless a subclass explicitly sets BETA_SCRAPER = False.
        return getattr(cls, 'BETA_SCRAPER', True)

    def _get_aa_pop_stats(self):
        """African-American population statistics for LA county."""
        return get_aa_pop_stats(self.census_api, 'California',
                                county='Los Angeles')

    @staticmethod
    def _extract_by_race_table(header_tr):
        """Collect (race, count) rows following a table header row.

        Walks <tr> siblings after *header_tr* until a row without a
        populated first cell is reached; the leading marker character of
        each race label is stripped. Returns a DataFrame indexed by race.
        """
        data = []
        for tr in header_tr.find_all_next('tr'):
            td = tr.td
            if not td or not td.text:
                break
            data.append([td.text.strip()[1:].strip(),
                         raw_string_to_int(td.next_sibling.text)])
        return pd.DataFrame(data, columns=['race', 'count']).set_index('race')

    def _scrape(self, **kwargs):
        """Fetch totals from the JS file and race breakdowns from the page."""
        r = get_cached_url(self.JS_URL)
        json_str = re.search(r'data = (([^;]|\n)*)',
                             r.text, re.MULTILINE).group(1).strip()
        # Commas on the last item in a list or object are valid in
        # JavaScript, but not in JSON.
        # BUG FIX: re.MULTILINE was previously passed as re.sub's
        # positional *count* argument (count=8), silently limiting the
        # number of trailing commas removed; it must be passed as flags.
        json_str = re.sub(r',(\s|\n)*([]}]|$)', r'\2',
                          json_str, flags=re.MULTILINE)
        _logger.debug(f'Extracted JSON: {json_str}')
        data = json.loads(json_str)['content']
        # Find the update date (MM/DD/YYYY embedded in the info string).
        month, day, year = map(int, re.search(
            r'(\d{2})/(\d{2})/(\d{4})',
            data['info']).groups())
        date = datetime.date(year, month, day)
        _logger.info(f'Processing data for {date}')
        # Extract the total counts
        total_cases = raw_string_to_int(data['count'])
        total_deaths = raw_string_to_int(data['death'])
        # Fetch the HTML page
        soup = url_to_soup(self.DATA_URL)
        # Extract the Black/AA counts
        cases = self._extract_by_race_table(soup.find(id='race'))
        deaths = self._extract_by_race_table(soup.find(id='race-d'))
        _logger.debug(f'cases: {cases}')
        _logger.debug(f'deaths: {deaths}')
        # Known-race totals exclude cases still under investigation.
        known_cases = cases.drop('Under Investigation')['count'].sum()
        known_deaths = deaths.drop('Under Investigation')['count'].sum()
        aa_cases = cases.loc['Black', 'count'].sum()
        aa_deaths = deaths.loc['Black', 'count'].sum()
        aa_cases_pct = to_percentage(aa_cases, known_cases)
        aa_deaths_pct = to_percentage(aa_deaths, known_deaths)
        return [self._make_series(
            date=date,
            cases=total_cases,
            deaths=total_deaths,
            aa_cases=aa_cases,
            aa_deaths=aa_deaths,
            pct_aa_cases=aa_cases_pct,
            pct_aa_deaths=aa_deaths_pct,
            pct_includes_unknown_race=False,
            pct_includes_hispanic_black=False,
            known_race_cases=known_cases,
            known_race_deaths=known_deaths,
        )]
|
997,275 | ab8f7c3fb8b7de5ef602bc087fdc8b797495e19c | from django.contrib import admin
# Register your models here.
from .models import (
    JeCertification,
    JeWorker,
    JeWorkerCertification,
    JeWorkerPortfolio,
    JeWorkerSkill,
)

# Expose each model in the Django admin with the default ModelAdmin.
admin.site.register(JeWorker)
admin.site.register(JeWorkerPortfolio)
admin.site.register(JeWorkerSkill)
admin.site.register(JeWorkerCertification)
admin.site.register(JeCertification)
997,276 | ecc0a0be60c476b02a704114f73f5d8d84a8630b | #!/usr/bin/env python3
from .error_logger import RuntimeException
class Environment:
    """A lexical scope mapping variable names to values.

    Scopes nest through ``enclosing``: lookups and assignments that miss
    in the current scope are delegated outward, and a miss at the global
    (outermost) scope raises RuntimeException. The *At variants address a
    scope a fixed number of hops away, as precomputed by the resolver.
    """

    def __init__(self, enclosing=None):
        self.enclosing = enclosing  # parent scope, or None for globals
        self.values = {}            # name (str) -> value bindings

    def define(self, name, val):
        """Bind *name* (a plain string) to *val* in this scope."""
        self.values[name] = val

    def get(self, name):
        """Resolve a token's lexeme, walking outward through scopes."""
        lexeme = name.lexeme
        if lexeme in self.values:
            return self.values[lexeme]
        if self.enclosing:
            return self.enclosing.get(name)
        raise RuntimeException(name, f"Undefined variable {lexeme}.")

    def assign(self, name, val):
        """Rebind an existing variable, searching outward through scopes."""
        lexeme = name.lexeme
        if lexeme in self.values:
            self.values[lexeme] = val
            return
        if self.enclosing:
            return self.enclosing.assign(name, val)
        raise RuntimeException(name, f"Undefined variable {lexeme}.")

    def getAt(self, depth, name):
        """Read *name* (a string) from the scope *depth* hops out."""
        return self.nthAncestor(depth).values.get(name)

    def assignAt(self, depth, name, value):
        """Write *name* (a string) in the scope *depth* hops out."""
        self.nthAncestor(depth).values[name] = value

    def nthAncestor(self, n):
        """Return the environment *n* enclosing links away from this one."""
        env = self
        hops = n
        while hops:
            env = env.enclosing
            hops -= 1
        return env
|
997,277 | 292444ba4e60991eb20122080a5c149a6413c7e7 | """
Client controller module.
Consists of internal and external interfaces to the Client Controller, as well as ClientModel - the data structure
that is shared between these interfaces.
Both interfaces of the Client Controller may be started with start_client_controller function.
"""
import logging
import sys
import time
import argparse
import cloud_controller.knowledge.knowledge_pb2_grpc as servicers
import cloud_controller.middleware.middleware_pb2_grpc as mw_servicers
from cloud_controller import CLIENT_CONTROLLER_HOST, CLIENT_CONTROLLER_PORT, MAX_CLIENTS, DEFAULT_WAIT_SIGNAL_FREQUENCY
from cloud_controller.client_controller.client_model import ClientModel
from cloud_controller.client_controller.client import ClientStatus
from cloud_controller.client_controller.external import ClientControllerExternal
from cloud_controller.client_controller.internal import ClientControllerInternal
from cloud_controller.middleware import CLIENT_CONTROLLER_EXTERNAL_HOST, CLIENT_CONTROLLER_EXTERNAL_PORT
from cloud_controller.middleware.helpers import setup_logging, start_grpc_server
def start_client_controller(wait_signal_frequency=DEFAULT_WAIT_SIGNAL_FREQUENCY) -> None:
    """
    Starts both external and internal Client Controller interfaces. Creates the shared data structure for these
    interfaces. Runs the liveness check thread that checks whether any clients have been disconnected.
    This function does not return.
    May be invoked from a thread or as a separate process.

    :param wait_signal_frequency: seconds between WAIT signals sent to
        connected clients (forwarded to ClientModel).
    """
    setup_logging()
    # A common data structure for external and internal interfaces:
    client_model = ClientModel(wait_signal_frequency)
    # Internal interface: consumed by the rest of the cloud controller.
    internal_server = start_grpc_server(
        servicer=ClientControllerInternal(client_model),
        adder=servicers.add_ClientControllerInternalServicer_to_server,
        host=CLIENT_CONTROLLER_HOST,
        port=CLIENT_CONTROLLER_PORT
    )
    # External interface: consumed by clients; thread pool sized for MAX_CLIENTS.
    external_server = start_grpc_server(
        servicer=ClientControllerExternal(client_model),
        adder=mw_servicers.add_ClientControllerExternalServicer_to_server,
        host=CLIENT_CONTROLLER_EXTERNAL_HOST,
        port=CLIENT_CONTROLLER_EXTERNAL_PORT,
        threads=MAX_CLIENTS
    )
    try:
        # Liveness loop: every liveness_check_frequency seconds, flag
        # CONNECTED clients whose last call is older than one interval.
        while True:
            time.sleep(client_model.liveness_check_frequency)
            client_model.update_distances()
            timestamp = time.perf_counter()
            clients_to_delete = []
            for application in client_model.clients:
                for id_ in client_model.clients[application]:
                    client = client_model.clients[application][id_]
                    if client.last_call < timestamp - client_model.liveness_check_frequency and \
                            client.status == ClientStatus.CONNECTED:
                        clients_to_delete.append((application, id_))
            # Disconnect after iteration so the dict is not mutated mid-loop.
            for app, id_ in clients_to_delete:
                logging.info("Cancelling the call for client (%s:%s)" % (app, id_))
                client_model.disconnect_client(app, id_)
    except KeyboardInterrupt:
        print("ClientController: ^C received, ending")
        external_server.stop(0)
        internal_server.stop(0)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="Client controller")
arg_parser.add_argument("-f", "--wait-signal-freq", type=float, default=5,
help="Frequency in seconds after which the WAIT command is send to a connected client.")
args = arg_parser.parse_args()
if not 0 < args.wait_signal_freq <= 60:
print(f"Wait signal frequency {args.wait_signal_freq} is out of bounds.", file=sys.stderr)
exit(1)
start_client_controller(args.wait_signal_freq)
|
997,278 | 854396bcfa7aefef2632007d783bfd0c290221b6 | /home/runner/.cache/pip/pool/ea/a5/2f/7f48105f6f5f352e799880d111018c8c33d37a8fdbc434dd9a889c117d |
997,279 | 76d8d1400ff74dc37cd860c45a6b5e8b372e1d30 | from random import randint
# "Ask To Oracle": the player gets a fixed number of attempts to guess
# the secret number, with higher/lower hints after each wrong guess.
seq_num = 12        # secret number to guess
num_jogadas = 3     # total attempts allowed
attempt = 1         # current attempt (renamed from "round" to avoid shadowing the builtin)

print ('#######################')
print ('Ask To Oracle, the game')
print ('#######################')

while attempt <= num_jogadas:
    print ('Tentativa {} de {}'.format(attempt, num_jogadas))
    tentativa = int(input('Advinhe a sequencia numérica: '))
    if tentativa == seq_num:
        print ('você acertou!')
        break
    # Wrong guess: hint whether the guess was above or below the target.
    if tentativa > seq_num:
        print('O número digitado está acima do número buscado!')
    elif tentativa < seq_num:
        print('O número digitado está abaixo do número buscado!')
    attempt += 1
    print ('O número digitado foi: ', tentativa)
    print('----------------------------------------------------')

# The original trailing while/break was a disguised "if": report game
# over when the last guess never matched the secret.
if tentativa != seq_num:
    print ('Game Over!')
997,280 | 2c2849247a8acd683e85111453a5e70a080bd320 | #!/bin/env python
# This script was modified from program_09_template.py by Joshua Tellier on 3/18/2020 as part of the lab 9 assignment for ABE65100
#Joshua Tellier, Purdue University
import pandas as pd
import numpy as np
def ReadData( fileName ):
    """This function takes a filename as input, and returns a dataframe with
    raw data read from that file in a Pandas DataFrame. The DataFrame index
    should be the year, month and day of the observation. DataFrame headers
    should be "Date", "Precip", "Max Temp", "Min Temp", "Wind Speed". Function
    returns the completed DataFrame, and a dictionary designed to contain all
    missing value counts."""
    # define column names
    colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed']
    # Globals are kept intentionally: the plotting script below reads
    # DataDF/ReplacedValuesDF without capturing the return values.
    global DataDF
    global ReplacedValuesDF
    # open and read the file
    # BUG FIX: the fileName parameter was previously ignored and the
    # path "DataQualityChecking.txt" was hard-coded here.
    DataDF = pd.read_csv(fileName, header=None, names=colNames,
                         delimiter=r"\s+", parse_dates=[0])
    DataDF = DataDF.set_index('Date')
    # define and initialize the missing data dictionary: one row per
    # check, one column per data variable.
    ReplacedValuesDF = pd.DataFrame(0, index=["1. No Data","2. Gross Error","3. Swapped","4. Range Fail"], columns=colNames[1:])
    return( DataDF, ReplacedValuesDF )
def Check01_RemoveNoDataValues( DataDF, ReplacedValuesDF ):
    """This check replaces the defined No Data value with the NumPy NaN value
    so that further analysis does not use the No Data values. Function returns
    the modified DataFrame and a count of No Data values replaced."""
    # Vectorized, in-place replacement. This fixes two bugs in the old
    # loop: range(len-1) skipped the last row, and range(0, 3) never
    # checked the Wind Speed column (index 3).
    DataDF[DataDF == -999] = np.nan
    # Record how many values are now missing in each column.
    ReplacedValuesDF.iloc[0,0]=DataDF['Precip'].isna().sum()
    ReplacedValuesDF.iloc[0,1]=DataDF['Max Temp'].isna().sum()
    ReplacedValuesDF.iloc[0,2]=DataDF['Min Temp'].isna().sum()
    ReplacedValuesDF.iloc[0,3]=DataDF['Wind Speed'].isna().sum()
    return( DataDF, ReplacedValuesDF )
def Check02_GrossErrors( DataDF, ReplacedValuesDF ):
    """This function checks for gross errors, values well outside the expected
    range, and removes them from the dataset. The function returns modified
    DataFrames with data the has passed, and counts of data that have not
    passed the check."""
    # Vectorized, in-place range checks; fixes the range(len-1) bug that
    # skipped the last row in the previous loop-based version.
    # Valid ranges: Precip 0..25, temps -25..35, wind speed 0..10.
    DataDF.loc[(DataDF['Precip'] < 0) | (DataDF['Precip'] > 25), 'Precip'] = np.nan
    DataDF.loc[(DataDF['Max Temp'] < -25) | (DataDF['Max Temp'] > 35), 'Max Temp'] = np.nan
    DataDF.loc[(DataDF['Min Temp'] < -25) | (DataDF['Min Temp'] > 35), 'Min Temp'] = np.nan
    DataDF.loc[(DataDF['Wind Speed'] < 0) | (DataDF['Wind Speed'] > 10), 'Wind Speed'] = np.nan
    # Count only the NaNs introduced by THIS check (subtract the
    # missing-value counts recorded by check 1).
    ReplacedValuesDF.iloc[1,0]=DataDF['Precip'].isna().sum() - ReplacedValuesDF.iloc[0,0]
    ReplacedValuesDF.iloc[1,1]=DataDF['Max Temp'].isna().sum() - ReplacedValuesDF.iloc[0,1]
    ReplacedValuesDF.iloc[1,2]=DataDF['Min Temp'].isna().sum() - ReplacedValuesDF.iloc[0,2]
    ReplacedValuesDF.iloc[1,3]=DataDF['Wind Speed'].isna().sum() - ReplacedValuesDF.iloc[0,3]
    return( DataDF, ReplacedValuesDF )
def Check03_TmaxTminSwapped( DataDF, ReplacedValuesDF ):
    """This function checks for days when maximum air temperture is less than
    minimum air temperature, and swaps the values when found. The function
    returns modified DataFrames with data that has been fixed, and with counts
    of how many times the fix has been applied."""
    # Identify and count the rows to swap BEFORE swapping them.
    swapped = DataDF['Min Temp'] > DataDF['Max Temp']
    ReplacedValuesDF.iloc[2,1] = swapped.sum()
    ReplacedValuesDF.iloc[2,2] = swapped.sum()
    # Vectorized swap via a temporary copy; fixes the range(len-1) bug
    # that left the last row unswapped in the loop-based version.
    tmax_hold = DataDF.loc[swapped, 'Max Temp'].copy()
    DataDF.loc[swapped, 'Max Temp'] = DataDF.loc[swapped, 'Min Temp']
    DataDF.loc[swapped, 'Min Temp'] = tmax_hold
    return( DataDF, ReplacedValuesDF )
def Check04_TmaxTminRange( DataDF, ReplacedValuesDF ):
    """This function checks for days when maximum air temperture minus
    minimum air temperature exceeds a maximum range, and replaces both values
    with NaNs when found. The function returns modified DataFrames with data
    that has been checked, and with counts of how many days of data have been
    removed through the process."""
    # Count the offending days BEFORE blanking them out.
    excess = (DataDF['Max Temp'] - DataDF['Min Temp']) > 25
    ReplacedValuesDF.iloc[3,1] = excess.sum()
    ReplacedValuesDF.iloc[3,2] = excess.sum()
    # Vectorized in-place blanking of both temperatures; fixes the
    # range(len-1) bug that left the last row unchecked.
    DataDF.loc[excess, ['Max Temp', 'Min Temp']] = np.nan
    return( DataDF, ReplacedValuesDF )
# the following condition checks whether we are running as a script, in which
# case run the test code, otherwise functions are being imported so do not.
# put the main routines from your code after this conditional check.
if __name__ == '__main__':
    # Run the full quality-control pipeline on the default data file,
    # printing summary statistics after each check.
    fileName = "DataQualityChecking.txt"
    DataDF, ReplacedValuesDF = ReadData(fileName)
    print("\nRaw data.....\n", DataDF.describe())
    DataDF, ReplacedValuesDF = Check01_RemoveNoDataValues( DataDF, ReplacedValuesDF )
    print("\nMissing values removed.....\n", DataDF.describe())
    DataDF, ReplacedValuesDF = Check02_GrossErrors( DataDF, ReplacedValuesDF )
    print("\nCheck for gross errors complete.....\n", DataDF.describe())
    DataDF, ReplacedValuesDF = Check03_TmaxTminSwapped( DataDF, ReplacedValuesDF )
    print("\nCheck for swapped temperatures complete.....\n", DataDF.describe())
    DataDF, ReplacedValuesDF = Check04_TmaxTminRange( DataDF, ReplacedValuesDF )
    print("\nAll processing finished.....\n", DataDF.describe())
    print("\nFinal changed values counts.....\n", ReplacedValuesDF)
"""Done modifying functions, now I will work on the rest of the lab
i.e. creating plots and file output"""
import matplotlib.pyplot as plt
#First, we import the data, assign the RAW data to a new frame so that we can plot them side-by-side, and then QC the data
ReadData('DataQualityChecking.txt')
RawData = DataDF.copy()
Check01_RemoveNoDataValues(DataDF,ReplacedValuesDF)
Check02_GrossErrors(DataDF,ReplacedValuesDF)
Check03_TmaxTminSwapped(DataDF,ReplacedValuesDF)
Check04_TmaxTminRange(DataDF,ReplacedValuesDF)
""" Precipitation comparison figure"""
fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
ax1.scatter(x=DataDF.index.values, y=RawData['Precip'], s=3, c='b', marker='s', label="Raw Data")
ax1.scatter(x=DataDF.index.values, y=DataDF['Precip'], s=3, c='r', marker='o', label='QC Data')
plt.xlabel('Date')
plt.ylabel('Precipitation (mm)')
plt.legend(loc = 'lower left')
plt.savefig('Precipitation_Raw_vs_QC.pdf')
plt.close()
"Max temp comparison figure"""
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
ax2.scatter(x=DataDF.index.values, y=RawData['Max Temp'], s=3, c='b', marker='s', label="Raw Data")
ax2.scatter(x=DataDF.index.values, y=DataDF['Max Temp'], s=3, c='r', marker='o', label='QC Data')
plt.xlabel('Date')
plt.ylabel('Maximum Temperature (degrees Celsius)')
plt.legend(loc = 'lower left')
plt.savefig('Maxtemp_Raw_vs_QC.pdf')
plt.close()
"""Min temp comparsion figure"""
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
ax2.scatter(x=DataDF.index.values, y=RawData['Min Temp'], s=3, c='b', marker='s', label="Raw Data")
ax2.scatter(x=DataDF.index.values, y=DataDF['Min Temp'], s=3, c='r', marker='o', label='QC Data')
plt.xlabel('Date')
plt.ylabel('Minimum Temperature (degrees Celsius)')
plt.legend(loc = 'lower left')
plt.savefig('Mintemp_Raw_vs_QC.pdf')
plt.close()
"""Wind speed comparison figure"""
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
ax2.scatter(x=DataDF.index.values, y=RawData['Wind Speed'], s=3, c='b', marker='s', label="Raw Data")
ax2.scatter(x=DataDF.index.values, y=DataDF['Wind Speed'], s=3, c='r', marker='o', label='QC Data')
plt.xlabel('Date')
plt.ylabel('Wind Speed (m/s)')
plt.legend(loc = 'upper right')
plt.savefig('Windspeed_Raw_vs_QC.pdf')
plt.close()
""" Data output """
DataDF.to_csv('Quality_Checked_Data.txt', sep='\t', index=True) #writing the corrected data to a tab-delimited text file
ReplacedValuesDF.to_csv('ReplacedValueInfo.txt', sep='\t', index=True) #writing the correctio info to a tab-delimited text file
|
997,281 | 692b622efb9408546ebd3c01ea89e89cfd5caa24 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 03 09:27:01 2017
"""
import sys
import urllib3
from bs4 import BeautifulSoup
import urllib
import hashlib
import certifi
import ssl
import pymysql
# Shared HTTP pool with TLS certificate verification against the certifi bundle.
http = urllib3.PoolManager(
    cert_reqs='CERT_REQUIRED', # Force certificate check.
    ca_certs=certifi.where(), # Path to the Certifi bundle.
)
urllib3.disable_warnings()
# Module-level MySQL connection reused by every function below.
# NOTE(review): credentials are hard-coded; consider loading from environment.
db=pymysql.connect("localhost","root","admin","opiodDB")
def getRecordCount(url,cursor):
    """Return how many updatecheck rows have a URL containing *url*.

    NOTE(review): the *cursor* parameter is unused — a fresh cursor is
    opened on the module-level connection instead.
    NOTE(review): *url* is interpolated directly into the SQL string;
    injection-prone if it is ever attacker-controlled.
    """
    global db
    cursor2=db.cursor()
    query="select count(*) as countRec from opioddb.updatecheck WHERE URL like'%"+url+"%'"
    cursor2.execute(query)
    rows=cursor2.fetchall()
    recCount=0
    # A COUNT(*) query yields one row; keep its single value.
    for row in rows:
        recCount=int(row[0])
    return (recCount)
def insertNewData(newRow):
    """Insert a new (URL, md5Hash) row into updatecheck and commit.

    *newRow* is a dict with 'URL' and 'hashKey' keys; the insert is
    parameterized, so values are safely escaped by the driver.
    """
    global db
    cursor1=db.cursor()
    query="Insert into updatecheck(URL,md5Hash) VALUES (%s,%s)"
    col1=str(newRow['URL'])
    col2=str(newRow['hashKey'])
    data=(col1,col2)
    cursor1.execute(query,data)
    db.commit()
def updateQuery(url,hashKey,cursor):
    """Update the stored MD5 hash for *url* in the updatecheck table.

    SECURITY FIX: the query was previously built by string concatenation,
    so quotes in scraped values could break or inject into the SQL; it is
    now parameterized. The caller remains responsible for committing.
    """
    query="UPDATE opiodDB.updatecheck SET md5Hash=%s where URL=%s"
    cursor.execute(query, (hashKey, url))
def checkForData(newRow,cursor):
    """Insert *newRow* if its URL is unseen; otherwise detect changes.

    If the URL already exists, the stored MD5 hash is compared with the
    freshly computed one; a mismatch means the source page changed, so the
    stored hash is refreshed and an update notice is printed.
    """
    recCount=getRecordCount(newRow["URL"],cursor)
    if(recCount==0):
        insertNewData(newRow)
    else:
        # SECURITY FIX: parameterized LIKE instead of concatenating the
        # scraped URL straight into the SQL string.
        query="Select md5Hash from opiodDB.updatecheck where URL like %s"
        cursor.execute(query, ('%'+str(newRow["URL"])+'%',))
        existingMD5=" "
        rows=cursor.fetchall()
        for row in rows:
            existingMD5=row[0]
        # A changed hash means new content is available at the source.
        if(existingMD5!=str(newRow["hashKey"])):
            print("New updates available for "+ str(newRow["URL"]))
            updateQuery(str(newRow["URL"]),str(newRow["hashKey"]),cursor)
def censusCountyDataUpdate(cursor):
    """Check the Census county-changes page for content updates.

    Scrapes the page's middle column, hashes it with MD5, and delegates
    to checkForData to insert or compare against the stored hash.
    """
    newRow={}
    newRow["URL"]='CENSUS'
    soup_url=BeautifulSoup(urllib.request.urlopen('https://www.census.gov/geo/reference/county-changes.html').read())
    result=soup_url.find("div", {"id":"middle-column"}).get_text().encode('utf-8') #encode the text extracted . Extracted text must be encoded before using MD5Hash function
    m = hashlib.md5() #get md5Hash value
    m.update(result)
    newRow["hashKey"]=(str(m.hexdigest()))
    checkForData(newRow,cursor)
def MedicaidEnrollDataUpdate(cursor): #Requires html parser in python
    """Check the Medicaid enrollment-data page for content updates.

    Extracts the quarterly-reports section between two landmark headings,
    hashes it, and delegates to checkForData.
    """
    newRow={}
    newRow["URL"]='MEDICAID'
    soup_url = BeautifulSoup(urllib.request.urlopen('https://www.medicaid.gov/medicaid/program-information/medicaid-and-chip-enrollment-data/enrollment-mbes/index.html').read())
    result=soup_url.find(attrs={'class':'threeColumns'}).get_text()
    # Keep only the text between the two section headings of interest.
    url_data=result.split("Quarterly Medicaid Enrollment and Expenditure Reports")[1].split("About the Reports")[0]
    data=url_data.split(" ")
    reqData=str(' '.join(map(str, data))).strip().encode('utf-8') #encode the text extracted . Extracted text must be encoded before using MD5Hash function
    m = hashlib.md5() #get md5Hash value
    m.update(reqData)
    newRow["hashKey"]=(str(m.hexdigest()))
    checkForData(newRow,cursor)
def sahieDataUpdate(cursor):
    """Check the Census SAHIE data page for content updates.

    Hashes the entire page text (no sub-selection) and delegates to
    checkForData.
    """
    newRow={}
    newRow["URL"]='SAHIE'
    soup_url=BeautifulSoup(urllib.request.urlopen('https://www.census.gov/did/www/sahie/data/20082014/index.html').read())
    reqData=str(soup_url.get_text()).encode('utf-8') #encode the text extracted . Extracted text must be encoded before using MD5Hash function
    m = hashlib.md5()
    m.update(reqData)
    newRow["hashKey"]=(str(m.hexdigest()))
    checkForData(newRow,cursor)
def nsDuhUpdate(cursor):
    """Check the SAMHSA NSDUH reports page for content updates.

    NOTE(review): this disables SSL certificate verification process-wide
    by replacing the default HTTPS context — it affects every later
    urllib request in this process, not just this call.
    """
    ssl._create_default_https_context = ssl._create_unverified_context #this url has SSL verification.
    newRow={}
    newRow["URL"]='NSDUH'
    req=urllib.request.urlopen("https://www.samhsa.gov/data/population-data-nsduh/reports?tab=38#tgr-tabs-34")
    # Decode using the charset declared in the response headers.
    charset=req.info().get_content_charset()
    content=req.read().decode(charset)
    test=content.encode('utf-8') #encode the text extracted . Extracted text must be encoded before using MD5Hash function
    m = hashlib.md5()
    m.update(test)
    newRow["hashKey"]=(str(m.hexdigest()))
    checkForData(newRow,cursor)
def aidsVuUpdate(cursor):
    """Check the AIDSVu downloadable-maps page for content updates.

    Uses the module-level certified urllib3 pool, hashes the page's
    tab-navigation text, and delegates to checkForData.
    """
    newRow={}
    newRow["URL"]='AIDSVU'
    soup_url =BeautifulSoup( http.request('GET', 'https://aidsvu.org/resources/downloadable-maps-and-resources/',preload_content=False).read())
    result=soup_url.find(attrs={'class':'tab-nav'}).get_text().encode('utf-8')
    m = hashlib.md5()
    m.update(result)
    newRow["hashKey"]=str(m.hexdigest())
    checkForData(newRow,cursor)
def main():
    """Ensure the updatecheck table exists, then run every source check.

    Returns the string 'Complete!' on success; the module-level DB
    connection is committed and closed before returning.
    """
    cursor=db.cursor()
    count=0 #check if the table already exists in datbase.If not, create the corresponding table
    query="SELECT count(*) FROM information_schema.tables WHERE table_schema = 'opioddb' AND table_name = 'updatecheck'"
    cursor.execute(query)
    rows=cursor.fetchall()
    for row in rows:
        count=row[0]
    if(count==0):
        # First run: create the tracking table with an auto-increment key.
        queryCreate="CREATE TABLE `opioddb`.`updatecheck`(`idupdateCheck` INT NOT NULL AUTO_INCREMENT,`URL` VARCHAR(105) NULL,`md5Hash` VARCHAR(105) NULL,PRIMARY KEY (`idupdateCheck`))"
        cursor.execute(queryCreate)
        print("table created")
    # Run each data-source freshness check in turn.
    censusCountyDataUpdate(cursor)
    MedicaidEnrollDataUpdate(cursor)
    sahieDataUpdate(cursor)
    nsDuhUpdate(cursor)
    aidsVuUpdate(cursor)
    db.commit()
    db.close()
    return 'Complete!'
if __name__ == '__main__':
    # Run all checks and exit with main()'s status value.
    status = main()
    sys.exit(status)
997,282 | ffbe35492046f7eeeb4ce46697c8ad91157cb63a | # nombre = input('Digite su nombre: ')
# print(f"Hola {nombre}")
numero = float(input('Digite un numero: ')) # input() always returns text, even for digits; cast to float for arithmetic
# Echo the number plus one to confirm the numeric conversion worked.
print(f"El numero es {numero+1}")
|
997,283 | 9d73ab975dab01357b9f990d81dbf5d2e7d9d37f | from . import discharge_summary
from . import ecg
from . import clinic_letter
# Registry mapping human-readable document type names to the generator
# entry point (each submodule's main) for that document type.
exports = {
    "discharge summary": discharge_summary.main,
    "ecg": ecg.main,
    "clinic letter": clinic_letter.main,
}
997,284 | 8c2e01e976ae9ca0689db4209ffa1a08c8ecade1 | from django.conf.urls import url, include
from django.contrib.auth.views import LogoutView
from django.urls import path
from django.contrib.auth import views as auth_views
from django.views.i18n import JavaScriptCatalog
from dccrecibo.accounts import views
app_name = 'accounts'
urlpatterns = [
    path('logout/', LogoutView.as_view(next_page='/'), name='logout'),
    # Pull in the stock auth routes (password reset/change, etc.).
    path('', include('django.contrib.auth.urls')),
    # NOTE(review): modern Django's auth views expose LoginView.as_view(),
    # not a public ``auth_login`` view callable — confirm this import
    # resolves to a view and not the session-login helper.
    path('login/', auth_views.auth_login,{'template_name': 'registration/login.html'}, name='login'),
    path('registro/', views.register, name='register'),
    # JavaScript translation catalog + language-switching endpoints.
    url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),
    url(r'^i18n/', include('django.conf.urls.i18n')),
]
997,285 | c5c81ed3351d22bd447be55f348cad22b1cd6c84 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import subprocess
from os import path
root_dir = path.abspath(path.dirname(path.dirname(__file__)))
def git_version():
    """Return the most recent git tag (``git describe --abbrev=0 --tags``)."""
    tag = subprocess.check_output(
        ['git', 'describe', '--abbrev=0', '--tags'],
        text=True,
        cwd=root_dir,
    )
    return tag.strip()
def git_describe():
    """Return the full ``git describe --dirty`` string for the working tree."""
    described = subprocess.check_output(
        ['git', 'describe', '--dirty'],
        text=True,
        cwd=root_dir,
    )
    return described.strip()
def read_version():
    """Read the version string from the bundled storyscript/VERSION file.

    FIX: the file handle was opened and never closed; a ``with`` block now
    guarantees it is released even if read() raises.
    """
    version_file = path.join(root_dir, 'storyscript', 'VERSION')
    with io.open(version_file, 'r', encoding='utf8') as f:
        return f.read().strip()
def get_version():
    """Best-effort version string: git describe, then the VERSION file, then '0.0.0'."""
    for source in (git_describe, read_version):
        try:
            return source()
        except Exception:
            # this source is unavailable (no git repo / no VERSION file) —
            # fall through to the next one
            continue
    # soft fallback in case everything fails
    return '0.0.0'
def get_release_version():
    """Best-effort release version: latest git tag, then the VERSION file, then '0.0.0'."""
    for source in (git_version, read_version):
        try:
            return source()
        except Exception:
            # source unavailable — try the next fallback
            continue
    # soft fallback in case everything fails
    return '0.0.0'
# Resolved once at import time so consumers can just read these attributes.
version = get_version()
release_version = get_release_version()
|
997,286 | 8c57485ed2b91ca2c8538066c6595dce362a74ed | """
Pedir dos números y decir si son iguales o no.
"""
# Read two integers and print True/False depending on whether they are equal.
n_1 = int(input("Write the first number: \n"))
n_2 = int(input("Write the second number: \n"))
print(n_1 == n_2)
|
997,287 | 0154fb3a5ed57bcbc7ed92211a5c0098bf83f718 | #Write a program to manipulate List data
# Demonstrate list slicing and in-place mutation operations.
A=['Shreyas','Atharv','Abhishek','Amit','Yashraj','abc',64,44,29,66,00]
print("\n\nList A :",A[:])
print("List A : 2 to 5",A[2:6])
print("List A In Reverse:",A[::-1])
A.append('Abhishek')
print("List A After Appending :",A[:])
A.insert(4,'Nikhil')
print("List A After Inserting :",A[:])
A.pop(4)
print("List A After Poping :",A[:])
A.remove('abc')
print("List A After Removing :",A[:])
del A[0]
print("List A After Deleteing :",A[:])
A.clear()
print("List A After Clearing :",A[:])
# Demonstrate tuple slicing and the read-only tuple API (count/index).
B=("Shreyas","Atharv","Abhishek","Amit","Yashraj","abc","Amit",64,44,29,0)
print("\n\nTuple B:",B)
print("Tuple B: 2 to 5",B[2:6])
print("Tuple B in Reverse:",B[::-1])
print("Count of Amit is :",B.count('Amit'))
print("Index of Amit",B.index('Amit'))
997,288 | fc6a4e1a0cd5310021f6ec7f7e5e13975ca650e7 | import requests
import pprint
import json
import os
import datetime
SPOTIFY_TOKEN = None
def get_spotify_token():
    """Return a cached client-credentials token, refreshing it when expired.

    The token and its absolute expiration time are cached in the module
    global SPOTIFY_TOKEN. Requires SPOTIFY_CLIENT_ID / SPOTIFY_CLIENT_SECRET
    environment variables.

    Raises:
        Exception: if the token endpoint does not return HTTP 200.
    """
    global SPOTIFY_TOKEN
    current_time = datetime.datetime.now()
    if SPOTIFY_TOKEN is None or current_time > SPOTIFY_TOKEN['expiration']:
        url = "https://accounts.spotify.com/api/token"
        payload = {
            'grant_type': 'client_credentials'
        }
        client_id = os.environ['SPOTIFY_CLIENT_ID']
        client_secret = os.environ['SPOTIFY_CLIENT_SECRET']
        auth = (client_id, client_secret)
        # FIX: reuse the prepared auth tuple instead of rebuilding it inline
        # (the local variable was previously assigned but never used).
        response = requests.post(url, data=payload, auth=auth)
        if response.status_code != 200:
            raise Exception("Error getting spotify token!")
        response_data = json.loads(response.text)
        # 'expires_in' is a lifetime in seconds relative to now.
        expiration_time = current_time + datetime.timedelta(0, int(response_data['expires_in']))
        SPOTIFY_TOKEN = {
            'access_token': response_data['access_token'],
            'expiration': expiration_time
        }
    return SPOTIFY_TOKEN
def get_user_playlists(user):
    """Return every playlist of *user* as a dict with id/name/snapshot_id.

    Follows Spotify's pagination ('next' links) until exhausted.

    Raises:
        Exception: if any page request does not return HTTP 200.
    """
    limit = 50
    offset = 0
    token = get_spotify_token()
    url = "https://api.spotify.com/v1/users/{user}/playlists?limit={limit}&offset={offset}".format(user=user, limit = limit, offset = offset)
    headers = {
        'content-type': 'application/json',
        'Authorization': 'Bearer {token}'.format(token = token['access_token'])
    }
    user_playlists = []
    next_url = url
    while True:
        response = requests.get(next_url, headers=headers)
        if response.status_code != 200:
            raise Exception(response.text)
        response_json = response.json()
        for p in response_json['items']:
            playlist = {
                'id': p['id'],
                'name': p['name'],
                'snapshot_id': p['snapshot_id'],
            }
            # BUG FIX: the trimmed 'playlist' projection was built but the raw
            # API object 'p' was appended, making the projection dead code.
            user_playlists.append(playlist)
        # 'next' is None on the last page.
        next_url = response_json['next']
        if next_url is None:
            break
    return user_playlists
def call_user_playlist_tracks_api(user_id, playlist_id):
    """Fetch one page of track objects from a user's playlist.

    Raises:
        Exception: if the request does not return HTTP 200.
    """
    token = get_spotify_token()
    url = "https://api.spotify.com/v1/users/{user_id}/playlists/{playlist_id}/tracks".format(user_id = user_id, playlist_id = playlist_id)
    # FIX: the Python 2 'print' statement was the only py2-only construct in
    # this module; the function form works under both interpreters.
    print("URL " + url)
    headers = {
        'content-type': 'application/json',
        'Authorization': 'Bearer {token}'.format(token = token['access_token'])
    }
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        raise Exception(response.text)
    response_json = response.json()
    tracks = response_json['items']
    return tracks
|
997,289 | fe3d7f8a17dda33f0159791ec8f3b2bfc64e265c | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# COPYRIGHT NOTICE STARTS HERE
# Copyright 2019 © Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# COPYRIGHT NOTICE ENDS HERE
import argparse
import docker
import logging
import sys
from downloader import AbstractDownloader
from docker_downloader import DockerDownloader
def main():
    """CLI: delete local docker images whose tags appear in none of the given keep-lists.

    Exits with the number of removal failures as the status code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('image_lists', nargs='+', help='Images to keep')
    parser.add_argument('--debug', '-d', action='store_true', help='Debugging messages')
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    else:
        logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(message)s')
    # Union of all requested keep-lists, normalised to registry names.
    keep = set()
    for lst in args.image_lists:
        keep.update(AbstractDownloader.load_list(lst))
    keep = {DockerDownloader.image_registry_name(name) for name in keep}
    client = docker.client.DockerClient(version='auto')
    failures = 0
    for image in client.images.list():
        for tag in image.tags:
            logging.debug('Checking {}'.format(tag))
            if tag in keep:
                logging.debug('Image \'{}\' found in lists.'.format(tag))
                continue
            logging.debug('Image \'{}\' not in lists'.format(tag))
            logging.info('Removing: {}'.format(tag))
            try:
                client.images.remove(tag)
                logging.info('Removed: {}'.format(tag))
            except docker.errors.APIError as err:
                failures += 1
                logging.exception(err)
    sys.exit(failures)
if __name__ == '__main__':
    main()
|
997,290 | 391deed48fc5b62627dde98b808a9c8be5dd4ff4 | '''
Write a program which accept range from user and return addition of all even
numbers in between that range. (Range should contains positive numbers only)
Input : 23 30
Output : 108
Input : 10 18
Output : 70
Input : -10 2
Output : Invalid range
'''
def DisplayEven(iStart, iEnd):
    """Return the sum of even numbers in [iStart, iEnd].

    Prints "Invalid range" and returns False when the range is descending
    or contains negative endpoints (interface preserved for existing callers).
    """
    if iStart > iEnd or iStart < 0 or iEnd < 0:
        print("Invalid range")
        return False
    # First even number at or above iStart; stepping by 2 avoids testing
    # every value for parity.
    first_even = iStart + (iStart % 2)
    return sum(range(first_even, iEnd + 1, 2))
def main():
    """Read a range from stdin and print the sum of its even numbers."""
    iStart = int(input("Enter start range:"))
    iEnd = int(input("Enter end range:"))
    ans = DisplayEven(iStart, iEnd)
    # FIX: 0 == False in Python, so a legitimate even-sum of 0 (e.g. range
    # 1..1) was silently suppressed by 'ans != False'. Compare by identity.
    if ans is not False:
        print(ans)
if __name__ == "__main__":
    main()
997,291 | dd9551a76243d5b309273fdbcd7932c362e3fb55 | # To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# Chapter import
from sklearn.cluster import SpectralClustering
from sklearn.datasets import make_moons
# To plot pretty figures
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# Global matplotlib font sizes for axis labels and tick labels.
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
WORKING_PATH = os.path.abspath(os.path.join(os.getcwd(), '..'))  # parent of the current working dir
ROOT_PATH = os.path.join(WORKING_PATH, 'Hands on SK and TS\\')  # book project folder (Windows-style path)
CHAPTER_ID = "dimensionality_reduction"  # image subfolder for this chapter
def save_fig(fig_id, tight_layout=True):
    """Write the current matplotlib figure to the chapter's image folder as a 300-dpi PNG."""
    target = image_path(fig_id) + ".png"
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    # cannot save file if path doesn't exist
    plt.savefig(target, format='png', dpi=300)
def image_path(fig_id):
    """Absolute path (without extension) for a figure belonging to this chapter."""
    parts = (ROOT_PATH, "images", CHAPTER_ID, fig_id)
    return os.path.join(*parts)
def plot_spectral_clustering(sc, X, size, alpha, show_xlabels=True, show_ylabels=True):
    """Scatter-plot a fitted SpectralClustering model on 2-D data X.

    Draws three layers in order: a translucent gray halo (size/alpha control
    the blob effect), a white core, and small dots coloured by cluster label.
    The title shows the model's RBF gamma.
    """
    plt.scatter(X[:, 0], X[:, 1], marker='o', s=size, c='gray', cmap="Paired", alpha=alpha)
    plt.scatter(X[:, 0], X[:, 1], marker='o', s=30, c='w')
    plt.scatter(X[:, 0], X[:, 1], marker='.', s=10, c=sc.labels_, cmap="Paired")
    if show_xlabels:
        plt.xlabel("$x_1$", fontsize=14)
    else:
        plt.tick_params(labelbottom='off')
    if show_ylabels:
        plt.ylabel("$x_2$", fontsize=14, rotation=0)
    else:
        plt.tick_params(labelleft='off')
    plt.title("RBF gamma={}".format(sc.gamma), fontsize=14)
if __name__ == '__main__':
    # refer to cloud note spectral clustering
    # data set: two interleaved half-moons, a classic non-convex clustering case
    X, y = make_moons(n_samples=1000, noise=0.05, random_state=42)
    # build models with a wide and a narrow RBF kernel to contrast affinities
    sc1 = SpectralClustering(n_clusters=2, gamma=100, random_state=42)
    sc1.fit(X)
    sc2 = SpectralClustering(n_clusters=2, gamma=1, random_state=42)
    sc2.fit(X)
    # 95th percentile of the affinity matrices shows how local each kernel is
    print(np.percentile(sc1.affinity_matrix_, 95)) # 0.04251990648936265
    print(np.percentile(sc2.affinity_matrix_, 95)) # 0.9689155435458034
    # plot both clusterings side by side
    plt.figure(figsize=(9, 3.2))
    plt.subplot(121)
    plot_spectral_clustering(sc1, X, size=500, alpha=0.1)
    plt.subplot(122)
    plot_spectral_clustering(sc2, X, size=4000, alpha=0.01, show_ylabels=False)
    plt.show()
997,292 | 87e3c43c4437dd551b7bae1a266c9806284e9a63 | """
A basic server to convert email addresses into proper emails without mailto.
"""
import argparse
from base64 import b64decode
from email.mime.text import MIMEText
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging
import os
import re
import smtplib
import sys
from threading import Thread
from_address, to_address, password = None, None, None
# Loose sanity-check pattern for an email address (something@something.tld).
# FIX: raw string — '\S' / '\.' in a plain string rely on Python keeping
# unknown escapes as-is and emit DeprecationWarnings on modern interpreters.
check = re.compile(r'\S+@\S+\.\S+')
# Sender credentials are stored base64-encoded as "address:password".
_cred = os.path.expanduser('~/.config/email_credentials.txt')
with open(_cred) as in_file:
    decoded = b64decode(in_file.read().encode('utf-8')).decode('utf-8')
    from_address, password = decoded.split(':')
class Server(BaseHTTPRequestHandler):
    """Tiny HTTP endpoint that forwards posted email addresses to sales."""

    def do_POST(self):  # noqa
        """Handles posted email addresses."""
        content_length = int(self.headers['Content-Length'])
        new_address = self.rfile.read(content_length).decode('utf-8')
        if check.match(new_address) is not None:
            logging.info("Forwarding {} to sales.".format(new_address))
            # Send the email off-thread so the HTTP response is not blocked
            # on the SMTP round trip.
            Thread(target=self.send_email, args=(new_address, )).start()
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.send_header('Access-Control-Allow-Origin',
                             'http://numat-tech.com')
            self.end_headers()
            self.wfile.write(new_address.encode('utf-8'))
        else:
            # FIX: logging.exception is only meaningful inside an except
            # block — here it logged a bogus "NoneType: None" traceback.
            logging.error("Received malformed email: " + new_address)
            self.send_response(500)
            # FIX: terminate the header block so the 500 response is complete.
            self.end_headers()

    def do_OPTIONS(self):  # noqa
        """CORS pre-flight response permitting POSTs from numat-tech.com."""
        self.send_response(200, 'ok')
        self.send_header('Access-Control-Allow-Origin',
                         'http://numat-tech.com')
        self.send_header('Access-Control-Allow-Methods', 'POST, OPTIONS')
        self.send_header('Access-Control-Allow-Headers', 'X-Requested-With')
        self.send_header('Access-Control-Allow-Headers', 'Content-Type')
        self.end_headers()

    def send_email(self, new_address):
        """Sends an email with new user information."""
        # Gmail requires STARTTLS before authentication.
        s = smtplib.SMTP('smtp.gmail.com:587')
        s.starttls()
        s.login(from_address, password)
        email = MIMEText("Received a request for ION-X information from:\n{}"
                         .format(new_address))
        email['To'] = to_address
        email['From'] = from_address
        email['Subject'] = "Website Request Received"
        s.sendmail(from_address, to_address, email.as_string())
        s.quit()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Forwards posted emails to "
                                     "sales@numat-tech.com.")
    parser.add_argument('-p', '--port', type=int, default=80, help="The "
                        "port on which to run the server.")
    args = parser.parse_args()
    to_address = 'sales@numat-tech.com'
    # NOTE(review): 'log' is computed but never used — a FileHandler for it
    # was presumably intended; left as-is pending confirmation.
    log = os.path.expanduser('~/ionx_server.log')
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s '
                         '%(funcName)s(%(lineno)d)\n%(message)s\n'))
    logger.addHandler(handler)
    daemon = HTTPServer(('', args.port), Server)
    try:
        daemon.serve_forever()
    # FIX: bare 'except:' replaced with an explicit BaseException so the
    # intent (also clean up on KeyboardInterrupt/SystemExit) is visible.
    except BaseException:
        daemon.socket.close()
        logging.exception("Quitting server.")
997,293 | f129878d48a425f75c95dd873c027a24a7c24724 | n = int(input())
p = list(map(int, input().split()))
p.sort()
median = p[n // 2]
diff = 0
for i in p:
diff += abs(median - i)
print(diff)
|
997,294 | 1895e0b53c4abfaa3f79685091f61b8eceff790b | import torch
from PIL import Image
from torchvision import transforms
from torch.utils.data import Dataset
class CustomDataset(Dataset):
    """Image-classification dataset over a list of image file paths and labels.

    NOTE(review): the original docstring said "Face Landmarks dataset", which
    appears to be copied boilerplate — nothing here handles landmarks.
    Training mode applies random-crop/flip augmentation; evaluation mode uses
    a deterministic resize + centre crop. Both normalise with ImageNet stats.
    """
    def __init__(self, imgs_path, lbls,is_training):
        self.imgs_path = imgs_path
        self.lbls = lbls
        #self.idx = list(range(0,len(lbls)))
        if is_training:
            self.transform = transforms.Compose([
            # transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        else:
            self.transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
    def __len__(self):
        # One sample per image path.
        return len(self.imgs_path)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # self.transform is always set in __init__, so the else branch below
        # is effectively dead code — kept for safety.
        if self.transform:
            img = Image.open(self.imgs_path[idx])
            # Coerce non-RGB images (grayscale/RGBA/palette) to 3 channels.
            if len(img.mode) != 3 or len(img.getbands())!=3:
                # print(len(img.mode),len(img.getbands()))
                # print(self.imgs_path[idx])
                img = img.convert('RGB')
            img = self.transform(img)
            # sample = {'image': self.transform(img), 'label': self.lbls[idx],'index': idx}
        else:
            img = Image.open(self.imgs_path[idx])
            # sample = {'image': Image.open(self.imgs_path[idx]), 'label': self.lbls[idx],'index': idx}
        return img,self.lbls[idx]
|
997,295 | 32086327aad051204d63709fec39f9ca594e4dec | from django import forms
from First.models import Employee
class EmpForm(forms.ModelForm):
    """Model-backed form exposing the editable Employee fields."""

    class Meta:
        model = Employee
        fields = ["ename", "eemail", "econtact"]
997,296 | cccb306e5f2f300547290b71d90c9a8b19aaaa28 | # Original Version: Taehoon Kim (http://carpedm20.github.io)
# + Source: https://github.com/carpedm20/DCGAN-tensorflow/blob/e30539fb5e20d5a0fed40935853da97e9e55eee8/utils.py
# + License: MIT
# (Modified) Koki Yoshida and Chenduo Huang
# 2017-06-01
"""
Some codes from https://github.com/Newmu/dcgan_code
"""
import math
import random
import scipy.misc
import numpy as np
import time
import os
import tensorflow as tf
get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
# Given a trained model, this function completes the images with specified mask
def complete_images(model, num_iters, input_image_paths, mask, output_dir,\
        adam_config, save_per_num_iters=100, log_l1_loss=False):
    """Inpaint the masked region of each input image using a trained DCGAN.

    Optimises a latent vector z per batch (manual Adam updates driven by the
    gradient returned from model.step_completion) so the generator output
    matches the unmasked pixels; periodically saves both the raw generator
    output and the composited completion.

    adam_config must provide 'beta1', 'beta2', 'lr' and 'eps'.
    """
    image_shape = imread(input_image_paths[0]).shape
    # Assumes output images are square images
    image_size, num_imgs = image_shape[0], len(input_image_paths)
    start_time = time.time()
    if log_l1_loss:
        f = open('./log.txt', 'w')
    batch_idxs = int(np.ceil(num_imgs/model.batch_size))
    # NOTE(review): only the first batch is processed even though batch_idxs
    # is computed above — confirm whether range(1) is intentional.
    for idx in range(1):
        last_batch = idx == batch_idxs -1
        lower_bound = idx * model.batch_size
        upper_bound = num_imgs if last_batch else (idx+1) * model.batch_size
        cur_size = upper_bound - lower_bound
        cur_batch = input_image_paths[lower_bound : upper_bound]
        cur_images = [get_image(cur_path, image_size, is_crop=model.is_crop) \
                for cur_path in cur_batch]
        cur_images = np.array(cur_images).astype(np.float32)
        # Pad a short final batch so the model always sees batch_size images.
        if cur_size < model.batch_size:
            print("Padding the last batch with dummy images...")
            pad_size = ((0, int(model.batch_size - cur_size)), (0,0), (0,0), (0,0))
            cur_images = np.pad(cur_images, pad_size, 'constant').astype(np.float32)
        batch_mask = np.resize(mask, [model.batch_size] + list(image_shape))
        masked_images = np.multiply(cur_images, batch_mask)
        input_z = np.random.uniform(-1, 1, size=(model.batch_size, model.z_dim))
        # For Adam optimizer update on input noises
        m, v = 0, 0
        for i in range(num_iters):
            loss, g, G_imgs, contextual_loss = model.step_completion(input_z, batch_mask, cur_images)
            if log_l1_loss:
                f.write('%5.2f,%5.2f\n' % ((time.time() - start_time), np.mean(contextual_loss[:cur_size])))
            beta1, beta2 = adam_config['beta1'], adam_config['beta2']
            lr, eps = adam_config['lr'], adam_config['eps']
            m_prev, v_prev = np.copy(m), np.copy(v)
            # Manual Adam update of the latent z with bias-corrected moments,
            # then clip back into the generator's valid input range.
            m = beta1 * m_prev + (1 - beta1) * g[0]
            v = beta2 * v_prev + (1 - beta2) * np.multiply(g[0], g[0])
            m_hat = m / (1 - beta1 ** (i + 1))
            v_hat = v / (1 - beta2 ** (i + 1))
            input_z += - np.true_divide(lr * m_hat, (np.sqrt(v_hat) + eps))
            input_z = np.clip(input_z, -1, 1)
            if i % save_per_num_iters == 0:
                cur_time = time.time()
                diff = cur_time - start_time
                print("After %d iterations(%5.2f), current average loss of batch %d is: %f" %\
                        (i, diff, idx, np.mean(loss[:cur_size])))
                batch_dir = os.path.join(output_dir, 'batch_idx_%d' % idx)
                zhats_dir = os.path.join(batch_dir, 'zhats_iter_%d' % i)
                completed_dir = os.path.join(batch_dir, 'completed_iter_%d' % i)
                os.makedirs(batch_dir, exist_ok=True)
                os.makedirs(zhats_dir, exist_ok=True)
                os.makedirs(completed_dir, exist_ok=True)
                # Completion = original pixels where the mask keeps them,
                # generator pixels elsewhere.
                completed_images = masked_images + np.multiply(G_imgs, 1.0 - batch_mask)
                for path_idx, path in enumerate(cur_batch):
                    zhats_image_out_path = os.path.join(zhats_dir, str(path_idx)+'.png')
                    completed_image_out_path = os.path.join(completed_dir, str(path_idx)+'.png')
                    save_image(G_imgs[path_idx, :, :, :], zhats_image_out_path)
                    save_image(completed_images[path_idx, :, :, :], completed_image_out_path)
    if log_l1_loss:
        f.close()
def get_image(image_path, image_size, is_crop=True):
    """Read an image file and return it normalised to [-1, 1]."""
    raw = imread(image_path)
    return transform(raw, image_size, is_crop)
def save_image(image, image_path):
    """Denormalise a single [-1, 1] image and write it to image_path."""
    pixels = inverse_transform(image)
    return scipy.misc.imsave(image_path, pixels)
def save_images(images, size, image_path):
    """Denormalise a batch of images and save it as one tiled canvas."""
    denormalised = inverse_transform(images)
    return imsave(denormalised, size, image_path)
def imread(path):
    """Read an image file as an RGB float array.

    FIX: the deprecated alias np.float (removed in NumPy 1.24) is replaced by
    the builtin float it aliased, preserving the float64 dtype.
    """
    return scipy.misc.imread(path, mode='RGB').astype(float)
def merge_images(images, size):
    """Denormalise a batch of images ('size' is unused, kept for API symmetry)."""
    return inverse_transform(images)
def merge(images, size):
    """Tile a batch of HxWx3 images into one (rows*H, cols*W, 3) canvas.

    size is (rows, cols); images are placed row-major in batch order.
    """
    h, w = images.shape[1], images.shape[2]
    rows, cols = size[0], size[1]
    canvas = np.zeros((int(h * rows), int(w * cols), 3))
    for idx, image in enumerate(images):
        row, col = divmod(idx, cols)
        canvas[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = image
    return canvas
def imsave(images, size, path):
    """Tile a batch into one canvas and write it to path."""
    canvas = merge(images, size)
    return scipy.misc.imsave(path, canvas)
def center_crop(x, crop_h, crop_w=None, resize_w=64):
    """Take the central crop_h x crop_w window of x and resize it to resize_w square."""
    if crop_w is None:
        crop_w = crop_h
    h, w = x.shape[:2]
    top = int(round((h - crop_h) / 2.))
    left = int(round((w - crop_w) / 2.))
    window = x[top:top + crop_h, left:left + crop_w]
    return scipy.misc.imresize(window, [resize_w, resize_w])
def transform(image, npx=64, is_crop=True):
    """Optionally centre-crop to npx pixels, then scale pixel values into [-1, 1]."""
    prepared = center_crop(image, npx) if is_crop else image
    return np.array(prepared) / 127.5 - 1.
def inverse_transform(images):
    """Map images from the generator range [-1, 1] back to [0, 1]."""
    shifted = images + 1.
    return shifted / 2.
|
997,297 | d1a3471cb09ce784da636950b01eb018ed5cbe54 | from nose.tools import istest, assert_equal
from wordbridge.openxml import numbering
from wordbridge import openxml
@istest
def numbering_instance_is_read_from_num_element_with_abstract_num_base():
    """A w:num that references a w:abstractNum yields a definition whose
    levels carry the abstract numbering's per-level start values."""
    numbering_xml = _create_numbering_xml("""
        <w:abstractNum w:abstractNumId="0">
            <w:lvl w:ilvl="0">
                <w:start w:val="1"/>
                <w:numFmt w:val="bullet"/>
                <w:lvlText w:val="o"/>
            </w:lvl>
            <w:lvl w:ilvl="1">
                <w:start w:val="2"/>
                <w:numFmt w:val="bullet"/>
                <w:lvlText w:val="o"/>
            </w:lvl>
        </w:abstractNum>
        <w:num w:numId="1">
            <w:abstractNumId w:val="0"/>
        </w:num>
    """)
    result = numbering.read_string(numbering_xml)
    # Expected: numId "1" resolves through abstractNumId 0 to two levels.
    expected_numbering = numbering.numbering({
        "1": numbering.definition(levels={
            0: numbering.level(start=1),
            1: numbering.level(start=2)
        })
    })
    assert_equal(expected_numbering, result)
def _create_numbering_xml(inner_xml):
    """Embed *inner_xml* inside the full w:numbering document skeleton."""
    return _NUMBERING_TEMPLATE.format(inner_xml)
_NUMBERING_TEMPLATE = """<?xml version="1.0" ?>
<w:numbering mc:Ignorable="w14 wp14" xmlns:m="http://schemas.openxmlformats.org/officeDocument/2006/math" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:o="urn:schemas-microsoft-com:office:office" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:v="urn:schemas-microsoft-com:vml" xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main" xmlns:w10="urn:schemas-microsoft-com:office:word" xmlns:w14="http://schemas.microsoft.com/office/word/2010/wordml" xmlns:wne="http://schemas.microsoft.com/office/word/2006/wordml" xmlns:wp="http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing" xmlns:wp14="http://schemas.microsoft.com/office/word/2010/wordprocessingDrawing" xmlns:wpc="http://schemas.microsoft.com/office/word/2010/wordprocessingCanvas" xmlns:wpg="http://schemas.microsoft.com/office/word/2010/wordprocessingGroup" xmlns:wpi="http://schemas.microsoft.com/office/word/2010/wordprocessingInk" xmlns:wps="http://schemas.microsoft.com/office/word/2010/wordprocessingShape">
{0}
</w:numbering>
"""
|
997,298 | c8470d7973e8ae4aa801d5f51510d07310123b3b | from sympy.core.function import (Derivative, Function)
from sympy.core.numbers import (I, Rational, oo, pi)
from sympy.core.relational import (Eq, Ge, Gt, Le, Lt, Ne)
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.elementary.complexes import (Abs, conjugate)
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import sin
from sympy.integrals.integrals import Integral
from sympy.matrices.dense import Matrix
from sympy.series.limits import limit
from sympy.printing.python import python
from sympy.testing.pytest import raises, XFAIL
x, y = symbols('x,y')
th = Symbol('theta')
ph = Symbol('phi')
def test_python_basic():
    """python() printing of numbers, powers, sums, products and sign placement.

    Membership ('in') checks allow for historically varying term orderings.
    """
    # Simple numbers/symbols
    assert python(-Rational(1)/2) == "e = Rational(-1, 2)"
    assert python(-Rational(13)/22) == "e = Rational(-13, 22)"
    assert python(oo) == "e = oo"
    # Powers
    assert python(x**2) == "x = Symbol(\'x\')\ne = x**2"
    assert python(1/x) == "x = Symbol('x')\ne = 1/x"
    assert python(y*x**-2) == "y = Symbol('y')\nx = Symbol('x')\ne = y/x**2"
    assert python(
        x**Rational(-5, 2)) == "x = Symbol('x')\ne = x**Rational(-5, 2)"
    # Sums of terms
    assert python(x**2 + x + 1) in [
        "x = Symbol('x')\ne = 1 + x + x**2",
        "x = Symbol('x')\ne = x + x**2 + 1",
        "x = Symbol('x')\ne = x**2 + x + 1", ]
    assert python(1 - x) in [
        "x = Symbol('x')\ne = 1 - x",
        "x = Symbol('x')\ne = -x + 1"]
    assert python(1 - 2*x) in [
        "x = Symbol('x')\ne = 1 - 2*x",
        "x = Symbol('x')\ne = -2*x + 1"]
    assert python(1 - Rational(3, 2)*y/x) in [
        "y = Symbol('y')\nx = Symbol('x')\ne = 1 - 3/2*y/x",
        "y = Symbol('y')\nx = Symbol('x')\ne = -3/2*y/x + 1",
        "y = Symbol('y')\nx = Symbol('x')\ne = 1 - 3*y/(2*x)"]
    # Multiplication
    assert python(x/y) == "x = Symbol('x')\ny = Symbol('y')\ne = x/y"
    assert python(-x/y) == "x = Symbol('x')\ny = Symbol('y')\ne = -x/y"
    assert python((x + 2)/y) in [
        "y = Symbol('y')\nx = Symbol('x')\ne = 1/y*(2 + x)",
        "y = Symbol('y')\nx = Symbol('x')\ne = 1/y*(x + 2)",
        "x = Symbol('x')\ny = Symbol('y')\ne = 1/y*(2 + x)",
        "x = Symbol('x')\ny = Symbol('y')\ne = (2 + x)/y",
        "x = Symbol('x')\ny = Symbol('y')\ne = (x + 2)/y"]
    assert python((1 + x)*y) in [
        "y = Symbol('y')\nx = Symbol('x')\ne = y*(1 + x)",
        "y = Symbol('y')\nx = Symbol('x')\ne = y*(x + 1)", ]
    # Check for proper placement of negative sign
    assert python(-5*x/(x + 10)) == "x = Symbol('x')\ne = -5*x/(x + 10)"
    assert python(1 - Rational(3, 2)*(x + 1)) in [
        "x = Symbol('x')\ne = Rational(-3, 2)*x + Rational(-1, 2)",
        "x = Symbol('x')\ne = -3*x/2 + Rational(-1, 2)",
        "x = Symbol('x')\ne = -3*x/2 + Rational(-1, 2)"
    ]
def test_python_keyword_symbol_name_escaping():
    """Python keywords used as symbol names get a trailing underscore appended."""
    # Check for escaping of keywords
    assert python(
        5*Symbol("lambda")) == "lambda_ = Symbol('lambda')\ne = 5*lambda_"
    assert (python(5*Symbol("lambda") + 7*Symbol("lambda_")) ==
            "lambda__ = Symbol('lambda')\nlambda_ = Symbol('lambda_')\ne = 7*lambda_ + 5*lambda__")
    assert (python(5*Symbol("for") + Function("for_")(8)) ==
            "for__ = Symbol('for')\nfor_ = Function('for_')\ne = 5*for__ + for_(8)")
def test_python_keyword_function_name_escaping():
    """Python keywords used as function names are escaped the same way."""
    assert python(
        5*Function("for")(8)) == "for_ = Function('for')\ne = 5*for_(8)"
def test_python_relational():
    """Relational operators print as Eq/Ne calls or infix comparisons."""
    assert python(Eq(x, y)) == "x = Symbol('x')\ny = Symbol('y')\ne = Eq(x, y)"
    assert python(Ge(x, y)) == "x = Symbol('x')\ny = Symbol('y')\ne = x >= y"
    assert python(Le(x, y)) == "x = Symbol('x')\ny = Symbol('y')\ne = x <= y"
    assert python(Gt(x, y)) == "x = Symbol('x')\ny = Symbol('y')\ne = x > y"
    assert python(Lt(x, y)) == "x = Symbol('x')\ny = Symbol('y')\ne = x < y"
    assert python(Ne(x/(y + 1), y**2)) in [
        "x = Symbol('x')\ny = Symbol('y')\ne = Ne(x/(1 + y), y**2)",
        "x = Symbol('x')\ny = Symbol('y')\ne = Ne(x/(y + 1), y**2)"]
def test_python_functions():
    """python() printing of elementary functions, roots, Abs and user Functions."""
    # Simple
    assert python(2*x + exp(x)) in "x = Symbol('x')\ne = 2*x + exp(x)"
    assert python(sqrt(2)) == 'e = sqrt(2)'
    assert python(2**Rational(1, 3)) == 'e = 2**Rational(1, 3)'
    assert python(sqrt(2 + pi)) == 'e = sqrt(2 + pi)'
    assert python((2 + pi)**Rational(1, 3)) == 'e = (2 + pi)**Rational(1, 3)'
    assert python(2**Rational(1, 4)) == 'e = 2**Rational(1, 4)'
    assert python(Abs(x)) == "x = Symbol('x')\ne = Abs(x)"
    assert python(
        Abs(x/(x**2 + 1))) in ["x = Symbol('x')\ne = Abs(x/(1 + x**2))",
                               "x = Symbol('x')\ne = Abs(x/(x**2 + 1))"]
    # Univariate/Multivariate functions
    f = Function('f')
    assert python(f(x)) == "x = Symbol('x')\nf = Function('f')\ne = f(x)"
    assert python(f(x, y)) == "x = Symbol('x')\ny = Symbol('y')\nf = Function('f')\ne = f(x, y)"
    assert python(f(x/(y + 1), y)) in [
        "x = Symbol('x')\ny = Symbol('y')\nf = Function('f')\ne = f(x/(1 + y), y)",
        "x = Symbol('x')\ny = Symbol('y')\nf = Function('f')\ne = f(x/(y + 1), y)"]
    # Nesting of square roots
    assert python(sqrt((sqrt(x + 1)) + 1)) in [
        "x = Symbol('x')\ne = sqrt(1 + sqrt(1 + x))",
        "x = Symbol('x')\ne = sqrt(sqrt(x + 1) + 1)"]
    # Nesting of powers
    assert python((((x + 1)**Rational(1, 3)) + 1)**Rational(1, 3)) in [
        "x = Symbol('x')\ne = (1 + (1 + x)**Rational(1, 3))**Rational(1, 3)",
        "x = Symbol('x')\ne = ((x + 1)**Rational(1, 3) + 1)**Rational(1, 3)"]
    # Function powers
    assert python(sin(x)**2) == "x = Symbol('x')\ne = sin(x)**2"
@XFAIL
def test_python_functions_conjugates():
    """Known-failing: conjugate printing still emits 2-D pretty output."""
    a, b = map(Symbol, 'ab')
    assert python( conjugate(a + b*I) ) == '_ _\na - I*b'
    assert python( conjugate(exp(a + b*I)) ) == ' _ _\n a - I*b\ne '
def test_python_derivatives():
    """Unevaluated Derivative expressions print as Derivative(...) calls."""
    # Simple
    f_1 = Derivative(log(x), x, evaluate=False)
    assert python(f_1) == "x = Symbol('x')\ne = Derivative(log(x), x)"
    f_2 = Derivative(log(x), x, evaluate=False) + x
    assert python(f_2) == "x = Symbol('x')\ne = x + Derivative(log(x), x)"
    # Multiple symbols
    f_3 = Derivative(log(x) + x**2, x, y, evaluate=False)
    assert python(f_3) == \
        "x = Symbol('x')\ny = Symbol('y')\ne = Derivative(x**2 + log(x), x, y)"
    f_4 = Derivative(2*x*y, y, x, evaluate=False) + x**2
    assert python(f_4) in [
        "x = Symbol('x')\ny = Symbol('y')\ne = x**2 + Derivative(2*x*y, y, x)",
        "x = Symbol('x')\ny = Symbol('y')\ne = Derivative(2*x*y, y, x) + x**2"]
def test_python_integrals():
    """Indefinite, definite and nested Integral expressions print as Integral(...)."""
    # Simple
    f_1 = Integral(log(x), x)
    assert python(f_1) == "x = Symbol('x')\ne = Integral(log(x), x)"
    f_2 = Integral(x**2, x)
    assert python(f_2) == "x = Symbol('x')\ne = Integral(x**2, x)"
    # Double nesting of pow
    f_3 = Integral(x**(2**x), x)
    assert python(f_3) == "x = Symbol('x')\ne = Integral(x**(2**x), x)"
    # Definite integrals
    f_4 = Integral(x**2, (x, 1, 2))
    assert python(f_4) == "x = Symbol('x')\ne = Integral(x**2, (x, 1, 2))"
    f_5 = Integral(x**2, (x, Rational(1, 2), 10))
    assert python(
        f_5) == "x = Symbol('x')\ne = Integral(x**2, (x, Rational(1, 2), 10))"
    # Nested integrals
    f_6 = Integral(x**2*y**2, x, y)
    assert python(f_6) == "x = Symbol('x')\ny = Symbol('y')\ne = Integral(x**2*y**2, x, y)"
def test_python_matrix():
    """Matrices print as MutableDenseMatrix constructor calls."""
    p = python(Matrix([[x**2+1, 1], [y, x+y]]))
    s = "x = Symbol('x')\ny = Symbol('y')\ne = MutableDenseMatrix([[x**2 + 1, 1], [y, x + y]])"
    assert p == s
def test_python_limits():
    """Evaluated limits print as their resulting values."""
    assert python(limit(x, x, oo)) == 'e = oo'
    assert python(limit(x**2, x, 0)) == 'e = 0'
def test_issue_20762():
    # Make sure Python removes curly braces from subscripted variables
    a_b = Symbol('a_{b}')
    b = Symbol('b')
    expr = a_b*b
    assert python(expr) == "a_b = Symbol('a_{b}')\nb = Symbol('b')\ne = a_b*b"
def test_settings():
    """Unknown printer settings raise TypeError."""
    raises(TypeError, lambda: python(x, method="garbage"))
|
997,299 | 1dcdd762aad7b47ad861aca4bccdaa6dd57f5bb7 | #!/usr/bin/python
"""
Feature extraction module for SemEval Shared Task 1.
"""
__author__ = 'Johannes Bjerva, and Rob van der Goot'
__email__ = 'j.bjerva@rug.nl'
import os
import requests
import numpy as np
from collections import defaultdict
from scipy.spatial.distance import cosine
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import WordNetError
import drs_complexity
import load_semeval_data
import config
import math
def word_overlap2(sentence_a, sentence_b):
    """Jaccard word overlap of two token sequences, ignoring stop words."""
    words_a = set(sentence_a) - config.stop_list
    words_b = set(sentence_b) - config.stop_list
    # |intersection| / |union|
    return len(words_a & words_b) / float(len(words_a | words_b))
def word_overlap3(t_raw, h_raw, replacements):
    """Average of the direct t/h word overlap and the best overlap achieved
    by any paraphrase replacement (replacement[2]/[3] hold the rewritten
    token sequences)."""
    def jaccard(t_tokens, h_tokens):
        t_set = set(t_tokens) - config.stop_list
        h_set = set(h_tokens) - config.stop_list
        return len(t_set & h_set) / float(len(t_set | h_set))
    base = jaccard(t_raw, h_raw)
    best = 0
    for repl in replacements:
        candidate = jaccard(repl[2], repl[3])
        if candidate > best:
            best = candidate
    return (base + best) / 2
def sentence_lengths(sentence_a, sentence_b):
    """Length difference of the two sentences as a fraction of the shorter one."""
    len_a, len_b = len(sentence_a), len(sentence_b)
    return abs(len_a - len_b) / float(min(len_a, len_b))
def bigrams(sentence):
    """Per-position bigrams ('w1_w2') known to the skipgram vocabulary.

    Positions whose bigram is absent from word_ids yield None; returns []
    when bigram features are disabled.
    """
    if not config.USE_BIGRAMS:
        return []
    found = []
    for pos in range(len(sentence) - 1):
        candidate = sentence[pos] + '_' + sentence[pos + 1]
        found.append(candidate if candidate in word_ids else None)
    return found
def trigrams(sentence):
    """Per-position trigrams ('w1_w2_w3') known to the skipgram vocabulary.

    Positions whose trigram is absent from word_ids yield None; returns []
    when trigram features are disabled.
    """
    if not config.USE_TRIGRAMS:
        return []
    found = []
    for pos in range(len(sentence) - 2):
        candidate = sentence[pos] + '_' + sentence[pos + 1] + '_' + sentence[pos + 2]
        found.append(candidate if candidate in word_ids else None)
    return found
def sentence_distance(sentence_a, sentence_b):
    """Cosine distance between the summed n-gram embeddings of the two sentences."""
    def embed(sentence):
        # Tokens plus any in-vocabulary bi-/trigrams; out-of-vocabulary
        # entries contribute a [0] placeholder, as in the original model.
        tokens = sentence + bigrams(sentence) + trigrams(sentence)
        vectors = [projections[word_ids.get(tok, 0)] if tok in word_ids else [0]
                   for tok in tokens]
        return np.sum(vectors, axis=0)
    return float(cosine(embed(sentence_a), embed(sentence_b)))
def get_synset_overlap(sentence_a, sentence_b):
    """
    Calculate the synset overlap of two sentences.
    Currently uses the first 5 noun senses.
    """
    def synsets(word):
        # Collect lemma names from the first five noun senses of the word.
        sense_lemmas = []
        # NOTE: ('n') is a one-character string, not a tuple — the loop runs
        # once over 'n'. The commented alternative suggests ('n','a') was
        # considered; use a real tuple if adjectives are ever re-enabled.
        for pos in ('n'):#,'a'):
            for i in xrange(5):
                try:
                    # 'lemma.name' as an attribute implies an old NLTK API
                    # (newer versions use lemma.name()) — presumably matched
                    # to the pinned NLTK version; verify before upgrading.
                    sense_lemmas += [lemma.name
                        for lemma in wn.synset('{0}.{1}.0{2}'.format(word, pos, i)).lemmas]
                except WordNetError:
                    # Word has fewer than 5 noun senses (or none) — skip.
                    pass
        return sense_lemmas
    a_set = set(lemma for word in sentence_a for lemma in synsets(word))
    b_set = set(lemma for word in sentence_b for lemma in synsets(word))
    # Jaccard overlap of the lemma sets.
    score = len(a_set&b_set)/float(len(a_set|b_set))
    return score
def synset_overlap(sentence_a, sentence_b, replacements):
    """Best synset overlap across the original pair and all paraphrase replacements."""
    best = get_synset_overlap(sentence_a, sentence_b)
    for repl in replacements:
        best = max(best, get_synset_overlap(repl[2], repl[3]))
    return best
def get_synset_distance(sentence_a, sentence_b):
    """Average, over words of sentence_a, of each word's best WordNet
    path-similarity to any word of sentence_b (first noun sense only).

    Words without a noun synset score 0; the average is taken over words
    with a positive score, and 0 is returned if there are none.
    """
    def distance(word, sentence_b):
        # Best path similarity between word's first noun sense and any
        # word of sentence_b; 0.0 if word has no noun synset.
        try:
            synset_a = wn.synset('{0}.n.01'.format(word))
        except WordNetError:
            return 0.0
        max_similarity = 0.0
        for word2 in sentence_b:
            try:
                similarity = synset_a.path_similarity(wn.synset('{0}.n.01'.format(word2)))
                if similarity > max_similarity:
                    max_similarity = similarity
            except WordNetError:
                # word2 has no noun synset — ignore it.
                continue
        return max_similarity
    distances = [distance(word, sentence_b) for word in sentence_a]
    # Avoid division by zero when no word achieved a positive similarity.
    if float(len([1 for i in distances if i > 0.0])) == 0:
        return 0
    return sum(distances)/float(len([1 for i in distances if i > 0.0]))
def synset_distance(sentence_a, sentence_b, replacements):
    """
    Best synset-distance score over the original sentence pair and all
    paraphrase replacement pairs (indices 2/3 of each replacement).
    """
    candidates = [get_synset_distance(sentence_a, sentence_b)]
    candidates.extend(get_synset_distance(rep[2], rep[3]) for rep in replacements)
    return max(candidates)
def get_number_of_instances(model):
    """
    Return the number of instances in the model.

    Counts 'd'-separated fields on the model's first line, minus two
    (equivalently: the number of 'd' characters minus one). Returns 0 for
    a missing model.
    """
    if model is None:
        return 0
    first_line = model[0]
    return float(first_line.count('d') - 1)
def get_instance_overlap(kt_mod, kh_mod, kth_mod):
    """
    Calculate the amount of overlap using the number of instances in the
    t, h and t+h models. Returns 0 when any count is 0.
    """
    kt, kh, kth = (get_number_of_instances(m) for m in (kt_mod, kh_mod, kth_mod))
    if 0 in (kt, kh, kth):
        return 0
    return 1 - (kth - kt) / kh
def instance_overlap(kt_mod, kh_mod, kth_mod, replacements):
    """
    Instance overlap between the models of sentence_a (t) and sentence_b (h),
    maximized over paraphrase replacements (indices 6/7/8 hold the
    replacement models).
    """
    candidates = [get_instance_overlap(kt_mod, kh_mod, kth_mod)]
    candidates.extend(get_instance_overlap(rep[6], rep[7], rep[8])
                      for rep in replacements)
    return max(candidates)
def get_number_of_relations(model):
    """
    Return the number of lines in the model file that mention a binary
    relation (contain 'f(2'). Returns 0 for a missing model.
    """
    # Idiom fixes: 'is None' instead of '== None'; substring test via 'in'
    # instead of find(...) >= 0.
    if model is None:
        return 0
    return float(sum(1 for line in model if 'f(2' in line))
#TODO when multiples of same relation, the result is still 1
def get_relation_overlap(kt_mod, kh_mod, kth_mod):
    """
    Calculate the amount of overlap using the number of relations in the
    t, h and t+h models. Returns 0 when any count is 0.
    """
    kt, kh, kth = (get_number_of_relations(m) for m in (kt_mod, kh_mod, kth_mod))
    if 0 in (kt, kh, kth):
        return 0
    return 1 - (kth - kt) / kh
def relation_overlap(kt_mod, kh_mod, kth_mod, replacements):
    """
    Relation overlap between the models of t and h, maximized over
    paraphrase replacements (indices 6/7/8 hold the replacement models).
    """
    candidates = [get_relation_overlap(kt_mod, kh_mod, kth_mod)]
    candidates.extend(get_relation_overlap(rep[6], rep[7], rep[8])
                      for rep in replacements)
    return max(candidates)
def get_nouns(root):
    """
    Return the lemmas of all tokens tagged as common nouns ('NN' or 'NNS')
    in the boxer xml 'root'.
    """
    nouns = []
    for tags in root.findall("./xdrs/taggedtokens/tagtoken/tags"):
        pos_values = [tag.text for tag in tags.findall("./tag[@type='pos']")]
        if 'NN' in pos_values or 'NNS' in pos_values:
            nouns.extend(tag.text for tag in tags.findall("./tag[@type='lemma']"))
    return nouns
def noun_overlap(t_xml, h_xml, replacements):
    """
    Jaccard overlap between the noun sets of t and h, maximized over
    paraphrase replacements (indices 9/10 hold the replacement xml trees).
    Returns 0 when either tree is missing or no nouns were found.
    """
    if t_xml is None or h_xml is None:
        return 0

    def jaccard(a_xml, b_xml):
        a = set(get_nouns(a_xml.getroot()))
        b = set(get_nouns(b_xml.getroot()))
        union = a | b
        return len(a & b) / float(len(union)) if union else None

    best = jaccard(t_xml, h_xml)
    if best is None:
        best = 0
    for rep in replacements:
        if rep[9] is not None and rep[10] is not None:
            candidate = jaccard(rep[9], rep[10])
            if candidate is not None and candidate > best:
                best = candidate
    return best
def get_verbs(root):
    """
    Return the lemmas of all tokens tagged as verbs ('VBP' or 'VBG')
    in the boxer xml 'root'.
    """
    verbs = []
    for tags in root.findall("./xdrs/taggedtokens/tagtoken/tags"):
        pos_values = [tag.text for tag in tags.findall("./tag[@type='pos']")]
        if 'VBP' in pos_values or 'VBG' in pos_values:
            verbs.extend(tag.text for tag in tags.findall("./tag[@type='lemma']"))
    return verbs
def verb_overlap(t_xml, h_xml, replacements):
    """
    Jaccard overlap between the verb sets of t and h, maximized over
    paraphrase replacements (indices 9/10 hold the replacement xml trees).
    Returns 0 when either tree is missing or no verbs were found.
    """
    if t_xml is None or h_xml is None:
        return 0

    def jaccard(a_xml, b_xml):
        a = set(get_verbs(a_xml.getroot()))
        b = set(get_verbs(b_xml.getroot()))
        union = a | b
        return len(a & b) / float(len(union)) if union else None

    best = jaccard(t_xml, h_xml)
    if best is None:
        best = 0
    for rep in replacements:
        if rep[9] is not None and rep[10] is not None:
            candidate = jaccard(rep[9], rep[10])
            if candidate is not None and candidate > best:
                best = candidate
    return best
def get_agent(drs):
    """
    Return all agents in the drs data as a list.

    Scans 'sem' lines; for every ':'-separated statement that mentions
    'agent', the character at position 6 is taken as the referent variable
    (assumes statements shaped like 'rel(A,B,agent,...)' with one-character
    variables — TODO confirm against the boxer output format), then the
    pred word bound to that variable is collected.
    """
    agents = []
    for line in drs:
        if not line.strip().startswith('sem'):
            continue
        statements = line.split(':')
        for statement in statements:
            if 'agent' not in statement:
                continue
            variable = statement[6:7]
            for candidate in statements:
                if candidate.startswith('pred({0}'.format(variable)):
                    agents.append(candidate.split(',')[1])
    return agents
def agent_overlap(t_drs, h_drs, replacements):
    """
    Score the agent overlap between the drs's of t and h.

    NOTE(review): despite the name, this returns len(h_agents)/len(t_agents)
    after removing agents common with t from h_agents — the original author
    noted it "seems to work better" than a real comparison, so that formula
    is preserved. `replacements` is currently unused (the alternative code
    below was disabled in the original).
    """
    t_agents = get_agent(t_drs)
    h_agents = get_agent(h_drs)
    # Bug fix: was 'len(t_agents) is 0' — an identity test on an int.
    if len(t_agents) == 0:
        return 0
    common = 0
    for agent in t_agents:
        if agent in h_agents:
            h_agents.pop(h_agents.index(agent))
            # Bug fix: was 'common =+ 1', which assigned +1 every time
            # instead of incrementing, so the debug print never fired.
            common += 1
    if common > 1:
        print(common)  # debug output, kept from the original
    return len(h_agents)/len(t_agents)  # seems to work better then real comparison
    # Alternative approach, disabled in the original:
    # else:
    #     for replacement in replacements:
    #         if get_agent(replacement[15]) == get_agent(replacement[16]):
    #             return 1
def get_patient(drs):
    """
    Return the first patient found in the drs data, or None if there is none.

    Mirrors get_agent: assumes statements shaped like 'rel(A,B,patient,...)'
    with one-character variables at position 6 — TODO confirm format.
    """
    for line in drs:
        if not line.strip().startswith('sem'):
            continue
        statements = line.split(':')
        for statement in statements:
            if 'patient' not in statement:
                continue
            variable = statement[6:7]
            for candidate in statements:
                if candidate.startswith('pred({0}'.format(variable)):
                    return candidate.split(',')[1]
def patient_overlap(t_drs, h_drs, replacements):
    """
    Return 1 if t and h (or any paraphrase replacement pair, indices 15/16)
    share the same patient, else 0.
    """
    # Bug fix: the original compared get_patient(t_drs) with
    # get_agent(h_drs) — a string/None against a list, which can never be
    # equal. Compare patients on both sides, as the replacement loop below
    # already does.
    if get_patient(t_drs) == get_patient(h_drs):
        return 1
    for replacement in replacements:
        if get_patient(replacement[15]) == get_patient(replacement[16]):
            return 1
    return 0
def get_pred(drs_file):
    """
    Return a list of all rel and pred words found in the 'sem' lines of a
    drs (the relation name of each rel(...) and the word of each pred(...)).
    """
    words = []
    for line in drs_file:
        if not line.strip().startswith('sem'):
            continue
        for statement in line.split(':'):
            if statement.startswith('rel('):
                words.append(statement.split(',')[2])
            if statement.startswith('pred('):
                words.append(statement.split(',')[1])
    return words
def pred_overlap(t, h):
    """
    Naive Jaccard overlap of the pred/rel words of two drs's.

    Returns 0.0 when neither drs contains any pred/rel words (the original
    raised ZeroDivisionError in that case).
    """
    a_set = set(get_pred(t))
    b_set = set(get_pred(h))
    union = a_set | b_set
    if not union:
        return 0.0
    return len(a_set & b_set) / float(len(union))
def get_drs(drs_file):
    """
    Extract simple 'relation word1 word2' triples from the 'sem' lines of
    a drs.

    pred(...) statements bind a word to a one-character variable and
    rel(...) statements link two variables by a relation name, e.g.:
      pred = [['kid', 'B'], ['play', 'E']]
      rel  = [['agent', 'E', 'B']]
    yields ['agent play kid']. Relations whose variables have no matching
    pred are skipped (TODO in original: something more complicated is going
    on in the drs for those).
    """
    pred = []
    rel = []
    for line in drs_file:
        if line.strip().startswith('sem'):
            for statement in line.split(':'):
                if statement.startswith('rel('):
                    parts = statement.split(',')
                    # [relation name, first variable, second variable]
                    rel.append([parts[2], parts[0][-1:], parts[1]])
                if statement.startswith('pred('):
                    parts = statement.split(',')
                    # [word, variable it is bound to]
                    pred.append([parts[1], parts[0][-1:]])
    triples = []
    for relation, var1, var2 in rel:
        # Bug fix: the original compared variables with 'is', which only
        # worked because CPython interns one-character strings; use '=='.
        # Last matching pred wins, as in the original scan.
        symbol1 = None
        symbol2 = None
        for word, var in pred:
            if var == var1:
                symbol1 = word
            if var == var2:
                symbol2 = word
        if symbol1 is None or symbol2 is None:
            continue
        triples.append('{0} {1} {2}'.format(relation, symbol1, symbol2))
    return triples
def drs(t_drs, h_drs):
    """
    Jaccard overlap of pred/rel words between two drs's
    (same computation as pred_overlap).
    """
    t_words = set(get_pred(t_drs))
    h_words = set(get_pred(h_drs))
    return len(t_words & h_words) / float(len(t_words | h_words))
def tfidf(t, h):
    """
    Calculate the word overlap of t with h, weighting frequent words less
    using a sort of tf-idf (also doc_freq available).

    Each word of t that also occurs in h scores
    (total_sentences - word_freq) / total_sentences when known to the
    corpus, else a flat 1.

    NOTE(review): mutates t[0] and h[0] in place (lowercases the first
    token only). Membership is checked in config.doc_freq but the count is
    read from config.word_freq — confirm that mismatch is intentional.
    """
    h[0] = h[0].lower()
    t[0] = t[0].lower()
    score = 0
    for word in t:
        word = word.strip()
        if word in h:
            if word in config.doc_freq:
                # Rarer words (low corpus frequency) contribute closer to 1.
                score += (float(config.total_sentences) - config.word_freq[word]) / config.total_sentences
            else:
                # Unknown to the corpus: full weight.
                score += 1
    return score
# Used to encode the entailment judgements numerically: each new key looked
# up in these defaultdicts is assigned the next consecutive integer id.
prediction_ids = defaultdict(lambda:len(prediction_ids))
prover_ids = defaultdict(lambda:len(prover_ids))
def get_johans_features(modsizedif, prediction, id):
    """
    Read the outputs of johans system into a flat feature list.

    Returns [prover output, domain novelty, relation novelty, wordnet
    novelty, model novelty, word overlap, prediction flag]. When
    modsizedif is missing, logs the id and returns seven '1' strings
    (NOTE(review): strings here vs. floats on the normal path — confirm
    downstream handles both).
    """
    data = []
    prover_output = 0
    if modsizedif == None:
        print id
        return ['1','1','1','1','1','1','1']
    # First line of modsizedif holds the prover verdict.
    if modsizedif[0].split()[0] == 'contradiction.':
        prover_output = 0.0
    if modsizedif[0].split()[0] == 'unknown.':
        prover_output = 0.5
    if modsizedif[0].split()[0] == 'proof.':
        prover_output = 1.0
    data.append(prover_output) # prover output
    # Lines 1-5: numeric scores, each with a trailing character stripped.
    data.append(float(modsizedif[1].split()[0][:-1])) # domain novelty
    data.append(float(modsizedif[2].split()[0][:-1])) # relation novelty
    data.append(float(modsizedif[3].split()[0][:-1])) # wordnet novelty
    data.append(float(modsizedif[4].split()[0][:-1])) # model novelty
    data.append(float(modsizedif[5].split()[0][:-1])) # word overlap
    if prediction[0].split()[0] == 'informative': # prediction.txt
        data.append(0)
    else:
        data.append(1)
    return data
#TODO, also use sick2?
def get_prediction_judgement(id):
"""
Get relation predictions from Johan's system,
return as a dict mapping to a list with the appropriate index set to 1.
"""
for line in open('working/sick.run'):
if line.split()[0] is str(id):
return line.split()[2]
print line.split()[2]
return 2.5
def get_entailment_judgements():
    """
    Get entailment judgements from Johan's system (working/sick.run),
    return as a dict mapping each sick id to a one-hot list in the order
    [CONTRADICTION, ENTAILMENT, NEUTRAL].
    """
    results = defaultdict(lambda: [0,0,0])
    index_of = {'CONTRADICTION': 0, 'ENTAILMENT': 1, 'NEUTRAL': 2}
    for lineno, line in enumerate(open('working/sick.run')):
        if lineno == 0:
            continue  # skip the header line
        fields = line.split()
        sick_id = str(fields[0])
        judgement = fields[1]
        # Set the index corresponding to the judgement to 1
        results[sick_id][index_of[judgement]] = 1
    return results
############################################################
# Endpoint of the local boxer/drs pipeline service (returns xml).
url = 'http://127.0.0.1:7777/raw/pipeline?format=xml'
def sent_complexity(sentence):
    """
    Post the (tokenized) sentence to the local pipeline service and return
    its drs complexity as computed by drs_complexity.parse_xml.
    """
    r = requests.post(url, data=' '.join(sentence))
    complexity = drs_complexity.parse_xml(r.text)
    return complexity
def drs_complexity_difference(sentence_a, sentence_b):
    """Absolute difference between the drs complexities of two sentences."""
    return abs(sent_complexity(sentence_a) - sent_complexity(sentence_b))
if config.RECALC_FEATURES:
    # Load projection data (word embeddings) and the gold entailment
    # judgements once at import time; used by the feature functions above.
    word_ids, projections = load_semeval_data.load_embeddings()
    entailment_judgements = get_entailment_judgements()