index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
18,600 | 64dbfd189e6bf6ec57a6d9e1561d42bce32e744d | import RPi.GPIO as GPIO
class Water:
    """Read a digital moisture ("wet") sensor attached to a Raspberry Pi GPIO pin."""

    def __init__(self, gpio_pin=18, pull_up_down="UP"):
        # Fixed: original defined `_init_` (single underscores), which Python
        # never calls; and the default `UP` was an undefined name.
        GPIO.setmode(GPIO.BCM)  # fixed typo: was `PIO.setmode`
        self.gpio_pin = gpio_pin  # remember the pin for read_sensors()
        # Map "UP"/"DOWN" to the library constant GPIO.PUD_UP / GPIO.PUD_DOWN.
        # The original built the literal string "GPIO.PUD_%s" and never formatted it.
        self.pull_up_down = getattr(GPIO, "PUD_%s" % str(pull_up_down))
        GPIO.setup(gpio_pin, GPIO.IN, pull_up_down=self.pull_up_down)
        self.wet = None  # last reading; populated by read_sensors()

    def read_sensors(self):
        """Sample the pin and cache the result in self.wet."""
        self.wet = GPIO.input(self.gpio_pin)
        print('wet=%s' % self.wet)

    def get_json(self):
        """Take a fresh reading and return it as a JSON-serializable dict."""
        self.read_sensors()
        return {"wet": self.wet}
|
18,601 | 4247bb2618b49b034472a272a2562d376dd5c592 | # conding:utf-8
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
class BrowserHandling:
    """Drive a Chrome browser through selenium: open a URL, search, then quit."""

    def __init__(self, url, driver_path):
        self._url = url
        self._driver_path = driver_path

    # open url path
    @property
    def url(self):
        """Target URL opened by control_browser()."""
        return self._url

    @url.setter
    def url(self, url):
        self._url = url

    @property
    def driver_path(self):
        """Filesystem path of the chromedriver executable."""
        return self._driver_path

    @driver_path.setter
    def driver_path(self, driver_path):
        self._driver_path = driver_path

    def control_browser(self):
        """Open self.url, search for "selenium", then close the browser.

        Fixed: the method was decorated @classmethod, so `self` was bound to
        the class and `self.url` yielded the property object instead of the
        URL string.  Also uses the configured driver_path (previously a
        hard-coded path) and actually passes the ChromeOptions.
        """
        # Maximize Browser option
        options = webdriver.ChromeOptions()
        options.add_argument("--kiosk")
        driver = webdriver.Chrome(executable_path=self.driver_path,
                                  chrome_options=options)
        driver.get(self.url)
        driver.find_element_by_id("lst-ib").send_keys("selenium")
        driver.find_element_by_id("lst-ib").send_keys(Keys.ENTER)
        driver.find_element_by_link_text("Selenium - Web Browser Automation")
        sleep(5)
        driver.close()
|
18,602 | 1215d0e33e30586f1ebd126df61ade07c7d97457 | import csv
import logging
import pandas as pd
import numpy as np
import time
import math
from shutil import copyfile
from statistics import mean
import numpy as np
import pickle
import os
# Archive one user's calibration session: create a per-user folder, then copy
# (and usually delete) each session artifact into it with a user-name prefix.
user_name = input('Please type unique user name: ')
path = os.path.join(os.getcwd(), user_name)
print(path)
try:
    os.mkdir(path)
except OSError:
    print("Creation of user folder failed")
else:
    print("successfully made user folder")

# (source file, delete-after-copy?) pairs.  cal_params.pkl is intentionally
# kept in place so the next session can reuse the calibration parameters.
_SESSION_FILES = [
    ('eye_gaze_data_cal.csv', True),
    ('video_gazed_cal.csv', True),
    ('frame_angles_cal.csv', True),
    ('dotchase.mp4', True),
    ('cal_error_summary.csv', True),
    ('cal_error_data.csv', True),
    ('cal_params.pkl', False),
    ('blink_profile_cal.csv', True),
]
for fname, remove_after in _SESSION_FILES:
    dest = os.path.join(path, user_name + '_' + fname)
    try:
        copyfile(fname, dest)
        print('copied %s' % fname)
        if remove_after:
            os.remove(fname)
    except OSError:
        # Narrowed from a bare `except:`; copy/remove failures raise OSError.
        print('unable to copy %s' % fname)
|
18,603 | 0283794290571c018358a450bf32208fff863fa3 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
# Demonstrations of boolean operators and list membership tests.

if True and False:
    print("Hello")  # never printed: `and` needs both operands truthy

if True or False:
    print("Hello")  # printed: `or` needs only one truthy operand

students = ["Ravi", "Havi", "Gavi", "Rovi"]
print("Ravi" in students)  # membership test -> True

# Conditional expression instead of an if/else statement.
print("TomTom" if "Gavi" in students else "NA")

# Two equivalent ways of spelling a negated membership test.
if "R0ovi" not in students:
    print("Bengali")
if not "Rovii" in students:
    print("Gujju")
|
18,604 | 96c057338aae94d22038be8a3bc0981ec2b50432 | # counting binary number in decimal range
# Developed by Nazim...
# Count how many "binary-looking" numbers (decimal digits all 0/1) are <= n.
# Developed by Nazim...

# li[i] is the binary representation of i read back as a decimal integer,
# e.g. li[5] = 101.  Indices 0..1024 cover every binary-looking number of up
# to 11 decimal digits, enough for the allowed range n <= 10**9.
li = [int(bin(i)[2:]) for i in range(1025)]


def count_binary_like(n):
    """Return how many binary-looking numbers lie in 1..n (1 <= n <= 10**9)."""
    l = len(str(n))
    # li[s] = 10...0 with l digits: the smallest l-digit binary-looking number.
    s = 2 ** (l - 1)
    # All shorter binary-looking numbers li[1..s-1] plus li[s] itself are <= n,
    # contributing s; then count the remaining l-digit ones that are still <= n.
    j = 1
    count = 0
    while li[s + j] <= n:
        count += 1
        if li[s + j] == n:  # fixed: the original compared li[l + j] here
            break
        j += 1
    return s + count


if __name__ == '__main__':
    # Reading stdin is guarded so importing this module does not block.
    n = int(input())
    if n < 1 or n > 10 ** 9:
        print("Input the number in correct range")
    else:
        print(count_binary_like(n))
|
18,605 | 88b47913981e6bc1842f0dbdee3a77dec33a2993 | # 第1章 统计学习方法概论
# 高斯于1823年在误差e1,…, en独立同分布的假定下, 证明了最小二乘方法的一个最优性质: 在所有无偏的线性估计类中, 最小二乘方法是其中方差最小的!
### 使用最小二乘法拟和曲线
#
# 对于数据$(x_i, y_i)(i=1, 2, 3..., m)$
#
# 拟合出函数$h(x)$
#
# 有误差,即残差:$r_i = h(x_i) - y_i$
#
# 此时L2范数(残差平方和)
# 最小时,h(x)
# 和
# y
# 相似度最高,更拟合
# 一般的H(x)
# 为n次的多项式,$H(x) = w_0 + w_1x + w_2x ^ 2 + ...
# w_nx ^ n$
#
# $w(w_0, w_1, w_2, ..., w_n)$为参数
#
# 最小二乘法就是要找到一组 $w(w_0, w_1, w_2, ..., w_n)$ 使得$\sum_
# {i = 1} ^ n(h(x_i) - y_i) ^ 2$ (残差平方和)
# 最小
#
# 即,求 $min\sum_
# {i = 1} ^ n(h(x_i) - y_i) ^ 2$
# 举例:我们用目标函数 $y = \sin 2\pi x$,加上一个正态分布的噪音干扰,用多项式去拟合【例1.1,11页】
import numpy as np
import scipy as sp
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
# Ground-truth target function to be approximated by the polynomial fits.
def real_func(x):
    """Return sin(2*pi*x)."""
    two_pi = 2.0 * np.pi
    return np.sin(two_pi * x)
# Polynomial model evaluated at x.
def fit_func(p, x):
    """Evaluate the polynomial with coefficients p (highest power first) at x."""
    return np.poly1d(p)(x)
# Residual: difference between the fitted polynomial and the observations.
def residuals_func(p, x, y):
    """Return fit_func(p, x) - y, the residual vector scipy.optimize.leastsq minimizes."""
    ret = fit_func(p, x) - y
    return ret
# 十个点
x = np.linspace(0, 1, 10)
x_points = np.linspace(0, 1, 1000)
# 加上正态分布噪音的目标函数的值
y_ = real_func(x)
y = [np.random.normal(0, 0.1) + y1 for y1 in y_]
def fitting(M=0):
    """Fit a degree-M polynomial to the module-level noisy samples (x, y).

    M: degree of the polynomial.
    Returns the full scipy leastsq result tuple; element 0 holds the
    fitted coefficients.  Also plots the fit against the true curve.
    """
    # Random initial guess for the M+1 coefficients.
    p_init = np.random.rand(M + 1)
    # Least-squares fit of the residual function.
    p_lsq = leastsq(residuals_func, p_init, args=(x, y))
    print('Fitting Parameters:', p_lsq[0])

    # Visualization: true curve, fitted curve, and the noisy sample points.
    plt.plot(x_points, real_func(x_points), label='real')
    plt.plot(x_points, fit_func(p_lsq[0], x_points), label='fitted curve')
    plt.plot(x, y, 'bo', label='noise')
    plt.legend()
    return p_lsq
# M=0
p_lsq_0 = fitting(M=0)
# M=1
p_lsq_1 = fitting(M=1)
# M=3
p_lsq_3 = fitting(M=3)
# M=9
p_lsq_9 = fitting(M=9)
regularization = 0.0001
def residuals_func_regularization(p, x, y):
    """Residuals plus an L2 penalty on the coefficients.

    Uses the module-level `regularization` strength; appending
    sqrt(0.5 * lambda * p**2) makes leastsq's squared sum gain the
    ridge term 0.5 * lambda * p**2.
    """
    ret = fit_func(p, x) - y
    ret = np.append(ret, np.sqrt(0.5 * regularization * np.square(p)))  # L2 regularization term
    return ret
# 最小二乘法,加正则化项
p_init = np.random.rand(9 + 1)
p_lsq_regularization = leastsq(residuals_func_regularization, p_init, args=(x, y))
plt.plot(x_points, real_func(x_points), label='real')
plt.plot(x_points, fit_func(p_lsq_9[0], x_points), label='fitted curve')
plt.plot(x_points, fit_func(p_lsq_regularization[0], x_points), label='regularization')
plt.plot(x, y, 'bo', label='noise')
plt.legend()
plt.show() |
18,606 | 0c0a1adef66c46bedc65d08f62ea902dea04ab36 | from bs4 import BeautifulSoup
import requests
import lxml
import smtplib
# Scrape an Amazon.com.br product page and e-mail an alert when the price
# drops below a threshold.
# SECURITY: a real Gmail address and password are committed in plain text
# below -- revoke this password and load credentials from environment
# variables or a secret store instead.
GMAIL ="smtp.gmail.com"
email = "thiagosampaio60@gmail.com"
senha = "312907Thiago"
URL = "https://www.amazon.com.br/Monitor-Professional-Widescreen-P2419H-Preto/dp/B07FDNTS33/ref=sr_1_2?__mk_pt_BR=%C3%85M%C3%85%C5%BD%C3%95%C3%91&dchild=1&keywords=monitor+dell&qid=1617462846&sr=8-2"
# Browser-like headers so Amazon serves the regular product page.
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36",
    "Accept-Language": "pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7"
}
response = requests.get(URL, headers= header)
yc_web_page = response.text
soup = BeautifulSoup(yc_web_page, "lxml")
# NOTE(review): find(id=...) returns None if Amazon changes its markup,
# which would raise AttributeError on get_text() -- worth guarding.
price_div = soup.find(id="priceblock_ourprice").get_text()
# Convert Brazilian formatting ("R$1.299,00") to a float.
price_str = price_div.split("R$")[1]
price_str = price_str.replace(".","")
price_str = price_str.replace(",",".")
price_final = float(price_str)
if price_final < 950:
    with smtplib.SMTP(GMAIL) as connection:
        connection.starttls()
        connection.login(email,senha)
        connection.sendmail(from_addr=email,
                            to_addrs="thiagosampaioparticular@gmail.com",
                            msg = f"Subject: AMAZON PRICE ALERT! \n\nPreco de 1299 foi para {price_final}\n{URL}") |
18,607 | 54efb75e43418c41684f82d77df26a67629848ac | #!/usr/bin/env python
#coding:utf-8
import io
import xlrd
from lxml import etree
import json
excel1 = 'city.xls'
def readxls():
    """Read the 'city' sheet of city.xls into a dict (column 0 -> column 1)
    and return it serialized as a JSON string with non-ASCII kept as-is.

    Python 2 code (print statements, xlrd).
    """
    data = {}
    #start a xlrd object
    book = xlrd.open_workbook(excel1)
    #select a working sheet
    sheet = book.sheet_by_name('city')
    #from sheet get row and col
    # rows = sheet.nrows
    # cols = sheet.ncols
    #put data into a dict
    # for i in range(rows):
    for i in range(sheet.nrows):
        data[sheet.cell_value(i, 0)] = sheet.cell_value(i, 1)
    print data
    print json.dumps(data)
    print json.dumps(data, ensure_ascii = False)
    return json.dumps(data, ensure_ascii = False)
def toxml(data):
    """Wrap `data` (a JSON string) in a <root><city> XML document and write city.xml."""
    #Create a document and elements
    root = etree.Element('root')
    stu = etree.SubElement(root, 'city')
    # Create a comment (Chinese label meaning "city information")
    stu.append(etree.Comment(u' 城市信息\t'))
    # Create a text
    stu.text = data
    # Save to file
    tree = etree.ElementTree(root)
    tree.write('city.xml', encoding='utf-8', pretty_print=True, xml_declaration=True)
if __name__ == '__main__':
data = readxls()
toxml(data) |
18,608 | affd0bcdc18297d61774480b191ebec5edbc4981 | # built in functions i.e built in APIs
name = "Fionna"
#print.upper()
print(name) # Not possible , because data Fionna is constant hence cnt be updated and so cnt be chnged to uppercase
# hence STRINGS ARE IMMUTABLE
newName = name.upper() # now possible, i.e output will be in uppercase
print(newName)
authorName = "John Keats"
print(authorName,hex(id(authorName))) # hashcode
authorName = authorName.capitalize() # we are copying hashcode of old variable authorName into the same variable but string has new hashcode
#capitalizes, first letter J --->Memory leak takes place i.e. No reference to john watson is there hence it will eb deleted later but untill then it lies in constant pool called as constant pool
print(authorName,hex(id(authorName)))
# there should be no Memory leaks....use instant deletion to avoid overflowing the constant pool--> Memory leak
# using various algorithms at the back end...
names = "John", "Jennie", "Jack", "Joe" # this is string
print(names[0]) # output is John
print(names[len(names)-1]) # name is Joe
idx = names.index("Jennie") # index will be 6 because whole names are exactly string
print(idx)
"""num = names.count("John", 0, len(names))
print(">> num is :",num)
"""
#using function to count a no of word occuring in the given string
quotes= """ Work hard,get Luckier
Search the candle rather than cursing the darkness """
#HOME WORK---> DONE IN 5 MIN
def count(data, word, start, end):
    """Count occurrences of `word` whose first character lies in data[start:end].

    Generalized from the original, which only compared the first three
    characters (so it worked solely for 3-letter words), could raise
    IndexError when a partial match sat at the end of the string, and
    printed a hard-coded debug message naming the wrong word.
    """
    c = 0
    w = len(word)
    # Cap the scan so data[i:i + w] never runs past the end of the string.
    for i in range(start, min(end, len(data) - w + 1)):
        if data[i:i + w] == word:
            c += 1
    return c
print(">> the occurs :", count(quotes,"the",0,len(quotes)),"times") # we are checking the no of times the occurs
# in the above quotes
# Using split(): break a comma-separated string into individual names.
names = "John, Jennie, Jack, Joe"
# Bug fix: the original wrote `splittedNames = print(names.split(","))`,
# which binds None (print's return value) and then crashes iterating it.
splittedNames = names.split(",")
print(splittedNames)
# strip() removes the spaces left around each piece after splitting on ','.
for name in splittedNames:
    print(name.strip())
#HW is to implement functioning of split...which is to print words in the sentence
|
18,609 | 0036d827a89d6ddeb56d7bd8fb9221b2f57de2fd | from Bishop import *
from Knight import *
from Queen import *
from King import *
from Rook import *
from Pawn import *
#from Logic.Pieces.Pawn import * NOT THERE YET
class White():
    """Holds the white side's pieces; populated by start_new_game().

    Index convention in white_pieces: 0 king, 1 queen, 2-3 bishops,
    4-5 knights, 6-7 rooks, 8-15 pawns (pawn index i sits on column i-7,
    i.e. index starts at 0 while columns start at 1).
    """

    white_pieces = []

    @staticmethod
    def start_new_game():
        """Reset White.white_pieces to the standard starting position."""
        back_rank = [
            King("e1", 0),
            Queen("d1", 0),
            Bishop("c1", 0),   # right bishop
            Bishop("f1", 0),   # left bishop
            Knight("b1", 0),   # right knight
            Knight("g1", 0),   # left knight
            Rook("a1", 0),     # right rook
            Rook("h1", 0),     # left rook
        ]
        pawns = [Pawn(col + "2", 0) for col in "abcdefgh"]
        White.white_pieces = back_rank + pawns
|
18,610 | 9fff289a7e56f2c1a9c4ed448165a969eabe5610 | def on_pubmsg(self, c, e):
    # Kick anyone who mentions "Katrina"; e.arguments[0] is the message text,
    # e.target the channel, e.source.nick the offender.
    if "Katrina" in e.arguments[0]:
        c.kick(e.target, e.source.nick, "D'où tu parles de ma meuf ? Tu vas tater de mon marteau !")
def on_privmsg(self, c, e):
    """Private messages are deliberately ignored."""
    pass
def on_namreply(self, c, e):
    """NAMES replies are deliberately ignored."""
    pass
def on_invite(self, c, e):
    """Join the channel we were invited to, unless the inviter is blacklisted."""
    if e.source.nick not in self.config.get("blacklist"):
        c.join(e.arguments[0])
def on_join(self, c, e):
    """Log our own successful channel joins (ignore other users joining)."""
    if e.source.nick == self.config.get("nick"):
        self.logger.info(f"Joined {e.target}")
def on_welcome(self, c, e):
    """On server welcome: reclaim the configured nick via NickServ, then join channels."""
    if self.config.get("password"):
        # RECOVER kicks any ghost session holding our nick so we can take it back.
        c.privmsg("NickServ", text='RECOVER {} {}'.format(
            self.config.get("nick"),
            self.config.get("password")))
        c.nick(self.config.get("nick"))
        c.privmsg("NickServ", text='IDENTIFY {}'.format(
            self.config.get("password")))
    for chan in self.config.get("channels", []):
        c.join(chan)
|
18,611 | 2a79e4ad86ae08c14a03d7c01b80347dc4e51ff7 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gym.envs.mujoco.half_cheetah import HalfCheetahEnv
from gym.envs.mujoco.walker2d import Walker2dEnv
from gym.envs.mujoco import mujoco_env
import numpy as np
import os
def tolerance(x, bounds, margin):
    '''Penalty that is 0 when x is within the bounds and rises sigmoidally
    toward 1 as x moves outside them (about 0.95 at `margin` away).

    NOTE(review): the original docstring claimed the opposite ("returns 1
    when x is within the bounds"); callers use `1 - tolerance(...)` as a
    reward, which matches the 0-inside convention implemented below.

    We've copied the function from [1] to reduce dependencies.
    [1] Tassa, Yuval, et al. "DeepMind Control Suite." arXiv preprint
    arXiv:1801.00690 (2018).
    '''
    (lower, upper) = bounds
    if lower <= x <= upper:
        return 0
    elif x < lower:
        dist_from_margin = lower - x
    else:
        assert x > upper
        dist_from_margin = x - upper
    # Pick w so that tanh(w * margin)**2 == loss_at_margin.
    loss_at_margin = 0.95
    w = np.arctanh(np.sqrt(loss_at_margin)) / margin
    s = np.tanh(w * dist_from_margin)
    return s*s
def huber(x, p):
    """Pseudo-Huber penalty: ~x**2/(2p) near zero, ~|x| - p for large |x|."""
    return np.sqrt(np.square(x) + np.square(p)) - p
class CliffCheetahEnv(HalfCheetahEnv):
    """Half-cheetah variant loading assets/cliff_cheetah.xml, with separate
    forward-running and reset-to-origin rewards."""

    def __init__(self):
        envs_folder = os.path.dirname(os.path.abspath(__file__))
        xml_filename = os.path.join(envs_folder,
                                    'assets/cliff_cheetah.xml')
        mujoco_env.MujocoEnv.__init__(self, xml_filename, 5)

    def step(self, a):
        # The environment reward is the "forward" reward (index 0).
        (s, _, done, info) = super(CliffCheetahEnv, self).step(a)
        r = self._get_rewards(s, a)[0]
        return (s, r, done, info)

    def _get_obs(self):
        '''Modified to include the x coordinate.'''
        return np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
        ])

    def _get_rewards(self, s, a):
        """Return (forward_reward, reset_reward) for state s and action a."""
        (x, z, theta) = s[:3]
        xvel = s[9]
        # Reward the forward agent for running 9 - 11 m/s.
        forward_reward = (1.0 - tolerance(xvel, (9, 11), 7))
        # Penalize deviation of the torso pitch from upright.
        theta_reward = 1.0 - tolerance(theta,
                                       bounds=(-0.05, 0.05),
                                       margin=0.1)
        # Reward the reset agent for being at the origin, plus
        # reward shaping to be near the origin and upright.
        reset_reward = 0.8 * (np.abs(x) < 0.5) + 0.1 * (1 - 0.2 * np.abs(x)) + 0.1 * theta_reward
        return (forward_reward, reset_reward)
class CliffWalkerEnv(Walker2dEnv):
    """Walker2d variant loading assets/cliff_walker.xml, with separate
    forward-running and reset-to-origin rewards."""

    def __init__(self):
        envs_folder = os.path.dirname(os.path.abspath(__file__))
        xml_filename = os.path.join(envs_folder,
                                    'assets/cliff_walker.xml')
        mujoco_env.MujocoEnv.__init__(self, xml_filename, 5)

    def step(self, a):
        # The environment reward is the "forward" reward (index 0).
        (s, _, done, info) = super(CliffWalkerEnv, self).step(a)
        r = self._get_rewards(s, a)[0]
        return (s, r, done, info)

    def _get_obs(self):
        '''Modified to include the x coordinate.'''
        qpos = self.model.data.qpos
        qvel = self.model.data.qvel
        return np.concatenate([qpos[:], np.clip(qvel, -10, 10)]).ravel()

    def _get_rewards(self, s, a):
        """Return (forward_reward, reset_reward) for state s and action a."""
        x = s[0]
        running_vel = s[9] - 2.0  # target forward velocity of 2 m/s
        torso_height = s[1]
        is_standing = float(torso_height > 1.2)
        is_falling = float(torso_height < 0.7)
        run_reward = np.clip(1 - 0.2 * huber(running_vel, p=0.1), 0, 1)
        stand_reward = np.clip(0.25 * torso_height +
                               0.25 * is_standing +
                               0.5 * (1 - is_falling), 0, 1)
        control_reward = np.clip(1 - 0.05 * np.dot(a, a), 0, 1)
        # Being at the origin (|x| < 0.5) plus shaping toward it.
        reset_location_reward = 0.8 * (np.abs(x) < 0.5) + 0.2 * (1 - 0.2 * np.abs(x))
        forward_reward = 0.5 * run_reward + 0.25 * stand_reward + 0.25 * control_reward
        reset_reward = 0.5 * reset_location_reward + 0.25 * stand_reward + 0.25 * control_reward
        return (forward_reward, reset_reward)
if __name__ == '__main__':
import time
# env = CliffCheetahEnv()
env = CliffWalkerEnv()
env.reset()
for _ in range(10000):
action = env.action_space.sample()
env.step(action)
env.render()
time.sleep(0.01)
|
18,612 | 0c89c034abd5c3bb07027ab42a59ceb59deab374 | import re
class Rune(object):
    """Parse a Summoners-War rune description (OCR text) and decide whether to sell it."""

    def __init__(self, data):
        # Everything before the word "Set" holds the level/slot/attribute text.
        self.info = data[:data.find("Set")]
        self.level = 0        # rarity: 0 Normal .. 4 Legend
        self.position = 1     # slot number 1-6
        self.start = 0        # star count (5 or 6); attribute name kept for compatibility ("star")
        self.main_attr = ()   # [attr name, full "NAME +value" string]
        self.sub_attr = {}    # attr name -> full "NAME +value" string
        self.init_rune()

    def init_rune(self):
        """Populate the parsed fields; skip parsing if no slot position is found.

        Fixed: the original ran `if self.set_position(): self.set_position()`,
        executing the position regex twice for no reason.
        """
        if self.set_position():
            self.set_level()
            self.set_attr()
            self.set_star()

    def set_level(self):
        """Parse rarity from the quality keyword in the text (0=Normal .. 4=Legend)."""
        level_map = {
            "Normal": 0,
            "Magic" : 1,
            "Rare"  : 2,
            "Hero"  : 3,
            "Legend": 4
        }
        for key in level_map:
            if key in self.info:
                self.level = level_map[key]

    def set_attr(self):
        """Split the attribute list: the first match is the main stat, the rest are substats."""
        attribute = re.findall(r"((HP|DEF|ATK|SPD|CRI Rate|CRI Dmg|Resistance|Accuracy) ?\+\d+%?)", self.info)
        for idx, cur_attr in enumerate(attribute):
            # cur_attr = (full "NAME +value", "NAME")
            if idx == 0:
                self.main_attr = [cur_attr[1], cur_attr[0]]
            else:
                self.sub_attr[cur_attr[1]] = cur_attr[0]

    def set_position(self):
        """Parse the slot number from "Rune (N)"; return True when found."""
        position = re.findall(r'Rune \((\d)\)', self.info)
        if position:
            self.position = int(position[0])
            return True
        return False

    def set_star(self):
        """Infer the star count from the table of 6-star main-stat maxima."""
        if self.main_attr[1] in ["HP +11%", "HP +360", "ATK +11%", "ATK +22", "DEF +11%", "DEF +22", "SPD +7",
                                 "CRI Rate +7%", "CRI Dmg +11%", "Resistance +12%", "Accuracy +12%"]:
            self.start = 6
        else:
            self.start = 5

    def is_sell_rune(self):
        """Return True when the rune is junk worth selling."""
        # Even slots (2/4/6): sell flat-value main stats and SPD main stats.
        if self.position % 2 == 0:
            if self.main_attr[1].find("%") == -1 or self.main_attr[0].find("SPD") != -1:
                return True
        # 5-star runes below Hero rarity: sell.
        if self.start == 5 and self.level < 3:
            return True
        # 6-star: keep only if at least two substats are percentage-based (SPD counts).
        if self.start == 6:
            per_count = 0
            for key in self.sub_attr:
                if key == "SPD" or self.sub_attr[key].find("%") != -1:
                    per_count += 1
            return per_count < 2
        return False
if __name__ == '__main__':
    # Manual test: OCR a screenshot of a rune popup and run the sell heuristic.
    from PIL import Image as Images, ImageDraw
    import pytesseract

    def binarizing(img, threshold):
        """Threshold a grayscale PIL image in place to pure black/white."""
        pixdata = img.load()
        w, h = img.size
        for y in range(h):
            for x in range(w):
                if pixdata[x, y] < threshold:
                    pixdata[x, y] = 0
                else:
                    pixdata[x, y] = 255
        return img

    im = Images.open(r"C:\Project\Python\Moling\rune\1566114545.8649945sell_.png")
    # Crop to the central region where the rune text is rendered.
    region = (im.size[0] * 0.31, im.size[1] * 0.25, im.size[0] * 0.69, im.size[1] * 0.75)
    im = im.crop(region)
    img = im.convert('L')  # grayscale before thresholding
    img = binarizing(img, 120)
    img.show()
    data = pytesseract.image_to_string(img)
    print(data)
    a = Rune(data)
    print(a.is_sell_rune())
|
18,613 | 2094a4de35ff5713a65b7e50585b4002cea4cb48 | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.models import User,auth
from django.contrib.auth import authenticate, login
from .models import ShippingLabel
# Create your views here.
def index(request):
    """Login page: GET renders the form, POST authenticates and redirects home."""
    if request.method != 'POST':
        return render(request, 'index.html')
    username = request.POST['username']
    password = request.POST['password']
    user = authenticate(request, username=username, password=password)
    if user is not None:
        login(request, user)
        return redirect('home')
    else:
        # Distinguish "wrong password" from "no such user" for the message.
        try:
            user = User.objects.get(username=username)
            messages.info(request,"Username and password didn't match")
            return redirect('index')
        except Exception:
            messages.info(request,'User does not exists !!!')
            return redirect('index')
def register(request):
    """Sign-up page: GET renders the form, POST creates the account."""
    if request.method != 'POST':
        return render(request, 'register.html')
    username = request.POST.get('username')
    email = request.POST.get('email')
    password = request.POST.get('password')
    # EAFP existence check: get() raising means the username is free.
    try:
        user = User.objects.get(username=username)
        messages.info(request,'User with this username already exists !!!')
        return redirect('register')
    except Exception:
        user = User.objects.create_user(username=username, email=email, password=password)
        user.save()
        messages.info(request,'Account created successfully')
        return redirect('index')
def home(request):
    """Landing page shown after login."""
    return render(request, 'home.html')
def generate_new_label(request):
    """GET shows the label form; POST creates a ShippingLabel from the posted fields."""
    if request.method != 'POST':
        return render(request, 'generate_new_label.html')
    # NOTE(review): 'falt_house_no_building_name' looks misspelled but presumably
    # matches the model field name -- confirm against models.py before renaming.
    new_label = ShippingLabel(customer_name=request.POST['customer_name'], phone_number=int(request.POST['phone_number']), falt_house_no_building_name=request.POST['flat_house_no_building_name'], street_colony=request.POST['street_colony'], pincode=request.POST['pincode'], city=request.POST['city'], state=request.POST['state'], landmark=request.POST['landmark'])
    new_label.save()
    return redirect('history')
def edit_label(request, id):
    """GET shows the prefilled edit form for label `id`; POST applies the changes."""
    shipping_label = ShippingLabel.objects.get(id=id)
    if request.method != 'POST':
        return render(request, 'edit_label.html', context={'shipping_label':shipping_label})
    shipping_label.customer_name=request.POST['customer_name']
    shipping_label.phone_number=int(request.POST['phone_number'])
    # NOTE(review): 'falt_...' looks misspelled but presumably matches the model field.
    shipping_label.falt_house_no_building_name=request.POST['flat_house_no_building_name']
    shipping_label.street_colony=request.POST['street_colony']
    shipping_label.pincode=request.POST['pincode']
    shipping_label.city=request.POST['city']
    shipping_label.state=request.POST['state']
    shipping_label.landmark=request.POST['landmark']
    shipping_label.save()
    return redirect('history')
def delete_label(request, id):
    """Delete shipping label `id` and return to the history page."""
    shipping_label = ShippingLabel.objects.get(id=id)
    shipping_label.delete()
    return redirect('history')
def history(request):
    """List all shipping labels, newest update first."""
    if request.method != 'POST':
        shipping_labels = ShippingLabel.objects.all().order_by('-updated_at')
        return render(request, 'history.html' , {'shipping_labels':shipping_labels})
    return redirect('history')
def print_label(request, id):
    """Render the printable template for shipping label `id`."""
    shipping_label = ShippingLabel.objects.get(id=id)
    return render(request, 'print_template.html', {'shipping_label':shipping_label})
def logout(request):
    """Log the user out and return to the login page."""
    auth.logout(request)
    return redirect('/') |
18,614 | 310b24e5d31a58d4833a7491b9430c653be870e4 | # -*- coding: utf-8 -*-
import time
from .device import Device
class HUBER(Device):
    """Driver for a HUBER chiller spoken to via the inherited ask()/write().

    The wire protocol encodes temperatures as integers in units of 0.01 degC.
    """

    T_set = None   # cached temperature setpoint [degC]
    Power = None   # cached power state (True = on)

    def __init__(self, connection_type, host, port):
        super(HUBER, self).__init__(connection_type=connection_type, host=host, port=port)
        self.getAndSetParameter()

    def initialize(self):
        """Re-read setpoint and power state from the device."""
        self.getAndSetParameter()

    def getAndSetParameter(self):
        """Refresh the cached setpoint and power state."""
        self.T_set = self.getSetTemperature()
        self.Power = self.getPowerStatus()

    def getPowerStatus(self, iChannel=-1):
        """Return True when the device reports power on.

        Bug fix: the original returned bool(sPower[7]); bool of any non-empty
        string (including '0') is True, so OFF was never detected.  Convert
        the status digit to int first.  (Assumes position 7 of the 'CA?'
        reply is a '0'/'1' digit, matching the original indexing -- confirm
        against the device manual.)
        """
        sPower = self.ask('CA?')
        return bool(int(sPower[7]))

    def enablePower(self, sBool, iChannel=-1):
        """Switch power on (True) or off (False)."""
        # '{:05.0f}'.format(True) -> '00001', False -> '00000'.
        self.write('CA@{:05.0f}'.format(sBool))
        self.Power = sBool

    def getSetTemperature(self, iChannel=-1):
        """Return the temperature setpoint in degC."""
        sTemp = self.ask('SP?')
        return float('{:+06.2f}'.format(float(sTemp[3:])/100))

    def getInTemperature(self, iChannel=-1):
        """Return the internal (inlet) temperature in degC."""
        sTemp = self.ask('TI?')
        return float('{:+06.2f}'.format(float(sTemp[3:])/100))

    def setTemperature(self, Tset, iChannel=-1):
        """Write a new setpoint Tset [degC] (sent as hundredths of a degree)."""
        self.write('SP@{:+06.0f}'.format(100 * Tset))

    def output(self, show=True):
        """Return [[headers], [values]] of the current state; optionally print it."""
        bPower = self.Power
        if show:
            self.printOutput('Minichiller:')
            if bPower:
                self.printOutput('Power: \033[32m ON \033[0m')
            else:
                self.printOutput('Power: \033[31m OFF \033[0m')
        fTset = self.getSetTemperature()
        fTin = self.getInTemperature()
        if show:
            self.printOutput('T_set = {:.2f}'.format(fTset) + '\t' + 'T_in = {:.2f}'.format(fTin))
        return([['Power', 'Tset[C]', 'Tin[C]'], [str(bPower), str(fTset), str(fTin)]])

    def interaction(self, gui=False):
        """Interactive control: return a capability dict for GUIs, else prompt on stdin."""
        if gui:
            device_dict = {
                'enablePower': True,
                'getSetTemperature': True,
            }
            return device_dict
        print('1: enable Power')
        print('2: set new Temperature')
        x = input('Number? \n')
        while x != '1' and x != '2':
            x = input('Possible Inputs: 1 or 2! \n')
        if x == '1':
            bO = input('Please enter ON or OFF! \n')
            if bO == 'ON' or bO == 'on':
                self.enablePower(True)
            elif bO == 'OFF' or bO == 'off':
                self.enablePower(False)
        elif x == '2':
            fT = input('Please enter new Temperature in °C \n')
            self.setTemperature(float(fT))
        time.sleep(0.5)
|
18,615 | 5b843936de70c13d837fc068c5781d8402982ba3 | import returns
# from returns import create_full_name
import random
print(returns.create_full_name("Sahana", "Moogi"))
print(random.randint(0, 100))
|
18,616 | 580957e0396ccf388c6e997ce9eb86501df5147b | import boto3
import json
import requests
import os
import botocore
import time
# Wait for an ECS task to reach RUNNING, scrape its CloudWatch log stream,
# and post the log text as a comment on the triggering git commit.
session = boto3.Session(profile_name='tt-dev',
                        region_name='us-east-1'
                        )
logs_client = session.client('logs')
ecs_client = session.client('ecs')
task = os.environ['task_number']
commit = os.environ['obie_commit']
try:
    waiter = ecs_client.get_waiter('tasks_running')
    waiter.wait(cluster='obiecluster',
                tasks=[task],
                WaiterConfig={
                    'Delay': 15,
                    'MaxAttempts': 120,
                }
                )
except botocore.exceptions.WaiterError:
    # Best effort: read whatever logs exist even if the task never ran.
    pass
time.sleep(5)  # give CloudWatch a moment to ingest the first events
response = logs_client.get_log_events(logGroupName='/ecs/obie_app',
                                      logStreamName='ecs/obie/{task_number}'.format(task_number=task)
                                      )
commit_comment = ''
for event in response['events']:
    commit_comment = ''.join([commit_comment,
                              '\n',
                              event['message']
                              ])
endpoint = 'https://git.corp.adobe.com/api/v3/repos/dsirbu/tt_infra/commits/{commit_hash}/comments'.format(
    commit_hash=commit)
# SECURITY: hard-coded API token committed to source -- revoke it and load
# the credential from an environment variable or secret store instead.
headers = {'Authorization': 'token {token}'.format(token='0f2561352222adea293f502e64b84c7f54cad3e5')}
data = {'body': commit_comment}
response = requests.post(url=endpoint, data=json.dumps(data), headers=headers)
print(commit_comment)
|
18,617 | 929bd92ca22b72665b862dccbf12296e386b33be | # coding: utf-8
try:
from ru.curs.showcase.core.jython import JythonDTO
from ru.curs.showcase.util import XMLJSONConverter
except:
from ru.curs.celesta.showcase import JythonDTO
from ru.curs.celesta.showcase.utils import XMLJSONConverter
import basicForm
import json
def getFormInstance(context, formId):
    """Return the lyra form instance for formId, caching it in context.data.

    Instances live in the context.data['_lyraForms'] dict; a cached instance
    gets its call context refreshed, a new one is built from the class
    registered in basicForm._formclasses.  (Jython/Python 2 code.)
    """
    if '_lyraForms' in context.data:
        lf = context.data['_lyraForms']
    else:
        lf = {}
        context.data['_lyraForms'] = lf
    if formId in lf:
        result = lf[formId]
        #refreshing call context...
        result.setCallContext(context)
    else:
        if not formId in basicForm._formclasses:
            raise Exception('No form %s registered' % formId)
        c = basicForm._formclasses[formId]
        result = c(context)
        lf[formId] = result
    result._beforeShow(context)
    return result
def getTemplate(context, main, add=None, filterinfo=None, session=None, elementId=None):
    """Return the rendered form template (as unicode) wrapped in a JythonDTO."""
    formInstance = getFormInstance(context, main)
    formInstance.setContext(session, main, add, elementId)
    return JythonDTO(unicode(formInstance._buildForm()))
def getInstance(context, main=None, add=None, filterinfo=None, session=None, elementId=None):
    """Return the form's current record data plus its action settings."""
    formInstance = getFormInstance(context, main)
    formInstance.setContext(session, main, add, elementId)
    cardData = formInstance.findRec()
    cardSettings = formInstance.getActions()
    return JythonDTO(cardData, cardSettings)
def submissionFirst(context, main=None, add=None, filterinfo=None, session=None, data=None):
    """Move to the first record ('-') of the form named in the submitted data."""
    formId = json.loads(data)['schema']['@formId']
    formInstance = getFormInstance(context, formId)
    formInstance.setContext(session, main, add, None)
    cardData = formInstance.move('-', XMLJSONConverter.jsonToXml(data))
    return cardData
def submissionPrev(context, main=None, add=None, filterinfo=None, session=None, data=None):
    """Move to the previous record ('<') of the form named in the submitted data."""
    formId = json.loads(data)['schema']['@formId']
    formInstance = getFormInstance(context, formId)
    formInstance.setContext(session, main, add, None)
    cardData = formInstance.move('<', XMLJSONConverter.jsonToXml(data))
    return cardData
def submissionNext(context, main=None, add=None, filterinfo=None, session=None, data=None):
    """Move to the next record ('>') of the form named in the submitted data."""
    formId = json.loads(data)['schema']['@formId']
    formInstance = getFormInstance(context, formId)
    formInstance.setContext(session, main, add, None)
    cardData = formInstance.move('>', XMLJSONConverter.jsonToXml(data))
    return cardData
def submissionLast(context, main=None, add=None, filterinfo=None, session=None, data=None):
    """Move to the last record ('+') of the form named in the submitted data."""
    formId = json.loads(data)['schema']['@formId']
    formInstance = getFormInstance(context, formId)
    formInstance.setContext(session, main, add, None)
    cardData = formInstance.move('+', XMLJSONConverter.jsonToXml(data))
    return cardData
def submissionNew(context, main=None, add=None, filterinfo=None, session=None, data=None):
    """Create a blank new record in the form named in the submitted data."""
    formId = json.loads(data)['schema']['@formId']
    formInstance = getFormInstance(context, formId)
    formInstance.setContext(session, main, add, None)
    cardData = formInstance.newRec()
    return cardData
def submissionDel(context, main=None, add=None, filterinfo=None, session=None, data=None):
    """Delete the current record of the form named in the submitted data."""
    formId = json.loads(data)['schema']['@formId']
    formInstance = getFormInstance(context, formId)
    formInstance.setContext(session, main, add, None)
    cardData = formInstance.deleteRec(XMLJSONConverter.jsonToXml(data))
    return cardData
def submissionRevert(context, main=None, add=None, filterinfo=None, session=None, data=None):
    """Discard unsaved edits of the current record of the named form."""
    formId = json.loads(data)['schema']['@formId']
    formInstance = getFormInstance(context, formId)
    formInstance.setContext(session, main, add, None)
    cardData = formInstance.revert(XMLJSONConverter.jsonToXml(data))
    return cardData
def submissionSave(context, main=None, add=None, filterinfo=None, session=None, data=None):
    """Save the current record ('=') of the form named in the submitted data."""
    formId = json.loads(data)['schema']['@formId']
    formInstance = getFormInstance(context, formId)
    formInstance.setContext(session, main, add, None)
    cardData = formInstance.move('=', XMLJSONConverter.jsonToXml(data))
    return cardData |
18,618 | 41abbabf50cb9a1e75252a0ea4b1b000c72e2d76 | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 16 11:29:26 2017
@author: ZHENGHAN ZHANG
"""
#define the list of lists
# ASCII-art table: each row framed by +-+-+-+ borders and | separators.
list_of_lists = [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']]
for row in list_of_lists:
    border = '+-' * len(row) + '+'
    print(border)
    print('|' + '|'.join(row) + '|')
    print(border)
18,619 | 334f9cf0fa36487031274b6f60e4f93a54a9805f | from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from rfvis import gui
def main():
    """Train an example random forest on synthetic data and launch the RFVis GUI."""
    # Create an example dataset
    X, y = make_classification(n_samples=20000,
                               n_classes=3,
                               n_features=4,
                               n_informative=3,
                               n_redundant=1,
                               flip_y=0.05)
    # Create and fit the model
    model = RandomForestClassifier(n_estimators=20, oob_score=True)
    model.fit(X, y)
    # Start the RFVis GUI
    return gui(model, X, y)
if __name__ == "__main__":
main()
|
18,620 | ec0a66fc8837f46c08a52fcebe7285b0c62e3084 | import cv2
from utils.base_utils import perspective_transform
from utils.extend_utils.extend_utils_fn import find_nearest_point_idx
import numpy as np
def nn_match(feats_que, feats_ref, use_cuda=True):
    """Return, for each query feature, the index of its nearest reference feature."""
    if use_cuda:
        # Compiled CUDA extension performs the brute-force nearest search.
        idxs = find_nearest_point_idx(feats_ref, feats_que)
    else:
        # CPU fallback via FLANN linear search.
        from pyflann.index import FLANN
        flann = FLANN()
        idxs, dists = flann.nn(feats_ref, feats_que, 1, algorithm='linear', trees=4)
    return idxs
def compute_match_pairs(feats0, kps0, feats1, kps1, H):
    """Nearest-neighbor matches in both directions for images related by homography H.

    Returns (pr01, gt01, pr10, gt10): predicted matches of image-0 keypoints
    in image 1 plus their H-warped ground truth, and likewise for the reverse
    direction (ground truth warped by the inverse homography).
    """
    # 0 to 1
    idxs = nn_match(feats0, feats1, True)
    pr01 = kps1[idxs]
    gt01 = perspective_transform(kps0, H)
    # 1 to 0
    idxs = nn_match(feats1, feats0, True)
    pr10 = kps0[idxs]
    gt10 = perspective_transform(kps1, np.linalg.inv(H))
    return pr01, gt01, pr10, gt10
def keep_valid_kps_feats(kps, feats, H, h, w):
    """Keep only keypoints (and their features) that land inside the h x w image after warping by H."""
    n, _ = kps.shape
    warp_kps = perspective_transform(kps, H)
    # In-bounds test on the warped (x, y) coordinates.
    mask = (warp_kps[:, 0] >= 0) & (warp_kps[:, 0] < w) & \
           (warp_kps[:, 1] >= 0) & (warp_kps[:, 1] < h)
    return kps[mask], feats[mask]
def compute_angle(rotation_diff):
    """Return the rotation angle (degrees) of the rotation matrix `rotation_diff`.

    Uses angle = arccos((trace - 1) / 2).  The trace is clipped to the valid
    interval [-1, 3]: the original only clamped the upper bound, so numerical
    noise pushing the trace slightly below -1 (rotations near 180 degrees)
    made arccos return NaN.
    """
    trace = np.clip(np.trace(rotation_diff), -1.0, 3.0)
    angular_distance = np.rad2deg(np.arccos((trace - 1.0) / 2.0))
    return angular_distance
|
18,621 | 6042023e5bc6e9834d0ab5f963f21768c0942583 | # Generated by Django 2.2.5 on 2019-09-21 10:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for this app: creates the ``Notify`` model.

    ``Notify`` holds case-notification records (the field names suggest a
    TB/MDR-TB programme — patient demographics, Xpert/culture/DST lab
    results, outcome dates) with foreign keys to the creating user and to
    the facility app's ``Facility`` (treatment centre) and ``Township``.
    """
    initial = True
    dependencies = [
        # the facility app must migrate first: Facility and Township are
        # foreign-key targets below
        ('facility', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Notify',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # patient demographics
                ('name', models.CharField(max_length=200)),
                ('phone', models.CharField(max_length=200)),
                ('sex', models.CharField(max_length=200)),
                ('age', models.PositiveIntegerField()),
                ('address', models.CharField(max_length=200)),
                ('registration_group', models.PositiveIntegerField()),
                # laboratory results and their dates
                ('specimen_collected_date', models.DateField()),
                ('x_pert_result', models.CharField(max_length=5)),
                ('x_pert_result_date', models.DateField()),
                ('culture_result', models.CharField(max_length=10)),
                ('dst_result', models.CharField(max_length=10)),
                # pre-treatment outcomes
                # NOTE(review): these DateFields are non-nullable, so every
                # record must supply a value for each — confirm this matches
                # the intended intake workflow
                ('died_before_treatment', models.BooleanField(default=False)),
                ('died_before_treatment_date', models.DateField()),
                ('sld_treatment_at_private_date', models.DateField()),
                ('refused_date', models.DateField()),
                ('lfu_date', models.DateField()),
                # MDR treatment registration
                ('mdr_treatment_start_date', models.DateField()),
                ('mdr_treatment_registration_no', models.CharField(max_length=10)),
                ('remark', models.CharField(max_length=200)),
                # bookkeeping timestamps (dates only, not datetimes)
                ('created_at', models.DateField(auto_now_add=True)),
                ('updated_at', models.DateField(auto_now=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notify', to=settings.AUTH_USER_MODEL)),
                ('name_of_treatment_center', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notify', to='facility.Facility')),
                ('township', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notify', to='facility.Township')),
            ],
        ),
    ]
|
18,622 | 1ce95da85485b7adb8da66da41fba33bd9bda023 | """
Training script. Should be pretty adaptable to whatever.
"""
# -*- coding: UTF-8 -*-
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "3"
# f = os.popen("python train.py -params multiatt/default.json -folder saves/flagship_answer")
import sys
PYTHON_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(PYTHON_PATH)
import argparse
import shutil
import multiprocessing
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
from torch.nn import DataParallel
from tqdm import tqdm
from torch.utils.data import DataLoader
from dataloaders.vcr_attribute import VCR, VCRLoader
from utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \
restore_checkpoint, print_para, restore_best_checkpoint
import logging
from tensorboardX import SummaryWriter
import json
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
# This is needed to make the imports work
from allennlp.models import Model
import models
torch.backends.cudnn.enabled = False
torch.set_printoptions(threshold=500000000, linewidth=8000000)
#################################
#################################
######## Data loading stuff
#################################
#################################
# Command-line interface: config file location, task selection, output
# folder name, and a few runtime knobs.
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
    '-params',
    dest='params',
    help='Params location',
    type=str,
)
parser.add_argument(
    '-rationale',
    action="store_true",
    help='use rationale',
)
parser.add_argument(
    '-output',
    type=str
)
parser.add_argument(
    '-no_tqdm',
    dest='no_tqdm',
    action='store_true',
)
parser.add_argument(
    '-batch_size',
    dest='batch_size',
    type=int,
    default=96
)
parser.add_argument(
    '-records',
    type=str,
    default='records.json'
)
parser.add_argument(
    '-describe',
    type=str,
    default=''
)
args = parser.parse_args()
# Fix every RNG (torch CPU, all CUDA devices, numpy) for reproducibility.
seed = 1111
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
batch_size = args.batch_size
only_use_relevant_dets = False
# args.rationale = True
# args.params = 'models/multiatt/default2.json'
folder = f'saves/{args.output}'
# NOTE(review): hard-coded absolute path for TensorBoard logs — confirm it
# exists on the machine this runs on.
writer = SummaryWriter(f'/home/share/wangkejie/vcr1/runs/{args.output}')
params = Params.from_file(args.params)
train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
                              only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets',
                                                                                  only_use_relevant_dets))  # changed this to False so ALL boxes are used (translated from the original Chinese note)
# NUM_GPUS = torch.cuda.device_count()  # NUM_GPUS = 4
NUM_GPUS = 1
NUM_CPUS = multiprocessing.cpu_count()  # NUM_CPUS = 32
if NUM_GPUS == 0:
    raise ValueError("you need gpus!")
def _to_gpu(td):
    """Move every tensor in a batch dict onto the GPU, in place.

    With more than one GPU the DataParallel wrapper handles device
    placement itself, so the batch is returned untouched. The
    'metadata' entry is never moved; nested dicts of tensors are
    transferred value by value.
    """
    if NUM_GPUS > 1:
        return td
    for key, value in td.items():
        if key == 'metadata':
            continue
        if isinstance(value, dict):
            td[key] = {name: tensor.cuda(non_blocking=True)
                       for name, tensor in value.items()}
        else:
            td[key] = value.cuda(non_blocking=True)
    return td
# num_workers = (8 * NUM_GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS)
num_workers = 8
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {'batch_size': batch_size, 'num_workers': num_workers, "pin_memory": True}
# train_loader = DataLoader(train, shuffle=True, collate_fn=collate_fn, drop_last=True, batch_size=batch_size, num_workers=num_workers)
# val_loader = DataLoader(val, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
# test_loader = DataLoader(test, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers)
# Build the project's own loaders over the three VCR splits.
train_loader = VCRLoader.from_dataset(train, **loader_params)
val_loader = VCRLoader.from_dataset(val, **loader_params)
test_loader = VCRLoader.from_dataset(test, **loader_params)
# train_loader = CudaDataLoader(train_loader, device='cuda', queue_size=4)
# val_loader = CudaDataLoader(val_loader, device='cuda', queue_size=4)
# test_loader = CudaDataLoader(test_loader, device='cuda', queue_size=4)
# Console/TensorBoard summaries are emitted every this many batches.
ARGS_RESET_EVERY = 600
print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'),
      flush=True)
# Build the AllenNLP model from config; only optimise trainable params.
model = Model.from_params(vocab=train.vocab, params=params['model'])
model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda()
optimizer = Optimizer.from_params(model_parameters=[x for x in model.named_parameters() if x[1].requires_grad],
                                  params=params['trainer']['optimizer'])
lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None)
scheduler = LearningRateScheduler.from_params(optimizer=optimizer,
                                              params=lr_scheduler_params) if lr_scheduler_params else None
# Resume from an existing save folder, or start a fresh run and archive
# the config file alongside the checkpoints.
if os.path.exists(folder):
    print("Found folder! restoring", flush=True)
    start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=folder,
                                                           learning_rate_scheduler=scheduler)
    # start_epoch, val_metric_per_epoch = 0, []
    print(start_epoch)
    print(val_metric_per_epoch)
else:
    print("Making directories")
    os.makedirs(folder, exist_ok=True)
    start_epoch, val_metric_per_epoch = 0, []
    shutil.copy2(args.params, folder)
# Append the free-form run description to the folder's notes file.
with open(os.path.join(folder, 'describe.txt'), 'a') as fp:
    fp.write(args.describe)
    fp.write('\n--------------------------\n')
# NOTE(review): this log file handle is never explicitly closed below.
logger = open(f'saves/{args.output}/log.txt', mode='a', encoding='utf8')
# store best performance of all models in a file
param_shapes = print_para(model)
num_batches = 0
# Main training loop.
# NOTE(review): the range runs 10 epochs past the configured num_epochs;
# the patience-based early stop below is presumably expected to end the
# run first — confirm this is intended.
for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch + 10):
    train_results = []
    norms = []
    model.train()
    for b, (time_per_batch, batch) in enumerate(
            time_batch(train_loader if args.no_tqdm else tqdm(train_loader, ncols=80), reset_every=ARGS_RESET_EVERY)):
        batch = _to_gpu(batch)
        optimizer.zero_grad()
        output_dict = model(**batch)
        loss = output_dict['loss'].mean()
        loss.backward()
        num_batches += 1
        # per-batch LR schedule step (if the scheduler supports it)
        if scheduler:
            scheduler.step_batch(num_batches)
        # clip gradients and record the per-parameter norms for logging
        norms.append(
            clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)
        )
        optimizer.step()
        train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(),
                                        **(model.module if NUM_GPUS > 1 else model).get_metrics(
                                            reset=(b % ARGS_RESET_EVERY) == 0),
                                        'sec_per_batch': time_per_batch,
                                        'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
                                        }))
        # periodic console/log/TensorBoard summary over the last window
        if b % ARGS_RESET_EVERY == 0 and b > 0:
            norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join(
                param_shapes[['shape', 'size']]).sort_values('norm', ascending=False)
            print("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
                epoch_num, b, len(train_loader),
                norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
                pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
            ), flush=True)
            logger.write("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
                epoch_num, b, len(train_loader),
                norms_df.to_string(formatters={'norm': '{:.7f}'.format}),
                pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
            ))
            writer.add_scalar('training_loss', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['loss'],
                              global_step=num_batches)
            writer.add_scalar('training_accuracy', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['accuracy'],
                              global_step=num_batches)
    print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
    logger.write("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
    # Validation pass: collect probabilities, labels, loss, and the four
    # attention outputs (saved with the checkpoint below).
    val_probs = []
    val_labels = []
    val_loss_sum = 0.0
    q_att1 = []
    a_att1 = []
    q_att2 = []
    a_att2 = []
    model.eval()
    for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)):
        with torch.no_grad():
            batch = _to_gpu(batch)
            output_dict = model(**batch)
            val_probs.append(output_dict['label_probs'].detach().cpu().numpy())
            val_labels.append(batch['label'].detach().cpu().numpy())
            # weight the batch loss by its size for a proper dataset mean
            val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0]
            q_att1.append(output_dict['q_att1'])
            a_att1.append(output_dict['a_att1'])
            q_att2.append(output_dict['q_att2'])
            a_att2.append(output_dict['a_att2'])
    val_labels = np.concatenate(val_labels, 0)
    val_probs = np.concatenate(val_probs, 0)
    val_loss_avg = val_loss_sum / val_labels.shape[0]
    # validation metric = accuracy of the argmax prediction
    val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1))))
    if scheduler:
        scheduler.step(val_metric_per_epoch[-1])
    print("Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, val_metric_per_epoch[-1], val_loss_avg),
          flush=True)
    logger.write(
        "Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, val_metric_per_epoch[-1], val_loss_avg))
    # early stop when the best epoch is more than `patience` epochs ago
    if int(np.argmax(val_metric_per_epoch)) < (len(val_metric_per_epoch) - 1 - params['trainer']['patience']):
        print("Stopping at epoch {:2d}".format(epoch_num))
        logger.write("Stopping at epoch {:2d}".format(epoch_num))
        break
    save_checkpoint(model, optimizer, folder, epoch_num, val_metric_per_epoch,
                    is_best=int(np.argmax(val_metric_per_epoch)) == (len(val_metric_per_epoch) - 1),
                    q_att1=q_att1, a_att1=a_att1, q_att2=q_att2, a_att2=a_att2)
    writer.add_scalar('val_loss', val_loss_avg, global_step=epoch_num)
    writer.add_scalar('val_accuracy', val_metric_per_epoch[-1], global_step=epoch_num)
print("STOPPING. now running the best model on the validation set", flush=True)
logger.write("STOPPING. now running the best model on the validation set")
# Load best
restore_best_checkpoint(model, folder)
model.eval()
# Final evaluation of the best checkpoint on the validation split;
# predictions are persisted alongside the checkpoints.
val_probs = []
val_labels = []
for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)):
    with torch.no_grad():
        batch = _to_gpu(batch)
        output_dict = model(**batch)
        val_probs.append(output_dict['label_probs'].detach().cpu().numpy())
        val_labels.append(batch['label'].detach().cpu().numpy())
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
acc = float(np.mean(val_labels == val_probs.argmax(1)))
print("Final val accuracy is {:.4f}".format(acc))
logger.write("Final val accuracy is {:.4f}".format(acc))
np.save(os.path.join(folder, f'valpreds.npy'), val_probs)
18,623 | c6a88f7089a61394bca7a49ecad6a8a8d3dd5139 | '''
This module is used to simulate light curves and heavily relies on the pyPplusS
package developed by Edan Rein & Aviv Ofir in 2019, which is available via pip
install or the github repository (https://github.com/EdanRein/pyPplusS).
The scientific paper describing the package and astrophysical applications was
published (https://academic.oup.com/mnras/article-abstract/490/1/1111/5568385).
pyPplusS was adapted in the simulate_lightcurve function as such:
1. where pyPplusS determines the amount of light blocked by the occulter
in physical space, this module converts to time space as follows
a. introduces the transverse velocity to convert positions to time
b. generalises the movement by assuming that the planet transits the
star in a straight line with fixed impact parameter
2. where pyPplusS allows a singular ringed planet, here we extend it so
that an extended ring system can be modelled
a. rings are defined by their inner and outer radii along with opacity
b. the rings are deemed to be coplanar and concentric
Further lightcurve simulation tools have been added namely the ability to
i. add noise
ii. remove data (replicable by using non-uniform time array)
iii. generating a random ringsystem (number, dimensions & opacities)
Calculation tools have been added
i. calculate slopes in the light curve
ii. determine the minimum transverse velocity of the ringsystem. this
is done via the Van Werkhoven et al. 2014
(https://academic.oup.com/mnras/article/441/4/2845/1206172)
Plotting functions have been added
i. plot ringsystem (image)
ii. plot light curve
iii. plot combination of both
iv. all the relevant helper functions
Finally if the module is run as a script (instead of imported from elsewhere)
a tutorial of each of the functions will be given (i.e. a description will be
printed along with relevant plots to show the working of the functions in this
module).
'''
###############################################################################
############################# IMPORT MAIN MODULES #############################
###############################################################################
# calculations
import numpy as np
from pyppluss.segment_models import LC_ringed
# plotting
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import Circle, Ellipse, PathPatch
###############################################################################
############################# SIMULATE LIGHT CURVE ############################
###############################################################################
def simulate_lightcurve(time, planet_radius, inner_radii, outer_radii,
                        opacities, inclination, tilt, impact_parameter, dt,
                        limb_darkening, transverse_velocity=1):
    '''
    Simulate a normalised light curve for a planet plus coplanar,
    concentric rings transiting a limb-darkened star (via pyPplusS's
    LC_ringed).

    Parameters
    ----------
    time : array_like (1-D)
        Time points at which to calculate the light curve.
    planet_radius : float
        Size of the planet [R*].
    inner_radii : array_like (1-D)
        Inner dimensions of the rings [R*].
    outer_radii : array_like (1-D)
        Outer dimensions of the rings [R*].
    opacities : array_like (1-D)
        Opacities of each ring [-].
    inclination : float
        Inclination of the ring system [deg].
    tilt : float
        Tilt of the rings, the angle between the path of orbital motion and
        the semi-major axis of the projected ellipse [deg].
    impact_parameter : float
        Impact parameter between the centre of the rings w.r.t. the centre
        of the star [R*].
    dt : float
        This is a delta time parameter that can be used to shift the light
        curve left or right in time space [day].
    limb_darkening : float
        Limb-darkening parameter, u, of the star according to the linear
        law, I(mu)/I(1) = 1 - u * (1 - mu), where mu = cos(y), where y is
        the angle between the line-of-sight and the emergent intensity [-].
    transverse_velocity : float
        The transiting velocity of the ring system across the star [R*/day].

    Returns
    -------
    lightcurve : array_like (1-D)
        Simulated theoretical light curve (normalised flux) based on the
        inputs [L*].
    lightcurve_components : list of array_like (1-D)
        List containing the lightcurves produced by each of the components
        of the companion (planet + rings/disks) [L*].

    Notes
    -----
    If ``inner_radii[0]`` equals 0 it is overwritten with 1e-16 IN PLACE,
    so the caller's array is mutated.
    '''
    # create zeros and ones array
    zero = np.zeros_like(time)
    ones = np.ones_like(time)
    # initialise (p)lanet: straight-line path at a fixed impact parameter,
    # converting time to position via the transverse velocity
    planet_x = (time - dt) * transverse_velocity
    planet_y = impact_parameter * ones
    planet_r = planet_radius * ones
    # inclination and tilt from degrees to radians
    inclination_rad = np.deg2rad(inclination)
    tilt_rad = np.deg2rad(tilt)
    # stellar limb darkening parameters (linear law: only c2 is non-zero)
    c1 = 0
    c2 = limb_darkening
    c3 = 0
    c4 = 0
    # light curve variables
    lightcurve = 0
    lightcurve_components = []
    # determine lightcurve components
    # if planet touches the star: calculate transit | else: ones
    if (np.abs(impact_parameter) - planet_radius) < 1:
        # degenerate (near-zero) ring radii: pyPplusS requires non-zero
        # ring radii even for a bare-planet transit
        r0 = 1e-16 * ones
        r1 = 2e-16 * ones
        planet_lightcurve = LC_ringed(planet_r, r0, r1, planet_x, planet_y,
                                      inclination_rad, tilt_rad, 0, c1, c2,
                                      c3, c4)
    else:
        planet_lightcurve = ones
    # add to lightcurve and append to lightcurve_components
    lightcurve += planet_lightcurve
    lightcurve_components.append(planet_lightcurve)
    # ensure that first inner radius != 0 (requirement of pyPplusS);
    # note this mutates the caller's array in place
    if inner_radii[0] == 0:
        inner_radii[0] = 1e-16
    # loop over rings
    ring_params = (inner_radii, outer_radii, opacities)
    for inner_radius, outer_radius, opacity in zip(*ring_params):
        # if ring boundary touches the star calculate impact else ones
        ring_height = np.abs(outer_radius * np.sin(tilt_rad))
        if (np.abs(impact_parameter) - ring_height) < 1:
            # set-up ring bounds
            r0 = inner_radius * ones
            r1 = outer_radius * ones
            # group parameters (planet radius zero: ring contribution only)
            lightcurve_params = (zero, r0, r1, planet_x, planet_y, inclination_rad,
                                 tilt_rad, opacity, c1, c2, c3, c4)
            ring_lightcurve = LC_ringed(*lightcurve_params)
        else:
            ring_lightcurve = ones
        # each ring contributes only its flux deficit (curve - 1), so the
        # unobscured stellar flux is not double counted across components
        lightcurve += ring_lightcurve - 1
        lightcurve_components.append(ring_lightcurve)
    return lightcurve, lightcurve_components
def generate_random_ringsystem(radius_max, ring_num_min=3, ring_num_max=12,
                               tau_min=0.0, tau_max=1.0, print_rings=True):
    '''
    Split a disk into a ring system with a random number of rings, each
    with a random opacity.

    Parameters
    ----------
    radius_max : float
        Maximum size of the disk [R*].
    ring_num_min : int
        Minimum number of rings to separate the disk into (inclusive).
    ring_num_max : int
        Maximum number of rings to separate the disk into (inclusive).
    tau_min : float
        Minimum opacity of a ring [default = 0].
    tau_max : float
        Maximum opacity of a ring [default = 1].
    print_rings : bool
        If true then prints ring stats [default = True].

    Returns
    -------
    inner_radii : array_like (1-D)
        Inner dimensions of the rings [R*].
    outer_radii : array_like (1-D)
        Outer dimensions of the rings [R*].
    opacities : array_like (1-D)
        Opacities of each ring [-].
    '''
    # random number of rings; np.random.randint's upper bound is EXCLUSIVE,
    # so add 1 to make the documented maximum attainable (the original call
    # could never draw ring_num_max, and raised when min == max)
    num_rings = np.random.randint(ring_num_min, ring_num_max + 1)
    # random ring_edge fractions, sorted outward from the centre
    ring_edges = np.random.uniform(0, 1, num_rings - 1) * radius_max
    ring_edges = np.sort(ring_edges)
    # define outer radii: the sorted edges, then the disk rim
    outer_radii = np.zeros(num_rings)
    outer_radii[:-1] = ring_edges
    outer_radii[-1] = radius_max
    # define inner radii: a near-zero core (pyPplusS requires a non-zero
    # inner radius), then the same sorted edges
    inner_radii = np.zeros(num_rings)
    inner_radii[1:] = ring_edges
    inner_radii[0] = 1e-16
    # random opacities, one per ring
    opacities = np.random.uniform(tau_min, tau_max, num_rings)
    if print_rings == True:
        print('There are a total of %i rings' % num_rings)
        template = ' ring %s runs from %s to %s [R*] with an opacity of %.4f'
        for n in range(num_rings):
            ring_num = ('%i' % (n+1)).rjust(2)
            ring_in = ('%.2f' % inner_radii[n]).rjust(6)
            ring_out = ('%.2f' % outer_radii[n]).rjust(6)
            pars = (ring_num, ring_in, ring_out, opacities[n])
            print(template % pars)
    return inner_radii, outer_radii, opacities
def add_noise(lightcurve, noise_func, noise_args, seed=None):
    '''
    Add random noise from ``noise_func`` to a light curve, then
    re-normalise it using the out-of-eclipse flux.

    Parameters
    ----------
    lightcurve : array_like (1-D)
        Simulated theoretical light curve (normalised flux) [L*].
    noise_func : function
        A random-number generator accepting a ``size`` keyword (any of
        the np.random distribution functions).
    noise_args : tuple
        Ordered positional arguments for ``noise_func``, excluding
        ``size``.
    seed : int
        Optional seed for np.random, for reproducible noise draws.

    Returns
    -------
    noisy_lightcurve : array_like (1-D)
        The input light curve with noise added and the out-of-transit
        median shifted back to 1.
    '''
    # out-of-transit points are those still (numerically) at full flux
    baseline_mask = lightcurve >= 0.999999
    # draw one noise sample per data point (optionally reproducible)
    np.random.seed(seed)
    noisy_lightcurve = lightcurve + noise_func(*noise_args, size=len(lightcurve))
    # shift so the out-of-transit median sits back at exactly 1
    noisy_lightcurve -= np.median(noisy_lightcurve[baseline_mask]) - 1
    return noisy_lightcurve
def remove_data(time, lightcurve, remove=None):
    '''
    Remove data from a light curve to produce holes in the data and
    simulate incomplete coverage.

    Parameters
    ----------
    time : array_like (1-D)
        Time data for the light curve [day].
    lightcurve : array_like (1-D)
        Normalised flux data for the light curve [L*].
    remove : int or array_like (1-D) of int or None
        Either the number of distinct points to remove (chosen at
        random) or an index array of the points to remove. ``None``
        removes nothing.

    Returns
    -------
    incomplete_time : array_like (1-D)
        Time data for the light curve with data removed [day].
    incomplete_lightcurve : array_like (1-D)
        Normalised flux data for the light curve with data removed [L*].
    '''
    # nothing requested: return untouched copies (np.delete with obj=None
    # is not a valid index specification)
    if remove is None:
        return np.asarray(time).copy(), np.asarray(lightcurve).copy()
    # an integer means "remove this many points at random"; sample WITHOUT
    # replacement so exactly `remove` points go. (The original
    # randint(0, len - 1) call could repeat indices — removing fewer points
    # than requested — and could never select the final point.)
    if isinstance(remove, (int, np.integer)):
        remove = np.random.choice(len(time), size=remove, replace=False)
    incomplete_time = np.delete(time, remove)
    incomplete_lightcurve = np.delete(lightcurve, remove)
    return incomplete_time, incomplete_lightcurve
###############################################################################
################################ CALCULATIONS #################################
###############################################################################
def calculate_slope(time, lightcurve, slope_bounds):
    '''
    Measure the local gradient of the light curve between two times.

    Parameters
    ----------
    time : array_like (1-D)
        Time data for the light curve [day].
    lightcurve : array_like (1-D)
        Normalised flux data for the light curve [L*].
    slope_bounds : tuple
        (lower, upper) time bounds for the slope calculation, inclusive.

    Returns
    -------
    slope_time : float
        Midpoint of the selected time window [day].
    slope : float
        Gradient of a first-order fit to the selected data [L*/day].
    '''
    lower, upper = slope_bounds
    # restrict to the requested time window (inclusive at both ends)
    in_window = (time >= lower) & (time <= upper)
    t_sel = time[in_window]
    f_sel = lightcurve[in_window]
    # a degree-1 polynomial fit; its leading coefficient is the slope
    gradient, _intercept = np.polyfit(t_sel, f_sel, 1)
    # report the slope at the midpoint of the selected data
    midpoint = 0.5 * (t_sel[0] + t_sel[-1])
    return midpoint, gradient
def calculate_slopes(time, lightcurve, slope_bounds_list):
    '''
    Convenience wrapper around ``calculate_slope``: measure one slope
    per (lower, upper) tuple in ``slope_bounds_list``.

    Parameters
    ----------
    time : array_like (1-D)
        Time data for the light curve [days].
    lightcurve : array_like (1-D)
        Normalised flux data for the light curve [L*].
    slope_bounds_list : list of tuples
        Each tuple holds the lower and upper time bounds over which a
        slope is calculated [day].

    Returns
    -------
    slope_times : array_like (1-D)
        Time at which each slope is measured [day].
    slopes : array_like (1-D)
        Slopes measured in the lightcurve [L*/day].
    '''
    # one (time, slope) pair per requested window
    results = [calculate_slope(time, lightcurve, bounds)
               for bounds in slope_bounds_list]
    slope_times = np.array([t for t, _ in results])
    slopes = np.array([s for _, s in results])
    return slope_times, slopes
def get_min_velocity(slopes, limb_darkening):
    '''
    Minimum transverse velocity of the occulting object from the
    steepest light-curve gradient, following the Van Werkhoven et al.
    (2014) method, expressed in stellar radii per day.

    Parameters
    ----------
    slopes : array_like (1-D)
        Slopes measured in the light curve [L*/day].
    limb_darkening : float
        Linear limb-darkening parameter, u, of the star [-].

    Returns
    -------
    min_velocity : float
        The minimum transverse velocity of the occulting object [R*/day].
    '''
    # steepest measured gradient (absolute value)
    steepest = np.max(np.abs(slopes))
    # limb-darkening-dependent factor from van Werkhoven et al. (2014),
    # with the stellar radius R divided out
    numerator = 2 * limb_darkening - 6
    denominator = 12 - 12 * limb_darkening + 3 * np.pi * limb_darkening
    return np.abs(steepest * np.pi * (numerator / denominator))
def get_min_disk_radius(min_velocity, eclipse_duration):
    '''
    Convert the minimum transverse velocity of the disk to a minimum
    disk size: the distance covered during the eclipse is the minimum
    disk diameter, so the minimum radius is half of velocity * duration.

    Parameters
    ----------
    min_velocity : float
        The minimum transverse velocity of the occulting object [R*/day].
    eclipse_duration : float
        Duration of the eclipse [day].

    Returns
    -------
    min_disk_radius : float
        Minimum size of the disk [R*].
    '''
    return 0.5 * min_velocity * eclipse_duration
##############################################################################
############################### PLOT FUNCTIONS ###############################
##############################################################################
def get_slope_lines(time, lightcurve, slope_times, slopes, length=0.1):
    '''
    Produce the (x, y) coordinates of a short line segment representing
    each slope, centred on the light curve at (slope_times,
    lightcurve @ slope_times).

    Parameters
    ----------
    time : array_like (1-D)
        Time data for the light curve [day].
    lightcurve : array_like (1-D)
        Normalised flux data for the light curve [L*].
    slope_times : array_like (1-D)
        Time at which each slope is measured [day].
    slopes : array_like (1-D)
        Slopes measured in the lightcurve [L*/day].
    length : float
        Total length of each drawn line segment [day].

    Returns
    -------
    slope_lines : array_like (3-D)
        x and y coordinates of each line, dimensions (slope, x/y, point),
        so one can loop over the slopes in the 0th dimension.
    '''
    # half-length projected onto the time axis so each segment's TOTAL
    # length equals `length`. (The original dx = sqrt(length/(s^2+1))
    # produced segments of length 2*sqrt(length) regardless of the
    # requested value.)
    dx = 0.5 * length / np.sqrt(slopes**2 + 1)
    dy = slopes * dx
    # determine the points on which the lines are centred (x, y)
    x = slope_times
    y = np.interp(x, time, lightcurve)
    # build each line as (start, centre, end)
    slope_lines = np.array([[x - dx, x, x + dx], [y - dy, y, y + dy]])
    # rearrange array such that the dimensions become (slope, x/y, points)
    slope_lines = np.transpose(slope_lines, (2, 0, 1))
    return slope_lines
def get_ring_patch(inner_radius, outer_radius, opacity, inclination, tilt,
                   impact_parameter, dt, facecolor='black'):
    '''
    Build a matplotlib patch for a single inclined, tilted ring
    (an annulus drawn as a compound Bezier path).

    This function has been edited from a function written by Matthew
    Kenworthy. The variable names, comments and documentation have been
    changed, but the functionality has not.

    Parameters
    ----------
    inner_radius : float
        The inner radius of the ring [R*].
    outer_radius : float
        The outer radius of the ring [R*].
    opacity : float
        The opacity of the ring [-].
    inclination : float
        The inclination of the ring [deg].
    tilt : float
        Tilt of the rings, the angle between the path of orbital motion and
        the semi-major axis of the projected ellipse [deg].
    impact_parameter : float
        Impact parameter between the centre of the rings w.r.t. the centre
        of the star [R*].
    dt : float
        This is a delta time parameter that can be used to shift the light
        curve left or right in time space [day]. Note here that as this has
        no effect on the shape it can be different from the actual dt value.
    facecolor : str
        The color of the ring patch [default = 'black'].

    Returns
    -------
    ring_patch : matplotlib.patch
        Patch of the ring with input parameters.

    Notes
    -----
    dt here has no effect on the ring system besides a translation along
    the orbital path. You may want to use a different value than the dt
    used to calculate the light curve for visualisation purposes.
    '''
    # convert inclination and tilt to radians
    inclination_rad = np.deg2rad(inclination)
    tilt_rad = np.deg2rad(tilt)
    # centre location of the ring system in (time, impact parameter) space
    dr = np.array([dt, impact_parameter])
    # get an Ellipse patch that has an ellipse defined with eight CURVE4
    # Bezier curves; actual parameters are irrelevant - get_path() returns
    # only a normalised Bezier curve ellipse which we then subsequently
    # transform
    ellipse = Ellipse((1, 1), 1, 1, 0)
    # get the Path points for the ellipse (8 Bezier curves with 3 additional
    # control points)
    ellipse_path = ellipse.get_path()
    # define rotation matrix
    # NOTE(review): this matrix has determinant -1 (a reflection); combined
    # with the opposite x-sign applied to annulus2 below, the inner ellipse
    # is traversed opposite to the outer one so the fill rule leaves an
    # annular hole — confirm against rendered output before changing.
    rotation_matrix = np.array([[np.cos(tilt_rad), np.sin(tilt_rad)],
                                [np.sin(tilt_rad), -np.cos(tilt_rad)]])
    # squeeze the circle to the appropriate ellipse (foreshorten the y axis
    # by cos(inclination))
    annulus1 = ellipse_path.vertices * ([ 1., np.cos(inclination_rad)])
    annulus2 = ellipse_path.vertices * ([-1., np.cos(inclination_rad)])
    # rotate and shift the ellipses (outer boundary, then inner boundary)
    ellipse_rot1 = np.dot(annulus1 * outer_radius, rotation_matrix) + dr
    ellipse_rot2 = np.dot(annulus2 * inner_radius, rotation_matrix) + dr
    # produce the arrays necessary to produce a new Path and Patch object
    ring_vertices = np.vstack((ellipse_rot1, ellipse_rot2))
    ring_commands = np.hstack((ellipse_path.codes, ellipse_path.codes))
    # create the Path and Patch objects; ring opacity maps to alpha
    ring_path = Path(ring_vertices, ring_commands)
    ring_patch = PathPatch(ring_path, facecolor=facecolor, edgecolor='none',
                           alpha=opacity)
    return ring_patch
def get_ringsystem_patches(planet_radius, inner_radii, outer_radii, opacities,
                           inclination, tilt, impact_parameter, dt,
                           facecolor='black'):
    '''
    Build all matplotlib patches needed to draw a full ring system: one
    circular patch for the planet followed by one annular patch per ring.

    Parameters
    ----------
    planet_radius : float
        Size of the planet [R*].
    inner_radii : array_like (1-D)
        Inner dimensions of the rings [R*].
    outer_radii : array_like (1-D)
        Outer dimensions of the rings [R*].
    opacities : array_like (1-D)
        Opacities of each ring [-].
    inclination : float
        Inclination of the ring system [deg].
    tilt : float
        Tilt of the rings, the angle between the path of orbital motion
        and the semi-major axis of the projected ellipse [deg].
    impact_parameter : float
        Impact parameter between the centre of the rings w.r.t. the
        centre of the star [R*].
    dt : float
        Translation of the drawing along the orbital path [day]; it may
        differ from the light-curve dt for visualisation purposes, since
        it has no effect on the shapes themselves.
    facecolor : str
        The color of all patches [default = 'black'].

    Returns
    -------
    ringsystem_patches : list of matplotlib.patch
        The planet patch first, then each ring patch in order.
    '''
    # the planet is a plain filled circle at (dt, impact_parameter)
    patches = [Circle((dt, impact_parameter), planet_radius,
                      facecolor=facecolor)]
    # one annulus per ring, sharing the system's orientation and offset
    for r_in, r_out, tau in zip(inner_radii, outer_radii, opacities):
        patches.append(get_ring_patch(r_in, r_out, tau, inclination, tilt,
                                      impact_parameter, dt, facecolor))
    return patches
def plot_lightcurve(time, lightcurve, lightcurve_components, slope_lines=None,
                    components=True, xlim=None, ylim=None, ax=None):
    '''
    This function plots the light curve for the provided ringsystem and can
    include the slopes lines provided.

    Parameters
    ----------
    time : array_like (1-D)
        Time data for the light curve [day].
    lightcurve : array_like (1-D)
        Normalised flux data for the light curve [L*].
    lightcurve_components : list of array_like (1-D)
        List containing the lightcurves produced by each of the components of
        the companion (planet + rings/disks) [L*/day]. May be None when
        components is False (it is only read when components is True).
    slope_lines : array_like (3-D)
        x and y coordinates of a line centred at (slope_times, lightcurve @
        slope_times) with input length. The 0th dim is the line for a given
        slope, the 1st dim is either the x or the y points, the 2nd dim is
        the actual points [default = None, meaning no slope lines].
    components : bool
        Determines whether or not the lightcurves of the companion components
        are plotted [default = True].
    xlim : tuple
        x-limits of the plot.
    ylim : tuple
        y-limits of the plot.
    ax : matplotlib.axes()
        Potentially contains an axes object to plot the light curve on to.

    Returns
    -------
    ax : matplotlib.axes()
        Contains the axes with the light curve plot.
    '''
    # None sentinel instead of a mutable default argument; identity check
    # is the idiomatic (and array-safe) way to test for None
    if slope_lines is None:
        slope_lines = []
    if ax is None:
        ax = plt.gca()
    # plot the individual companion components if requested
    if components:
        lbl = 'planet'  # the first component is always the planet
        for k, lightcurve_component in enumerate(lightcurve_components):
            # only plot the points where the component actually blocks light
            mask = lightcurve_component < 1
            ax.plot(time[mask], lightcurve_component[mask], marker='.', ls='',
                    label=lbl, alpha=0.6)
            lbl = 'ring #%i' % (k+1)
        lbl = 'full lightcurve'
    else:
        lbl = None
    # plot the slope lines
    for slope_line in slope_lines:
        ax.plot(slope_line[0], slope_line[1], 'm:', lw=4)
    # plot the full light curve
    ax.plot(time, lightcurve, 'k-', lw=2, label=lbl, alpha=0.5)
    # add legend (labels are only assigned when components are drawn)
    if components:
        ax.legend(bbox_to_anchor=[1.0, 0.0], loc='lower left')
    # set x/y labels and limits
    ax.set_xlabel('Time [days]')
    ax.set_ylabel('Normalised Flux [-]')
    ax.set_ylim(ylim)
    ax.set_xlim(xlim)
    return ax
def plot_ringsystem(ringsystem_patches, xlim=None, ylim=None, ax=None):
    '''
    This function creates the ringsystem cartoon plot: the star (unit
    radius, at the origin) with the companion's planet/ring patches on top.

    Parameters
    ----------
    ringsystem_patches : list of matplotlib.patch
        List containing all the ring patches that make up the ring system
        described by the input parameters and a circular patch for the planet.
    xlim : tuple
        x-limits of the plot.
    ylim : tuple
        y-limits of the plot.
    ax : matplotlib.axes()
        Potentially contains an axes object to plot the light curve on to.

    Returns
    -------
    ax : matplotlib.axes()
        Contains the axes with the ringsystem cartoon.
    '''
    # get stellar patch: unit radius since all lengths are in stellar radii
    star = Circle((0, 0), 1, facecolor='r')
    # create axes if none were supplied; identity check instead of `== None`
    # (equality can trigger element-wise comparison on array-like objects)
    if ax is None:
        ax = plt.gca()
    # equal aspect so circles/ellipses are not distorted
    ax.set_aspect('equal')
    # add star
    ax.add_patch(star)
    # add companion (planet + ringsystem)
    for component in ringsystem_patches:
        ax.add_patch(component)
    # set x/y labels and limits
    ax.set_xlabel('x [R*]')
    ax.set_ylabel('y [R*]')
    ax.set_ylim(ylim)
    ax.set_xlim(xlim)
    return ax
def plot_combined(ringsystem_params, lightcurve_params, savename='test.png',
                  figsize=(12, 10), title=''):
    '''
    This function creates a figure with two subplots, the ringsystem cartoon
    on the top and the lightcurve on the bottom, then saves and shows it.

    Parameters
    ----------
    ringsystem_params : tuple
        Contains all the input parameters for plot_ringsystem(), which are
        ringsystem_patches, xlim=None, ylim=None, where ax should NOT be
        included (it is supplied internally).
    lightcurve_params : tuple
        Contains all the input parameters for plot_lightcurve(), which are
        time, lightcurve, lightcurve_components, slope_lines=None,
        components=True, xlim=None, ylim=None, where ax should NOT be
        included (it is supplied internally).
    savename : str
        Name of the file to be saved [default = 'test.png'].
    figsize : tuple
        Size of the plot [default = (12, 10)].
    title : str
        Title of the figure [default = ''].

    Returns
    -------
    None
        The figure is saved to savename and shown, not returned.
    '''
    fig = plt.figure(figsize=figsize)
    fig.suptitle(title)
    # 3x1 grid: top two-thirds for the cartoon, bottom third for the curve
    ax0 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
    ax1 = plt.subplot2grid((3, 1), (2, 0))
    ax0 = plot_ringsystem(*ringsystem_params, ax=ax0)
    ax1 = plot_lightcurve(*lightcurve_params, ax=ax1)
    fig.savefig(savename)
    fig.show()
    return None
##############################################################################
################################# MODULE DEMO ################################
##############################################################################
if __name__ == "__main__":
    # Demo driver: exercises every public function of this module in order,
    # printing a description of each and showing the corresponding plots.
    # import extra modules for demo
    from matplotlib.patches import Rectangle
    # start demos
    print('')
    print('========================================================')
    print('ALL THE METHODS IN SIMULATE_LIGHTCURVE.PY WILL BE DEMOED')
    print('========================================================')
    print('')
    ### SIMULATE_LIGHTCURVE() ###
    print('1. simulate_lightcurve.simulate_lightcurve()')
    print('--------------------------------------------')
    print('This function simulates the light curve of a transiting ring')
    print('system and each of the ring system\'s components.')
    # initialise parameters
    print(' a. initialising input parameters:')
    time = np.linspace(-85, 85, 301)
    time_pars = (time[0], time[-1], len(time))
    planet_radius = 0.3
    ring_edges = np.linspace(0, 130, 6)
    inner_radii = ring_edges[:-1]
    # tiny but non-zero inner radius for the innermost ring
    inner_radii[0] = 1e-16
    outer_radii = ring_edges[1:]
    opacities = np.random.uniform(0, 1, 5)
    inclination = 76
    tilt = 37
    impact_parameter = 12
    dt = 5
    limb_darkening = 0.4
    transverse_velocity = 1
    print(' time: from %.2f to %.2f day in %i equal steps' % time_pars)
    print(' planet_radius: %.2f [R*]' % planet_radius)
    print(' inner_radii: ', inner_radii, ' [R*]')
    print(' outer_radii: ', outer_radii, ' [R*]')
    print(' opacities: ', opacities, ' [-]')
    print(' inclination: %.2f [deg]' % inclination)
    print(' tilt: %.2f [deg]' % tilt)
    print(' impact_parameter: %.2f [R*]' % impact_parameter)
    print(' dt: %.2f [day]' % dt)
    print(' limb_darkening: %.2f' % limb_darkening)
    print(' transverse_velocity: %.2f [R*/day]' % transverse_velocity)
    # list dependencies
    print(' b. demo via:')
    print(' simulate_lightcurve.plot_lightcurve()')
    print(' - slope_lines demoed later')
    # prepare demo
    print(' c. running simulate_lightcurve.simulate_lightcurve() demo')
    sim_args = (time, planet_radius, inner_radii, outer_radii, opacities,
                inclination, tilt, impact_parameter, dt, limb_darkening,
                transverse_velocity)
    lightcurve, lightcurve_components = simulate_lightcurve(*sim_args)
    fig, ax = plt.subplots(figsize=(12,6))
    fig.suptitle('Demo: simulate_lightcurve.simulate_lightcurve()')
    ax = plot_lightcurve(time, lightcurve, lightcurve_components)
    plt.show()
    print('\n')
    ### GENERATE RANDOM RINGSYSTEM() ###
    print('2. simulate_lightcurve.generate_random_ringsystem()')
    print('---------------------------------------------------')
    print('This function breaks up a circumplanetary disk into a ring system')
    print('by separating the disk into random connected rings with random')
    print('opacities.')
    # intialise input parameters
    print(' a. initialising input parameters:')
    disk_radius = outer_radii[-1]
    ring_num_min = 3
    ring_num_max = 12
    tau_min = 0.0
    tau_max = 1.0
    print_rings = False
    gen_args = (disk_radius, ring_num_min, ring_num_max, tau_min, tau_max,
                print_rings)
    print(' disk_radius: %.2f [R*]' % disk_radius)
    print(' ring_num_min: %i' % ring_num_min)
    print(' ring_num_max: %i' % ring_num_max)
    print(' tau_min: %.2f [-]' % tau_min)
    # NOTE(review): label below should read tau_max (the value printed is
    # tau_max) -- string left as-is since this is a behavioural change
    print(' tau_min: %.2f [-]' % tau_max)
    print(' print_rings: %r' % print_rings)
    # list dependencies
    print(' b. demo via:')
    print(' simulate_lightcurve.plot_ringsystem()')
    print(' - helper: simulate_lightcurve.get_ringsystem_patches()')
    print(' - helper: simulate_lightcurve.get_ring_patch()')
    # prepare demo
    print(' c. running simulate_lightcurve.generate_random_ringsystem() demo')
    fig, axes = plt.subplots(2, 2, figsize=(14, 10))
    fig.suptitle('Demo simulate_lightcurve.generate_random_ringsystem()')
    rs_xlim = (-120, 120)
    rs_ylim = (-100, 100)
    # draw four independent random realisations in a 2x2 grid
    for i in range(2):
        for j in range(2):
            ax = axes[i,j]
            rin, rout, tau = generate_random_ringsystem(*gen_args)
            rsp_args = (planet_radius, rin, rout, tau, inclination, tilt,
                        impact_parameter, dt)
            rs_patches = get_ringsystem_patches(*rsp_args)
            ax = plot_ringsystem(rs_patches, rs_xlim, rs_ylim, ax)
    plt.show()
    print('\n')
    ### ADD_NOISE() ###
    print('3. simulate_lightcurve.add_noise()')
    print('----------------------------------')
    print('This function adds noise to a light curve given a certain noise')
    print('distribution.')
    # intialise input parameters
    print(' a. intialising input parameters:')
    noise_func = np.random.normal
    mean = np.zeros(4)
    std = np.array([0.00, 0.02, 0.05, 0.10])
    seed = np.random.randint(0, 100000, 1)
    print(' noise_func: np.random.normal')
    print(' noise_args:')
    print(' mean = 0, std = %.2f' % std[0])
    print(' mean = 0, std = %.2f' % std[1])
    print(' mean = 0, std = %.2f' % std[2])
    print(' mean = 0, std = %.2f' % std[3])
    print(' seed: %i (can be None)' % seed)
    # list dependencies
    print(' b. demo via:')
    print(' simulate_lightcurve.plot_lightcurve()')
    print(' - slope_lines demoed later')
    # prepare demo
    print(' c. running simulate_lightcurve.add_noise() demo')
    fig, axes = plt.subplots(2, 2, figsize=(14, 10))
    fig.suptitle('Demo simulate_lightcurve.add_noise()')
    # one subplot per noise level, same seed so realisations are comparable
    for i in range(2):
        for j in range(2):
            noise_ind = 2 * i + j
            noise_args = (mean[noise_ind], std[noise_ind])
            noisy_lightcurve = add_noise(lightcurve, noise_func, noise_args,
                                         seed)
            ax = axes[i, j]
            ax = plot_lightcurve(time, noisy_lightcurve, lightcurve,
                                 components=False, ax=ax)
            ax.set_title('noise = %.2f' % std[noise_ind])
    plt.show()
    print('\n')
    ### REMOVE_DATA() ###
    print('4. simulate_lightcurve.remove_data()')
    print('------------------------------------')
    print('This function removes data from an eclipse in two fashions, either')
    print('by supplying an integer (in which case that many random points will')
    print('be removed) or an index array (removing those particular data')
    print('points.')
    # intialise input parameters
    print(' a. initialising input parameters:')
    remove_int = 200
    remove_array = np.array([15, 16, 17, 18, 19, 20, 67, 68, 69, 70, 71, 72,
                             73, 74, 75, 76, 77, 78, 79, 80, 100, 101, 102,
                             103, 104, 230, 231, 232])
    remove = [remove_int, remove_array]
    remove_lbl = ['type(remove) = int', 'type(remove) = list/array']
    print(' remove (int) = %i' % remove_int)
    print(' remove (array) = ', remove_array)
    # list dependencies
    print(' b. demo via:')
    print(' simulate_lightcurve.plot_lightcurve()')
    print(' - slope_lines demoed later')
    # prepare demo
    print(' c. running simulate_lightcurve.remove_data() demo')
    fig, axes = plt.subplots(2, 1, figsize= (12, 10))
    fig.suptitle('Demo: simulate_lightcurve.remove_data()')
    # top subplot: integer-based removal; bottom: index-array-based removal
    for i in range(2):
        itime, ilightcurve = remove_data(time, lightcurve, remove[i])
        axes[i] = plot_lightcurve(time, lightcurve, None, components=False,
                                  ax=axes[i])
        axes[i].plot(time, lightcurve, 'ko', label='original lightcurve')
        axes[i].plot(itime, ilightcurve, 'go', label='data after removal')
        axes[i].legend()
        axes[i].set_title(remove_lbl[i])
    plt.show()
    print('\n')
    ### CALCULATE_SLOPES ###
    print('5. simulate_lightcurve.calculate_slopes()')
    print('-----------------------------------------')
    print('This function is used to calculate slopes in the light curve that')
    print('can be used for further processing (determining the minimum')
    print('transverse velocity of the ringsystem and to carve out the sjalot')
    print('explorer [separate BeyonCE module].')
    # initialise input parameters
    print(' a. initialising input parameters:')
    slope_bounds_list = [(-42, -39.5), (-32, -28.8), (25.5, 28.5), (46, 49.5)]
    print(' slope_bounds_list:')
    for sb in slope_bounds_list:
        print(' slope_bound = (%.2f, %.2f)' % sb)
    # list dependencies
    print(' b. demo via:')
    print(' helper: simulate_lightcurve.calculate_slope()')
    print(' simulate_lightcurve.plot_lightcurve()')
    print(' - helper: simulate_lightcurve.get_slope_line()')
    # prepare demo
    print(' c. running simulate_lightcurve.calculate_slopes() demo')
    fig, ax = plt.subplots(figsize=(12, 6))
    fig.suptitle('Demo: simulate_lightcurve.calculate_slopes()')
    slope_times, slopes = calculate_slopes(time, lightcurve, slope_bounds_list)
    slope_lines = get_slope_lines(time, lightcurve, slope_times, slopes)
    ax = plot_lightcurve(time, lightcurve, None, slope_lines=slope_lines,
                         ax=ax, components=False)
    # shade the time windows in which each slope was measured
    for slope_bounds in slope_bounds_list:
        tl, tu = slope_bounds
        bounds = Rectangle((tl, 0), tu-tl, 2, color='g', alpha=0.2)
        ax.add_patch(bounds)
    ax.plot(time, lightcurve, 'kx')
    plt.show()
    print('\n')
    ### GET_MIN_VELOCITY() ###
    print('6. simulate_lightcurve.get_min_velocity()')
    print('-----------------------------------------')
    print('This function follows equation 12 from Van Werkhoven et al. 2014')
    print('(https://academic.oup.com/mnras/article/441/4/2845/1206172).')
    print('The inputs are the measured slopes in the lightcurve, which can be')
    print('measured using simulate_lightcurve.calculate_slopes() and the')
    print('linear limb-darkening parameter of the star. [Demo n/a]')
    print('\n')
    ### GET_MIN_DISK_RADIUS() ###
    print('7. simulate_lightcurve.get_min_disk_radius()')
    print('--------------------------------------------')
    print('This function takes the minimum velocity of the disk provided by')
    print('simulate_lightcurve.get_min_velocity() and the duration of the')
    print('eclipse to determine the minimum disk radius of the transiting')
    print('ring system. [Demo n/a]')
    print('\n')
    ### PLOT_COMBINED() ###
    print('8. simulate_lightcurve.plot_combined()')
    print('--------------------------------------')
    # initialise parameters
    print(' a. initialising parameters:')
    print(' plot_ringsystem parameters')
    print(' plot_lightcurve parameters')
    rsp_args = (planet_radius, inner_radii, outer_radii, opacities,
                inclination, tilt, impact_parameter, dt)
    ringsystem_patches = get_ringsystem_patches(*rsp_args)
    rs_xlim = (-120, 120)
    rs_ylim = (-100, 100)
    ringsystem_params = (ringsystem_patches, rs_xlim, rs_ylim)
    lightcurve_params = (time, lightcurve, lightcurve_components, slope_lines,
                         True)
    # list dependencies
    print(' b. demo via:')
    print(' helper: get_ringsystem_patches()')
    print(' - helper: get_ring_patch()')
    # prepare demo
    print(' c. running simulate_lightcurve.plot_combined() demo')
    plot_combined(ringsystem_params, lightcurve_params)
    print(' figure saved to \'./test.png\'')
    print('\n')
    print('==========================================================')
    print('ALL THE METHODS IN SIMULATE_LIGHTCURVE.PY HAVE BEEN DEMOED')
    print('==========================================================')
    print('')
|
18,624 | 9456b39a58409dc4efbbd4ce8e99d9740257d129 | from main import login_manager
from app.models.user import User as UserModel
from app.session.user import User
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login user loader: resolve a session user by e-mail.

    Returns a session User built from the matching database record, or
    None when no user with that e-mail exists.
    """
    record = UserModel.query.filter_by(email=user_id).first()
    return None if record is None else User(record.email, record.email)
|
18,625 | 4c9987b0139bf2e55c06d15d45fc0bb708a2179f | from keras.models import load_model
from keras.preprocessing import image
import numpy as np
# Load the trained classifier weights from disk.
model = load_model('Xception_299x299_1x4k.hdf5')
# Load and preprocess a single test image.
# NOTE(review): target_size is 256x256 while the weight-file name suggests the
# network was trained at 299x299 -- confirm the expected input resolution.
img = image.load_img("spa.jpg", target_size=(256, 256))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)  # add batch dimension -> shape (1, H, W, C)
x = np.divide(x, 255)  # scale pixel values to [0, 1]
# Predict class scores and print the index of the highest-scoring class.
scores = model.predict(x)
print(np.argmax(scores))
|
18,626 | 04223e283462a47a38cb817d083918514a4eacd7 | from .rule_validator import RuleViolation
from .rules_helper import get_apigateway_integration, get_path_headers, get_integration_response_parameters, \
get_path_verbs
class CORSInconsistentHeadersRule:
    """Validation rule flagging OPTIONS (CORS preflight) integration
    responses whose Access-Control-Allow-Headers list disagrees with the
    headers declared on the path."""

    def __init__(self):
        # NOTE: 'incosistent' (sic) is kept verbatim -- the rule name is an
        # external identifier; correcting the typo would break consumers.
        self.rule_name = 'options_cors_incosistent_headers'

    def validate(self, spec):
        """Return a list of RuleViolation, one per Allow-Header that is
        present on only one side (path headers vs integration response).

        Parameters
        ----------
        spec : dict
            Parsed API spec with 'paths' and API Gateway integrations.
        """
        violations = []
        for path in spec['paths']:
            # only OPTIONS verbs carry CORS preflight responses
            if 'options' not in get_path_verbs(spec, path):
                continue
            integration = get_apigateway_integration(spec, path, 'options')
            path_headers = get_path_headers(spec, path)
            # .get() tolerates integrations without a 'responses' section
            # (the original inner guard for this was unreachable because the
            # loop itself would have raised KeyError first)
            for response in integration.get('responses', {}):
                if 'responseParameters' not in integration['responses'][response]:
                    continue
                integration_response_params = get_integration_response_parameters(
                    spec, path, 'options', response)
                if 'method.response.header.Access-Control-Allow-Headers' in integration_response_params:
                    integration_headers = self.get_access_control_allow_headers(
                        integration_response_params)
                    # symmetric difference: header declared on exactly one side
                    headers_difference = set(path_headers).symmetric_difference(
                        set(integration_headers))
                    for header in headers_difference:
                        message = 'Extra Allow-Header "{}" included in parameters or responseParameters.'.format(header)
                        # use self.rule_name instead of repeating the literal
                        violations.append(RuleViolation(self.rule_name,
                                                        message=message,
                                                        path=path))
        return violations

    def get_access_control_allow_headers(self, integration_response_params):
        """Parse the quoted, comma-separated Allow-Headers value into a list
        of stripped, non-empty header names (a reusable list rather than the
        one-shot iterator the original returned)."""
        allow_headers_value = integration_response_params[
            'method.response.header.Access-Control-Allow-Headers']
        # value is wrapped in quotes, e.g. "'Content-Type,X-Amz-Date'"
        names = (part.strip() for part in allow_headers_value[1:-1].split(','))
        return [name for name in names if name]
|
18,627 | b84b17080ee201ed6fac1859e22a54ae4fdb3122 | # Copyright © 2020 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Dharmendra G Patel <dhpatel@redhat.com>
#
"""Stack analyses API v2 response builder class."""
import logging
from bayesian.utils import request_timed_out
# Module-level logger; note it is keyed on __file__ (a path) rather than the
# conventional __name__ -- confirm before changing, log config may rely on it.
logger = logging.getLogger(__file__)
class StackAnalysesResponseBuilder:
    """Stack analysis response builder for API v2.

    Takes the DB record, stack result and recommendation data for a request,
    verifies them and assembles the v2 stack-analyses JSON response. Raises
    a specific SARB* exception when the request is invalid, timed out or
    still in progress.
    """

    def __init__(self, external_request_id, rdb_analyses):
        """Store the request id and the RDB accessor used to fetch results."""
        self.external_request_id = external_request_id
        self.rdb_analyses = rdb_analyses

    def get_response(self):
        """Aggregate, build and return the JSON response for the request id.

        Raises
        ------
        SARBRequestInvalidException
            If neither stack result nor recommendation data exists in RDB.
        SARBRequestTimeoutException
            If the request timed out before results were produced.
        SARBRequestInprogressException
            If the analysis is still running.
        """
        # lazy %-style args so formatting is skipped when DEBUG is disabled
        logger.debug('SA Get request id: %s', self.external_request_id)
        # Get db result, stack result and recm data from rdb.
        self._db_result = self.rdb_analyses.get_request_data()
        self._stack_result = self.rdb_analyses.get_stack_result()
        self._recm_data = self.rdb_analyses.get_recommendation_data()
        # If request is invalid, it will raise exception with proper message.
        self._raise_if_invalid()
        # If request is inprogress or timeout, it will raise exception with
        # proper message.
        self._raise_if_inprogress_or_timeout()
        # Proceed with building actual response from data.
        stack_task_result = self._stack_result.get('task_result')
        stack_audit = stack_task_result.get('_audit', {})
        return {
            'version': stack_audit.get('version', None),
            'started_at': stack_audit.get('started_at', None),
            'ended_at': stack_audit.get('ended_at', None),
            'external_request_id': self.external_request_id,
            'registration_status': stack_task_result.get('registration_status', ''),
            'manifest_file_path': stack_task_result.get('manifest_file_path', ''),
            'manifest_name': stack_task_result.get('manifest_name', ''),
            'ecosystem': stack_task_result.get('ecosystem', ''),
            'unknown_dependencies': stack_task_result.get('unknown_dependencies', ''),
            'license_analysis': stack_task_result.get('license_analysis', ''),
            'recommendation': self._recm_data.get('task_result', {}),
            'registration_link': stack_task_result.get('registration_link', ''),
            'analyzed_dependencies': stack_task_result.get('analyzed_dependencies', [])
        }

    def _raise_if_invalid(self):
        """Raise SARBRequestInvalidException when both results are missing."""
        if self._stack_result == -1 and self._recm_data == -1:
            error_message = 'Worker result for request ID {} does not exist yet'.format(
                self.external_request_id)
            # logger.error, not logger.exception: there is no active exception
            # here, so exception() would log a spurious "NoneType: None" trace
            logger.error(error_message)
            raise SARBRequestInvalidException(error_message)

    def _raise_if_inprogress_or_timeout(self):
        """Raise a timeout or in-progress exception when results are absent."""
        if self._stack_result is None or self._recm_data is None:
            # If the response is not ready and the timeout period is over,
            # report the request as timed out (mapped to HTTP 408 upstream).
            if request_timed_out(self._db_result):
                error_message = 'Stack analysis request {} has timed out. Please retry ' \
                                'with a new analysis.'.format(self.external_request_id)
                logger.error(error_message)
                raise SARBRequestTimeoutException(error_message)
            else:
                error_message = 'Analysis for request ID {} is in progress'.format(
                    self.external_request_id)
                logger.warning(error_message)
                raise SARBRequestInprogressException(error_message)
class SARBRequestInvalidException(Exception):
    """Raised when RDB holds neither stack result nor recommendation data
    for the given request id (i.e. the request is unknown/invalid)."""
class SARBRequestInprogressException(Exception):
    """Raised while the stack-analyses backbone service is still processing
    the given request id."""
class SARBRequestTimeoutException(Exception):
    """Raised when the given request id timed out before stack-analyses
    data could be generated."""
|
18,628 | 67a14411503f8e91243e70a00c25a832a45309cc | from nltk.corpus import wordnet
import random
from knowledge.synonymizer import Synonymizer
import logging
logger = logging.getLogger(__name__)
class WordNet(Synonymizer):
    """Generate synonyms for a word using the WordNet thesaurus."""

    def synonym(self, word):
        """Return a random synonym for *word* (hyponym lemmas preferred).

        Falls back to the morphological root when the word itself is not
        found in WordNet, and returns *word* unchanged when no synonym can
        be produced.
        """
        all_synsets = wordnet.synsets(word)
        if not all_synsets:
            morphed = wordnet.morphy(word)
            # lazy %-args: formatting only happens if INFO is enabled
            logger.info("Word '%s' not found in wordnet; morphing to '%s'",
                        word, morphed)
            # covers both None and an empty root form (original checked
            # `is None` and truthiness separately; the outcome is the same)
            if not morphed:
                return word
            all_synsets = wordnet.synsets(morphed)
            if not all_synsets:
                return word
        synset = random.choice(all_synsets)
        # prefer lemmas of hyponyms (more specific terms) when available
        hyponym_lemmas = [t for h in synset.hyponyms() for t in h.lemmas()]
        if hyponym_lemmas:
            chosen = random.choice(hyponym_lemmas).name()
        else:
            lemma_names = synset.lemma_names()
            if not lemma_names:
                return word
            chosen = random.choice(lemma_names)
        # WordNet uses underscores for multi-word lemmas
        return chosen.replace("_", " ")

    def synonyms(self, init_list, n=10):
        """Take a list of words (or a whitespace-separated string) and return
        a list of n words: the originals stretched/repeated to length n, each
        slot replaced by a synonym ~75% of the time."""
        if isinstance(init_list, str):
            init_list = init_list.split()
        seeds = []
        for i in range(n):
            # map slot i back onto the (shorter/longer) original list
            term = init_list[i * len(init_list) // n]
            if random.random() < 0.75:  # sometimes keep the original word
                term = self.synonym(term)
            seeds.append(term)
        return seeds

    def path(self, root, n=10, stepsize=3):
        """Return a list of n terms by repeatedly finding synonyms of the
        last term in the chain; *stepsize* candidates are drawn per node.
        When a node yields nothing new, the chain restarts from *root*."""
        seq = [root]
        while len(seq) < n:
            # renamed from `next` to avoid shadowing the builtin
            candidates = self.synonyms([seq[-1]], stepsize)
            random.shuffle(candidates)
            remaining = stepsize
            added_something = False
            for candidate in candidates:
                if candidate not in seq:
                    seq.append(candidate)
                    added_something = True
                    remaining -= 1
                    if remaining <= 0:
                        break
            if not added_something:
                # dead end: restart the walk from the root term
                seq.append(root)
        return seq[:n]
def dev():
    """Ad-hoc manual smoke test for the WordNet synonymizer."""
    synonymizer = WordNet()
    print(synonymizer.synonyms(["denial", "tabular", "dog"], 20))
    print(synonymizer.path("dog", 200))
|
18,629 | 2fb8582799e932e3114edf6fc6cfcd89bb33f816 | from banco import Banco
b = Banco()
b.__del__()
'''
cursor = cnx.cursor() #criando cursor
dados = "call `database`.INSERTTAB('Jonatas', 1200, 'M', 44485312879, @V_RESPOSTA)"
try:
cursor.execute(dados)
cnx.commit()
print("Inserido com sucesso")
except:
print("Falhou :( ")
cnx.close()
'''
|
18,630 | 7af61d36751818b12c7dc75bf1545db00bd81927 |
def myfunc(a, b, *c, **d):
    """Demo of positional, *args and **kwargs parameters.

    Prints the two required arguments, then each extra positional argument,
    then each keyword name followed by its value. Returns None.
    (Converted from Python-2 print statements to print() calls, which emit
    identical output for single arguments under both Python 2 and 3.)
    """
    print(a)
    print(b)
    for extra in c:
        print(extra)
    for key in d:
        print(key)
        print(d[key])


myfunc(1, 2, bar='a', foo='c')
|
18,631 | 4cc14213662b233fee43d36ecd1952e559b57218 | # Sort a file
# Need to do this since I'm getting different sort results from the
# system sort
import sys
def usage():
    # Print a brief usage message to stdout (Python 2 print statements;
    # the trailing comma-joined strings print with a separating space).
    print sys.argv[0], " [list of files to process]"
    print sys.argv[0], " sort the lines from the input files (use '-' for stdin)"
# Guard: require at least one input-file argument.
if len(sys.argv) < 2:
    usage()
    sys.exit(-2)
comesh_total = []
sep = '|'  # field separator used for both input parsing and output
for i in sys.argv[1:]:
    sys.stderr.write('Processing File ' + i + '\n')
    # '-' means read from standard input instead of a named file
    if i=='-' :
        f=sys.stdin
    else:
        f=open(i, 'r')
    for line in f:
        # NOTE(review): 'tuple' shadows the builtin name; input lines are
        # expected to look like "count|field1|field2"
        tuple=line.rstrip().split(sep)
        key=tuple[1]+sep+tuple[2] # comesh
        val=int(tuple[0]) # count
        comesh_total.append( (key, val) )
    f.close()
    sys.stderr.write('Sorting ' + i + '\n')
    comesh_total.sort()
# print comesh_total
for (key, val) in comesh_total:
    # Python 2 print statement: emit "count|key"
    print str(val)+sep+key
|
18,632 | 98ddbd72acf3f7ddc730cddd8e522b96cebec301 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
# Column names for the standings DataFrames. FIX: the original referenced
# `headers` without ever defining it, which raised NameError at runtime.
headers = ["Team", "Wins", "Loses", "Win%", "GB"]

# Division name -> id of its standings table on baseball-reference.com.
DIVISIONS = {
    "AL_East": "standings-upto-AL-E",
    "AL_Central": "standings-upto-AL-C",
    "AL_West": "standings-upto-AL-W",
    "NL_East": "standings-upto-NL-E",
    "NL_Central": "standings-upto-NL-C",
    "NL_West": "standings-upto-NL-W",
}

# Output column -> the `data-stat` attribute used in the HTML table cells.
_STAT_ATTRS = {
    "Wins": "W",
    "Loses": "L",
    "Win%": "win_loss_perc",
    "GB": "games_back",
}


def _parse_division(soup, table_id):
    """Extract one division's five-team standings into a DataFrame.

    Args:
        soup: BeautifulSoup document of the box-scores page.
        table_id: HTML id of the division's standings table.

    Returns:
        A DataFrame with one row per team and the columns in `headers`.
    """
    table = soup.find("table", {"id": table_id})
    rows = table.find_all("tr")
    df = pd.DataFrame(columns=headers)
    for i in range(5):  # rows[0] is the header row; the 5 teams follow
        row = rows[i + 1]
        df.at[i, "Team"] = row.th.a.get_text()
        for column, stat in _STAT_ATTRS.items():
            df.at[i, column] = row.find("td", attrs={"data-stat": stat}).get_text()
    return df


july = [23, 24, 25, 26, 27, 28, 29, 30]
month = 7
for day in july:
    url = ("https://www.baseball-reference.com/boxes/?month=" + str(month)
           + "&day=" + str(day) + "&year=2020")
    soup = BeautifulSoup(urlopen(url))
    # File-name date stamp, MMDDYY (e.g. "072320"); zfill generalizes the
    # original's hard-coded "07" prefix while producing identical names.
    date = str(month).zfill(2) + str(day).zfill(2) + "20"
    for division, table_id in DIVISIONS.items():
        _parse_division(soup, table_id).to_csv(division + "." + date + ".csv")
18,633 | 64c446279d8f4f9ccbfe1f6dabd99f59a4e0965e | import math
import sys
# Read (label, score) pairs — one tab-separated "<label>\t<score>" per
# line — from the file named on the command line into org_arr.
org_arr = []
with open(sys.argv[1], "r") as infile:  # fix: original leaked the handle
    for line in infile:
        segs = line.strip().split("\t")
        org_arr.append((int(segs[0]), float(segs[1])))
def calc(arr=None):
    """Compute (and print) the AUC of a set of (label, score) pairs.

    Uses the rank-sum formulation: AUC = (R_pos - M*(M+1)/2) / (M*N),
    where R_pos is the sum of ascending-score ranks of the positives,
    M the number of positives and N the number of negatives.

    Args:
        arr: optional list of (label, score) tuples with label in {0, 1};
            defaults to the module-level `org_arr` read from the input
            file (preserving the original call signature).

    Returns:
        The AUC as a float. It is also printed, as the original did.
    """
    data = org_arr if arr is None else arr
    rank = 0
    M = 0  # number of positives (label == 1)
    N = 0  # number of negatives (label == 0)
    ordered = sorted(data, key=lambda p: p[1])
    for idx, (label, _score) in enumerate(ordered):
        if label == 0:
            N = N + 1
        elif label == 1:
            M = M + 1
            rank = rank + idx + 1  # 1-based rank of this positive
    auc = (rank * 1.0 - M * (M + 1) / 2.0) / (M * N * 1.0)
    print(auc)
    return auc
|
18,634 | e101617b28f3025b9f3464592884e8646880f74f | import matplotlib.pyplot as plt
import pickle
from Experiment import *
import os
import statistics
import pandas as pd
import numpy as np
from tabulate import tabulate
class DataAnalytics:
    """Load pickled Experiment results and render fitness charts and tables.

    Each file in ``Experiment.DIR_NAME`` holds one pickled ``Experiment``
    followed by zero or more pickled dicts that are merged into its
    ``testResults`` until EOF.
    """
    def __init__(self):
        file_names = os.listdir(Experiment.DIR_NAME)
        data_folder = Path(Experiment.DIR_NAME)
        self.experiments = []
        for file_name in file_names:
            # Fix: the original shadowed the loop variable with the open
            # file handle and never closed it; use a context manager.
            with open(data_folder / file_name, "rb") as fh:
                experiment = pickle.load(fh)
                while True:
                    try:
                        experiment.testResults.update(pickle.load(fh))
                    except EOFError:
                        break
            self.experiments.append(experiment)
    def _filtered(self, constants):
        """Return experiments where ANY attribute in ``constants`` matches.

        NOTE(review): this is OR semantics, kept from the original loop —
        confirm that AND over all constants was not intended.
        """
        return [exp for exp in self.experiments
                if any(getattr(exp, key) == value
                       for key, value in constants.items())]
    def drawChart(self, property, constants, xLabel, title):
        """Plot best/average/worst fitness against ``property``.

        Filters experiments by ``constants``, writes a markdown table named
        after ``title``, saves the chart as charts/<title>.png and shows it.
        """
        AVERAGE = "average"
        BEST = "best"
        WORST = "worst"
        rows = [{property: getattr(exp, property),
                 AVERAGE: exp.calculateAverage(),
                 BEST: exp.calculateBest(),
                 WORST: exp.calculateWorst()}
                for exp in self._filtered(constants)]
        df = pd.DataFrame(rows, columns=rows[0].keys())
        self.createTable(df, title)
        df_avg = df.groupby(property)[AVERAGE].agg('mean').reset_index()
        df_best = df.groupby(property)[BEST].agg('min').reset_index()
        df_worst = df.groupby(property)[WORST].agg('max').reset_index()
        plt.plot(property, AVERAGE, data=df_avg, marker='', color='olive',
                 linewidth=2, linestyle='dashed', label="Average")
        plt.plot(property, BEST, data=df_best, marker='', color='blue',
                 linewidth=2, linestyle='solid', label="Best")
        plt.plot(property, WORST, data=df_worst, marker='', color='red',
                 linewidth=2, linestyle='solid', label="Worst")
        # Fix: legend and axis labels must be applied before savefig,
        # otherwise the saved PNG lacks them (the original saved first).
        plt.legend()
        plt.xlabel(xLabel)
        plt.ylabel('Fitness')
        img_folder = Path("charts")
        plt.savefig(img_folder / (title + ".png"))
        plt.show()
    def createTable(self, df, file_name):
        """Write ``df`` as a markdown (pipe) table to tables/<file_name>.txt."""
        data_folder = Path("tables")
        file_to_save = data_folder / (file_name + ".txt")
        # Fix: context manager closes the handle even if tabulate raises.
        with open(file_to_save, "w+") as f:
            f.write(tabulate(df, tablefmt="pipe", headers="keys"))
    def drawChartByMutationType(self, property, constants, xLabel, title):
        """Plot average fitness per mutation type against ``property``."""
        AVERAGE = "average"
        rows = [{property: getattr(exp, property),
                 AVERAGE: exp.calculateAverage(),
                 "mutation": str(exp.mutation)}
                for exp in self._filtered(constants)]
        df = pd.DataFrame(rows, columns=rows[0].keys())
        self.createTable(df, title)
        fig, ax = plt.subplots(figsize=(8, 6))
        # Fix: don't shadow the outer DataFrame inside the group loop.
        for label, group in df.groupby("mutation"):
            group.average.plot(kind="line", ax=ax, label=label)
        plt.legend()
        plt.xlabel(xLabel)
        plt.ylabel('Fitness')
        plt.show()
18,635 | b9c07bdaa224e2ab4a6d06ac5d7b95cb93422bb7 | """
Input functions for training and inference.
Author: Philipp Jund, 2018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from SpatialRelationCNN.model import utility
from SpatialRelationCNN.model.generator_factory import GeneratorFactory
import SpatialRelationCNN.model.augmentation as augment
import numpy as np
import tensorflow as tf
import tfquaternion as tfq
class InputLayer(object):
    """The input pipeline base class, from RelationDataset to projection."""
    def __init__(self, dataset, more_augmentation=False):
        """The input pipeline, from RelationDataset to projection.
        Args:
            dataset: A `RelationDataset` object.
            more_augmentation: `bool`, forwarded to `GeneratorFactory` to
                request additional data augmentation.
        """
        self.dataset = dataset
        self.generator_factory = GeneratorFactory(self.dataset,
                                                  more_augmentation)
        phases = ["train", "validation", "test"]
        # One scene-description generator and one iterator init op per phase.
        self.generators = {n: None for n in phases}
        self.iterator = None
        self.iterator_init_ops = {n: None for n in phases}
        self.clouds_tensor, self.cloud_slice_indices = \
            self.create_cloud_constants()
        # Placeholders are only fed in generalization mode; see
        # generalize_input_fn. Each scene holds 2 objects.
        self.obj_ids_pl = tf.placeholder(tf.int32, shape=(None, 2),
                                         name="obj_ids")
        self.translations_pl = tf.placeholder(tf.float32, shape=(None, 2, 3),
                                              name="translations")
        self.rotations_pl = tf.placeholder(tf.float32, shape=(None, 2, 4),
                                           name="rotations")
        self.rotations = None  # stores the resulting rotations ...
        self.translations = None  # ... and translations when generalizing
        self.translation_vars = []
        self.rotation_vars = []
    @utility.scope_wrapper
    def create_cloud_constants(self):
        """Create two `tf.constant`s of the obj point clouds and their ranges.
        The point clouds have differing numbers of points. To efficiently
        process them, all object point clouds are concatenated into one
        constant. To retrieve them afterwards, we create a second constant with
        shape (N+1), containing the start index for each point cloud with the
        length as an additional index. With this we can use slicing, which
        should be more efficient than using tf.where
        """
        np_clouds = [self.dataset.clouds[n] for n in self.dataset.cloud_names]
        # Create the slice indices as float32, as they'll only be used with
        # tf.gather which has no GPU kernel for integers.
        cloud_slice_indices = np.cumsum([0] + [len(c) for c in np_clouds],
                                        dtype=np.float32)
        tf_clouds = tf.constant(np.concatenate(np_clouds), dtype=tf.float32)
        return tf_clouds, cloud_slice_indices
    def switch_input(self, phase, sess):
        """Switch between test and training data.
        Args:
            phase: one of "train", "validation", "test".
            sess: the `tf.Session` used to run the iterator init op.
        """
        if phase in self.iterator_init_ops:
            print("Switching input to {}.".format(phase))
            sess.run(self.iterator_init_ops[phase])
        else:
            raise Exception("Invalid phase name, must be one of {}."
                            "".format(self.iterator_init_ops))
    def _create_tf_datasets(self, split, batch_size):
        """Helper function that creates the train and test tf.data.Dataset."""
        out_types = (tf.int32, tf.float32, tf.float32, tf.int32, tf.bool)
        # out_shapes has an additional batch dim (None) and 3 or 1 scenes.
        out_shapes = ((None, None, 2), (None, None, 2, 3), (None, None, 2, 4),
                      (None, None), (None,))
        self.iterator = tf.data.Iterator.from_structure(out_types, out_shapes)
        for p in ["train", "validation", "test"]:
            # generator factory throws if there's no validation data
            try:
                self.generators[p] = self.generator_factory.scene_desc_generator(split, p)
            except ValueError:
                continue
            # Peek at one generated element to determine the static shapes.
            out_shapes = tuple([np.array(x).shape for x in next(self.generators[p]())])
            d = tf.data.Dataset.from_generator(self.generators[p], out_types,
                                               out_shapes)
            # Evaluation phases always run with batch size 1.
            d = d.batch(batch_size if p == "train" else 1)
            # d = d.prefetch(3)
            self.iterator_init_ops[p] = self.iterator.make_initializer(d)
    @staticmethod
    def _repeat(a, repeats, batch_size, training_batch_size):
        """Repeat a[i] repeats[i] times."""
        # batch_size == 1 signals evaluation: exactly 2 objects per scene.
        return tf.cond(tf.equal(batch_size, 1),
                       lambda: utility.repeat(a, repeats, num_repeats=2),
                       lambda: utility.repeat(a, repeats, training_batch_size))
    @utility.scope_wrapper
    def _input_fn(self, obj_ids, translations, rotations, train_batch_size,
                  num_objs, do_augmentation):
        """The input function's part that is shared.
        This function creates the scene point clouds from scene descriptions.
        Returns: Two tf.Tensors, the first contains all points of the
            objects in the batch with shape (N, 3) and the second contains the
            corresponding segment ids, the shape is (N,).
        """
        batch_size = tf.shape(obj_ids)[0]
        # flatten all inputs
        obj_ids = tf.reshape(obj_ids, (-1,))
        translations = tf.reshape(translations, (-1, 3))
        rotations = tf.reshape(rotations, (-1, 4))
        clouds_num_points = (self.cloud_slice_indices[1:] -
                             self.cloud_slice_indices[:-1])
        # vector with the number of points of each cloud
        num_points = tf.gather(clouds_num_points, obj_ids)
        # vector with a range where each number i is num_points[i] repeated
        segment_ids = self._repeat(tf.range(tf.shape(num_points)[0]),
                                   tf.to_int32(num_points), batch_size,
                                   num_objs)
        segment_ids = tf.to_int32(segment_ids)
        # repeat translations[i] and rotations[i] num_points[i] times
        translations = tf.gather(translations, segment_ids)
        rotations = tf.gather(rotations, segment_ids)
        rotations = tfq.Quaternion(rotations)
        obj_ids = tf.gather(tf.to_float(obj_ids), segment_ids)
        # indices of points consist of the start index plus range(num_points)
        start = tf.gather(self.cloud_slice_indices, tf.to_int32(obj_ids))
        ranges = tf.cond(tf.equal(batch_size, 1),
                         lambda: tf.concat([tf.range(num_points[i])
                                            for i in range(2)], axis=0),
                         lambda: tf.concat([tf.range(num_points[i])
                                            for i in range(num_objs)], axis=0))
        point_ids = tf.to_int32(start + ranges)
        points = tf.gather(self.clouds_tensor, point_ids)
        # Rotate objects. Note that the quaternions are relative to the object
        # clouds' origins, so no centering using the mean is required.
        points = tfq.rotate_vector_by_quaternion(rotations, points, 2, 2)
        points = tf.squeeze(points) + translations
        # if we're training, randomly rotate around the z_axis
        if do_augmentation:
            points = augment.pointcloud(points, segment_ids, batch_size,
                                        train_batch_size)
        return points, tf.to_float(segment_ids)
    def dataset_input_fn(self, train_batch_size, split):
        """The train input function using the tf.data.Dataset API.
        Args:
            train_batch_size: `int`, the batch size. Test batch size is always
                one.
            split: `int` in the interval [1, 15], the index of the split.
        Returns:
            A tuple (points, segment_ids, labels, is_augmented).
        """
        self._create_tf_datasets(split, train_batch_size)
        next_el = self.iterator.get_next()
        obj_ids, translations, rotations, labels, is_augmented = next_el
        # num_objs = train_batch_size * 6: presumably 3 scenes x 2 objects
        # per training element — TODO confirm against the generator.
        points, segment_ids = self._input_fn(obj_ids, translations,
                                             rotations, train_batch_size,
                                             train_batch_size * 6, True)
        return (points, segment_ids, labels, is_augmented)
    def generalize_input_fn(self, trainable, disable_rotation=None):
        """Create the input function to use when running the generalization.
        This input function creates translation and rotation variables for
        each object, if trainable[i] is true or a constant if trainable[i]
        is false.
        Args:
            trainable: A list of `bool`s with one entry for each object that
                will be passed via self.obj_ids_pl.
                If trainable[i] is true, the translation and rotation for
                the i-th object in the batch will be trainable.
            disable_rotation: A list of `bool`s with one entry for each object
                that will be passed via self.obj_ids_pl. If trainable is set to
                true for this object and disable_rotation is set to true, only
                the translation of this object will be optimized
        """
        if self.translation_vars:
            raise ValueError("generalize_input_fn can only be called once per "
                             "input layer instance")
        for i, (t, no_rot) in enumerate(zip(trainable, disable_rotation)):
            # tf.Variable makes the offset trainable; tf.constant freezes it.
            tensor_t = tf.Variable if t else tf.constant
            self.translation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
                                               name="translation" + str(i))]
            if no_rot:
                tensor_t = tf.constant
            self.rotation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
                                            name="rotation" + str(i))]
        translation_delta = tf.reshape(self.translation_vars, (-1, 2, 3))
        rotation_delta = tf.reshape(self.rotation_vars, (-1, 2, 3))
        self.translations = self.translations_pl + translation_delta
        # don't optimize w of quaternion to prevent numerical instability
        rotation_delta = tf.pad(rotation_delta, [[0, 0], [0, 0], [1, 0]])
        self.rotations = self.rotations_pl + rotation_delta
        return self._input_fn(self.obj_ids_pl, self.translations,
                              self.rotations, None, len(trainable), False)
    def get_transform_vars(self):
        """Return all variables created to perform rotation and translation."""
        return [v for v in (self.rotation_vars + self.translation_vars)
                if isinstance(v, tf.Variable)]
    def reset_transform_vars(self, sess):
        """Reset translation and rotation to identity."""
        for v in self.get_transform_vars():
            sess.run(v.initializer)
|
18,636 | 2629ae62d8d2ea2233e8693d6a3b4802eb588932 | import pygame as pg
from main.objects.buttons import ButtonGetLevel, ButtonGetStartMenu
from main.window import clock, screen
from main.objects.group_sprites import all_sprites, offset_x_group
from main.delete_all_sprites import delete_all_sprites
from main.objects.scrollbar import Scrollbar
from main.objects.label import Label
import traceback
import os
class LevelMenu:
    """Level-selection screen: one button per file in the 'levels' folder,
    plus a back-to-start-menu button, a help label and a scrollbar."""
    def __init__(self):
        # Drop all sprites from the previous screen before building this one.
        delete_all_sprites()
        # One ButtonGetLevel per level file, laid out 60px apart.
        self.list_levels = [ButtonGetLevel(x * 60 + 300, 250, os.listdir('levels')[x],
                                           (all_sprites, offset_x_group)) for x in
                            range(len(os.listdir('levels')))]
        self.btn_get_initial_menu = ButtonGetStartMenu(300, 375, (all_sprites,))
        # Help text (Russian): "RMB - open statistics / LMB - start level /
        # mouse wheel - scroll through levels".
        self.labels = {'help': Label(all_sprites, (550, 540), (235, 100), background=(0, 198, 255),
                                     text='ПКМ - открытие статистики\n'
                                          'ЛКМ - запустить уровень\n'
                                          'Колёсико мыши - перемещение уровней')}
        self.scrollbar = Scrollbar(0, 0, (offset_x_group,), offset_x=True)
        # The menu loop starts immediately on construction.
        self.run()
    def run(self):
        """Main menu loop: draw sprites, dispatch events, then launch the
        screen selected by whichever button fired."""
        running = True
        while running:
            screen.fill((0, 198, 255))
            # clock.tick(120)
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    quit()
                all_sprites.update(event)
                self.scrollbar.update(event)
            all_sprites.draw(screen)
            pg.display.flip()
            # NOTE(review): `event` below reuses the pygame loop variable.
            # When a button fired, it is overwritten with that button's
            # event tuple (fired, action, payload); otherwise it keeps the
            # last pygame event (or is unbound on a first, event-free pass).
            if self.btn_get_initial_menu.event[0]:
                event = self.btn_get_initial_menu.event
                running = False
            for btn_lvl in self.list_levels:
                if btn_lvl.event[0]:
                    event = btn_lvl.event
                    running = False
            # NOTE(review): indexing a plain pygame event (or an unbound
            # name) raises here and is swallowed by the broad except below,
            # so this effectively only acts once a button supplied a tuple
            # — confirm this is intentional.
            try:
                if event[1] == 'initial_menu':
                    from main.initial_menu import InitialMenu
                    InitialMenu()
                elif event[1] == 'play':
                    from main.playing import Play
                    print(event[2])
                    Play(event[2])
                elif event[1] == 'statistics':
                    from main.statistics_menu import StatisticsMenu
                    print(event)
                    StatisticsMenu(event[2])
            except BaseException:
                # Original message is Russian for "Error:"; kept verbatim.
                print('Ошибка:\n', traceback.format_exc())
|
18,637 | 06d05b3c2420697e1837dbdf23005d0a3662a907 | # -*- coding: future_fstrings -*-
import os
import requests
import urllib3
import warnings
from configparser import ConfigParser
from datetime import datetime
##
## Omada API calls expect a timestamp in milliseconds.
##
def timestamp():
    """Return the current UTC time in milliseconds since the epoch.

    Fix: the original used ``datetime.utcnow().timestamp()``, which
    interprets the naive UTC datetime as *local* time, so the result was
    off by the machine's UTC offset. Using an aware datetime is correct.
    """
    from datetime import timezone  # local import: module only imports datetime
    return int(datetime.now(timezone.utc).timestamp() * 1000)
##
## Display errorCode and optional message returned from Omada API.
##
class OmadaError(Exception):
    """Raised when the Omada API responds with a non-zero errorCode.

    Attributes:
        errorCode: the integer code from the response (0 if absent).
        msg: the quoted message string from the response, or None.
    """
    def __init__(self, json):
        if json is None:
            raise TypeError('json cannot be None')
        self.errorCode = json.get('errorCode', 0)
        # Keep the original quoting behavior for the message, if present.
        self.msg = '"' + json['msg'] + '"' if 'msg' in json else None
    def __str__(self):
        return f"errorCode={self.errorCode}, msg={self.msg}"
##
## The main Omada API class.
##
class Omada:
##
## Initialize a new Omada API instance.
##
def __init__(self, config='omada.cfg', baseurl=None, site='Default', verify=True, warnings=True):
self.config = None
self.token = None
if baseurl is not None:
# use the provided configuration
self.baseurl = baseurl
self.site = site
self.verify = verify
self.warnings = warnings
elif os.path.isfile( config ):
# read from configuration file
self.config = ConfigParser()
try:
self.config.read( config )
self.baseurl = self.config['omada'].get('baseurl')
self.site = self.config['omada'].get('site', 'Default')
self.verify = self.config['omada'].getboolean('verify', True)
self.warnings = self.config['omada'].getboolean('warnings', True)
except:
raise
else:
# could not find configuration
raise FileNotFoundError(config)
# create a new session to hold cookies
self.session = requests.Session()
self.session.verify = self.verify
# hide warnings about insecure SSL requests
if self.verify == False and self.warnings == False:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
##
## Current API path.
##
ApiPath = '/api/v2'
##
## Build a URL for the provided path.
##
def url_for(self, path):
return self.baseurl + Omada.ApiPath + path
##
## Internal helper: perform an HTTP request against the controller API
## and unwrap the standard {errorCode, result} response envelope.
## Factored out of get/post/patch, which were three identical copies.
##
def _request(self, method, path, params=None, data=None, json=None):
    # attach the login token (and a cache-busting timestamp) by default
    if params is None and self.token is not None:
        params = {'token':self.token,'_':timestamp()}
    response = self.session.request( method, self.url_for(path), params=params, data=data, json=json )
    response.raise_for_status()
    # note: deliberately not reusing the name ``json`` (the request body
    # parameter) for the response body, unlike the original
    body = response.json()
    if body['errorCode'] == 0:
        # 'result' is absent for calls that return nothing
        return body.get('result')
    raise OmadaError(body)
##
## Perform a GET request and return the result.
##
def get(self, path, params=None, data=None, json=None):
    return self._request( 'GET', path, params=params, data=data, json=json )
##
## Perform a POST request and return the result.
##
def post(self, path, params=None, data=None, json=None):
    return self._request( 'POST', path, params=params, data=data, json=json )
##
## Perform a PATCH request and return the result.
##
def patch(self, path, params=None, data=None, json=None):
    return self._request( 'PATCH', path, params=params, data=data, json=json )
##
## Log in with the provided credentials and return the result.
##
def login(self, username=None, password=None):
    """Authenticate against the controller and store the session token.

    When both credentials are omitted they are read from the [omada]
    section of the configuration file supplied at construction time.

    Raises:
        TypeError: no credentials given and no config file was loaded.
        KeyError: the loaded config has no [omada] section.
        OmadaError: the controller rejected the login.
    """
    if username is None and password is None:
        if self.config is None:
            raise TypeError('username and password cannot be None')
        # errors propagate directly (the original wrapped this in a
        # no-op ``try/except: raise`` which has been removed)
        username = self.config['omada'].get('username')
        password = self.config['omada'].get('password')
    result = self.post( '/login', json={'username':username,'password':password} )
    self.token = result['token']
    return result
##
## Log out of the current session. Return value is always None.
##
def logout(self):
    """End the current controller session (the server returns no result)."""
    return self.post( '/logout' )
##
## Returns the current login status.
##
def getLoginStatus(self):
    """Return the controller's view of the current login status."""
    return self.get( '/loginStatus' )
##
## Returns the current user information.
##
def getCurrentUser(self):
    """Return information about the currently logged-in user."""
    return self.get( '/users/current' )
## Group Types
IPGroup = 0 # "IP Group"
PortGroup = 1 # "IP-Port Group"
MACGroup = 2 # "MAC Group"
##
## Returns the list of groups for the given site.
##
def getSiteGroups(self, site=None, type=None):
    """Fetch profile groups for a site, optionally filtered by group type."""
    site = self.site if site is None else site
    path = f'/sites/{site}/setting/profiles/groups'
    if type is not None:
        path = f'{path}/{type}'
    return self.get(path)['data']
##
## Returns the list of portal candidates for the given site.
##
## This is the "SSID & Network" list on Settings > Authentication > Portal > Basic Info.
##
def getPortalCandidates(self, site=None):
    """Return the portal candidate networks for the site."""
    target = self.site if site is None else site
    return self.get(f'/sites/{target}/setting/portal/candidates')
##
## Returns the list of RADIUS profiles for the given site.
##
def getRadiusProfiles(self, site=None):
    """Return the RADIUS profiles configured on the site."""
    target = self.site if site is None else site
    return self.get(f'/sites/{target}/setting/radiusProfiles')
##
## Returns the list of scenarios.
##
def getScenarios(self):
    """Return the list of controller scenarios."""
    return self.get('/scenarios')
##
## Returns the list of devices for given site.
##
def getSiteDevices(self, site=None):
    """Return all devices registered to the site."""
    target = self.site if site is None else site
    return self.get(f'/sites/{target}/devices')
##
## Returns the list of settings for the given site.
##
def getSiteSettings(self, site=None):
    """Return the site settings dict, with 'beaconControl' stripped out."""
    target = self.site if site is None else site
    result = self.get(f'/sites/{target}/setting')
    # work-around for error when sending PATCH for site settings (see below)
    if 'beaconControl' in result:
        if self.warnings:
            warnings.warn( "settings['beaconControl'] was removed as it causes an error", stacklevel=2 )
        del result['beaconControl']
    return result
##
## Push back the settings for the site.
##
def setSiteSettings(self, settings, site=None):
    """PATCH the given settings dict to the site.

    A shallow copy of ``settings`` is sent, so the caller's dict is never
    mutated (the original deleted 'beaconControl' from the caller's dict).
    'beaconControl' is dropped from the payload because the controller
    rejects PATCH requests that include it (returns {'errorCode': -1001}).
    """
    if site is None:
        site = self.site
    payload = dict(settings)
    if 'beaconControl' in payload:
        if self.warnings:
            warnings.warn( "settings['beaconControl'] was removed as it causes an error", stacklevel=2 )
        del payload['beaconControl']
    return self.patch( f'/sites/{site}/setting', json=payload )
##
## Returns the list of timerange profiles for the given site.
##
def getTimeRanges(self, site=None):
    """Return time-range profiles for the site."""
    target = self.site if site is None else site
    return self.get(f'/sites/{target}/setting/profiles/timeranges')
##
## Returns the list of wireless network groups.
##
## This is the "WLAN Group" list on Settings > Wireless Networks.
##
def getWirelessGroups(self, site=None):
    """Return the WLAN groups configured on the site."""
    target = self.site if site is None else site
    return self.get(f'/sites/{target}/setting/wlans')['data']
##
## Returns the list of wireless networks for the given group.
##
## This is the main SSID list on Settings > Wireless Networks.
##
def getWirelessNetworks(self, group, site=None):
    """Return the SSID entries belonging to the given WLAN group."""
    target = self.site if site is None else site
    return self.get(f'/sites/{target}/setting/wlans/{group}/ssids')['data']
|
18,638 | deff41c933ba6d81b308f2b9895c51947182ded7 | # -*- coding: utf-8 -*-
import os
import cv2
from shutil import copyfile
import numpy as np
class Evaluator:
    """Evaluate per-crop anomaly scores of an image against a ground-truth mask.

    ``myList`` holds [score, filename] pairs; filenames encode crop positions
    as underscore-separated fields (see sortHistByArea() and getPos()).
    The ground-truth mask ('gt.jpg') is binary: 255 = positive, 0 = negative.
    """
    def __init__(self, myList, name, test_path, save_dir, fine_size, useHeadCrop = True):
        # Aggregate duplicate positions by running average (True) or index sum (False)
        self.USE_AVERAGE = True
        # how many top/bottom crops to highlight in the output image
        self.HIST_COUNT_COUNT = 5
        # crop side length in pixels
        self.fine_size = fine_size
        # how many best/worst crops to copy out in saveFirstAndLast()
        self.SAVE_FILE_COUNT = 30
        self.save_dir = save_dir
        self.myList = myList
        self.name = name
        self.test_path = test_path
        # source image the crops came from, and its ground-truth mask
        self.origin_file = 'train.jpg'
        self.gt_file = 'gt.jpg'
        # True: rank from the head of the sorted list; False: from the tail
        self.useHeadCrop = useHeadCrop
    def sortList(self, item):
        """Sort key: the score stored in item[0]."""
        return item[0]
    def saveList(self, llist, dest):
        """Copy every file referenced in llist from test_path into dest."""
        for item in llist:
            filename = item[1]
            copyfile(os.path.join(self.test_path, filename), os.path.join(dest, filename))
    def saveFirstAndLast(self):
        """Copy the SAVE_FILE_COUNT lowest-/highest-scored crops into
        '<name>_top' / '<name>_last' subfolders of save_dir."""
        print("saveing " + self.name)
        self.myList.sort(key=self.sortList)
        os.mkdir(os.path.join(self.save_dir, self.name + '_top'))
        self.saveList(self.myList[:self.SAVE_FILE_COUNT], os.path.join(self.save_dir, self.name + '_top'))
        os.mkdir(os.path.join(self.save_dir, self.name + '_last'))
        self.saveList(self.myList[-self.SAVE_FILE_COUNT:], os.path.join(self.save_dir, self.name + '_last'))
    def sortHistByArea(self):
        """Aggregate duplicate crop positions and return a score-sorted list.

        Entry layout depends on USE_AVERAGE:
          True  -> [avg_score, 'x_y', index_sum]
          False -> [index_sum, 'x_y', avg_score]
        Crops that would extend past the source image are skipped.
        """
        countList = []
        img = cv2.imread(self.origin_file, cv2.IMREAD_COLOR)
        for index, item in enumerate(self.myList):
            value = item[0]
            file = item[1]
            posList = file.split('_')
            pos = posList[1] + '_' + posList[2]
            if int(posList[1]) + self.fine_size > img.shape[1] or int(posList[2]) + self.fine_size > img.shape[0]:
                continue
            # linear scan for an existing entry with the same position
            found = -1
            for j, c in enumerate(countList):
                if c[1] == pos:
                    found = j
                    break
            if found > -1:
                if self.USE_AVERAGE:
                    countList[found][2] += index
                    # NOTE(review): pairwise running average, not a true mean
                    # over all duplicates — confirm intended
                    countList[found][0] = (value + countList[found][0]) / 2
                else: # use index range
                    countList[found][0] += index
                    countList[found][2] = (value + countList[found][2]) / 2
            else:
                if self.USE_AVERAGE:
                    countList.append([value, pos, index])
                else:
                    countList.append([index, pos, value])
        countList.sort(key=self.sortList)
        return countList
    def saveHistCountImage(self, countList):
        """Draw the HIST_COUNT_COUNT top (head) or bottom (tail) crops on the
        source image, annotated with their rank and score, into png/<name>.png."""
        img = cv2.imread(self.origin_file, cv2.IMREAD_COLOR)
        for i in range(self.HIST_COUNT_COUNT):
            if self.useHeadCrop:
                # red boxes for head-of-list crops
                head = countList[i][1].split('_')
                x = int(head[0])
                y = int(head[1])
                cv2.rectangle(img, (x, y), (x+self.fine_size, y+self.fine_size), (0, 0, 255), 3)
                if self.USE_AVERAGE:
                    cv2.putText(img, "{}:{:.5f}".format(i, countList[i][0]), (x, y+int(self.fine_size/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
                else:
                    cv2.putText(img, "{}:{:.5f}".format(i, countList[i][2]), (x, y+int(self.fine_size/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
            else:
                # blue boxes for tail-of-list crops
                tail = countList[-i-1][1].split('_')
                x = int(tail[0])
                y = int(tail[1])
                cv2.rectangle(img, (x, y), (x+self.fine_size, y+self.fine_size), (255, 0, 0), 3)
                if self.USE_AVERAGE:
                    cv2.putText(img, "{}:{:.5f}".format(i, countList[-i-1][0]), (x, y+int(self.fine_size/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
                else:
                    cv2.putText(img, "{}:{:.5f}".format(i, countList[-i-1][2]), (x, y+int(self.fine_size/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
        if not os.path.isdir('png'):
            os.mkdir('png')
        cv2.imwrite('png/' + self.name + '.png', img)
    def saveHistCount(self):
        """Aggregate positions and render the annotated crop image."""
        print("saveing " + self.name + " count list")
        countList = self.sortHistByArea()
        self.saveHistCountImage(countList)
    def getCurve(self, gtImg, rect_TP_FP):
        """Build an ROC curve ([FPR, TPR] points) from cumulative per-crop
        TP/FP counts, appending [1, 1] if the curve does not reach it."""
        curve = []
        FN_ALL = np.count_nonzero(gtImg == 255)
        TN_ALL = np.count_nonzero(gtImg == 0)
        TP_SUM = 0
        FP_SUM = 0
        for item in rect_TP_FP:
            TP_SUM += item[0]
            FP_SUM += item[1]
            FN = FN_ALL - TP_SUM
            TN = TN_ALL - FP_SUM
            # TP_SUM + FN == FN_ALL and FP_SUM + TN == TN_ALL by construction
            TPR = TP_SUM / (TP_SUM + FN)
            FPR = FP_SUM / (FP_SUM + TN)
            curve.append([FPR, TPR])
        lastPt = curve[-1]
        if lastPt[0] < 1 or lastPt[1] < 1:
            curve.append([1,1])
        return curve
    def getROC(self, countList, gtImg):
        """Count TP/FP pixels per ranked crop and convert them to an ROC curve."""
        # explain
        # https://www.cnblogs.com/dlml/p/4403482.html
        rect_TP_FP = []
        for i in range(len(countList)):
            rect = None
            if self.useHeadCrop:
                rect = countList[i][1].split('_')
            else:
                rect = countList[-i-1][1].split('_')
            x = int(rect[0])
            y = int(rect[1])
            TP = np.count_nonzero(gtImg[y:y+self.fine_size, x:x+self.fine_size] == 255)
            FP = np.count_nonzero(gtImg[y:y+self.fine_size, x:x+self.fine_size] == 0)
            rect_TP_FP.append([TP, FP])
        return self.getCurve(gtImg, rect_TP_FP)
    def writeCurveFile(self, curve, path):
        """Write the curve points to <path>/<name>_ROC_curve.csv ('x, y' header)."""
        if not os.path.isdir(path):
            os.mkdir(path)
        out_file = path + '/' + self.name + '_ROC_curve.csv'
        outFile = open(out_file, 'w')
        outFile.write('x, y\n')
        for i in range(len(curve)):
            outFile.write(str(curve[i][0]) + ', ' + str(curve[i][1]) + '\n')
        outFile.close()
    def ROCCurve(self):
        """Compute and export the aggregated-position ROC curve to csv/."""
        print("calculating " + self.name + " ROC curve")
        countList = self.sortHistByArea()
        gtImg = cv2.imread(self.gt_file, cv2.IMREAD_GRAYSCALE)
        curve = self.getROC(countList, gtImg)
        self.writeCurveFile(curve, 'csv')
    def getPos(self, file):
        """Decode a filename of form '<p>_<x>_<y>_<x1>_<y1>_<width>.<ext>' into
        the absolute crop position (x+x1, y+y1) and its width."""
        posList = file.split('_')
        x = int(posList[1])
        y = int(posList[2])
        x1 = int(posList[3])
        y1 = int(posList[4])
        width = int(posList[5].split('.')[0])
        x += x1
        y += y1
        return x, y, width
    def testRocCurve(self, reverse):
        """Compute an ROC curve over the raw (non-aggregated) crop list,
        sorted by score (descending if reverse), into test_csv/."""
        print("calculating " + self.name + " test ROC curve")
        gtImg = cv2.imread(self.gt_file, cv2.IMREAD_GRAYSCALE)
        self.myList.sort(key=self.sortList, reverse=reverse)
        rect_TP_FP = []
        for item in self.myList:
            value = item[0]
            file = item[1]
            x, y, width = self.getPos(file)
            TP = np.count_nonzero(gtImg[y:y+width, x:x+width] == 255)
            FP = np.count_nonzero(gtImg[y:y+width, x:x+width] == 0)
            rect_TP_FP.append([TP, FP])
        curve = self.getCurve(gtImg, rect_TP_FP)
        self.writeCurveFile(curve, 'test_csv')
    def drawRange(self, percent, reverse):
        """Draw red boxes for the first <percent> fraction of score-sorted crops
        onto the source image, saved as rangePng/<name>_<percent>.png."""
        print("drawing " + self.name + " range image")
        img = cv2.imread(self.origin_file)
        self.myList.sort(key=self.sortList, reverse=reverse)
        for index in range(int(len(self.myList) * percent)):
            item = self.myList[index]
            x, y, width = self.getPos(item[1])
            cv2.rectangle(img, (x, y), (x+width, y+width), (0, 0, 255), 3)
        if not os.path.isdir('rangePng'):
            os.mkdir('rangePng')
        cv2.imwrite('rangePng/' + self.name + '_' + str(percent) + '.png', img)
|
18,639 | 65a9292507c6362808247435b8ecce3ae72a97e7 | from unittest import TestCase
import tensorflow as tf
from tensorflow.python.ops import data_flow_ops
class TfQueueTest(TestCase):
    """Smoke test for feeding (image-path, label) triples through a TF FIFOQueue."""
    def test_tf_queue(self):
        """Enqueue two rows of 3 paths / 3 labels, then dequeue one batch back.

        Fixed from Python-2 ``print`` statements (a SyntaxError on Python 3)
        to ``print(...)`` calls; earlier commented-out queue experiments removed.
        """
        labels_placeholder = tf.placeholder(tf.int64, shape=(None, 3), name='labels')
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 3), name='image_paths')
        # queue holds fixed-size triples of paths and labels
        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(3,), (3,)],
                                              shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder])
        dequeue_op = input_queue.dequeue_many(1)
        # build a few extra dequeue endpoints, as a preprocessing pipeline would
        nrof_preprocess_threads = 4
        dic = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            dic.append([filenames, label])
        image_paths_array = [['path1', 'path2', 'path3'], ['path4', 'path5', 'path6']]
        labels_array = [[1, 2, 3], [4, 5, 6]]
        with tf.Session() as sess:
            sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})
            print(sess.run(dequeue_op))
18,640 | 01658706fe5fc797428ee1315ad2ae07e2afe377 | '''
New Agent Simulator Environment
Utilizes matrix operations to speed up the simulator's performance
Built upon the fundamental version of Qingbiao's simulator
-------------------------
Ver. 1.0 beta
Weizhe Lin @ 12/05/2020
Ver. 2.0
Weizhe Lin @ 11/08/2020
-------------------------
'''
import torch
import os
import sys
import yaml
import numpy as np
import random
import time
import scipy.io as sio
import shutil
random.seed(1337)
from torch import nn
import utils.graphUtils.graphTools as graph
from scipy.spatial.distance import squareform, pdist
# from dataloader.statetransformer import AgentState
# from dataloader.statetransformer_localGuidance import AgentState
# from dataloader.statetransformer_localGuidance_SDObs import AgentState
# from dataloader.statetransformer_localGuidance_SemiLocal import AgentState
# from dataloader.statetransformer_globalGuidance import AgentState
from dataloader.statetransformer_Guidance import AgentState
# from onlineExpert.ECBS_onlineExpert import ComputeCBSSolution
class multiRobotSimNew:
def __init__(self, config):
    '''
    Simulator init method.
    Args:
        config: global config for simulation (num_agents, dynamic_commR,
            failCases_dir, ... are read here and in setup())
    '''
    print('*****New Simulator Enabled*****')
    self.config = config
    # Init AgentState
    self.AgentState = AgentState(self.config)
    # Per-define directions (grid deltas as (row, col))
    self.up = np.array([-1, 0])
    self.down = np.array([1, 0])
    self.left = np.array([0, -1])
    self.right = np.array([0, 1])
    self.stop = np.array([0, 0])
    # Action-key encoding used to decode network/expert output into moves
    # (see getPathTarget() and move()): 0=up, 1=left, 2=down, 3=right, 4=stop
    self.up_keyValue = 0
    self.down_keyValue = 2
    self.left_keyValue = 1
    self.right_keyValue = 3
    self.stop_keyValue = 4
    # Runtime state, populated by setup()
    self.current_positions = None
    self.start_positions = None
    self.goal_positions = None
    self.obstacle_positions = None
    self.num_obstacle = None
    self.reach_goal = None
    self.first_move = None
    self.List_MultiAgent_ActionVec_target = None
    self.store_MultiAgent = None
    self.channel_map = None
    self.size_map = None
    self.maxstep = None
    self.posObstacle = None
    self.numObstacle = None
    self.posStart = None
    self.posGoal = None
    self.currentState_predict = None
    # Metrics: expert target vs. predicted rollout
    self.makespanTarget = None
    self.flowtimeTarget = None
    self.makespanPredict = None
    self.flowtimePredict = None
    self.count_reachgoal = None
    self.count_reachgoalTarget = None
    self.fun_Softmax = None
    self.zeroTolerance = 1e-9
    # Link function self.getAdjacencyMatrix
    # NOTE(review): computeAdjacencyMatrix* are not defined in this chunk —
    # presumably provided elsewhere on this class; verify.
    if self.config.dynamic_commR:
        # comm radius that ensure initial graph connected
        print("run on multirobotsim (radius dynamic) with collision shielding")
        self.getAdjacencyMatrix = self.computeAdjacencyMatrix
    else:
        # comm radius fixed
        print("run on multirobotsim (radius fixed) with collision shielding")
        self.getAdjacencyMatrix = self.computeAdjacencyMatrix_fixedCommRadius
    # directories used by the online-expert failure-case pipeline
    self.failureCases_input = self.config.failCases_dir + 'input/'
    self.dir_sol = os.path.join(self.config.failCases_dir, "output_ECBS/")
def setup(self, loadInput, loadTarget, case_config, tensor_map, ID_dataset, mode):
    '''
    Setup of environment, called before actual use.
    Args:
        loadInput: agent initial/goal positions
        loadTarget: computed expert paths of all agents
        case_config: contains information of a case (ID_MAP, ID_case, makespanTarget)
        tensor_map: information of the map including obstacles
        ID_dataset: dataset id
        mode: - test_trainingSet using softmax for action decoding
              - otherwise using exp_multinorm for action decoding
    Returns:
    '''
    # makespanTarget is total steps of expert solutions
    ID_MAP, ID_case, makespanTarget = case_config
    # Here defines the action decoding function used in this simulator
    if self.config.default_actionSelect:
        if mode == 'test_trainingSet':
            self.convectToActionKey = self.convectToActionKey_softmax
        else:
            self.convectToActionKey = self.convectToActionKey_exp_multinorm
    else:
        if self.config.action_select == 'soft_max':
            self.convectToActionKey = self.convectToActionKey_softmax
        elif self.config.action_select == 'sum_multinorm':
            self.convectToActionKey = self.convectToActionKey_sum_multinorm
        elif self.config.action_select == 'exp_multinorm':
            self.convectToActionKey = self.convectToActionKey_exp_multinorm
    self.ID_MAP = int(ID_MAP)
    self.ID_case = int(ID_case)
    # self.fun_Softmax = nn.Softmax(dim=-1)
    self.fun_Softmax = nn.LogSoftmax(dim=-1)
    self.ID_dataset = ID_dataset
    # per-step recordings for visualisation/export
    self.store_GSO = []
    self.store_attentionGSO = []
    self.store_localPath = []
    self.store_communication_radius = []
    self.status_MultiAgent = {}
    # reorder the expert target so indexing is [agent, step, action, ...]
    target = loadTarget.permute(1, 2, 3, 0)
    self.List_MultiAgent_ActionVec_target = target[:, :, :, 0]
    # self.List_MultiAgent_ActionVec_target = target[:, :, 0]
    # Read map
    self.channel_map = tensor_map[0] # setupState[:, :, 0, 0, 0]
    self.AgentState.setmap(self.channel_map)
    # Read obstacles
    # NOTE(review): self.get_pos / self.findpos are defined outside this
    # chunk — presumably map-to-coordinate helpers; verify.
    self.obstacle_positions = self.get_pos(self.channel_map)
    self.num_obstacle = self.obstacle_positions.shape[0]
    # Save wall info into a dict for O(1) collision lookups
    self.wall_dict = {}
    for i in range(self.num_obstacle):
        tuple_pos = tuple(self.obstacle_positions[i])
        self.wall_dict[tuple_pos] = i
    # print("obstacle_positions shape: ", self.obstacle_positions.shape)
    self.size_map = self.channel_map.shape
    # Read and init agent positions
    self.start_positions = loadInput[0, 1, :, :].cpu().numpy()
    self.current_positions = self.start_positions.copy()
    # Read goal positions
    self.goal_positions = loadInput[0, 0, :, :].cpu().numpy()
    # save paths during the simulation
    self.path_list = []
    self.path_list.append(self.current_positions.copy())
    self.path_matrix = None
    # print('position vector:', self.start_positions.shape, self.goal_positions.shape)
    # per-agent progress trackers (0 means "not yet happened")
    self.reach_goal = np.zeros(self.config.num_agents, )
    self.first_move = np.zeros(self.config.num_agents, )
    self.end_step = np.zeros(self.config.num_agents, )
    self.expert_first_move = np.zeros(self.config.num_agents, )
    self.expert_end_step = np.zeros(self.config.num_agents, )
    self.expert_path_list = []
    self.expert_path_matrix = None
    self.posObstacle = self.findpos(self.channel_map).to(self.config.device)
    self.numObstacle = self.posObstacle.shape[0]
    self.size_map = self.channel_map.shape
    # larger agent counts get a fixed (smaller) timeout multiplier
    if self.config.num_agents >= 20:
        self.rate_maxstep = 3
    else:
        self.rate_maxstep = self.config.rate_maxstep
    # Calculate maximum allowed steps in this simulation, otherwise timeout
    self.maxstep = int(makespanTarget.type(torch.int32) * self.rate_maxstep)
    self.check_predictCollsion = False
    self.check_moveCollision = True
    self.check_predictEdgeCollsion = [False] * self.config.num_agents
    self.count_reachgoal = [False] * self.config.num_agents
    self.count_reachgoalTarget = [False] * self.config.num_agents
    self.allReachGoal_Target = False
    self.makespanTarget = 0
    self.flowtimeTarget = 0
    # pessimistic defaults, overwritten when the rollout finishes
    self.makespanPredict = self.maxstep
    self.flowtimePredict = self.maxstep * self.config.num_agents # 0
    # Process expert solution for metrics comparison
    self.getPathTarget()
def getPathTarget(self):
    '''
    Process the expert solution.
    Read the information including flowtime, makespan and all steps from
    the expert solution, replaying its one-hot action sequence with
    vectorised numpy operations.
    # todo check the length for ground truth, out of index
    # TODO: move this function to data transformer
    Returns:
    '''
    current_pos = self.start_positions.copy()
    self.expert_path_list.append(current_pos.copy())
    for current_step in range(self.List_MultiAgent_ActionVec_target.shape[1]):
        # Following each step in the solution to combine information
        # Lin: updated to use matrix operations for speedup
        expert_output = self.List_MultiAgent_ActionVec_target[:, current_step, :].cpu().numpy()
        # argmax over the action axis turns one-hot rows into action keys
        expert_actions = np.argmax(expert_output, axis=1)
        # print(expert_actions)
        expert_moves = np.zeros((self.config.num_agents, 2))
        expert_moves[expert_actions == self.up_keyValue, :] = self.up
        expert_moves[expert_actions == self.left_keyValue, :] = self.left
        expert_moves[expert_actions == self.down_keyValue, :] = self.down
        expert_moves[expert_actions == self.right_keyValue, :] = self.right
        expert_moves[expert_actions == self.stop_keyValue, :] = self.stop
        # update first step to move for each agent (recorded once, 1-based)
        self.expert_first_move[
            (expert_actions != self.stop_keyValue) & (self.expert_first_move == 0)] = current_step + 1
        # update expert moves
        current_pos += expert_moves
        # Manhattan distance to goal; 0 means the agent arrived
        current_distance = np.sum(np.abs(current_pos - self.goal_positions), axis=1)
        # print(current_distance)
        # Update end_step (recorded once, 1-based)
        self.expert_end_step[(current_distance == 0) & (self.expert_end_step == 0)] = current_step + 1
        # save current step to path matrix
        self.expert_path_list.append(current_pos.copy())
    self.expert_path_matrix = np.array(self.expert_path_list)
    # expert action length: steps each agent was actively moving
    self.expert_action_length = self.expert_end_step - self.expert_first_move + 1
    # calculate flowtime (sum over all agents)
    self.flowtimeTarget = np.sum(self.expert_action_length)
    # maximum makespan
    self.makespanTarget = np.max(self.expert_end_step) - np.min(self.expert_first_move) + 1
def getCurrentState(self, return_GPos=False):
    '''
    Build the network input tensor for the current agent/goal positions.
    Also records each agent's field-of-view path for later visualisation.
    Args:
        return_GPos: additionally return the agent positions tensor
            (current positions with a leading batch dimension)
    Returns:
        tensor_currentState[, positions tensor when return_GPos is True]
    '''
    store_goalAgents = torch.FloatTensor(self.goal_positions)
    store_stateAgents = torch.FloatTensor(self.current_positions)
    tensor_currentState = self.AgentState.toInputTensor(store_goalAgents, store_stateAgents)
    # add batch dimension
    tensor_currentState = tensor_currentState.unsqueeze(0)
    agents_localPath = self.AgentState.get_localPath()
    self.store_localPath.append(agents_localPath)
    if return_GPos:
        return tensor_currentState, store_stateAgents.unsqueeze(0)
    else:
        return tensor_currentState
def getGSO(self, step):
    '''
    Compute GSO (adjacency matrix) of the current agent graph,
    physically truncated according to radius of communication.
    Args:
        step: current step index; at step 0 the communication radius is
            (re)initialised via initCommunicationRadius()
    Returns:
        GSO_tensor: the tensor of the current GSO
    '''
    store_PosAgents = self.current_positions[None, :]  # Add new axis to fit input
    if step == 0:
        # NOTE(review): initCommunicationRadius is defined outside this chunk
        self.initCommunicationRadius()
        # print("{} - Step-{} - initCommunication Radius:{}".format(self.ID_dataset, step, self.communicationRadius))
    # comm radius fixed
    # GSO, communicationRadius, graphConnected = self.computeAdjacencyMatrix_fixedCommRadius(step, store_PosAgents, self.communicationRadius)
    # comm radius that ensure initial graph connected
    # self.getAdjacencyMatrix is bound in __init__ depending on config.dynamic_commR
    GSO, communicationRadius, graphConnected = self.getAdjacencyMatrix(step, store_PosAgents,
                                                                       self.communicationRadius)
    GSO_tensor = torch.from_numpy(GSO)
    # keep per-step history for export in save_success_cases()
    self.store_GSO.append(GSO)
    self.store_communication_radius.append(communicationRadius)
    return GSO_tensor
def record_attentionGSO(self, attentionGSO):
    '''
    Store one attention-weighted GSO snapshot so the model's attention
    graphs can be exported for visualisation later.
    Args:
        attentionGSO: tensor of GSO after the GAT attention module (values 0~1)
    Returns:
    '''
    self.store_attentionGSO += [attentionGSO]
def check_collision(self, current_pos, move):
    '''
    This function checks collisions, and disable illegal movements of agents if needed
    (collision shielding). Processing stages:
      1) cancel moves that leave the map,
      2) cancel symmetric face-to-face swaps (half-step trick),
      3) cancel moves into walls,
      4) resolve vertex conflicts (one winner per cell),
      5) transitively reverse any move whose target cell was re-occupied
         by an agent forced to stay put.
    Args:
        current_pos: current position
        move: move matrix (all agents' proposed moves)
    Returns:
        new_move: valid move without collision
        out_boundary: matrix of whether the agent in place goes out of boundary
        move_to_wall: ids of moving to walls
        collide_agents: ids of collided agents (earlier mover has advantages to move)
        collide_in_move_agents: ids of face-to-face collision (both stop)
    '''
    map_width = self.size_map[0]
    map_height = self.size_map[1]
    # Avoid Memory invasion: never mutate the caller's move array
    move = move.copy()
    new_pos = current_pos + move
    # Stage 1: stop agents running out of the arena
    out_boundary = (new_pos[:, 0] >= map_width) | (new_pos[:, 1] >= map_height) | (new_pos[:, 0] < 0) | (
            new_pos[:, 1] < 0)
    move[out_boundary == True] = self.stop
    # Stage 2: add 0.5 step to detect face-to-face collision — two agents
    # swapping cells meet at the same midpoint, so a midpoint clash
    # identifies the swap and both agents are stopped
    new_pos = current_pos + move / 2
    check_dict = {}
    collide_in_move_agents = []
    for i in range(self.config.num_agents):
        tuple_pos = tuple(new_pos[i])
        if tuple_pos in check_dict.keys():
            j = check_dict[tuple_pos]
            collide_in_move_agents += [i, j]
            move[i] = self.stop
            move[j] = self.stop
        check_dict[tuple_pos] = i
    # Update new proposed move
    new_pos = current_pos + move
    need_reverse_list = []
    check_dict = {}
    collision_check_dict = {}
    collide_agents = []
    move_to_wall = []
    for i in range(self.config.num_agents):
        # Stage 3: reverse wall collision
        tuple_pos = tuple(new_pos[i])
        if tuple_pos in self.wall_dict.keys():
            # Collide into walls: agent stays, and its (now occupied)
            # current cell must be checked for knock-on conflicts
            move_to_wall.append(i)
            move[i] = self.stop
            # Mark to reverse all the paths
            need_reverse_list.append(current_pos[i])
            new_pos = current_pos + move
            tuple_pos = tuple(new_pos[i])
            collision_check_dict.setdefault(tuple_pos, []).append(i)
        else:
            collision_check_dict.setdefault(tuple_pos, []).append(i)
    # Stage 4: vertex-conflict resolution — for each contested cell pick
    # one winner (an agent already holding the cell wins; otherwise random)
    while len(collision_check_dict) > 0:
        tuple_pos, inplace_agent_list = collision_check_dict.popitem()
        if len(inplace_agent_list) > 1:  # more than 1 agents show on the same pos
            selected_agent = random.choice(inplace_agent_list)
            for i in inplace_agent_list:
                if (move[i] == self.stop).all():
                    # an agent holding its position always keeps it
                    selected_agent = i
            for i in [x for x in inplace_agent_list if x != selected_agent]:
                move[i] = self.stop
                # Mark to reverse all the paths
                need_reverse_list.append(current_pos[i])
            collide_agents += inplace_agent_list
    # Record all moves of agents, easier to reverse when some of them crash with reversed agents
    reverse_dict = {}
    new_pos = current_pos + move
    for i in range(self.config.num_agents):
        reverse_dict.setdefault(tuple(new_pos[i]), []).append((i, tuple(current_pos[i])))
    # Stage 5: reverse invalid path — a stopped agent re-occupies its cell,
    # which may invalidate another agent's move into that cell; propagate
    # cancellations breadth-first until no conflicts remain
    while len(need_reverse_list) > 0:
        need_reverse_pos = need_reverse_list.pop(0)
        need_reverse_pos_tuple = tuple(need_reverse_pos)
        if need_reverse_pos_tuple in reverse_dict.keys():
            to_do_list = reverse_dict[need_reverse_pos_tuple]
            for to_do_item in to_do_list:
                reverse_agent, next_need_reverse_pos_tuple = to_do_item[0], to_do_item[1]
                if need_reverse_pos_tuple != next_need_reverse_pos_tuple:
                    need_reverse_list.append(np.array(next_need_reverse_pos_tuple))
                move[reverse_agent] = self.stop
    # # Validate collision (for validation only, commented out in real runs)
    # new_pos = current_pos + move
    # valid_dict = {}
    # for i in range(self.config.num_agents):
    #     tuple_pos = tuple(new_pos[i])
    #     if tuple_pos in valid_dict.keys():
    #         j = valid_dict[tuple_pos]
    #         # print(i, j, 'collision', tuple_pos)
    #     valid_dict[tuple_pos] = i
    return move, out_boundary == True, move_to_wall, collide_agents, collide_in_move_agents
def move(self, actionVec, currentstep):
    '''
    Advance the simulation by one step using the model's action output.
    Decodes per-agent actions via self.convectToActionKey (bound in setup()),
    applies collision shielding, updates positions/metrics, and finalises
    flowtime/makespan when all agents arrive or the step budget runs out.
    Args:
        actionVec: per-agent action score vectors from the model
        currentstep: current step index — presumably 1-based, matching the
            expert bookkeeping in getPathTarget(); TODO confirm against caller
    Returns:
        (allReachGoal, check_moveCollision, check_predictCollsion)
    '''
    allReachGoal = (np.count_nonzero(self.reach_goal) == self.config.num_agents)
    self.check_predictCollsion = False
    self.check_moveCollision = False
    if (not allReachGoal) and (currentstep < self.maxstep):
        # decode the network output into integer action keys
        proposed_actions = [int(self.convectToActionKey(actionVec[id_agent]).cpu())
                            for id_agent in range(self.config.num_agents)]
        proposed_actions = np.array(proposed_actions)
        # translate action keys into (row, col) deltas
        proposed_moves = np.zeros((self.config.num_agents, 2))
        proposed_moves[proposed_actions == self.up_keyValue, :] = self.up
        proposed_moves[proposed_actions == self.left_keyValue, :] = self.left
        proposed_moves[proposed_actions == self.down_keyValue, :] = self.down
        proposed_moves[proposed_actions == self.right_keyValue, :] = self.right
        proposed_moves[proposed_actions == self.stop_keyValue, :] = self.stop
        # update first step to move for each agent (recorded once)
        self.first_move[(proposed_actions != self.stop_keyValue) & (self.first_move == 0)] = currentstep
        # Check collisions, update valid moves for each agent
        new_move, move_to_boundary, move_to_wall_agents, collide_agents, collide_in_move_agents = self.check_collision(
            self.current_positions, proposed_moves)
        # flag the step if the raw prediction required any shielding
        if not self.check_predictCollsion:
            if np.count_nonzero(move_to_boundary) != 0 or len(move_to_wall_agents) != 0 or np.count_nonzero(
                    collide_agents) != 0 or len(collide_in_move_agents) != 0:
                self.check_predictCollsion = True
        # Compute Next position
        self.current_positions += new_move
        self.path_list.append(self.current_positions.copy())
        # Check reach goals (Manhattan distance 0 means arrived)
        current_distance = np.sum(np.abs(self.current_positions - self.goal_positions), axis=1)
        self.reach_goal[current_distance == 0] = 1
        # Update end_step (recorded once)
        self.end_step[(current_distance == 0) & (self.end_step == 0)] = currentstep
    if allReachGoal or (currentstep >= self.maxstep):
        # episode finished (success or timeout):
        # set all unstarted end step to current step
        self.end_step[self.end_step == 0] = currentstep - 1
        # Each agent's action length
        self.agent_action_length = self.end_step - self.first_move + 1
        # calculate flowtime
        self.flowtimePredict = np.sum(self.agent_action_length)
        # maximum makespan
        self.makespanPredict = np.max(self.end_step) - np.min(self.first_move) + 1
    return allReachGoal, self.check_moveCollision, self.check_predictCollsion
def save_success_cases(self, mode):
    '''
    This function saves the cases into yaml files: the input (map/agents),
    the predicted schedule, the expert schedule, and a .mat file with the
    recorded GSO / attention / comm-radius / FOV-path histories.
    Args:
        mode: - success: save a tag in the file indicating successful cases
              (any other value tags the case as unsuccessful).
    Returns:
    '''
    inputfile_name = os.path.join(self.config.result_AnimeDemo_dir_input,
                                  'input_map{:02d}x{:02d}_IDMap{:05d}_IDCase{:05d}.yaml'.format(self.size_map[0],
                                                                                                self.size_map[1],
                                                                                                self.ID_MAP,
                                                                                                self.ID_case))
    print(inputfile_name)
    outputfile_name = os.path.join(self.config.result_AnimeDemo_dir_predict_success,
                                   'predict_map{:02d}x{:02d}_IDMap{:05d}_IDCase{:05d}.yaml'.format(self.size_map[0],
                                                                                                   self.size_map[1],
                                                                                                   self.ID_MAP,
                                                                                                   self.ID_case))
    if mode == 'success':
        checkSuccess = 1
    else:
        checkSuccess = 0
    targetfile_name = os.path.join(self.config.result_AnimeDemo_dir_target,
                                   'expert_map{:02d}x{:02d}_IDMap{:05d}_IDCase{:05d}.yaml'.format(self.size_map[0],
                                                                                                  self.size_map[1],
                                                                                                  self.ID_MAP,
                                                                                                  self.ID_case))
    gsofile_name = os.path.join(self.config.result_AnimeDemo_dir_GSO,
                                'predict_map{:02d}x{:02d}_IDMap{:05d}_IDCase{:05d}.mat'.format(self.size_map[0],
                                                                                               self.size_map[1],
                                                                                               self.ID_MAP,
                                                                                               self.ID_case))
    # export the per-step recordings collected during the rollout
    save_statistics_GSO = {'gso': self.store_GSO, 'attentionGSO': self.store_attentionGSO,
                           'commRadius': self.store_communication_radius,
                           'FOV_Path': self.store_localPath}
    sio.savemat(gsofile_name, save_statistics_GSO)
    # --- input file: map dimensions, obstacles, agent start/goal pairs ---
    f = open(inputfile_name, 'w')
    f.write("map:\n")
    f.write("    dimensions: {}\n".format([self.size_map[0], self.size_map[1]]))
    f.write("    ID_Map: {}\n".format(self.ID_MAP))
    f.write("    ID_Case: {}\n".format(self.ID_case))
    f.write("    obstacles:\n")
    for ID_obs in range(self.numObstacle):
        list_obs = list(self.obstacle_positions[ID_obs])
        f.write("    - {}\n".format(list_obs))
    f.write("agents:\n")
    for id_agent in range(self.config.num_agents):
        f.write("  - name: agent{}\n    start: {}\n    goal: {}\n".format(id_agent,
                                                                          list(map(int,
                                                                                   self.start_positions[id_agent])),
                                                                          list(map(int,
                                                                                   self.goal_positions[id_agent]))))
    f.close()
    # --- predicted schedule: cost/makespan/succeed flag + per-agent path ---
    f_sol = open(outputfile_name, 'w')
    f_sol.write("statistics:\n")
    f_sol.write("  cost: {}\n".format(int(self.flowtimePredict)))
    f_sol.write("  makespan: {}\n".format(int(self.makespanPredict)))
    f_sol.write("  succeed: {}\n".format(int(checkSuccess)))
    f_sol.write("schedule:\n")
    for id_agent in range(self.config.num_agents):
        self.path_matrix = np.array(self.path_list)
        len_path = len(self.path_list)
        f_sol.write("    agent{}:\n".format(id_agent))
        for step in range(len_path):
            f_sol.write(
                "    - x: {}\n      y: {}\n      t: {}\n".format(int(self.path_matrix[step][id_agent][0]),
                                                                 int(self.path_matrix[step][id_agent][1]),
                                                                 step))
    f_sol.close()
    # --- expert schedule (for side-by-side comparison) ---
    f_target = open(targetfile_name, 'w')
    f_target.write("statistics:\n")
    f_target.write("  cost: {}\n".format(int(self.flowtimeTarget)))
    f_target.write("  makespan: {}\n".format(int(self.makespanTarget)))
    f_target.write("schedule:\n")
    for id_agent in range(self.config.num_agents):
        len_path = len(self.expert_path_list)
        f_target.write("    agent{}:\n".format(id_agent))
        for step in range(len_path):
            f_target.write("    - x: {}\n      y: {}\n      t: {}\n".format(
                int(self.expert_path_matrix[step][id_agent][0]),
                int(self.expert_path_matrix[step][id_agent][1]),
                step))
    f_target.close()
    def save_failure_cases(self):
        '''
        Dump the current (failed) case to a YAML input file for the online expert.

        Writes map dimensions/ids, the obstacle list, and per-agent entries where
        the *current* position is used as the new start (the case is re-solved
        from where the agents got stuck) together with the original goal.
        The file name encodes map size, map id and case id.
        '''
        inputfile_name = os.path.join(self.failureCases_input,
                                      'input_map{:02d}x{:02d}_IDMap{:05d}_IDCase{:05d}.yaml'.format(self.size_map[0],
                                                                                                    self.size_map[1],
                                                                                                    self.ID_MAP,
                                                                                                    self.ID_case))
        print('############## failureCases in training set ID{} ###############'.format(self.ID_dataset))
        f = open(inputfile_name, 'w')
        f.write("map:\n")
        f.write("    dimensions: {}\n".format([self.size_map[0], self.size_map[1]]))
        f.write("    ID_Map: {}\n".format(int(self.ID_MAP)))
        f.write("    ID_Case: {}\n".format(int(self.ID_case)))
        f.write("    obstacles:\n")
        for ID_obs in range(self.numObstacle):
            # Coordinates cast to plain ints so YAML does not carry numpy types.
            list_obs = list(map(int, self.obstacle_positions[ID_obs]))
            f.write("    - {}\n".format(list_obs))
        f.write("agents:\n")
        for id_agent in range(self.config.num_agents):
            f.write("  - name: agent{}\n    start: {}\n    goal: {}\n".format(id_agent,
                                                                              list(map(int, self.current_positions[
                                                                                  id_agent])),
                                                                              list(map(int,
                                                                                       self.goal_positions[id_agent]))))
        f.close()
def createfolder_failure_cases(self):
'''
Creat folder for failed cases
Returns:
'''
if os.path.exists(self.failureCases_input) and os.path.isdir(self.failureCases_input):
shutil.rmtree(self.failureCases_input)
if os.path.exists(self.dir_sol) and os.path.isdir(self.dir_sol):
shutil.rmtree(self.dir_sol)
try:
# Create target Directory
os.makedirs(self.failureCases_input)
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
def count_GSO_communcationRadius(self, step):
_ = self.getGSO(step)
_ = self.getCurrentState()
return self.store_GSO, self.store_communication_radius
def count_numAgents_ReachGoal(self):
return np.count_nonzero(self.reach_goal)
def checkOptimality(self, collisionFreeSol):
'''
Check if the solution is optimal
Args:
collisionFreeSol: - True : solution is free of collision
Returns:
'''
if self.makespanPredict <= self.makespanTarget and self.flowtimePredict <= self.flowtimeTarget and collisionFreeSol:
findOptimalSolution = True
else:
findOptimalSolution = False
return findOptimalSolution, [self.makespanPredict, self.makespanTarget], [self.flowtimePredict,
self.flowtimeTarget]
def get_pos(self, map_tensor):
map_np = map_tensor.numpy()
pos = np.transpose(np.nonzero(map_np))
return pos
def findpos(self, channel):
pos_object = channel.nonzero()
num_object = pos_object.shape[0]
pos = torch.zeros(num_object, 2)
# pos_list = []
for i in range(num_object):
pos[i][0] = pos_object[i][0]
pos[i][1] = pos_object[i][1]
# pos_list.append([pos_object[i][0], pos_object[i][1]])
# pos = torch.FloatTensor(pos_list)
return pos
    def initCommunicationRadius(self):
        # Reset the mutable communication radius to the configured default;
        # computeAdjacencyMatrix may grow it while searching for a connected graph.
        self.communicationRadius = self.config.commR
    def computeAdjacencyMatrix(self, step, agentPos, CommunicationRadius, graphConnected=False):
        """Build the (normalized) communication adjacency matrix for one step.

        At step 0 the communication radius is grown by 10% per attempt until the
        resulting graph is connected (this mutates self.communicationRadius);
        at later steps the radius found at step 0 is reused unchanged.

        Args:
            step: current time step (0 triggers the radius search).
            agentPos: array of shape (len_TimeSteps, nNodes, 2); only index 0 is used.
            CommunicationRadius: unused here; self.communicationRadius is authoritative.
            graphConnected: initial flag for the radius-growth loop.

        Returns:
            (W_norm, self.communicationRadius, graphConnected)
        """
        len_TimeSteps = agentPos.shape[0]  # length of timesteps
        nNodes = agentPos.shape[1]  # Number of nodes
        # Create the space to hold the adjacency matrices
        W = np.zeros([len_TimeSteps, nNodes, nNodes])
        W_norm = np.zeros([len_TimeSteps, nNodes, nNodes])
        # Pairwise Euclidean distances between agents at the first time index.
        distances = squareform(pdist(agentPos[0], 'euclidean'))  # nNodes x nNodes
        # The radius is increased by 10% on each loop iteration; dividing once
        # up-front keeps the first tested value equal to the current radius.
        if step == 0:
            self.communicationRadius = self.communicationRadius / 1.1
            while graphConnected is False:
                self.communicationRadius = self.communicationRadius * 1.1
                W[0] = (distances < self.communicationRadius).astype(agentPos.dtype)
                W[0] = W[0] - np.diag(np.diag(W[0]))  # no self-loops
                graphConnected = graph.isConnected(W[0])
            # Once a connected initial graph is found, normalize it.
            if self.config.symmetric_norm:
                deg = np.sum(W[0], axis=1)  # nNodes (degree vector)
                zeroDeg = np.nonzero(np.abs(deg) < self.zeroTolerance)[0]
                deg[zeroDeg] = 1.  # avoid division by zero for isolated nodes
                invSqrtDeg = np.sqrt(1. / deg)
                invSqrtDeg[zeroDeg] = 0.
                Deg = np.diag(invSqrtDeg)
                W[0] = Deg @ W[0] @ Deg  # D^{-1/2} W D^{-1/2}
            maxEigenValue = self.get_maxEigenValue(W[0])
            W_norm[0] = W[0] / maxEigenValue
        # After step 0, keep the radius fixed and just rebuild the graph.
        else:
            distances = squareform(pdist(agentPos[0], 'euclidean'))  # nNodes x nNodes
            W[0] = (distances < self.communicationRadius).astype(agentPos.dtype)
            W[0] = W[0] - np.diag(np.diag(W[0]))
            graphConnected = graph.isConnected(W[0])
            if np.any(W):
                # W has at least one edge: normalize it.
                if self.config.symmetric_norm:
                    deg = np.sum(W[0], axis=1)  # nNodes (degree vector)
                    zeroDeg = np.nonzero(np.abs(deg) < self.zeroTolerance)[0]
                    deg[zeroDeg] = 1.
                    invSqrtDeg = np.sqrt(1. / deg)
                    invSqrtDeg[zeroDeg] = 0.
                    Deg = np.diag(invSqrtDeg)
                    W[0] = Deg @ W[0] @ Deg
                maxEigenValue = self.get_maxEigenValue(W[0])
                W_norm[0] = W[0] / maxEigenValue
            else:
                # All-zero graph: skip normalization.
                # NOTE(review): this assigns the 3-D array W into the 2-D slot
                # W_norm[0]; it only broadcasts when len_TimeSteps == 1 — confirm.
                W_norm[0] = W
        return W_norm, self.communicationRadius, graphConnected
def get_maxEigenValue(self, matrix):
isSymmetric = np.allclose(matrix, np.transpose(matrix, axes=[1, 0]))
if isSymmetric:
W = np.linalg.eigvalsh(matrix)
else:
W = np.linalg.eigvals(matrix)
maxEigenvalue = np.max(np.real(W), axis=0)
return maxEigenvalue
# return np.max(np.abs(np.linalg.eig(matrix)[0]))
def computeAdjacencyMatrix_fixedCommRadius(self, step, agentPos, CommunicationRadius, graphConnected=False):
len_TimeSteps = agentPos.shape[0] # length of timesteps
nNodes = agentPos.shape[1] # Number of nodes
# Create the space to hold the adjacency matrices
W = np.zeros([len_TimeSteps, nNodes, nNodes])
W_norm = np.zeros([len_TimeSteps, nNodes, nNodes])
# Initial matrix
distances = squareform(pdist(agentPos[0], 'euclidean')) # nNodes x nNodes
W[0] = (distances < self.communicationRadius).astype(agentPos.dtype)
W[0] = W[0] - np.diag(np.diag(W[0]))
graphConnected = graph.isConnected(W[0])
if np.any(W):
# if W is all non-zero matrix, do normalization
if self.config.symmetric_norm:
deg = np.sum(W[0], axis=1) # nNodes (degree vector)
zeroDeg = np.nonzero(np.abs(deg) < self.zeroTolerance)[0]
deg[zeroDeg] = 1.
invSqrtDeg = np.sqrt(1. / deg)
invSqrtDeg[zeroDeg] = 0.
Deg = np.diag(invSqrtDeg)
W[0] = Deg @ W[0] @ Deg
maxEigenValue = self.get_maxEigenValue(W[0])
W_norm[0] = W[0] / maxEigenValue
else:
# if W is all zero matrix, don't do any normalization
print('No robot are connected at this moment, all zero matrix.')
W_norm[0] = W
return W_norm, self.communicationRadius, graphConnected
def getOptimalityMetrics(self):
return [self.makespanPredict, self.makespanTarget], [self.flowtimePredict, self.flowtimeTarget]
    def getMaxstep(self):
        # Accessor for the episode step limit (set elsewhere in the class).
        return self.maxstep
    def getMapsize(self):
        # Accessor for the map dimensions (indexed as [0]=width-ish, [1]=height-ish
        # by the save routines above).
        return self.size_map
def normalize(self, x):
x_normed = x / torch.sum(x, dim=-1, keepdim=True)
return x_normed
def convectToActionKey_softmax(self, actionVec):
actionVec_current = self.fun_Softmax(actionVec)
if not self.config.batch_numAgent:
actionKey_predict = torch.max(actionVec_current, 1)[1]
else:
actionKey_predict = torch.max(actionVec_current, 0)[1]
return actionKey_predict
def convectToActionKey_sum_multinorm(self, actionVec):
actionVec_current = self.normalize(actionVec)
# print(actionVec_current)
actionKey_predict = torch.multinomial(actionVec_current, 1)[0]
return actionKey_predict
def convectToActionKey_exp_multinorm(self, actionVec):
actionVec_current = torch.exp(actionVec)
actionKey_predict = torch.multinomial(actionVec_current, 1)[0]
return actionKey_predict
|
18,641 | 788cf526fae1ea70fe2f3293b5b1bc7135f7389d | import psycopg2
from dotenv import load_dotenv
import os
load_dotenv()
password = os.environ.get('PASSWORD')
user = os.environ.get('USER')
db = os.environ.get('DB_NAME')
host = os.environ.get('HOST')
port = os.environ.get('PORT')
def setup_db() -> None:
    """Create the users, timestamps and whoisin tables if they do not exist.

    Connects with the environment-variable credentials, runs the CREATE TABLE
    statements, commits, and always closes the connection.  Failures are
    printed rather than raised (best-effort setup).  Returns None — the
    original annotation `-> psycopg2.connect` was wrong: no value is returned.
    """
    try:
        connection = psycopg2.connect(user = user,
                                      password = password,
                                      host = host,
                                      port = port,
                                      database = db)
    except (Exception, psycopg2.Error) as error :
        print ("Error while connecting to PostgreSQL", error)
        # Bail out early: without a connection there is nothing to set up.
        # (The original fell through and crashed with a NameError here.)
        return
    try:
        cursor = connection.cursor()
        create_user = '''CREATE TABLE IF NOT EXISTS users \
          (ID BIGINT PRIMARY KEY     NOT NULL, \
          USERNAME       TEXT    NOT NULL UNIQUE, \
          NAME          TEXT     NOT NULL , \
          EMAIL    TEXT); '''
        create_timetable = '''CREATE TABLE IF NOT EXISTS timestamps \
          (ID BIGINT     NOT NULL, \
          DATE           TEXT    NOT NULL, \
          ENTRY         TEXT     NULL); '''
        create_who_is_in = '''CREATE TABLE IF NOT EXISTS whoisin \
          (ID BIGINT     NOT NULL, \
          DATE           TEXT    NOT NULL, \
          ENTRY         TEXT     NULL); '''
        cursor.execute(create_user)
        cursor.execute(create_timetable)
        cursor.execute(create_who_is_in)
        connection.commit()
        print("PostgreSQL is Setup.")
    except (Exception, psycopg2.DatabaseError) as error :
        print ("Error while creating PostgreSQL table", error)
    finally:
        # Release the connection whether or not table creation succeeded;
        # the original leaked it on every call.
        connection.close()
def connect():
    """Open and return a new psycopg2 connection using the env-var settings.

    Returns None (implicitly) when the connection attempt fails; the error
    is printed instead of raised.
    """
    try:
        return psycopg2.connect(user = user,
                                password = password,
                                host = host,
                                port = port,
                                database = db)
    except (Exception, psycopg2.Error) as error :
        print ("Error while connecting to PostgreSQL", error)
|
18,642 | f6737a5f7732dbcfab955e3fc6eef2fcf634ffff | # Generated by Django 3.0.5 on 2020-05-03 18:17
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.0.5).

    Rewrites the `pub_date` defaults of Comment, Post and Tag to the fixed
    timestamp captured when `makemigrations` ran.
    NOTE(review): a callable default (timezone.now) would avoid baking in a
    constant datetime, but auto-generated migrations are normally left as-is.
    """

    dependencies = [
        ('post', '0035_auto_20200503_2322'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 5, 3, 18, 17, 14, 338350, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='post',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 5, 3, 18, 17, 14, 337311, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='tag',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 5, 3, 18, 17, 14, 339425, tzinfo=utc)),
        ),
    ]
|
18,643 | a53603fc8c851604be5a49a49a95b11124a5d6f2 | import pandas as pd
import numpy as np
# Inizio KB
# Lettura csv
movieDataString = pd.read_csv(r'..\datasets\categ_complete_dataset.csv', sep=',')
#Creazione delle liste per ogni singola colonna
type = movieDataString.loc[:,'type']
title = movieDataString.loc[:, 'title']
duration = movieDataString.loc[:, 'duration']
director = movieDataString.loc[:, 'director']
genre = movieDataString.loc[:, 'genre']
country = movieDataString.loc[:, 'country']
ratings = movieDataString.loc[:, 'ratings']
year_range= movieDataString.loc[:, 'year_range']
# dato un genere, restituisce una lista con tutti i film che trova di quel genere
def trovaGenere(genere: str):
    """Return a dict {0: [title], 1: [title], ...} with one single-element
    list per dataset title whose genre equals `genere` exactly."""
    listaRisultati = {}
    next_key = 0
    for idx in range(len(genre)):
        if genre[idx] == genere:
            listaRisultati[next_key] = [title[idx]]
            next_key += 1
    return listaRisultati
# determina se un film esiste
def titoloEsiste(titolo: str):
    """Return True if `titolo` appears in the dataset, matching either exactly
    or case-insensitively.

    Fix: the original lowered only the dataset side (`title[i].lower() ==
    titolo`), so mixed-case user input such as "Inception" never matched the
    case-insensitive branch.  Lowering both sides is backward compatible:
    every previously matching input still matches.
    """
    for i in range(len(title)):
        if title[i] == titolo or title[i].lower() == titolo.lower():
            return True
    return False
# determina in che posizione si trova il film
def whereTitoloEsiste(titolo: str):
    """Return the dataset row index of `titolo` (exact or case-insensitive match).

    Fix: lower both sides of the comparison (the original lowered only the
    dataset side), consistent with titoloEsiste.

    NOTE(review): when the title is absent this falls through and returns the
    last index instead of signalling an error (the original comment admitted
    as much); callers are expected to check titoloEsiste() first, so the
    fallback is kept for backward compatibility.
    """
    for i in range(len(title)):
        if title[i] == titolo or title[i].lower() == titolo.lower():
            return i
    return i
# determina se un determinato genere esiste nel dataset
def genereEsiste(genere: str):
    """Return True when at least one dataset row carries exactly this genre."""
    return any(g == genere for g in genre)
# dato un indice, restituisce il genere corrispondente a quell'indice
def estrapolaGenere(i: int):
    """Return the genre stored at dataset row index `i`."""
    return genre[i]
# determina se i due generi in input sono uguali
def generiUguali(primoGenere: str, secondoGenere: str):
    """Return True when the two genre strings are exactly equal (case-sensitive)."""
    return primoGenere == secondoGenere
# trova la corrispondenza nel dataset tra un titolo e un genere
def corrispondenzaEsiste(titolo: str, genere: str):
    """Return True when the dataset row found for `titolo` carries `genere`."""
    row = whereTitoloEsiste(titolo)
    return genre[row] == genere
# dato un titolo e un genere in input, risponde se ha trovato una corrispondenza nel dataset
def askGenereDaTitolo(titolo:str,genere:str):
    """Interactive KB query: print YES/NO on whether `titolo` has genre
    `genere`, then optionally walk the user through the derivation via the
    'how' / 'how 1' protocol.  User-facing strings are intentionally Italian.
    """
    # Reject unknown titles up front.
    if(not titoloEsiste(titolo)):
        print("Il titolo inserito non esiste")
        return
    # Reject unknown genres up front.
    if(not genereEsiste(genere)):
        print("Hai inserito un genere non presente")
        return
    stringa = titolo + "_" + genere
    # Look up the title/genre correspondence in the dataset.
    risposta = corrispondenzaEsiste(titolo,genere)
    if(risposta):
        print("YES")
    else:
        print("NO")
    # Explain how the answer was derived ('how' protocol).
    rispostaUtente=input("Digitare how per la spiegazione: ")
    if (rispostaUtente.lower()=="how"):
        print("askGenereDaTitolo("+titolo+","+genere+") <=> "+stringa)
        rispostaUtente=input("Digitare 'how i' specificando al posto di i il numero dell'atomo : ")
        if(rispostaUtente.lower()=='how 1'):
            print(stringa + " <=>", risposta)
        else:
            print("Errore di digitazione")
    else:
        print("Errore di digitazione")
# dati due titoli, risponde se presentano lo stesso genere
def askStessoGenere(titolo1:str, titolo2:str):
    """Interactive KB query: print YES/NO on whether the two titles share the
    same genre, then optionally explain each proof atom through the nested
    'how' / 'how i' protocol.  User-facing strings are intentionally Italian.
    """
    if(not titoloEsiste(titolo1)):
        print("Il primo titolo inserito non è presente")
        return
    if(not titoloEsiste(titolo2)):
        print("Il secondo titolo inserito non è presente")
        return
    # Locate both titles (they are known to exist at this point).
    primoTitolo = whereTitoloEsiste(titolo1)
    secondoTitolo = whereTitoloEsiste(titolo2)
    # Extract the genre of each title.
    primoGenere = estrapolaGenere(primoTitolo)
    secondoGenere = estrapolaGenere(secondoTitolo)
    # Three proof atoms: each title matches its genre, and the genres are equal.
    risposte = {}
    risposte[1] = corrispondenzaEsiste(titolo1,primoGenere)
    risposte[2] = corrispondenzaEsiste(titolo2, secondoGenere)
    risposte[3] = generiUguali(primoGenere, secondoGenere)
    if(risposte.get(1) == True and risposte.get(2) == True and risposte.get(3) == True):
        print("YES")
    else:
        print("NO")
    # Walk the user through the derivation, atom by atom.
    rispostaUtente=input("Digitare how per la spiegazione: ")
    if (rispostaUtente.lower()=="how"):
        print("askStessoGenere("+titolo1+","+titolo2+") <=> "+titolo1+"_"+primoGenere+ " and "+titolo2+"_"+secondoGenere+" and generiUguali("+primoGenere+","+secondoGenere+")")
        rispostaUtente=input("Digitare 'how i' specificando in i il numero dell'atomo per ulteriori informazioni: ")
        if(rispostaUtente.lower()=='how 1'):
            print(titolo1+"_"+primoGenere+" <=>", risposte.get(1))
            rispostaUtente=input("Digitare 'how i' specificando in i il numero dell'atomo per ulteriori informazioni: ")
            if(rispostaUtente.lower() =="how 2"):
                print(titolo2+"_"+secondoGenere+" <=>",risposte.get(2))
                rispostaUtente=input("Digitare 'how i' specificando in i il numero dell'atomo per ulteriori informazioni: ")
                if(rispostaUtente=="how 3"):
                    print("generiUguali("+primoGenere+","+secondoGenere+") <=>", risposte[3])
                else:
                    print("Errore di digitazione")
            else:
                print("Errore di digitazione")
        else:
            if(rispostaUtente.lower() =="how 2"):
                print("SecondaCorrispondenza("+titolo2+") <=> corrispondenzaEsiste("+titolo2+" , " + secondoGenere+ ") <=>", risposte.get(2))
                rispostaUtente=input("Digitare 'how i' specificando in i il numero dell'atomo per ulteriori informazioni: ")
                if(rispostaUtente.lower()=="how 1"):
                    print("PrimaCorrispondenza("+titolo1+") <=> corrispondenzaEsiste("+titolo1+" , " + primoGenere+ ") <=>", risposte.get(1))
                else:
                    if(rispostaUtente=="how 3"):
                        print("stessoGenere("+titolo1+","+titolo2+") <=> "+primoGenere+"_"+secondoGenere+" <=>", risposte[3])
                    else:
                        print("Errore di digitazione")
            else:
                if(rispostaUtente.lower() =="how 3"):
                    print("stessoGenere("+titolo1+","+titolo2+") <=> generiUguali("+primoGenere+","+secondoGenere+") <=>", risposte[3])
                    rispostaUtente=input("Digitare 'how i' specificando in i il numero dell'atomo per ulteriori informazioni: ")
                    if(rispostaUtente.lower() =="how 2"):
                        print("SecondaCorrispondenza("+titolo2+") <=> corrispondenzaEsiste("+titolo2+" , " + secondoGenere+ ") <=>", risposte.get(2))
                        rispostaUtente=input("Digitare 'how i' specificando in i il numero dell'atomo per ulteriori informazioni: ")
                        if(rispostaUtente.lower()=="how 1"):
                            print("PrimaCorrispondenza("+titolo1+" ) <=> corrispondenzaEsiste("+titolo1+" , " + primoGenere+ ") is ", risposte.get(1))
                    else:
                        if(rispostaUtente=="how 2"):
                            print("SecondaCorrispondenza("+titolo2+") <=> corrispondenzaEsiste("+titolo2+" , " + secondoGenere+ ") is ", risposte.get(2))
                        else:
                            print("Errore di digitazione")
                else:
                    print("Errore di digitazione")
# spiega i risultati ottenuti dalle raccomandazioni effettuate col clustering
def explainResultsCluster(cluster1, cluster2, cluster3, similarities, choice):
    """Print which cluster a recommendation came from and preview its per-title
    similarity scores.

    Args:
        cluster1..cluster3: DataFrames with a 'sum' similarity column.
        similarities: the similarity metric computed for every cluster.
        choice: 0-based index of the selected cluster (reported 1-based).
    """
    choice += 1
    print('Il cluster di appartenenza è il valore di choice:', choice)
    print('Le metriche restituite tra tutti i cluster sono le seguenti:', similarities )
    # Each branch drops the descriptive columns (cluster 1 additionally has a
    # 'type' column) and renames 'sum' to 'similarity' before previewing.
    if(choice == 1):
        preview = cluster1.drop(columns=['ratings_range', 'type', 'genre', 'cast', 'director', 'year_range', 'country', 'description'])
        preview = preview.rename(columns={"sum": "similarity"})
        print('\nLe singole metriche di similarità restituite per il cluster', choice, 'sono:\n', preview.head(), '\n')
    elif(choice == 2):
        preview = cluster2.drop(columns=['ratings_range', 'genre', 'cast', 'director', 'year_range', 'country', 'description'])
        preview = preview.rename(columns={"sum": "similarity"})
        print('\nLe singole metriche di similarità restituite per il cluster', choice, 'sono:\n', preview.head(), '\n')
    elif(choice == 3):
        preview = cluster3.drop(columns=['ratings_range', 'genre', 'cast', 'director', 'year_range', 'country', 'description'])
        preview = preview.rename(columns={"sum": "similarity"})
        print('\nLe singole metriche di similarità restituite per il cluster', choice ,'sono: \n', preview.head(), '\n')
def mainFunz():
    """Console menu for the KB: describe the two available queries, read the
    user's selection and the required inputs, then dispatch to
    askGenereDaTitolo or askStessoGenere.  Prompts are intentionally Italian.
    """
    # Option 1: askGenereDaTitolo
    print('1) Dato un titolo e un genere in input, la KB è in grado di dirti se il titolo corrisponde al genere indicato grazie alla funzione askGenereDaTitolo, rispondendo YES se effettivamente corrisponde, altrimenti NO \n')
    # Option 2: askStessoGenere
    print('2) Dati due titoli in input, la KB è in grado di dirti se il genere dei due film è lo stesso oppure no grazie alla funzione askStessoGenere, rispondendo YES se corrispondono, NO altrimenti\n')
    rispostaUtente=input("Digitare il numero della funzione che si vuole eseguire : ")
    if (rispostaUtente=="1"):
        titoloUtente = input("Digitare il titolo del film: ")
        genereUtente = input("Digitare il genere del film: ")
        askGenereDaTitolo(titoloUtente, genereUtente)
    else:
        if(rispostaUtente=="2"):
            titoloUtente1 = input("Digitare il titolo del primo film: ")
            titoloUtente2 = input("Digitare il titolo del secondo film: ")
            askStessoGenere(titoloUtente1,titoloUtente2)
        else:
            print('Errore di digitazione')
if __name__ == "__main__":
mainFunz() |
18,644 | 354e1617bdd690831c2a1617a1e35a68a3d78a98 | import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from phonenumber_field.modelfields import PhoneNumberField
from django.db.models.base import ObjectDoesNotExist
NEW = 'Placed'
PROCESSED = 'In preparation'
READY = 'Ready to pick up'
FINISHED = 'Finished'
ORDER_STATUSES = (
(NEW, NEW),
(PROCESSED, PROCESSED),
(READY, READY),
(FINISHED, FINISHED)
)
class UserAccountManager(BaseUserManager):
    """Manager for UserAccount that keeps a Customer row linked to every
    regular account it creates."""
    def create_user(self, name, surname, email, phone_number, pesel, password=None):
        """Create a non-staff account, then link (or create) the Customer with
        the same PESEL.  Raises ValueError when email is missing."""
        if not email:
            raise ValueError('Users must have an email address')
        email = self.normalize_email(email)
        user = self.model(name=name, surname=surname, email=email, phone_number=phone_number, pesel=pesel,
                          is_staff=False)
        user.set_password(password)
        user.save()
        try:
            # A walk-in customer with this PESEL may already exist: attach the
            # new login to it instead of duplicating the record.
            existing_customer = Customer.objects.get(pesel=pesel)
            existing_customer.user = user
            existing_customer.save()
        except ObjectDoesNotExist:
            customer = Customer(name=name, surname=surname, pesel=pesel, user=user)
            customer.save()
        return user
    def _create_user(self, email, password, **extra_fields):
        # Low-level helper used by create_superuser: enforces a unique,
        # lowercased email and does NOT create a Customer row.
        if not email:
            raise ValueError('The given email must be set')
        email = email.lower()
        if self.model.objects.filter(email=email).exists():
            raise ValueError('This email is already in use')
        user = self.model(email=email, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, password, **extra_fields):
        """Create an admin account; is_staff and is_superuser are forced True."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True')
        return self._create_user(email, password, **extra_fields)
class UserAccount(AbstractBaseUser, PermissionsMixin):
    """Custom auth user identified by email; mirrored by a pharmacy Customer."""
    name = models.CharField(max_length=255)
    surname = models.CharField(max_length=255)
    email = models.EmailField(max_length=255, unique=True)
    phone_number = PhoneNumberField()
    # NOTE(review): PESEL numbers may start with 0; IntegerField drops leading
    # zeros (and the literal 00000000000 is just 0) — a CharField would be
    # safer, but changing it requires a migration.
    pesel = models.IntegerField(default=00000000000)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    objects = UserAccountManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name', 'surname', 'phone_number', 'pesel']
    def __str__(self):
        # NOTE(review): the opening '(' has no matching ')' — confirm intended.
        return f'({self.email} {self.name} {self.surname}'
class Customer(models.Model):
    """Pharmacy customer record, optionally linked to a login (UserAccount).

    The link is nullable and SET_NULL so deleting a login keeps the customer
    and their order history.
    """
    name = models.CharField(max_length=20)
    surname = models.CharField(max_length=20)
    pesel = models.IntegerField()
    user = models.ForeignKey(UserAccount, on_delete=models.SET_NULL, null=True)
    def __str__(self):
        return f'({self.id}) {self.pesel} {self.name} {self.surname}'
class MedicineOrder(models.Model):
    """A customer's order; status walks through the ORDER_STATUSES pipeline
    (Placed -> In preparation -> Ready to pick up -> Finished)."""
    customer = models.ForeignKey(Customer, related_name='medicineOrders', on_delete=models.CASCADE)
    total_price = models.DecimalField(max_digits=5, decimal_places=2, default=0)
    orderStatus = models.CharField(max_length=100, choices=ORDER_STATUSES, default=NEW)
    created = models.DateField(auto_now_add=True)
    class Meta:
        # Newest orders first; id breaks ties within the same day.
        ordering = ['-created', '-id']
    def __str__(self):
        return f'({self.id} {self.created}) {self.customer} {self.total_price}'
class OrderNote(models.Model):
    """Free-text note attached to an order by a staff user.

    NOTE(review): `author` duplicates information reachable through `user` —
    presumably a display-name snapshot; confirm before consolidating.
    """
    order = models.ForeignKey(MedicineOrder, related_name="notes", on_delete=models.CASCADE)
    user = models.ForeignKey(UserAccount, on_delete=models.CASCADE)
    author = models.CharField(max_length=50)
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
class NewOrderMessage(models.Model):
    """One-per-order notification flag; `unread` marks orders the staff has
    not yet looked at."""
    medicine_order = models.OneToOneField(MedicineOrder, on_delete=models.CASCADE, related_name='newOrderMessage')
    started = models.DateTimeField(auto_now_add=True)
    unread = models.BooleanField(default=True)
    class Meta:
        # Most recent notifications first.
        ordering = ['-started']
class Category(models.Model):
    """Medicine category (short code + human-readable name), listed by code."""
    code = models.CharField(max_length=10)
    name = models.CharField(max_length=150)
    class Meta:
        ordering = ['code']
def get_image_path(instance, filename):
    """Upload-path callback: store medicine images under
    media/medicines/<pk><original extension>."""
    extension = os.path.splitext(filename)[1]
    return os.path.join('medicines', str(instance.id) + extension)
class Medicine(models.Model):
    """A stocked medicine; its image path depends on the primary key, hence
    the two-phase save below."""
    name = models.CharField(max_length=25)
    price = models.DecimalField(max_digits=5, decimal_places=2)
    quantity = models.PositiveIntegerField(default=0)
    brand = models.CharField(max_length=30)
    capacity = models.CharField(max_length=30)
    dose = models.CharField(max_length=30)
    image = models.ImageField(upload_to=get_image_path, blank=True, null=True)
    category = models.ForeignKey(Category, related_name='medicines', on_delete=models.SET_DEFAULT, default=1)
    def save(self, *args, **kwargs):
        """Save twice on first insert: the image upload path (get_image_path)
        needs `self.id`, which only exists after the initial save."""
        if self.id is None:
            saved_image = self.image
            self.image = None
            super(Medicine, self).save(*args, **kwargs)
            self.image = saved_image
            # The row now exists, so the second save must not force an INSERT.
            if 'force_insert' in kwargs:
                kwargs.pop('force_insert')
        super(Medicine, self).save(*args, **kwargs)
    def __str__(self):
        return f'({self.id}) {self.name} {self.brand} {self.capacity} {self.dose} {self.price}'
class MedicineOrderItem(models.Model):
    """Line item of an order: a medicine and how many units were ordered."""
    medicineOrder = models.ForeignKey(MedicineOrder, related_name="medicineOrderItems", on_delete=models.CASCADE)
    medicine = models.ForeignKey(Medicine, on_delete=models.CASCADE)
    amount = models.IntegerField(default=1)
|
18,645 | b1e963309acfeda9a73912f88c137f8226247369 | ##############################################################################################
# Fichier contenant 2 classes pour le projet Malaria (Mini-Projet) #
# #
# Contributeurs principaux : #
# ------------------------ #
# 1. Partie Preprocessing -> Alicia Bec & Charlotte Trupin #
# 2. Partie Model -> Maxime Vincent & Baptiste Maquet #
# 3. Partie Visualization -> Sakina Atia & Mathilde Lasseigne #
# #
# Historique des modifications : #
# ---------------------------- #
# 1. Suppression d'attributs de la classe model #
# 2. Suppression de vérifications de méthodes de la classe model #
# 3. Ajout de méthodes dans la classe model : #
# 3.1. Méthode model::score pour calculer le score #
# 3.2. Méthode model::optimize_model pour optimiser les hyperparamètres #
# 3.3. Méthode model::confusion_matrix pour afficher une matrice de confusion #
# 4. Ajout d'une classe preprocess au sein du fichier model.py #
# 5. Ajout d'une méthode model::optimize_preprocess pour tenter d'optimiser le preprocessing #
# #
# Date de dernière modification : #
# ----------------------------- #
# https://github.com/charlottetrupin/malaria/commits/master/starting_kit/ #
# sample_code_submission/model.py #
# #
##############################################################################################
import warnings
warnings.filterwarnings('ignore')
import pickle # Pour enregistrer et charger modèle
import numpy as np # We recommend to use numpy arrays
from os.path import isfile # Fonction fichier
from sklearn.base import BaseEstimator # Interface d'un estimator
from sklearn.ensemble import RandomForestClassifier # Modèle choisi
from sklearn.ensemble import IsolationForest
from sklearn.decomposition import PCA # Preprocessing
from sklearn.metrics import roc_auc_score # Méthode de calcul du score
from sklearn.metrics import make_scorer # Conversion de la fonction de score
from sklearn.model_selection import RandomizedSearchCV # Optimisation hyperparamètres
import matplotlib.pyplot as plt; import seaborn as sns; sns.set() # Affichage graphique
from sklearn.metrics import confusion_matrix # Matrice de confusion
class preprocess:
    def __init__(self, n_pca=9):
        ''' Preprocessing pipeline: fixed feature-column selection, then
        IsolationForest outlier removal, then PCA dimensionality reduction.

        Args:
            n_pca: number of principal components kept by the PCA step
        '''
        self.is_trained = False                 # set True once fit() has run
        self.n_pca = n_pca
        self.pca = PCA(n_components=n_pca)      # dimensionality reduction
        self.estimator = IsolationForest(n_estimators=5, n_jobs=-1)  # outlier detection

    def fit(self, X):
        """Fit the PCA projection and the outlier detector on the selected
        feature columns of X."""
        X = X[:, [0, 3, 5, 6, 7, 8, 14, 15, 16]]
        self.pca.fit(X)
        self.estimator.fit(X)
        self.is_trained = True

    def transform(self, X, y, remove_outliers=True):
        """Select the feature columns, optionally drop outlier rows (and their
        labels), then PCA-project X.

        Args:
            X: raw feature matrix (num_samples x num_features)
            y: label array aligned with X (untouched when remove_outliers is False)
            remove_outliers: drop rows the IsolationForest flags as -1

        Returns:
            (X_transformed, y) with matching rows.
        """
        X = X[:, [0, 3, 5, 6, 7, 8, 14, 15, 16]]
        if remove_outliers:
            # Predict once for the whole matrix.  The original code called
            # self.estimator.predict(X) anew for every row inside a loop,
            # running O(n^2) predictions for the same deterministic result.
            keep = self.estimator.predict(X) != -1
            X = X[keep, :]
            y = y[keep]
        X = self.pca.transform(X)  # reduce dimension
        return X, y
class model (BaseEstimator):
    """RandomForest-based classifier wrapper with built-in preprocessing,
    pickle persistence, randomized hyper-parameter search and ROC-AUC scoring.

    NOTE(review): the mutable default `classifier=RandomForestClassifier()`
    is evaluated once at class-definition time, so every instance created
    without an explicit classifier shares the same estimator object — confirm
    this is intended before reuse.
    """
    def __init__(self, classifier=RandomForestClassifier()):
        '''
        Initialize the wrapper.

        Args :
            classifier : scikit-learn classifier used for the predictions
        '''
        self.is_trained=False                   # learning state
        self.classifier = classifier            # classification model
        self.scoring_function = roc_auc_score   # metric used by score()/optimize()
        self.preprocess = preprocess()          # feature selection + PCA + outliers
    def fit(self, X, y):
        '''
        Train the model: fit the preprocessing, reload the previously saved
        classifier from disk when `_model.pickle` exists, then fit it on the
        outlier-filtered, PCA-projected data.

        Args:
            X: Training data matrix of dim num_train_samples * num_feat.
            y: Training label matrix of dim num_train_samples * num_labels.
            Both inputs are numpy arrays.
        '''
        self.preprocess.fit(X)                # fit preprocessing
        X,y = self.preprocess.transform(X,y)  # transform (drops outlier rows)
        # NOTE(review): this silently replaces the classifier passed to
        # __init__ whenever a pickled model is present on disk.
        self.classifier = self.load()
        self.classifier.fit(X, np.ravel(y))   # train the model
        self.is_trained=True
        return self
    def predict_proba(self, X):
        '''
        Return class-probability predictions on (test) data.  Outliers are NOT
        removed here, so exactly one prediction per input row is produced.
        '''
        y = []
        X,_ = self.preprocess.transform(X,y, remove_outliers = False)  # data transform
        return self.classifier.predict_proba(X)  # make predictions
    def optimize(self, X, y, n_iter=100):
        """
        Randomized search over RandomForest hyper-parameters; keeps the best
        estimator according to the ROC-AUC scoring function.

        Args:
            X : training data set
            y : corresponding labels
            n_iter : number of sampled parameter combinations (default=100)
        """
        # Hyper-parameter space to sample from
        #print(self.classifier.get_params().keys())
        parameters={'bootstrap':[True,False],
                    'criterion':["gini", "entropy"],
                    'n_estimators':[i for i in range(10,300,10)],
                    'max_depth':[i for i in range(1,10)]+[None],
                    'min_samples_split':[i for i in range(2,5)],
                    'min_samples_leaf':[i for i in range(1,5)],
                    'random_state':[i for i in range(1,100)]}
        # Search using all CPU cores, scored with the configured metric
        grid = RandomizedSearchCV(self.classifier, parameters, scoring=make_scorer(self.scoring_function), n_jobs=-1, n_iter=n_iter)
        print(grid.param_distributions)
        # Run the search
        grid.fit(X, y)
        # Keep the best estimator found
        print(grid.best_estimator_)
        self.classifier = grid.best_estimator_
    def score(self, y_true, y_pred):
        """
        ROC-AUC score of predictions against the ground truth.

        Args:
            y_true : true labels
            y_pred : predicted labels/scores
        """
        # Model score
        return self.scoring_function(y_true, y_pred)
    def save(self, path="./"):
        # Persist the current classifier to `<path>_model.pickle`.
        pickle.dump(self.classifier, open(path + '_model.pickle', "wb"))
    def load(self, path="./"):
        """ Reload a previously saved classifier; keeps the current one when
        no pickle file exists.  Returns the (possibly reloaded) classifier. """
        modelfile = path + '_model.pickle'
        if isfile(modelfile):
            with open(modelfile, 'rb') as f:
                self.classifier = pickle.load(f)
            print("Model reloaded from: " + modelfile)
        return self.classifier
    def confusion_matrix(self, X, y):
        """
        Plot a confusion-matrix heatmap, preferably on the test set.

        Args:
            X : data matrix
            y : corresponding true labels

        NOTE(review): this feeds predict_proba() (probabilities) into
        sklearn's confusion_matrix, which expects discrete predicted labels;
        self.classifier.predict(X) looks intended — confirm.
        """
        ax = plt.subplot()
        # annot=True to annotate cells
        sns.heatmap(confusion_matrix(y, self.classifier.predict_proba(X)), annot=True, fmt='g', ax=ax)
        # labels, title and ticks
        ax.set_xlabel('Predicted labels');
        ax.set_ylabel('True labels');
        ax.set_title('Confusion Matrix');
        ax.xaxis.set_ticklabels(['Parasitized', 'Uninfected']);
        ax.yaxis.set_ticklabels(['Parasitized', 'Uninfected']);
        plt.show()
|
18,646 | e458e2fc710be71e74b0073eeb718c0535ea88f9 | """check if there exists a permutation of the
given string such that it is a palindrome.
"""
def check(s1):
    """Return True if some permutation of `s1` is a palindrome.

    A string can be rearranged into a palindrome iff at most one character
    occurs an odd number of times (that character sits in the middle of an
    odd-length palindrome).  Runs in O(n) time and O(k) space for k distinct
    characters.

    Improvements over the original: collections.Counter replaces the
    hand-rolled fixed ord()-indexed array, which also generalizes the check
    from the first 128 code points to arbitrary Unicode input.
    """
    from collections import Counter
    odd_counts = sum(1 for count in Counter(s1).values() if count % 2 != 0)
    return odd_counts <= 1
if __name__ == "__main__":
s1 = "nitin"
res = check(s1)
print(res) |
18,647 | ffb7baa996ae0a6eff3fb44fb4c0f9ae12c504c1 | #!/usr/bin/python
import os, sys, regex
from time import time
from string import *
# serial input device
TTY = "/dev/ttyS0"
input = open( TTY, "rb" )
########################################
# form a timestamp
prev_tv = (0,0,0,0)
def ts():
    """Return an "HH:MM:SS " timestamp prefix, first emitting a full-date
    header line (via outs) whenever the day or the DST flag changes.

    Fixes over the original: the undefined name `t` is replaced by `ta` (the
    struct_time actually captured), and `prev_tv` is declared global so the
    day-change comparison works across calls instead of raising
    UnboundLocalError.

    NOTE(review): this legacy Python-2 script only does `from time import
    time`, so localtime/asctime (and the outs() helper) are still unresolved
    here — confirm the intended imports before running.
    """
    global prev_tv  # mutated below; the original omitted this declaration
    ta = localtime()
    next_tv = ( ta[0], ta[1], ta[2], ta[8])
    if prev_tv != next_tv:
        prev_tv = next_tv
        outs( "#" + asctime( ta ))
    # Refresh the time after the (potentially slow) header write.
    ta = localtime()
    return "%02d:%02d:%02d " % ( ta[3], ta[4], ta[5] )
########################################
# startup initialization
def init():
    # Placeholder for startup initialization; intentionally does nothing yet.
    pass
########################################
# main loop
def main():
    # Read lines from the serial device forever, printing each one prefixed
    # with the ts() timestamp (trailing comma suppresses the extra newline).
    # NOTE(review): Python-2 print statement — this file will not parse under
    # Python 3.
    while 1:
        line = input.readline()
        print ts(), line,
init()
main()
|
18,648 | 20f2ff7163a04004f0229937734e60d8bf27c84b | from numba import jit, njit,prange
from numba import cuda, int32, complex128, float64, int64
import numpy as np
import threading
import math
import random
import torch
import weibull
import itertools
from scipy.spatial import distance as compute_distance
from scipy.spatial.distance import squareform
import scipy
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KernelDensity
#####################################################################################
#Customize CUDA Kernels
@cuda.jit(device = True)
def cosine_gpu(u, v):
    # Cosine distance: 1 - <u,v> / (||u|| * ||v||), accumulated element-wise
    # with plain loops so it compiles as a numba CUDA device function.
    sq_u = 0
    for m, n in zip(u, u):
        sq_u += m * n
    sq_v = 0
    for m, n in zip(v, v):
        sq_v += m * n
    dot_uv = 0
    for m, n in zip(u, v):
        dot_uv += m * n
    return 1.0 - dot_uv / (math.sqrt(sq_u) * math.sqrt(sq_v))
@cuda.jit(device = True)
def euclidean_gpu(u, v):
    # Euclidean (L2) distance between vectors u and v.
    sq_sum = 0
    for m, n in zip(u, v):
        diff = m - n
        sq_sum += diff * diff
    return math.sqrt(sq_sum)
@cuda.jit
def cosine_dis_gpu(X, Y, out):
    # CUDA kernel: out[i, j] = cosine distance between row i of X and
    # row j of Y.  One thread per output cell via a 2-D grid.
    i, j = cuda.grid(2)
    if i < out.shape[0] and j < out.shape[1]:
        u = X[i]
        v = Y[j]
        out[i, j] = cosine_gpu(u, v)
@cuda.jit
def euclidean_dis_gpu(X, Y, out):
    # CUDA kernel: out[i, j] = Euclidean distance between row i of X and
    # row j of Y.  One thread per output cell via a 2-D grid.
    i, j = cuda.grid(2)
    if i < out.shape[0] and j < out.shape[1]:
        u = X[i]
        v = Y[j]
        out[i, j] = euclidean_gpu(u, v)
#####################################################################################
def tau(args, features, gpus):
    """Estimate an EVT-based distance threshold (tau) for clustering.

    Pipeline: compute pairwise distances on one or more GPUs (sampling ~2%
    for EVT), find each point's nearest neighbour, form initial merge pairs,
    measure the "gaps" between nearest initial-pair centroids, and fit a
    Weibull distribution to those gaps to extract a percentile threshold.

    NOTE(review): the final ``return`` references ``T`` (never defined) and
    ``tau`` (this function itself), so reaching it raises NameError -- the
    computed threshold lives in ``tau_T`` and is presumably what was meant.
    ``gx`` in the return is whatever the last loop iteration left behind.
    The return also sits inside the ``for metric in metrics`` loop, so only
    the first metric is ever processed.
    """
    #Now only support Cosine and Euclidean on GPU
    if args.distance_metric:
        metrics = [args.distance_metric]
    else:
        metrics =['cosine','euclidean']
    print("The Distance Metric is: ", metrics)
    #CUDA parallel distance computing, support multi-gpus
    # NOTE(review): this closure reads `metric`, `mean_distances`, `max_dis`
    # and `whole_distances` from the enclosing scope; it must only run after
    # the loop below has bound them.
    def gpu_pairwise_distance(chunks, step_i, gpu):
        #lock.acquire()#no need to lock threads in this case
        cuda.select_device(gpu)
        for i, chunk1 in enumerate(chunks):
            print("Computing distance chunk: ", i + 1)
            #Define chunk id x axis
            step_j = 0
            n_i = chunk1.shape[0]
            for j, chunk2 in enumerate(chunks):
                #Copy data to gpu
                X_global = cuda.to_device(chunk1)
                Y_global = cuda.to_device(chunk2)
                #Define chunk id y axis
                n_j = chunk2.shape[0]
                out_global = cuda.device_array((chunk1.shape[0], chunk2.shape[0]))
                # Define gpu's grid
                threadsperblock = (16, 16)
                blockspergrid_x = int(math.ceil(out_global.shape[0] / threadsperblock[0]))
                blockspergrid_y = int(math.ceil(out_global.shape[1] / threadsperblock[1]))
                blockspergrid = (blockspergrid_x, blockspergrid_y)
                #Compute distance on gpu
                if metric == "cosine":
                    cosine_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
                elif metric == "euclidean":
                    euclidean_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
                #Find mean and max for each loop
                # NOTE(review): copy_to_host() is called repeatedly below;
                # each call transfers the whole chunk from the GPU again.
                mean_distances.append(np.mean(out_global.copy_to_host()))
                max_dis.append(np.max(out_global.copy_to_host()))
                #Select 2% points to EVT randomly
                k = int(len(out_global.copy_to_host()) * 0.02)
                number_of_rows = out_global.copy_to_host().shape[0]
                random_indices = np.random.choice(number_of_rows, size=k, replace=False)
                #Copy gpu distance data to cpu numpy
                if len(out_global.copy_to_host()[random_indices, :]) > 0:
                    whole_distances.extend(out_global.copy_to_host()[random_indices, :].flatten())
                #update chunk id
                step_j += n_j
            step_i += n_i
        del X_global, Y_global, out_global
    for metric in metrics:
        distances = []
        mean_distances = []
        max_dis = []
        #Split cpu's data to gpus
        n = int(len(features) / len(gpus))
        mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
        threads = []
        from split import split_double
        #Compute chunks in multi-gpus
        for p, gpu in enumerate(gpus):
            whole_distances = []
            split = split_double(args, mutilple_features[p])
            n = int(len(mutilple_features[p]) / split)
            chunks = [mutilple_features[p][i * n:(i + 1) * n] for i in range((len(mutilple_features[p]) + n - 1) // n )]
            step_i = 0
            threads.append(threading.Thread(target=gpu_pairwise_distance, args=[chunks, step_i, int(gpu),]))
        #Number of threads depend on how many gpus you have
        for t in threads:
            t.setDaemon(True)
            t.start()
        #Re-group final distance data from gpus
        # NOTE(review): rebinding whole_distances to [] here appears to
        # discard the samples the worker threads appended, so `distances`
        # is likely built from empty lists -- confirm intent.
        for t in threads:
            whole_distances = []
            t.join()
            distances.extend(np.array(whole_distances).flatten())
        #Process data
        random_distances = np.array(distances).flatten()
        random_distances = random_distances.reshape((random_distances.shape[0], 1)).T
        mean_distances = np.mean(mean_distances)
        print("mean_distances: ",mean_distances)
        print("max dis:", max(max_dis))#original max dis before EVT
        ###################################################################
        ########################################################################################
        print("Finding Nearest Points......")
        #Find nearest points on GPUs
        from gpu_functions import gpu_nearest
        nearest_cluster = np.zeros((len(features)), dtype = 'int')
        nearest_points_dis = np.zeros((len(features)))
        n = int(len(features) / len(gpus))
        features = list(features)
        mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
        if len(gpus) > 1:
            # Fold any leftover chunk into the last GPU's share.
            if len(mutilple_features) > len(gpus):
                mutilple_features[len(gpus) - 1].extend(mutilple_features[len(gpus)])
                del mutilple_features[len(gpus)]
        ind = []
        step = 0
        steps = []
        for i, j in enumerate(mutilple_features[0:len(gpus)]):
            ind.append(range(step, len(j)+step))
            steps.append(step)
            step += len(j)
        threads = []
        for p, gpu in enumerate(gpus):
            threads.append(threading.Thread(target=gpu_nearest, args=[mutilple_features[p], features, int(gpu), ind[p], steps[p], metric, nearest_cluster, nearest_points_dis]))
        thread(threads)
        del mutilple_features
        # In round 1 the centroids is the points no matter what's linkage
        nearest_cluster_with_distance_round_1 = [[j, [k, i]] for k, (i, j) in enumerate(zip(nearest_cluster, nearest_points_dis))]
        nearest_cluster_with_distance_round_1 = sorted(nearest_cluster_with_distance_round_1) # Sort by distance, process the smallest one first
        nearest_points = nearest_cluster
        ########################################################################################
        print("Computing the appearance of nearest_points")
        threadsperblock = 32
        blockspergrid = math.ceil(nearest_points.shape[0] / threadsperblock)
        X_global = cuda.to_device(nearest_points)
        out_global = cuda.device_array((nearest_points.shape[0]))
        from cuda_kernels import count_appear
        count_appear[blockspergrid, threadsperblock](X_global, out_global)
        appear = np.array(out_global.copy_to_host(), dtype = int)
        appear_count = [[j, i] for i, j in enumerate(appear)]
        # count the appearance of each kernel points
        # generate order
        order = [i[1] for i in sorted(appear_count, reverse=True)]
        # add non kernel points to order
        processed = set()
        init = []
        # Greedily pair each point with its nearest neighbour, most
        # frequently-referenced points first; each point joins one pair only.
        for count, i in enumerate(order):
            j = nearest_points[i]
            if i not in processed and j not in processed:
                init.append([i, j])
                processed.add(i)
                processed.add(j)
        init = init[0: int(len(init))]
        N = len(init)
        init_length = N
        init_features = [[features[i[0]], features[i[1]]] for i in init] #features of initial groups.
        ######################################################################################################
        print("Finding Nearest Intial Pairs")
        #Computing nearest centroids on GPUs
        centroids = [np.mean(i,axis=0) for i in init_features]
        X = centroids
        from gpu_functions import gpu_nearest_init_centroids
        gs = np.zeros((len(init_features)))
        nearest_init = np.zeros((len(init_features)), dtype = 'int')
        n = int(len(centroids) / len(gpus))
        mutilple_centroids = [centroids[i * n:(i + 1) * n] for i in range((len(centroids) + n - 1) // n )]
        if len(gpus) > 1:
            if len(mutilple_centroids) > len(gpus):
                mutilple_centroids[len(gpus) - 1].extend(mutilple_centroids[len(gpus)])
                del mutilple_centroids[len(gpus)]
        ind = []
        step = 0
        steps = []
        for i, j in enumerate(mutilple_centroids[0:len(gpus)]):
            ind.append(range(step, len(j) + step))
            steps.append(step)
            step += len(j)
        threads = []
        for p, gpu in enumerate(gpus):
            threads.append(threading.Thread(target=gpu_nearest_init_centroids, args=[mutilple_centroids[p], X, int(gpu), ind[p], metric, gs, nearest_init]))
        thread(threads)
        del mutilple_centroids
        ##########################################################################################################
        #Nearest initial pairs combo
        nearest_init_combo = [[m, init[n]] for m, n in zip(init, nearest_init)]
        ##########################################################################################################
        gxs = []
        print("Computing Gaps")
        # Computing gaps on GPUs
        from gpu_functions import gpu_distance
        for pair1, pair2 in nearest_init_combo:
            round_features = np.array([features[k] for k in [pair1[0], pair1[1], pair2[0], pair2[1]]])
            features0 = [features[k] for k in pair1] #extract features of cluster0
            features1 = [features[k] for k in pair2] #extract features of cluster1
            centroid0 = np.mean(features0, axis=0) # Get controid of initial pair0
            centroid1 = np.mean(features1, axis=0) # Get controid of initial pair1
            if metric == "cosine":
                gx = scipy.spatial.distance.cosine(centroid0, centroid1)
            elif metric == "euclidean":
                gx = scipy.spatial.distance.euclidean(centroid0, centroid1)
            gxs.append(gx) #gaps
        #Our tau
        number_of_clusters = 30
        thresh = 0.01
        tw = weibull.weibull()
        data = torch.Tensor(gxs)
        fullrange = torch.linspace(0, 1, 100)
        tailj = torch.linspace(.45, .55, 10)
        torch.Tensor.ndim = property(lambda self: len(self.shape))
        tw.FitHigh(data.view(1, -1), int(1. * len(data)))
        parms = tw.return_all_parameters()
        print(parms)
        pcent = 1 - 1 / len(data)
        pcent = 0.99
        print("EVT Tau for ", pcent * 100, " Percentile at ",
              parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor'])
        # wscoresj = tw.wscore(tailj)
        # print("Ijbb Wscores=",tailj,wscoresj)
        wscoresj = tw.wscore(fullrange)
        tau_T = parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor']
        tau_T = tau_T.numpy()[0][0]
        # NOTE(review): `T` is undefined and `tau` is this function object --
        # this line raises NameError; `tau_T` is likely the intended value.
        return 0, T, tau, nearest_points, metric, init_length, nearest_cluster_with_distance_round_1, nearest_points_dis, gx, 0
def nan_to_num(t, mynan=0.):
    """Replace every non-finite entry (NaN/inf) of tensor *t* with *mynan*.

    Recurses over the leading dimension; a fully finite tensor is returned
    as-is (no copy).

    BUG FIX: the recursive call previously dropped *mynan*, so nested
    non-finite values were always replaced with the default 0.
    """
    if torch.all(torch.isfinite(t)):
        return t
    if len(t.size()) == 0:
        # 0-dim non-finite scalar: substitute the replacement value.
        return torch.tensor(mynan)
    return torch.cat([nan_to_num(l, mynan).unsqueeze(0) for l in t], 0)
def get_tau(data, maxval, tailfrac=.25, pcent=.999):
    """Fit a Weibull to the upper tail of *data* and return a threshold.

    Args:
        data: 1-D torch.Tensor of scores/distances.
        maxval: upper bound of the evaluation range for wscore/prob.
        tailfrac: fraction of the data used for the trimmed tail fit.
        pcent: percentile at which the threshold tau is extracted.

    Returns:
        numpy array holding tau, the EVT threshold at the *pcent* quantile.
    """
    # tw = weibull.weibull(translateAmountTensor=.001)
    tw = weibull.weibull()
    nbin = 200
    fullrange = torch.linspace(0, maxval, nbin)
    torch.Tensor.ndim = property(lambda self: len(self.shape))
    tw.FitHighTrimmed(data.view(1, -1), int(tailfrac * len(data)))
    parms = tw.return_all_parameters()
    # NOTE(review): the wscore/prob results below are unused; kept only in
    # case the weibull object caches state from these calls -- confirm and
    # drop if not.  (Unused pure locals imean/istd/imax/nscale removed.)
    wscoresj = tw.wscore(fullrange)
    probj = nan_to_num(tw.prob(fullrange))
    if torch.sum(probj) > .001:
        probj = probj / torch.sum(probj)
    # Invert the Weibull CDF at pcent, undoing the fit's translation/shift.
    tau = parms['Scale'] * np.power(-np.log((1 - pcent)), (1 / parms['Shape'])) \
        - parms['translateAmountTensor'] + parms['smallScoreTensor']
    return tau.numpy()
def thread(threads):
    """Start every thread in *threads* as a daemon, then wait for all to finish.

    Daemon threads do not block interpreter shutdown if the main thread
    exits while they are still running.
    """
    for t in threads:
        t.daemon = True  # setDaemon() is deprecated since Python 3.10
        t.start()
    for t in threads:
        t.join()
def takeSecond(elem):
    """Sort-key helper: return the item at index 1 of *elem*."""
    second_index = 1
    return elem[second_index]
|
18,649 | ee462d22ee87ecae29505a60a78c511b02443ca9 | import torch
# Demo of torch.where: elementwise select between two tensors by a mask.
a1 = torch.rand(4,4)
a2 = torch.ones(4,4)
a3 = torch.zeros(4,4)
print(a1)
# 1.0 where a1 > 0.5, else 0.0 -- i.e. a binary threshold of a1.
print(torch.where(a1>0.5,a2,a3))
18,650 | 2ba8d7a6e956b002cba9724f45331da915f99864 | En el modulo descarga ya se debe abrir la conexion entre la intranet y la pasarela ,
en este se consultara periodicamente si hay descargas pendientes, si estas existen se debe enviar
la informacion necesaria para iniciar o continuar la descarga (la informacion se envia a transmission)
y poder monitorearla, en caso de que no exista sigue con la consulta.
|
18,651 | e6105eb9f37f92f9c8dddb3cc713e5026277f436 | import numpy as np
#function will take the previously observed data
def monte_carlo_sim(D_prev, mu_t, tau_t, alpha_t, beta_t, P_initial, iterations):
    """Gibbs sampler for a Normal likelihood with Normal-Gamma priors.

    Alternately draws mu | tau, data and tau | mu, data, starting from the
    initial precision guess *P_initial*, and returns the list of sampled
    mu values (one per iteration).
    """
    current_tau = P_initial
    n = len(D_prev)
    mu_samples = []
    for _ in range(iterations):
        # Posterior hyperparameters of mu given the current precision.
        post_mu = (tau_t * mu_t + current_tau * np.sum(D_prev)) / (tau_t + n * current_tau)
        post_tau = (tau_t + n * current_tau)
        # Draw mu from its conditional Normal.  numpy's normal() takes a
        # standard deviation, hence 1/sqrt(precision).
        sampled_mu = np.random.normal(post_mu, 1 / np.sqrt(post_tau))
        # Posterior hyperparameters of tau given the drawn mu.
        alpha = alpha_t + n / 2
        beta = beta_t + np.sum((D_prev - sampled_mu) ** 2) / 2
        # IMPORTANT: numpy's gamma() is parameterised by *scale*, not rate,
        # so the rate beta computed above must be passed as 1/beta.
        current_tau = np.random.gamma(alpha, 1 / beta)
        mu_samples.append(sampled_mu)
    return mu_samples
|
18,652 | e088af2365dc48931e83d7b005501cdc28fed7f4 | import time
import cv2
import numpy as np
from PIL import Image
from random import random, randrange
if __name__ == "__main__":
    # Opens the Video file
    cap = cv2.VideoCapture('/Users/udiram/Documents/GitHub/FitnessDetection/main_project/Udi/input/trimmedJJ.mp4')
    # Keep one frame out of every 3-4 (randomised once per run).
    frame_sampling_rate = randrange(3, 5)
    print("frame sampling rate:")
    print(frame_sampling_rate)
    i = 0
    setNumber = 1
    imageNumber = 1
    while cap.isOpened():
        ret, frame = cap.read()
        # BUG FIX: bail out *before* touching `frame` -- at end of stream
        # cap.read() returns (False, None) and the crop below would raise.
        if not ret:
            break
        # Crop to the 800x800 region of interest, then shrink to ~30%.
        frame = frame[140:940, 560:1360]
        scale_percent = 30.5
        width = int(frame.shape[1] * scale_percent / 100)
        height = int(frame.shape[0] * scale_percent / 100)
        output = cv2.resize(frame, (width, height))
        # Save only one frame every `frame_sampling_rate` frames.
        if i % frame_sampling_rate == 0:
            cv2.imwrite('/Users/udiram/Documents/GitHub/FitnessDetection/main_project/Udi/UdiProcessed_test/Set' + str(
                setNumber) + 'Image' + str(imageNumber) + '.png', output)
            imageNumber += 1
        i += 1
        if imageNumber == 9:
            # 8 images per set; roll over to the next set.
            setNumber += 1
            imageNumber = 1
        # BUG FIX: imshow() was passed the boolean `ret`; show the processed
        # frame instead (presumably the intent -- confirm).
        cv2.imshow('example', output)
    cap.release()
    cv2.destroyAllWindows()
|
18,653 | 8dcc0f06a8ca0147d60b4aebcc133e219eafe16d | # 2798 블랙잭
# 1. 첫번쨰 풀이 직접 다 합산해서 비교해봐야 함 Brute Force
'''
from sys import stdin
n,m = map(int, stdin.readline().strip().split())
cards = list(map(int, stdin.readline().strip().split()))
sum_cards = []
for i in range(n-2):
for j in range(1,n-1):
for k in range(2,n):
if i >= j or j >= k:
continue
else:
if cards[i] + cards[j] + cards[k] <= m:
sum_cards.append(cards[i]+cards[j]+cards[k])
print(max(sum_cards))
'''
# 2. Second solution -- slightly faster than #1: tracks the running maximum
#    in a variable instead of collecting sums and calling the built-in max().
# Baekjoon 2798 (Blackjack): choose 3 of the n cards maximising the sum that
# does not exceed m.  Brute force over all C(n, 3) position triples via
# itertools.combinations, which replaces the original triple index loop and
# its wasteful `i >= j or j >= k` filtering; the unused sum_cards list is
# dropped.
from sys import stdin
from itertools import combinations

n, m = map(int, stdin.readline().strip().split())
cards = list(map(int, stdin.readline().strip().split()))

max_sum = 0
for a, b, c in combinations(cards[:n], 3):
    total = a + b + c
    if max_sum < total <= m:
        max_sum = total
print(max_sum)
|
18,654 | a46cfe2359b9ca34e8f10b4e74dab333f2d72f4e | from dataclasses import dataclass, field
from data.models.structures.zone.farm import ZoneFarm
@dataclass
class ZoneCylinder(ZoneFarm):
    """Cylindrical farm zone: a circular footprint between two z levels.

    NOTE(review): ``Int32`` is not imported in this module (only
    ``dataclass``/``field`` and ``ZoneFarm`` are), so evaluating these
    annotations at class-definition time raises NameError as written.
    Confirm where Int32 should come from, or use ``int``.
    """
    x: Int32          # centre x coordinate
    y: Int32          # centre y coordinate
    z1: Int32         # lower z bound
    z2: Int32         # upper z bound
    radiant: Int32    # radius -- presumably outer; TODO confirm semantics
    radiantS: Int32   # secondary radius -- TODO confirm semantics
|
18,655 | af6ae5a6f822da5c3e40e742274c22deb0efe5f6 | import json
import os
import time
import sys
import psutil
from lib.thread import StoppableThread
DEFAULT_GRACE_PERIOD = 5.0
def _cpu_and_men_usage(processes):
"""
Compute the current CPU and memory (MB) usage for a group of processes.
"""
cpu_usage = 0
mem_usage_mb = 0
for process in processes:
cpu_usage += process.cpu_percent()
mem_usage_mb += process.memory_info().rss >> 20 # from bytes to Mb
return cpu_usage, mem_usage_mb
class Agent(StoppableThread):
    """Background thread that periodically reports process resource usage.

    Each tick writes one JSON line {"cpu_usage", "mem_usage_mb"} covering
    the monitored process and all of its descendants to stdout.
    """
    def __init__(self, *, pid=None, sleep=1):
        # pid: process to monitor (defaults to the current process).
        # sleep: seconds to wait between samples.
        super().__init__()
        self._sleep = sleep
        self._pid = pid or os.getpid()
        self._process = psutil.Process(self._pid)
    def _get_metrics(self):
        # Aggregate CPU/memory over the process and all recursive children.
        processes = [self._process] + self._process.children(recursive=True)
        cpu, mem = _cpu_and_men_usage(processes)
        return {"cpu_usage": cpu, "mem_usage_mb": mem}
    def mainloop(self):
        # One monitoring tick: emit a JSON line, flush, then wait.
        # NOTE(review): presumably StoppableThread calls mainloop()
        # repeatedly until stop() -- confirm against lib.thread.
        metrics = self._get_metrics()
        sys.stdout.write(json.dumps(metrics) + "\n")
        sys.stdout.flush()
        time.sleep(self._sleep)
if __name__ == "__main__":
    # Demo: stream metrics for ~5 seconds, then shut the agent down cleanly.
    print("Using streaming agent.")
    print("-" * 20)
    agent = Agent()
    agent.start()
    time.sleep(5)
    agent.stop()
    agent.wait()
|
18,656 | c76ea8c5fed87fe17eb4bf77bbe6e914ba3028f3 | import math
import copy
from torch import nn
import torch
import torch.nn.functional as F
from torch import nn
class VNetEmbed(nn.Module):
    """Verification head scoring probe/gallery feature-map pairs.

    Builds an (N_probe x N_gallery) grid of squared feature differences,
    batch-normalises and classifies each pair, and also returns a plain
    dot-product similarity matrix.

    NOTE(review): the ``feat_num`` constructor argument is ignored --
    ``self.feat_num`` is hard-coded to 2048.  ``instances_num`` and
    ``self.temp`` are stored but unused in this class.
    """
    def __init__(self, instances_num=4, feat_num=2048, num_classes=0, drop_ratio=0.5):
        super(VNetEmbed, self).__init__()
        self.instances_num = instances_num
        self.feat_num =2048 #3072
        self.temp = 1
        #self.kron = KronMatching()
        # BatchNorm over the squared-difference vectors, initialised to
        # identity (weight 1, bias 0).
        self.bn = nn.BatchNorm1d(self.feat_num)
        self.bn.weight.data.fill_(1)
        self.bn.bias.data.zero_()
        # Pair classifier over the normalised difference features.
        self.classifier = nn.Linear(self.feat_num, num_classes)
        self.classifier.weight.data.normal_(0, 0.001)
        self.classifier.bias.data.zero_()
        self.drop = nn.Dropout(drop_ratio)
        # Secondary scalar scorer (unused in the g2g path below).
        self.classifier2 = nn.Linear(self.feat_num, 1)
        self.classifier2.weight.data.normal_(0, 0.001)
        self.classifier2.bias.data.zero_()
    def forward(self, probe_x, gallery_x, p2g=True, g2g=False):
        # At eval time a single probe map may arrive without a batch dim.
        if not self.training and len(probe_x.size()) != len(gallery_x.size()):
            probe_x = probe_x.unsqueeze(0)
        # NOTE(review): .contiguous() returns a new tensor; these two calls
        # discard their result and therefore have no effect.
        probe_x.contiguous()
        gallery_x.contiguous()
        if g2g is True:
            N_probe, C, H, W = probe_x.size()
            N_probe = probe_x.size(0)
            N_gallery = gallery_x.size(0)
            # Global average pooling collapses each feature map to a vector.
            probe_x = F.avg_pool2d(probe_x, probe_x.size()[2:]).view(N_probe,
                                                                     C)  # average pooling the prob_x
            gallery_x = F.avg_pool2d(gallery_x, gallery_x.size()[2:]).view(N_gallery,
                                                                           C)  # average pooling the gallary_x
            probe_x = probe_x.unsqueeze(1)
            f_len = gallery_x.size(1)
            pro = probe_x.view(N_probe, f_len).contiguous()
            pro = self.bn(pro)
            # NOTE(review): `galle` is built from probe_x, not gallery_x, so
            # dot_p is a probe-vs-probe similarity matrix -- confirm whether
            # that is intentional for the g2g path.
            galle = probe_x.view(N_probe, f_len).contiguous()
            galle = self.bn(galle)
            dot_p = torch.matmul(pro, galle.transpose(1, 0)).contiguous()
            dot_p = dot_p.view(dot_p.size(0), dot_p.size(0))
            #dot_p = torch.pow(dot_p, 2)
            probe_x = probe_x.expand(N_probe, N_gallery,
                                     self.feat_num)  # reshaping prob_x to a square matrix, (numb_gallery image X number_gallery_image)
            probe_x = probe_x.contiguous()
            gallery_x = gallery_x.unsqueeze(0)
            gallery_x = gallery_x.expand(N_probe, N_gallery,
                                         self.feat_num)  # reshaping gallary_x to a square matrix, (numb_gallery image X number_gallery_image)
            #probe_x = self._kron_matching(gallery_x, probe_x)
            gallery_x = gallery_x.contiguous()
            diff = gallery_x - probe_x  # Computing the distance
            diff = torch.pow(diff, 2)
            #dot_p = torch.pow(dot_p,2)
            #diff= torch.exp(diff) # using exponent instead of square doesn't work
            diff = diff.view(N_probe * N_gallery, -1)
            diff = diff.contiguous()
            bn_diff = self.bn(diff)
            bn_diff = self.drop(bn_diff)
            cls_encode = self.classifier(bn_diff)
            # cls_encode = torch.mean(bn_diff
            cls_encode = cls_encode.view(N_probe, N_gallery, -1)
            #dot_p = self.classifier2(bn_diff)
            #dot_p = dot_p.view(N_probe, N_gallery)
            return cls_encode, dot_p
|
18,657 | d1ee678bdba61ca01ff8ba9f7e5c9a88c8b3021c | import unittest
import numpy as np
import trees
import math
import knn
import bayes
class Test(unittest.TestCase):
    """Unit tests for the trees, bayes and knn helper modules.

    FIX: every ``self.failUnless`` (deprecated since Python 2.7 and removed
    in Python 3.12) is replaced with its modern alias ``self.assertTrue``.
    """

    def test_calculate_entropy(self):
        """Shannon entropy of label lists, in bits."""
        self.assertTrue(trees.calculate_entropy([0]) == 0)
        self.assertTrue(trees.calculate_entropy([0, 0, 0, 1, 1, 1]) == -math.log(0.5, 2))
        self.assertTrue(trees.calculate_entropy(['A', 'B']) == -math.log(0.5, 2))
        self.assertTrue(trees.calculate_entropy(['A', 'B', 'C', 'D', 'E']) == -math.log(0.2, 2))
        self.assertTrue(trees.calculate_entropy([0, 0, 'A', 'A', 'A']) ==
                        -0.4 * math.log(0.4, 2) +
                        -0.6 * math.log(0.6, 2))

    def test_get_token_set(self):
        """Vocabulary built from a corpus is the de-duplicated token set."""
        corpus = [['this', 'is', 'not', 'a', 'test'],
                  ['actually', 'this', 'is', 'a', 'test']]
        vocabulary = bayes.get_token_set(corpus)
        self.assertTrue(set(vocabulary) == {'this', 'is', 'not', 'actually', 'a', 'test'})
        self.assertTrue(len(vocabulary) == 6)

    def test_get_vocabulary_set_vector(self):
        """Set-of-words vector: 1 per vocabulary token present in the sample."""
        vocabulary = 'these are the words'.split(' ')
        sample = 'these are words'.split(' ')
        self.assertTrue(bayes.get_vocabulary_set_vector(vocabulary, sample) == [1, 1, 0, 1])
        sample = 'these words may really be words'.split(' ')
        self.assertTrue(bayes.get_vocabulary_set_vector(vocabulary, sample) == [1, 0, 0, 1])
        vocabulary = []
        sample = 'edge case test'.split(' ')
        self.assertTrue(bayes.get_vocabulary_set_vector(vocabulary, sample) == [])

    def test_get_vocabulary_bag_vector(self):
        """Bag-of-words vector: per-token occurrence counts."""
        vocabulary = 'these are the words'.split(' ')
        sample = 'these are words'.split(' ')
        self.assertTrue(np.all(bayes.get_vocabulary_bag_vector(vocabulary, sample) == [1, 1, 0, 1]))
        sample = 'these words are words but not the words'.split(' ')
        self.assertTrue(np.all(bayes.get_vocabulary_bag_vector(vocabulary, sample) == [1, 1, 1, 3]))
        vocabulary = []
        sample = 'edge case test'.split(' ')
        self.assertTrue(np.all(bayes.get_vocabulary_bag_vector(vocabulary, sample) == []))

    def test_parse_to_word_list(self):
        """Tokenisation lower-cases and strips punctuation/extra spaces."""
        self.assertTrue(bayes.parse_to_word_list('') == [])
        self.assertTrue(bayes.parse_to_word_list('SiNgLeToN') == ['singleton'])
        text = 'Some words, including punctuation, AND capitalization, and extra spaces.'
        words = ['some', 'words', 'including', 'punctuation', 'and', 'capitalization', 'and', 'extra', 'spaces']
        self.assertTrue(bayes.parse_to_word_list(text) == words)

    def test_get_max_valued_key(self):
        """knn.get_most_common returns the key with the highest tally."""
        tally = {'two': 2, 'six': 6, 'one': 1, 'four': 4}
        self.assertTrue(knn.get_most_common(tally) == 'six')

    def test_train_naive_bayes(self):
        """Per-class numerators/denominators and priors from training data."""
        factors = np.array([[1, 0], [1, 0], [0, 1], [1, 0], [0, 1]])
        labels = ['A', 'A', 'B', 'B', 'C', ]
        numerators, denominators, ps, unique_labels = bayes.train_naive_bayes(factors, labels)
        self.assertTrue(set(unique_labels) == set(['A', 'B', 'C']))
        self.assertTrue(np.all(numerators[unique_labels.index('A')] == [2, 0]))
        self.assertTrue(np.all(numerators[unique_labels.index('B')] == [1, 1]))
        self.assertTrue(np.all(numerators[unique_labels.index('C')] == [0, 1]))
        self.assertTrue(np.all(denominators[unique_labels.index('A')] == 2))
        self.assertTrue(np.all(denominators[unique_labels.index('B')] == 2))
        self.assertTrue(np.all(denominators[unique_labels.index('C')] == 1))
        self.assertTrue(ps[unique_labels.index('A')] == 0.4)
        self.assertTrue(ps[unique_labels.index('B')] == 0.4)
        self.assertTrue(ps[unique_labels.index('C')] == 0.2)
|
18,658 | e1808a1d666fe911d30ebc50a67e76695e8ee69e | """Interfaces to the MaterialsIO parsers for use by the MDF"""
from mdf_matio.version import __version__ # noqa: F401
from materials_io.utils.interface import (get_available_adapters, ParseResult,
get_available_parsers, run_all_parsers)
from mdf_matio.grouping import groupby_file, groupby_directory
from mdf_matio.validator import MDFValidator
from mdf_toolbox import dict_merge
from typing import Iterable, Set, List
from functools import reduce, partial
import logging
import os
logger = logging.getLogger(__name__)
_merge_func = partial(dict_merge, append_lists=True)
"""Function used to merge records"""
def get_mdf_parsers() -> Set[str]:
    """Get the list of parsers defined for the MDF

    Returns:
        (set[str]): Names of parsers whose adapter class lives in this
        package (i.e. parsers that are compatible with the MDF)
    """
    # Set comprehension instead of set([...]) -- same result, no throwaway list.
    return {name for name, info in get_available_adapters().items()
            if info['class'].startswith('mdf_matio')}
def _merge_records(group: List[ParseResult]):
    """Merge a group of records

    Args:
        group ([ParseResult]): List of parse results to group
    Returns:
        (ParseResult): one record with the union of the groups' files, a
        '-'-joined sorted parser name, and metadata merged across the group
    """
    # Group the file list and parsers
    group_files = list(set(sum([tuple(x.group) for x in group], ())))
    group_parsers = '-'.join(sorted(set(sum([[x.parser] for x in group], []))))
    # Merge the metadata
    is_list = [isinstance(x.metadata, list) for x in group]
    if sum(is_list) > 1:
        raise NotImplementedError('We have not defined how to merge >1 list-type data')
    elif sum(is_list) == 1:
        # Exactly one record carries list metadata: merge all dict-type
        # records into one dict and broadcast it onto each list entry.
        list_data = group[is_list.index(True)].metadata
        if len(is_list) > 1:
            other_metadata = reduce(_merge_func,
                                    [x.metadata for x, t in zip(group, is_list) if not t])
            group_metadata = [_merge_func(x, other_metadata) for x in list_data]
        else:
            group_metadata = list_data
    else:
        # All dict metadata: fold them together pairwise.
        group_metadata = reduce(_merge_func, [x.metadata for x in group])
    return ParseResult(group_files, group_parsers, group_metadata)
def _merge_files(parse_results: Iterable[ParseResult]) -> Iterable[ParseResult]:
    """Merge metadata of records associated with the same file(s)

    Args:
        parse_results (ParseResult): Generator of ParseResults
    Yields:
        (ParseResult): ParserResults merged for each file.
    """
    # Lazily merge each file-based group, mirroring the previous map() form.
    for file_group in groupby_file(parse_results):
        yield _merge_records(file_group)
def _merge_directories(parse_results: Iterable[ParseResult], dirs_to_group: List[str])\
        -> Iterable[ParseResult]:
    """Merge records from user-specified directories

    Args:
        parse_results (ParseResult): Generator of ParseResults
        dirs_to_group ([str]): Directories whose contents (recursively)
            should be merged into one record per directory group
    Yields:
        (ParseResult): ParserResults merged for each record
    """
    # Add a path separator to the end of each directory
    # Used to simplify checking whether each file is a subdirectory of the matched groups
    dirs_to_group = [d + os.path.sep for d in dirs_to_group]

    def is_in_directory(f):
        """Check whether a file is in one fo the directories to group"""
        f = os.path.dirname(f) + os.path.sep
        return any(f.startswith(d) for d in dirs_to_group)

    # Gather records that are in directories to group or any of their subdirectories;
    # everything else passes through unmodified.
    flagged_records = []
    for record in parse_results:
        if any(is_in_directory(f) for f in record.group):
            flagged_records.append(record)
        else:
            yield record

    # Once all of the parse results are through, group by directory
    for group in groupby_directory(flagged_records):
        yield _merge_records(group)
def generate_search_index(data_url: str, validate_records=True, parse_config=None,
                          exclude_parsers=None, index_options=None) -> Iterable[dict]:
    """Generate a search index from a directory of data

    Args:
        data_url (str): Location of dataset to be parsed
        validate_records (bool): Whether to validate records against MDF Schemas
        parse_config (dict): Dictionary of parsing options specific to certain files/directories.
            Keys must be the path of the file or directory.
            Values are dictionaries of options for that directory, supported options include:
                group_by_directory: (bool) Whether to group all subdirectories of this
                    directory as single records
        exclude_parsers ([str]): Names of parsers to exclude
        index_options (dict): Indexing options used by MDF Connect
    Yields:
        (dict): Metadata records ready for ingestion in MDF search index

    FIXES: log calls now use this module's ``logger`` (it was defined but the
    root ``logging`` functions were used instead), and the "Excluded" message
    previously printed the parser *count* twice instead of listing them.
    """
    if parse_config is None:
        parse_config = {}

    # Get the list of parsers that have adapters defined in this package
    target_parsers = get_mdf_parsers()
    logger.info(f'Detected {len(target_parsers)} parsers: {target_parsers}')
    missing_parsers = set(get_available_parsers().keys()).difference(
        target_parsers)
    if len(missing_parsers) > 0:
        logger.warning(f'{len(missing_parsers)} parsers are not used: {missing_parsers}')
    if exclude_parsers is not None:
        target_parsers.difference_update(exclude_parsers)
        # BUG FIX: the second placeholder used to repeat len(exclude_parsers).
        logger.info(f'Excluded {len(exclude_parsers)} parsers: {exclude_parsers}')

    # Add root directory to the target path
    index_options = index_options or {}
    # TODO (wardlt): Figure out how this works with Globus URLs
    index_options['generic'] = {'root_dir': data_url}

    # Run the target parsers with their matching adapters on the directory
    parse_results = run_all_parsers(data_url, include_parsers=list(target_parsers),
                                    adapter_map='match', parser_context=index_options,
                                    adapter_context=index_options)

    # Merge by directory in the user-specified directories
    grouped_dirs = [path for path, cfg in parse_config.items()
                    if cfg.get('group_by_directory', False)]
    logger.info(f'Grouping {len(grouped_dirs)} directories')
    parse_results = _merge_directories(parse_results, grouped_dirs)

    # TODO: Add these variables as arguments or fetch in other way
    dataset_metadata = None  # Provided by MDF directly
    validation_params = None  # Provided by MDF directly
    schema_branch = "master"  # Must be configurable, can be provided by MDF directly

    # Validate metadata and tweak into final MDF feedstock format
    # Will fail if any entry fails validation - no invalid entries can be allowed
    vald = MDFValidator(schema_branch=schema_branch)
    vald_gen = vald.validate_mdf_dataset(dataset_metadata, validation_params)
    # Yield validated dataset entry
    yield next(vald_gen)

    # Merge records associated with the same file
    for group in _merge_files(parse_results):
        # Skip records that include only generic metadata
        if group.parser == 'generic':
            continue

        # Loop over all produced records
        metadata = group.metadata if isinstance(group.metadata, list) else [group.metadata]

        # Record validation
        for record in metadata:
            yield vald_gen.send(record)
    vald_gen.send(None)
|
18,659 | 50839918cc9f70a8b90fe260c857c10566cebab0 | '''
You are given an array of positive integers w where w[i] describes the weight of ith index (0-indexed).
We need to call the function pickIndex() which randomly returns an integer in the range [0, w.length - 1]. pickIndex() should return the integer proportional to its weight in the w array. For example, for w = [1, 3], the probability of picking the index 0 is 1 / (1 + 3) = 0.25 (i.e 25%) while the probability of picking the index 1 is 3 / (1 + 3) = 0.75 (i.e 75%).
More formally, the probability of picking index i is w[i] / sum(w).
Example 1:
Input
["Solution","pickIndex"]
[[[1]],[]]
Output
[null,0]
Explanation
Solution solution = new Solution([1]);
solution.pickIndex(); // return 0. Since there is only one single element on the array the only option is to return the first element.
https://leetcode.com/problems/random-pick-with-weight/
https://www.youtube.com/watch?v=skkJtFzePwQ&ab_channel=thecodingworld
'''
import random
class Solution:
    """Weighted random index picker (LeetCode 528).

    Builds a prefix-sum array over the weights; pickIndex() draws a uniform
    value in [0, total) and binary-searches for the first prefix sum that
    covers the draw, so index i is chosen with probability w[i] / sum(w).
    """

    def __init__(self, w, l=None):
        """w: positive integer weights.

        BUG FIX: ``l`` used to be a required (and unused) second argument;
        it is now optional so both old two-argument call sites and the
        standard one-argument construction work.
        """
        self.prefix_sum = []
        running = 0
        for weight in w:
            running += weight
            self.prefix_sum.append(running)
        self.total_sum = running

    def pickIndex(self):
        """Return a random index, weighted proportionally to w."""
        target = self.total_sum * random.random()
        # Binary search for the leftmost prefix sum >= target.
        low, high = 0, len(self.prefix_sum)
        while low < high:
            mid = low + (high - low) // 2
            if target > self.prefix_sum[mid]:
                low = mid + 1
            else:
                high = mid
        return low
# Exercise the example from the problem statement: with a single weight
# there is only one index to return, so pickIndex() must yield 0.
# BUG FIX: the original passed the LeetCode operation/argument lists
# swapped and nested (Solution(l, w) with l=["Solution","pickIndex"]),
# which made pickIndex() crash; construct with the actual weight array.
obj = Solution([1])
param_1 = obj.pickIndex()
print(param_1)
18,660 | d9bceb9c2007bed4eade47bd88c0f40537e5724f | import os
from .context import sdf
def test_read_phoenix():
    """Smoke-test ModelSpectrum.read_phoenix on two BT-Settl model files.

    Reads each .7.bz2 spectrum from the configured model_root and then
    unlinks the corresponding .npy file (presumably a cache written by
    read_phoenix -- confirm) so reruns start from a clean state.
    """
    dir = sdf.config.file['model_root']
    s = sdf.spectrum.ModelSpectrum.read_phoenix(
        dir+'lte009.5-3.5-0.0.BT-Settl.7.bz2'
    )
    os.unlink(dir+'lte009.5-3.5-0.0.BT-Settl.7.bz2.npy')
    s = sdf.spectrum.ModelSpectrum.read_phoenix(
        dir+'lte058-4.5-0.0a+0.0.BT-Settl.7.bz2'
    )
    os.unlink(dir+'lte058-4.5-0.0a+0.0.BT-Settl.7.bz2.npy')
|
18,661 | 4346b6af6861f67024bf17ee12e9d9d079eea37c | import random
def insertionsort(arr):
    """Sort *arr* in place (ascending) using insertion sort, O(n^2).

    BUG FIX: the loop guard tested ``arr[j] > elem`` before ``j >= 0``, so
    with j == -1 it read arr[-1] (the last element) via Python's negative
    indexing.  The result happened to stay correct only because the bounds
    test still terminated the loop; the bounds check now comes first.
    """
    for i in range(1, len(arr)):
        elem = arr[i]
        j = i - 1
        # Shift every element greater than elem one slot to the right.
        while j >= 0 and arr[j] > elem:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = elem
if __name__ == '__main__':
    # Smoke test: sort 10 random values in [-99, 99], printing before/after.
    arr = []
    for i in range(10):
        arr.append(random.randint(-99,99))
    print(arr)
    insertionsort(arr)
    print(arr)
18,662 | 8a6597c63dea6d70c64eb151fb5ea4d9d306253b | #! -*- coding:utf8 -*-
import sys
sys.path.append('..')
reload(sys)
sys.setdefaultencoding('utf-8')
# Numeric identifiers for each food-delivery data source.
ELEME_SOURCE = 0
MEITUAN_SOURCE = 1
DIANPING_SOURCE = 2
BAIDU_SOURCE = 3
# All supported sources: pinyin slug, localized display name, numeric id.
ALL_SOURCES = [
    {
        'pinyin':'eleme',
        'name':u"饿了么",
        'source':ELEME_SOURCE,
    },
    {
        'pinyin':'meituan',
        'name':u"美团外卖",
        'source':MEITUAN_SOURCE,
    },
    {
        'pinyin':'dianping',
        'name':u"点评外卖",
        'source':DIANPING_SOURCE,
    },
    {
        'pinyin':'baidu',
        'name':u"百度外卖",
        'source':BAIDU_SOURCE,
    }
]
# Phone-match score threshold: score >= phone_threshold confirms the match
# outright; otherwise matching continues on the other attributes below.
phone_threshold = 1.0
# Weights of the shop attributes in the combined match score.
name_weight = 0.5
address_weight = 0.3
distance_weight = 0.2
# Score tables mapping a raw name/address similarity score range
# (min_score, max_score], with inclusivity flags, to the normalized score
# returned for that range.
eleme_baidu_name = [
    {
        'min_score':0.8,
        'min_include':False,
        'max_score':1.0,
        'max_include':True,
        'return_score':1.0,
    },
    {
        'min_score':0.6,
        'min_include':False,
        'max_score':0.8,
        'max_include':True,
        'return_score':0.8,
    },
    {
        'min_score':0.5,
        'min_include':False,
        'max_score':0.6,
        'max_include':True,
        'return_score':0.6,
    },
    {
        'min_score':0.0,
        'min_include':True,
        'max_score':0.5,
        'max_include':True,
        'return_score':0.0,
    },
]
eleme_baidu_address = [
    {
        'min_score':0.8,
        'min_include':False,
        'max_score':1.0,
        'max_include':True,
        'return_score':1.0,
    },
    {
        'min_score':0.6,
        'min_include':False,
        'max_score':0.8,
        'max_include':True,
        'return_score':0.9,
    },
    {
        'min_score':0.4,
        'min_include':False,
        'max_score':0.6,
        'max_include':True,
        'return_score':0.8,
    },
    {
        'min_score':0.3,
        'min_include':False,
        'max_score':0.4,
        'max_include':True,
        'return_score':0.6,
    },
    {
        'min_score':0.0,
        'min_include':True,
        'max_score':0.3,
        'max_include':True,
        'return_score':0.0,
    },
]
eleme_meituan_name = [
    {
        'min_score':0.8,
        'min_include':False,
        'max_score':1.0,
        'max_include':True,
        'return_score':1.0,
    },
    {
        'min_score':0.6,
        'min_include':False,
        'max_score':0.8,
        'max_include':True,
        'return_score':0.8,
    },
    {
        'min_score':0.5,
        'min_include':False,
        'max_score':0.6,
        'max_include':True,
        'return_score':0.6,
    },
    {
        'min_score':0.0,
        'min_include':True,
        'max_score':0.5,
        'max_include':True,
        'return_score':0.0,
    },
]
eleme_meituan_address = [
    {
        'min_score':0.8,
        'min_include':False,
        'max_score':1.0,
        'max_include':True,
        'return_score':1.0,
    },
    {
        'min_score':0.6,
        'min_include':False,
        'max_score':0.8,
        'max_include':True,
        'return_score':0.9,
    },
    {
        'min_score':0.4,
        'min_include':False,
        'max_score':0.6,
        'max_include':True,
        'return_score':0.8,
    },
    {
        'min_score':0.3,
        'min_include':False,
        'max_score':0.4,
        'max_include':True,
        'return_score':0.6,
    },
    {
        'min_score':0.0,
        'min_include':True,
        'max_score':0.3,
        'max_include':True,
        'return_score':0.0,
    },
]
eleme_dianping_name = [
    {
        'min_score':0.8,
        'min_include':False,
        'max_score':1.0,
        'max_include':True,
        'return_score':1.0,
    },
    {
        'min_score':0.6,
        'min_include':False,
        'max_score':0.8,
        'max_include':True,
        'return_score':0.8,
    },
    {
        'min_score':0.5,
        'min_include':False,
        'max_score':0.6,
        'max_include':True,
        'return_score':0.6,
    },
    {
        'min_score':0.0,
        'min_include':True,
        'max_score':0.5,
        'max_include':True,
        'return_score':0.0,
    },
]
eleme_dianping_address = [
    {
        'min_score':0.8,
        'min_include':False,
        'max_score':1.0,
        'max_include':True,
        'return_score':1.0,
    },
    {
        'min_score':0.6,
        'min_include':False,
        'max_score':0.8,
        'max_include':True,
        'return_score':0.9,
    },
    {
        'min_score':0.4,
        'min_include':False,
        'max_score':0.6,
        'max_include':True,
        'return_score':0.8,
    },
    {
        'min_score':0.3,
        'min_include':False,
        'max_score':0.4,
        'max_include':True,
        'return_score':0.6,
    },
    {
        'min_score':0.0,
        'min_include':True,
        'max_score':0.3,
        'max_include':True,
        'return_score':0.0,
    },
]
# Distance (meters, from lat/lng) between two shops mapped to a score.
# Each band returns a score interpolated between return_min_score (at
# min_distance) and return_max_score (at max_distance).
eleme_meituan_distance = [
    {
        'max_distance':100,
        'max_include':True,
        'return_max_score':0.9,
        'min_distance':0,
        'min_include':True,
        'return_min_score':1.0,
    },
    {
        'max_distance':200,
        'max_include':True,
        'return_max_score':0.8,
        'min_distance':100,
        'min_include':False,
        'return_min_score':0.9,
    },
    {
        'max_distance':300,
        'max_include':True,
        'return_max_score':0.6,
        'min_distance':200,
        'min_include':False,
        'return_min_score':0.8,
    },
    {
        'max_distance':400,
        'max_include':True,
        'return_max_score':0.5,
        'min_distance':300,
        'min_include':False,
        'return_min_score':0.6,
    },
    {
        'max_distance':1000,
        'max_include':True,
        'return_max_score':0.0,
        'min_distance':400,
        'min_include':False,
        'return_min_score':0.5,
    },
    {
        'max_distance':sys.maxint,
        'max_include':True,
        'return_max_score':0.0,
        'min_distance':1000,
        'min_include':False,
        'return_min_score':0.0,
    },
]
# Identical to eleme_meituan_distance; kept separate so the two pairings
# can be tuned independently.
eleme_dianping_distance = [
    {
        'max_distance':100,
        'max_include':True,
        'return_max_score':0.9,
        'min_distance':0,
        'min_include':True,
        'return_min_score':1.0,
    },
    {
        'max_distance':200,
        'max_include':True,
        'return_max_score':0.8,
        'min_distance':100,
        'min_include':False,
        'return_min_score':0.9,
    },
    {
        'max_distance':300,
        'max_include':True,
        'return_max_score':0.6,
        'min_distance':200,
        'min_include':False,
        'return_min_score':0.8,
    },
    {
        'max_distance':400,
        'max_include':True,
        'return_max_score':0.5,
        'min_distance':300,
        'min_include':False,
        'return_min_score':0.6,
    },
    {
        'max_distance':1000,
        'max_include':True,
        'return_max_score':0.0,
        'min_distance':400,
        'min_include':False,
        'return_min_score':0.5,
    },
    {
        'max_distance':sys.maxint,
        'max_include':True,
        'return_max_score':0.0,
        'min_distance':1000,
        'min_include':False,
        'return_min_score':0.0,
    },
]
# NOTE(review): unlike the other distance tables, scores here peak around
# 300-400 m rather than at 0 m — presumably compensating for Baidu's
# offset (BD-09) coordinate system; confirm this is intentional.
eleme_baidu_distance = [
    {
        'max_distance':200,
        'max_include':True,
        'return_max_score':0.3,
        'min_distance':0,
        'min_include':True,
        'return_min_score':0.5,
    },
    {
        'max_distance':300,
        'max_include':True,
        'return_max_score':0.7,
        'min_distance':200,
        'min_include':False,
        'return_min_score':0.9,
    },
    {
        'max_distance':400,
        'max_include':True,
        'return_max_score':0.9,
        'min_distance':300,
        'min_include':True,
        'return_min_score':1.0,
    },
    {
        'max_distance':500,
        'max_include':True,
        'return_max_score':0.5,
        'min_distance':400,
        'min_include':False,
        'return_min_score':0.7,
    },
    {
        'max_distance':1000,
        'max_include':True,
        'return_max_score':0.0,
        'min_distance':500,
        'min_include':False,
        'return_min_score':0.5,
    },
    {
        'max_distance':sys.maxint,
        'max_include':True,
        'return_max_score':0.0,
        'min_distance':1000,
        'min_include':False,
        'return_min_score':0.0,
    },
]
|
18,663 | 453f8effeb31cb74b43411d88b5a2a2d7b34a8ef | #!/usr/bin/env python
import evelib.newdb as db
import emcom.gmi as gmi
from emcom import humane
# Maps each ore group to a short availability tag: "zero" = nullsec-only
# ores; otherwise the security band ("high"/"low") with mineral hints
# (e.g. M/G/C = Mexallon/Gneiss-type yields) appended by the author.
GROUP_TAG = {'Arkonor': "zero",
             'Bistot': "zero",
             'Crokite': "zero",
             'Dark Ochre': "zero",
             'Gneiss': "zero",
             'Spodumain': "zero",
             'Mercoxit': "zero",
             'Veldspar': "high",
             'Scordite': "high",
             'Plagioclase': "high M/G/C",
             'Pyroxeres': "high C/A",
             'Omber': "high M/G",
             'Kernite': "low M/C, high A",
             'Hedbergite': "low M/C",
             'Hemorphite': "low G/A",
             'Jaspet': "low G/A",
             }
def main():
    """Rank published asteroid ores by refined-mineral value per m3.

    Pulls each ore's refining outputs from the CCP static data, prices
    them with the current market index, and prints ores sorted by
    ISK/m3 (most valuable first) with their GROUP_TAG annotation.

    NOTE: Python 2 code (print statement); relies on the project's
    evelib/emcom modules and a populated `ccp` database schema.
    """
    index = gmi.current()
    conn = db.connect()
    c = conn.cursor()
    # One row per (ore, output mineral): the ore's volume and portion
    # size plus the mineral quantity yielded per portion.
    c.execute("SELECT t.typename, t.volume, t.portionsize, "
              "       mt.typename, tm.quantity "
              "FROM ccp.invtypematerials tm "
              "     INNER JOIN ccp.invtypes t ON tm.typeid = t.typeid "
              "     INNER JOIN ccp.invgroups g ON t.groupid = g.groupid "
              "     INNER JOIN ccp.invcategories c "
              "       ON c.categoryid = g.categoryid "
              "     INNER JOIN ccp.invtypes mt "
              "       ON tm.materialtypeid = mt.typeid "
              "WHERE t.published = 1 "
              "  AND c.categoryname = 'Asteroid' "
              "  AND g.groupname = t.typename "
              "  AND g.groupname != 'Ice' "
              "  AND t.typename NOT LIKE 'Compressed %%'")
    compositions = {}
    for roid, volume, portionsize, mineral, qty in c.fetchall():
        compositions.setdefault(roid, 0)
        # Accumulate mineral value per portion, normalized to ISK per m3.
        compositions[roid] += (index[mineral] * qty) / float(portionsize) / float(volume)
    # Sort by value descending (tuple sort on (price, name)).
    profits = [(price, roid) for (roid, price) in compositions.items()]
    profits.sort(reverse=True)
    for profit, name in profits:
        print "%8s %s (%s)" % (humane(profit), name, GROUP_TAG[name])
if __name__ == '__main__':
    main()
|
18,664 | c7552c19fc5f04ebd745dc14ebe2603311d444af | from sklearn import datasets
from keras.models import Sequential
from keras.layers import Dense
# Train a small feed-forward classifier on the Iris data set.
iris = datasets.load_iris()
x = iris.data
y = iris.target  # three classes: 0, 1, 2

model = Sequential()
model.add(Dense(8, input_dim=4, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
# Iris is a 3-class problem: the original single sigmoid output trained
# with binary_crossentropy cannot represent class 2 at all. Use a
# 3-way softmax with the matching sparse categorical loss (labels are
# integer-encoded, so no one-hot conversion is needed).
model.add(Dense(3, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])
model.fit(x, y, batch_size=15, epochs=150)
scores = model.evaluate(x, y)
# Convert class probabilities to hard labels before comparing with y;
# the original compared y against the raw probability vectors (and left
# its `rounded` list unused).
predicted = model.predict(x).argmax(axis=1)
count = 0
for i in range(len(predicted)):
    if y[i] != predicted[i]:
        count = count + 1
print('Number of wrong answers : {}'.format(count))
18,665 | bd4699027549a4f987188a561e7c0b5741cbce99 | cache = {}
import functools


@functools.lru_cache(maxsize=None)
def fib(n):
    """Return the n-th Fibonacci number (fib(0)=0, fib(1)=1).

    Memoized with functools.lru_cache, replacing the hand-rolled
    module-level `cache` dict and its non-idiomatic
    `n not in cache.keys()` membership test.
    """
    if n < 2:
        return n
    return fib(n - 1) + fib(n - 2)
# Project Euler #2: sum the even Fibonacci terms not exceeding 4,000,000.
terms = []
for index in range(100):
    value = fib(index)
    if value > 4000000:
        break
    terms.append(value)
even_terms = [term for term in terms if term % 2 == 0]
print(even_terms)
print(sum(even_terms))
18,666 | ccec3ec54aea28959b19908b53349dfc5db8821f | import logging
import numpy as np
from overrides import overrides
import random
from typing import Callable, Dict, Iterable, Iterator, List
from allennlp.common import Params, Tqdm
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import ArrayField
from allennlp.data.fields.metadata_field import MetadataField
from allennlp.data.instance import Instance
from src.gnli_tokenizer import GNLITokenizer
from src import utils
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("gnli")
class GNLIDatasetReader(DatasetReader):
    """AllenNLP dataset reader for generative NLI (GNLI).

    Each NLI example is turned into a seq2seq instance: the premise is
    encoded three times, once prefixed with each label token
    (entail/neutral/contradict), and the hypothesis is the decoder target.
    """

    def __init__(self,
                 pretrained_model: str,
                 max_premise_length: int = None,
                 max_hypothesis_length: int = None,
                 percent_data: float = 1,
                 lazy: bool = False) -> None:
        """Load the pretrained GNLI tokenizer and record truncation limits.

        `percent_data` in (0, 1] subsamples the data file; lengths of None
        disable truncation/padding for that side.
        """
        super().__init__(lazy)
        assert percent_data > 0 and percent_data <= 1
        self.percent_data = percent_data
        self.max_premise_length = max_premise_length
        self.max_hypothesis_length = max_hypothesis_length
        self._tokenizer = GNLITokenizer.from_pretrained(pretrained_model)
        self._label_dict = {'entailment': 0, 'neutral': 1, 'contradiction': 2}

    @overrides
    def _read(self, file_path: str):
        """Yield one Instance per example dict read from `file_path`."""
        lines = utils.read_data(file_path, self.percent_data)
        # Create instances
        for line in lines:
            yield self.text_to_instance(**line)

    @overrides
    def text_to_instance(self, premise: str, hypothesis: str, label: str = None, tag=None) -> Instance:
        """Build a seq2seq Instance from one premise/hypothesis pair.

        Produces three encoder inputs (one per candidate label token),
        shared decoder inputs/targets, and the gold label index when
        `label` is provided.
        """
        ####################
        ##### Tokenization and truncation
        ####################
        premise_tokens = self._tokenizer.tokenize(premise.strip())
        hypothesis_tokens = self._tokenizer.tokenize(hypothesis.strip())
        premise_tokens, hypothesis_tokens = self._truncate_input(premise_tokens, hypothesis_tokens)
        ####################
        ##### Create ids for encoder inputs, decoder inputs and decoder targets
        ####################
        ## Create encoder inputs
        # One encoder sequence per label hypothesis; all share the premise,
        # so their lengths are identical.
        src = []
        src.append(self._tokenizer.add_special_tokens_single_sentence(self._tokenizer.convert_tokens_to_ids([self._tokenizer.entail_token]+premise_tokens)))
        src.append(self._tokenizer.add_special_tokens_single_sentence(self._tokenizer.convert_tokens_to_ids([self._tokenizer.neutral_token]+premise_tokens)))
        src.append(self._tokenizer.add_special_tokens_single_sentence(self._tokenizer.convert_tokens_to_ids([self._tokenizer.contradict_token]+premise_tokens)))
        assert len(src[0]) == len(src[1]) == len(src[2])
        src_length = len(src[0])
        ## Create decoder inputs and targets
        # Targets of the decoder: [<s> A B C D E <\s>]
        target = self._tokenizer.add_special_tokens_single_sentence(self._tokenizer.convert_tokens_to_ids(hypothesis_tokens))
        # Inputs of the decoder: [<\s> <s> A B C D E]
        prev_output_tokens = [self._tokenizer.eos_token_id] + target[:-1]
        target_length = len(target)
        ####################
        ##### Padding of the input
        ####################
        # Pad the premise ids (the source)
        if self.max_premise_length:
            encoder_padding = [self._tokenizer.pad_token_id]*(self.max_premise_length - src_length)
            src = [s + encoder_padding for s in src]
        # Pad the hypothesis ids (the target)
        if self.max_hypothesis_length:
            decoder_padding = [self._tokenizer.pad_token_id]*(self.max_hypothesis_length - target_length)
            target += decoder_padding
            prev_output_tokens += decoder_padding
        # Replicate `prev_output_tokens` and `src_lengths` three times
        # so they line up with the three label-conditioned encoder inputs.
        prev_output_tokens = [prev_output_tokens]*3
        src_length = [src_length]*3
        ####################
        ##### Create instance
        ####################
        metadata = {'premise': premise,
                    'hypothesis': hypothesis,
                    'premise_tokens': premise_tokens,
                    'hypothesis_tokens': hypothesis_tokens,
                    'label': label, 'tag': tag}
        fields = {'src': ArrayField(np.array(src), dtype=np.int64),
                  'src_lengths': ArrayField(np.array(src_length), dtype=np.int64),
                  'prev_output_tokens': ArrayField(np.array(prev_output_tokens), dtype=np.int64),
                  'target': ArrayField(np.array(target), dtype=np.int64),
                  'target_lengths': ArrayField(np.array(target_length), dtype=np.int64),
                  'metadata': MetadataField(metadata)}
        if label is not None:
            fields['label'] = ArrayField(np.array(self._label_dict[label]), dtype=np.int64)
        return Instance(fields)

    def _truncate_input(self, premise_tokens, hypothesis_tokens):
        """Truncate both token lists so special tokens still fit the limits."""
        if self.max_premise_length:
            # Account for [<s>] + label_token + premise_tokens + [</s>]
            max_premise_length = self.max_premise_length - 3
            premise_tokens = premise_tokens[:max_premise_length]
        if self.max_hypothesis_length:
            # Account for [<s>] + hypothesis_tokens + [</s>]
            max_hypothesis_length = self.max_hypothesis_length - 2
            hypothesis_tokens = hypothesis_tokens[:max_hypothesis_length]
        return premise_tokens, hypothesis_tokens
|
18,667 | 95311c8554f96f05645c37e96d1a2c54c793129e | __author__ = 'Acko'
import unittest
from application.Heap import *
class HeapTest(unittest.TestCase):
    """Unit tests for the Heap priority queue (Python 2 project code)."""

    def test_add(self):
        # After each add, top() must expose the current best element.
        heap = Heap()
        steps = [
            ((5, '1'), (5, '1')),
            ((10, '2'), (5, '1')),
            ((2, '3'), (2, '3')),
            ((7, '4'), (2, '3')),
            ((1, '5'), (1, '5')),
        ]
        for (priority, value), expected_top in steps:
            heap.add(priority, value)
            self.assertEqual(heap.top(), expected_top)

    def test_pop(self):
        # Elements must come back in ascending priority order, and a pop
        # from an empty heap must raise HeapError.
        heap = Heap()
        for priority, value in [(5, '1'), (10, '2'), (2, '3'), (7, '4'), (1, '5')]:
            heap.add(priority, value)
        for expected in [(1, '5'), (2, '3'), (5, '1'), (7, '4'), (10, '2')]:
            self.assertEqual(heap.pop(), expected)
        with self.assertRaises(HeapError):
            heap.pop()

    def test_direction_test(self):
        # HEAP_UP and HEAP_DOWN must order the same elements oppositely.
        up_heap = Heap(Heap.HEAP_UP)
        down_heap = Heap(Heap.HEAP_DOWN)
        for key, value in [(1, 1), (4, 4), (2, 2), (3, 3)]:
            up_heap.add(key, value)
            down_heap.add(key, value)
        self.assertNotEqual(up_heap.top(), down_heap.top())
        for i in xrange(1, 5):
            self.assertEqual(up_heap.pop(), (5 - i, 5 - i))
            self.assertEqual(down_heap.pop(), (i, i))
# Allow running the suite directly: `python <this file>`.
if __name__ == '__main__':
    unittest.main()
|
18,668 | 8169e2c9e079279551352a341cd51ada3cfbe267 | import networkx as nx
import os
import re
import time
import logging
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from .common import get_relative_path_from_href
from ..config import *
from ..constant import *
browser = webdriver.Chrome()
def __pass_if_exist(cm: nx.DiGraph, node_type, mode='new_generate'):
    """Return True if `cm` already contains a node of `node_type`.

    In 'new_generate' mode always returns False so the caller rebuilds
    the section unconditionally.
    """
    if mode == 'new_generate':
        return False
    ntypes = nx.get_node_attributes(cm, 'Ntype')
    # .get() guards against nodes that carry no 'Ntype' attribute (nodes
    # created implicitly as add_edge() targets elsewhere in this module);
    # the original ntypes[node] lookup raised KeyError for those.
    return any(ntypes.get(node) == node_type for node in cm.nodes)
def __add_meta_node_javadoc(concept_map: nx.DiGraph):
    """Add the two top-level scope nodes (Java SE and JDK), once."""
    if __pass_if_exist(concept_map, NodeType.SCOPE):
        return
    scope_specs = (
        ('javase',
         'The Java Platform, Standard Edition (Java SE) APIs define the core Java platform for general-purpose computing. These APIs are in modules whose names start with java.'),
        ('jdk',
         'The Java Development Kit (JDK) APIs are specific to the JDK and will not necessarily be available in all implementations of the Java SE Platform. These APIs are in modules whose names start with jdk.'),
    )
    for scope_name, scope_description in scope_specs:
        concept_map.add_node(
            scope_name,
            Ntype=NodeType.SCOPE,
            description=scope_description
        )
def __add_module_node_javadoc(concept_map: nx.DiGraph):
    """Scrape the javadoc overview page and add one MODULE node per module.

    Each module is linked under the 'javase' or 'jdk' scope node based on
    its name prefix. Uses the module-level Selenium `browser`.
    """
    if __pass_if_exist(concept_map, NodeType.MODULE):
        return
    global browser
    javadoc_index_path = os.path.join(JAVADOC_PATH, 'api/index.html')
    browser.get(javadoc_index_path)
    overview_tabpanel = browser.find_element(By.ID, 'overviewSummary_tabpanel')
    module_table = overview_tabpanel.find_element(By.XPATH, './table')
    trs = module_table.find_elements(By.XPATH, './tbody/tr')
    # Collect (name, href, description) first; mutating while iterating
    # live WebElements is avoided.
    modules = []
    for tr in trs:
        th_a = tr.find_element(By.XPATH, './th/a')
        td_div = tr.find_element(By.XPATH, './td/div')
        modules.append(
            (
                th_a.text,
                th_a.get_attribute('href'),
                td_div.text
            )
        )
    for module_name, module_href, module_description in modules:
        concept_map.add_node(module_name)
        concept_map.nodes[module_name]['name'] = module_name
        concept_map.nodes[module_name]['Ntype'] = NodeType.MODULE
        concept_map.nodes[module_name]['local_href'] = module_href
        concept_map.nodes[module_name]['description'] = module_description
        relative_path = get_relative_path_from_href(module_href)
        concept_map.nodes[module_name]['path'] = relative_path
        # Scope is inferred from the module-name prefix (java.* vs jdk.*).
        if re.match('^java', module_name):
            concept_map.add_edge(
                'javase', module_name,
                Etype=EdgeType.INCLUDE
            )
        elif re.match('^jdk', module_name):
            concept_map.add_edge(
                'jdk', module_name,
                Etype=EdgeType.INCLUDE
            )
def __add_package_node_javadoc(concept_map: nx.DiGraph):
    """Visit each MODULE node's page and add its exported PACKAGE nodes.

    Only 'Exports' package groups are collected; each package is linked
    to its module with an EXPORT edge.
    """
    if __pass_if_exist(concept_map, NodeType.PACKAGE):
        return
    global browser
    Ntype_attributes = nx.get_node_attributes(concept_map, 'Ntype')
    # NOTE(review): this loop repeats the PACKAGE-existence check that
    # __pass_if_exist already performs (it is a no-op in new_generate
    # mode) — presumably a leftover; confirm before removing.
    for node in concept_map.nodes:
        if Ntype_attributes[node] == NodeType.PACKAGE:
            return
    module_nodes = [
        node for node in concept_map.nodes if Ntype_attributes[node] == NodeType.MODULE]
    for node in module_nodes:
        local_href = concept_map.nodes[node]['local_href']
        browser.get(local_href)
        time.sleep(0.05)  # brief pause to let the page render
        packages = []
        block_sections = browser.find_elements(
            By.XPATH, r'//li[@class="blockList"]/section')
        for block_section in block_sections:
            block_title = block_section.find_element(By.XPATH, './h2')
            if block_title.text != 'Packages':
                continue
            package_groups = block_section.find_elements(
                By.XPATH, './div/table')
            for package_group in package_groups:
                relation_name = package_group.find_element(
                    By.XPATH, './caption/span[1]').text
                if relation_name != 'Exports':
                    continue
                trs = package_group.find_elements(By.XPATH, './tbody/tr')
                for tr in trs:
                    th_a = tr.find_element(By.XPATH, './/a')
                    td_div = tr.find_element(By.XPATH, './td')
                    packages.append(
                        (
                            th_a.text,
                            th_a.get_attribute('href'),
                            td_div.text
                        )
                    )
        for package_name, package_href, package_description in packages:
            concept_map.add_node(package_name)
            concept_map.nodes[package_name]['name'] = package_name
            concept_map.nodes[package_name]['Ntype'] = NodeType.PACKAGE
            concept_map.nodes[package_name]['local_href'] = package_href
            concept_map.nodes[package_name]['description'] = package_description
            relative_path = get_relative_path_from_href(package_href)
            concept_map.nodes[package_name]['path'] = relative_path
            concept_map.add_edge(
                node, package_name,
                Etype=EdgeType.EXPORT
            )
    pass
def __add_class_level_node_javadoc(concept_map: nx.DiGraph):
    """Add class-level nodes (interface/class/enum/exception/error/annotation)
    for every PACKAGE node, keyed by the page's relative path."""
    if __pass_if_exist(concept_map, NodeType.CLASS):
        return
    '''
    Add all class-level nodes: interfaces, classes, exceptions, errors,
    annotations.
    '''
    global browser
    Ntype_attributes = nx.get_node_attributes(concept_map, 'Ntype')
    package_nodes = [
        node for node in concept_map.nodes if node in Ntype_attributes and Ntype_attributes[node] == NodeType.PACKAGE]
    for package_node in package_nodes:
        local_href = concept_map.nodes[package_node]['local_href']
        browser.get(local_href)
        time.sleep(0.05)  # brief pause to let the page render
        blocks = browser.find_elements(By.XPATH, r'//li[@class="blockList"]')
        for block in blocks:
            content_table = block.find_element(By.XPATH, r'.//table')
            if content_table is None:
                continue
            # The table caption ("Interface Summary", "Class Summary", ...)
            # decides the node type assigned below.
            block_title = content_table.find_element(
                By.XPATH, r'./caption').text.lower()
            temps = []
            trs = content_table.find_elements(By.XPATH, r'./tbody/tr')
            for tr in trs:
                th_a = tr.find_element(By.XPATH, './/a')
                td_div = tr.find_element(By.XPATH, './td')
                temps.append(
                    (
                        th_a.text,
                        th_a.get_attribute('href'),
                        td_div.text
                    )
                )
            for name, href, description in temps:
                relative_path = get_relative_path_from_href(href)
                concept_map.add_node(relative_path)
                concept_map.nodes[relative_path]['name'] = name
                # NOTE(review): if none of the keywords below match, the
                # node is left WITHOUT an 'Ntype' attribute — downstream
                # code indexing get_node_attributes(...)[node] can
                # KeyError on such nodes; confirm whether a default type
                # should be assigned here.
                if 'interface' in block_title:
                    concept_map.nodes[relative_path]['Ntype'] = NodeType.INTERFACE
                elif 'class' in block_title:
                    concept_map.nodes[relative_path]['Ntype'] = NodeType.CLASS
                elif 'enum' in block_title:
                    concept_map.nodes[relative_path]['Ntype'] = NodeType.ENUM
                elif 'exception' in block_title:
                    concept_map.nodes[relative_path]['Ntype'] = NodeType.EXCEPTION
                elif 'error' in block_title:
                    concept_map.nodes[relative_path]['Ntype'] = NodeType.ERROR
                elif 'annotation' in block_title:
                    concept_map.nodes[relative_path]['Ntype'] = NodeType.ANNOTATION
                concept_map.nodes[relative_path]['local_href'] = href
                concept_map.nodes[relative_path]['description'] = description
                concept_map.nodes[relative_path]['path'] = relative_path
                concept_map.add_edge(
                    package_node, relative_path,
                    Etype=EdgeType.INCLUDE
                )
def __add_member_node_to_class_level_node_javadoc(concept_map: nx.DiGraph, node_type: str):
    '''
    Add member nodes to every node of `node_type` (class, error,
    exception, interface, enum, annotation): methods, fields,
    constructors, enum constants, optional elements and nested classes,
    each linked with an INCLUDE edge.
    '''
    global browser
    Ntype_attributes = nx.get_node_attributes(concept_map, 'Ntype')
    class_nodes = [
        node for node in concept_map.nodes if node in Ntype_attributes and Ntype_attributes[node] == node_type]
    for class_node in class_nodes:
        try:
            local_href = concept_map.nodes[class_node]['local_href']
            browser.get(local_href)
            time.sleep(0.05)  # brief pause to let the page render
            content_container = browser.find_element(
                By.XPATH, r'//div[@class="contentContainer"]')
            try:
                summary_section = content_container.find_element(
                    By.XPATH, r'.//section[@class="summary"]')
                blocks = summary_section.find_elements(
                    By.XPATH, r'.//li[@class="blockList"]')
            except Exception as e:
                # Page has no member summary at all; skip this node.
                print('node ', class_node, ': nothing found')
                continue
            for block in blocks:
                # Section heading ("Field Summary", "Method Summary", ...)
                # decides the member node type below.
                block_title = block.find_element(
                    By.XPATH, r'.//h2').text.lower()
                try:
                    member_summary_div = block.find_element(
                        By.XPATH, r'.//div[@class="memberSummary"]')
                except NoSuchElementException as no_element:
                    continue
                member_summary_table = member_summary_div.find_element(
                    By.XPATH, r'.//table')
                trs = member_summary_table.find_elements(
                    By.XPATH, r'./tbody/tr')
                temps = []
                for tr in trs:
                    th_span = tr.find_element(
                        By.XPATH, r'.//span[@class="memberNameLink"]')
                    th_a = th_span.find_element(By.XPATH, r'.//a')
                    td_div = tr.find_element(By.XPATH, r'./td[last()]')
                    temps.append(
                        (
                            th_a.text,
                            th_a.get_attribute('href'),
                            td_div.text
                        )
                    )
                for name, href, description in temps:
                    relative_path = get_relative_path_from_href(href)
                    concept_map.add_node(relative_path)
                    concept_map.nodes[relative_path]['name'] = name
                    # NOTE(review): an unmatched title leaves the node
                    # without an 'Ntype' attribute (see silenced branch
                    # below) — confirm whether a default should be set.
                    if 'field' in block_title:
                        concept_map.nodes[relative_path]['Ntype'] = NodeType.FIELD
                    elif 'constructor' in block_title:
                        concept_map.nodes[relative_path]['Ntype'] = NodeType.CONSTRUCTOR
                    elif 'method' in block_title:
                        concept_map.nodes[relative_path]['Ntype'] = NodeType.METHOD
                    elif 'enum constant' in block_title:
                        concept_map.nodes[relative_path]['Ntype'] = NodeType.ENUM_CONSTANT
                    elif 'element' in block_title:
                        concept_map.nodes[relative_path]['Ntype'] = NodeType.OPTIONAL_ELEMENT
                    elif 'nested' in block_title:
                        concept_map.nodes[relative_path]['Ntype'] = NodeType.CLASS
                    else:
                        #print('undetected tile type: ', block_title, ' in ', class_node)
                        pass
                    concept_map.nodes[relative_path]['local_href'] = href
                    concept_map.nodes[relative_path]['description'] = description
                    concept_map.nodes[relative_path]['path'] = relative_path
                    concept_map.add_edge(
                        class_node, relative_path,
                        Etype=EdgeType.INCLUDE
                    )
        except Exception as e:
            # Keep crawling the remaining nodes; log the failure.
            logging.exception(e)
def __add_member_node_to_class_level_nodes_javadoc(concept_map: nx.DiGraph):
    """Populate member nodes for every class-level node type, once."""
    if __pass_if_exist(concept_map, NodeType.FIELD):
        return
    # Same order as the original explicit call sequence.
    member_owner_types = (
        NodeType.ANNOTATION,
        NodeType.CLASS,
        NodeType.ENUM,
        NodeType.ERROR,
        NodeType.EXCEPTION,
        NodeType.INTERFACE,
    )
    for owner_type in member_owner_types:
        __add_member_node_to_class_level_node_javadoc(concept_map, owner_type)
def __add_Parameter_ReturnType_relation(concept_map: nx.DiGraph):
    """Scrape each class-level page's detail sections and add type edges.

    Adds RETURN_TYPE/PARAMETER edges from method nodes, PARAMETER edges
    from constructor nodes, and FIELD_IS_TYPE edges from field nodes.
    Member node ids are built as '<class_path>#<anchor_id>'.
    """
    global browser
    Ntype_attributes = nx.get_node_attributes(concept_map, 'Ntype')
    #a = 'api/jdk.jshell/jdk/jshell/JShell.Subscription.html' in concept_map
    for node_type in class_level_node_types:
        class_nodes = [
            node for node in concept_map.nodes if node in Ntype_attributes and Ntype_attributes[node] == node_type]
        for class_node in class_nodes:
            local_href = concept_map.nodes[class_node]['local_href']
            browser.get(local_href)
            time.sleep(0.05)  # brief pause to let the page render
            content_container = browser.find_element(
                By.XPATH, r'//div[@class="contentContainer"]')
            # --- Method details: return type and parameter edges ---
            method_exist = True
            try:
                method_detail_section = content_container.find_element(
                    By.XPATH, r'.//section[@class="methodDetails"]')
                method_blocks = method_detail_section.find_elements(
                    By.XPATH, r'.//li[@class="blockList"]')
            except:
                method_exist = False
            if method_exist:
                for method_block in method_blocks:
                    try:
                        method_detail = method_block.find_element(
                            By.XPATH, r'./section[@class="detail"]')
                        # The last <a id=...> carries the member anchor used
                        # to form the member node id.
                        a_with_id = method_detail.find_element(
                            By.XPATH, r'./h3/a[last()]')
                        method_id = a_with_id.get_attribute("id")
                        method_node = class_node + '#' + method_id
                        method_signature = method_detail.find_element(
                            By.XPATH, r'.//div[@class="memberSignature"]')
                        try:
                            return_type = method_signature.find_element(
                                By.XPATH, r'./span[@class="returnType"]/a')
                            return_type_node = get_relative_path_from_href(
                                return_type.get_attribute('href'))
                            concept_map.add_edge(
                                method_node,
                                return_type_node,
                                Etype=EdgeType.RETURN_TYPE
                            )
                        except NoSuchElementException:
                            # Primitive/void return types have no link.
                            pass
                        try:
                            parameters = method_signature.find_elements(
                                By.XPATH, r'./span[@class="arguments"]/a')
                            for parameter in parameters:
                                parameter_node = get_relative_path_from_href(
                                    parameter.get_attribute('href'))
                                concept_map.add_edge(
                                    method_node, parameter_node, Etype=EdgeType.PARAMETER)
                        except NoSuchElementException:
                            pass
                    except:
                        continue
            # --- Constructor details: parameter edges ---
            constructor_exist = True
            try:
                constructor_detail_section = content_container.find_element(
                    By.XPATH, r'.//section[@class="constructorDetails"]')
                constructor_blocks = constructor_detail_section.find_elements(
                    By.XPATH, r'.//li[@class="blockList"]')
            except:
                constructor_exist = False
            if constructor_exist:
                for method_block in constructor_blocks:
                    try:
                        method_detail = method_block.find_element(
                            By.XPATH, r'./section[@class="detail"]')
                        a_with_id = method_detail.find_element(
                            By.XPATH, r'./h3/a[last()]')
                        method_id = a_with_id.get_attribute("id")
                        method_node = class_node + '#' + method_id
                        method_signature = method_detail.find_element(
                            By.XPATH, r'.//div[@class="memberSignature"]')
                        try:
                            parameters = method_signature.find_elements(
                                By.XPATH, r'./span[@class="arguments"]/a')
                            for parameter in parameters:
                                parameter_node = get_relative_path_from_href(
                                    parameter.get_attribute('href'))
                                concept_map.add_edge(
                                    method_node,
                                    parameter_node,
                                    Etype=EdgeType.PARAMETER
                                )
                        except NoSuchElementException:
                            pass
                    except:
                        continue
            # --- Field details: field-type edges ---
            field_exist = True
            try:
                field_detail_section = content_container.find_element(
                    By.XPATH, r'.//section[@class="fieldDetails"]')
                field_blocks = field_detail_section.find_elements(
                    By.XPATH, r'.//li[@class="blockList"]')
            except:
                field_exist = False
            if field_exist:
                for field_block in field_blocks:
                    try:
                        field_detail = field_block.find_element(
                            By.XPATH, r'./section[@class="detail"]')
                        a_with_id = field_detail.find_element(
                            By.XPATH, r'./h3/a[last()]')
                        field_id = a_with_id.get_attribute("id")
                        field_node = class_node + '#' + field_id
                        field_signature = field_detail.find_element(
                            By.XPATH, r'.//div[@class="memberSignature"]')
                        try:
                            return_type = field_signature.find_element(
                                By.XPATH, r'./span[@class="returnType"]/a')
                            return_type_node = get_relative_path_from_href(
                                return_type.get_attribute('href'))
                            concept_map.add_edge(
                                field_node,
                                return_type_node,
                                Etype=EdgeType.FIELD_IS_TYPE
                            )
                        except NoSuchElementException:
                            pass
                    except:
                        continue
def __add_ref_in_desc_relation_javadoc(concept_map: nx.DiGraph):
    """Scrape each class-level page for description links and lineage.

    Adds REFERENCE_IN_DESCRIPTION edges for links inside the class
    description, an INHERIT edge to the direct superclass (deepest entry
    of the inheritance tree), IMPLEMENT edges from the
    'All Implemented Interfaces' list, and ALSO_SEE edges from
    'See Also' / notes.
    """
    global browser
    Ntype_attributes = nx.get_node_attributes(concept_map, 'Ntype')
    for node_type in class_level_node_types:
        class_nodes = [
            node for node in concept_map.nodes if node in Ntype_attributes and Ntype_attributes[node] == node_type]
        for class_node in class_nodes:
            local_href = concept_map.nodes[class_node]['local_href']
            browser.get(local_href)
            time.sleep(0.05)  # brief pause to let the page render
            content_container = browser.find_element(
                By.XPATH, r'//div[@class="contentContainer"]')
            # add ref_in_desc relation
            try:
                description_block = content_container.find_element(
                    By.XPATH, r'.//section[@class="description"]/div')
                ref_as = description_block.find_elements(By.XPATH, r'.//a')
                for ref_a in ref_as:
                    try:
                        local_href = ref_a.get_attribute("href")
                        relative_path = get_relative_path_from_href(local_href)
                        concept_map.add_edge(
                            class_node,
                            relative_path,
                            Etype=EdgeType.REFERENCE_IN_DESCRIPTION
                        )
                    except NoSuchElementException:
                        continue
            except NoSuchElementException:
                pass
            # add inheritance relation: walk the nested 'inheritance' divs
            # down to the deepest linked ancestor = direct superclass.
            try:
                inheritance_tree = content_container.find_element(
                    By.XPATH, r'./div[@class="inheritance"]')
                parent = inheritance_tree.find_element(By.XPATH, r'./a')
                child = inheritance_tree.find_element(
                    By.XPATH, r'./div[@class="inheritance"]')
                terminited = False
                while not terminited:
                    try:
                        new_parent = child.find_element(By.XPATH, r'./a')
                        parent = new_parent
                        child = child.find_element(
                            By.XPATH, r'./div[@class="inheritance"]')
                    except NoSuchElementException:
                        terminited = True
                parent_href = parent.get_attribute("href")
                relative_path = get_relative_path_from_href(parent_href)
                concept_map.add_edge(
                    class_node,
                    relative_path,
                    Etype=EdgeType.INHERIT
                )
            except NoSuchElementException:
                pass
            # add implement relation
            try:
                description_block = content_container.find_element(
                    By.XPATH, r'.//section[@class="description"]')
                dls = description_block.find_elements(By.XPATH, r'.//dl')
                for dl in dls:
                    try:
                        dts = dl.find_elements(By.XPATH, r'./dt')
                        dds = dl.find_elements(By.XPATH, r'./dd')
                        # NOTE(review): pairing dts[i] with dds[i] assumes
                        # the two lists are equal length; a mismatched <dl>
                        # is silently skipped by the enclosing except.
                        for i in range(len(dts)):
                            dt = dts[i]
                            dd = dds[i]
                            if 'Implemented' in dt.text:
                                implement_as = dd.find_elements(
                                    By.XPATH, r'.//a')
                                for implement_a in implement_as:
                                    implement_href = implement_a.get_attribute(
                                        "href")
                                    relative_path = get_relative_path_from_href(
                                        implement_href)
                                    concept_map.add_edge(
                                        class_node,
                                        relative_path,
                                        Etype=EdgeType.IMPLEMENT
                                    )
                            if 'See' in dt.text or 'Note' in dt.text:
                                also_see_as = dd.find_elements(
                                    By.XPATH, r'.//a')
                                for also_see_a in also_see_as:
                                    also_see_href = also_see_a.get_attribute(
                                        "href")
                                    relative_path = get_relative_path_from_href(
                                        also_see_href)
                                    # External/unknown targets get a node
                                    # with Ntype OTHER.
                                    if relative_path not in concept_map:
                                        concept_map.add_node(relative_path)
                                        concept_map.nodes[relative_path]['Ntype'] = NodeType.OTHER
                                    concept_map.add_edge(
                                        class_node,
                                        relative_path,
                                        Etype=EdgeType.ALSO_SEE
                                    )
                    except:
                        continue
            except NoSuchElementException:
                pass
def generate_basic_concept_map_javadoc(mode='new_generate'):
    '''
    Build the concept map from the obvious hierarchical containment
    relations only (scope -> module -> package -> class -> member).
    With mode='new_generate' a fresh graph is built; otherwise an
    existing GEXF file is loaded and extended.
    '''
    graph_path = os.path.join(CONCEPT_MAP_STORE_PATH,
                              JAVADOC_CONCEPT_MAP_FILE_NAME)
    if os.path.exists(graph_path) and mode != 'new_generate':
        concept_map = nx.read_gexf(graph_path)
    else:
        concept_map = nx.DiGraph()
    __add_meta_node_javadoc(concept_map)
    #print('meta data added')
    __add_module_node_javadoc(concept_map)
    #print('module data added')
    try:
        __add_package_node_javadoc(concept_map)
        #print('package data added')
        __add_class_level_node_javadoc(concept_map)
        #print('class data added')
        __add_member_node_to_class_level_nodes_javadoc(concept_map)
        #print('member data added')
    except Exception as e:
        # Best-effort: return whatever portion of the graph was built.
        print(e)
    return concept_map
def add_complex_relations_javadoc(concept_map: nx.DiGraph):
    """Add non-hierarchical relations (types, references, inheritance)
    on top of a basic concept map."""
    __add_Parameter_ReturnType_relation(concept_map)
    print('relation 1 added')
    __add_ref_in_desc_relation_javadoc(concept_map)
    print('relation 2 added')
    # TODO: add ATTACH_ANNOTATION, THROWS, NESTED_CLASS relations and ALSO_SEE for methods and fields
    # TODO: nested class has actually no Ntype, need to consider and generate whole concept map again
def generate_concept_map_javadoc():
    """Build the full javadoc concept map: base hierarchy plus the
    complex (non-containment) relations."""
    cmap = generate_basic_concept_map_javadoc()
    add_complex_relations_javadoc(cmap)
    return cmap
def generate_concept_map(doc_name='javadoc'):
    """Dispatch to the generator registered for *doc_name*.

    Raises KeyError for an unknown documentation source, like the original.
    """
    generators = {
        'javadoc': generate_concept_map_javadoc,
    }
    return generators[doc_name]()
|
18,669 | c0e8e7957fd66cec96af5cce41388b1e335971aa | # coding: utf-8
"""
flask_oauthlib.contrib.sqlalchemy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SQLAlchemy support for OAuth2 provider.
:copyright: (c) 2013 by Hsiaoming Yang.
"""
def user_handle(session, model, provider):
    """Install a username/password user loader on *provider* and return it."""
    def load_user(username, password, *args, **kwargs):
        # First row matching both credentials, or None.
        query = session.query(model)
        return query.filter_by(username=username, password=password).first()
    provider._usergetter = load_user
    return provider
def client_handle(session, model, provider):
    """Install a client loader (lookup by client_id) on *provider* and return it."""
    def load_client(client_id):
        matches = session.query(model).filter_by(client_id=client_id)
        return matches.first()
    provider._clientgetter = load_client
    return provider
def sqlalchemy_handle(session, provider, user=None, client=None, token=None):
    """Bind getters and setters provided by SQLAlchemy models.

    NOTE(review): *token* is accepted but never used here -- presumably a
    token handler existed elsewhere; confirm before relying on it.
    """
    if user:
        provider = user_handle(session, user, provider)
    if client:
        provider = client_handle(session, client, provider)
    return provider
|
18,670 | 7c476f20f3f33942f49f6b92ae9cc09b9db4c9bc | # answer for leetcode question 7, straight forward python solution. Python uses memory to store integer bits,
# it doesn't have the problem of integer overflow unless memory itself isn't enough
class Solution:
    """LeetCode 7: reverse the decimal digits of a 32-bit signed integer."""

    def reverse(self, x: int) -> int:
        """Return *x* with its digits reversed, or 0 when the result would
        not fit in a signed 32-bit integer.

        Python ints never overflow, so the 32-bit clamp is applied
        explicitly after reversing.
        """
        sign = -1 if x < 0 else 1
        reversed_magnitude = int(str(abs(x))[::-1])
        answer = sign * reversed_magnitude
        if not (-2 ** 31 <= answer <= 2 ** 31 - 1):
            return 0
        return answer
18,671 | 4ce3037dd480ab2100cc375544fb5f9341072202 | import datetime
from ..util import db
from ..models import *
from flask.ext.login import login_required
import flask
import flask_login
import flask.views
## All Workouts
class Workouts(flask.views.MethodView):
    """Collection endpoint for workouts (list / create)."""
    @login_required
    def get(self):
        # NOTE(review): the query result is fetched but never returned, so
        # this handler responds with None -- looks unfinished; confirm.
        workouts = db.session.query(Workout).all()
        pass
    @login_required
    def post(self):
        # Creation is delegated to the shared CRUD helper.
        return WorkoutCRUD.create_workout()
## Workout CREATE View
class WorkoutCreate(flask.views.MethodView):
    """Serve the blank "create workout" form."""
    @login_required
    def get(self):
        return flask.render_template('workout_create.html')
## Workout EDIT View
class WorkoutEdit(flask.views.MethodView):
    """Serve the edit form for an existing workout."""
    @login_required
    def get(self, workout_id):
        # Fix: the first parameter was misspelled ``selfself`` in the
        # original; it happened to work positionally but broke convention.
        if workout_id is not None:
            workout = Workout.query.get(workout_id)
            return flask.render_template('workout_edit.html', workout=workout)
        else:
            return flask.render_template('404.html'), 404
## Workout CRUD
class WorkoutCRUD(flask.views.MethodView):
    """Single-workout endpoint plus static helpers for create/edit/delete.

    Browsers cannot send PUT/DELETE from forms, so POST carries the real
    verb in a hidden ``_method`` form field.
    """
    @login_required
    def get(self, workout_id):
        """Render the workout detail page, or 404 when missing."""
        # NOTE(review): Workout.query.get() is executed twice for the same
        # id here; the second None-check is redundant with the first.
        if workout_id is not None and Workout.query.get(workout_id) is not None:
            workout = Workout.query.get(workout_id)
            if workout is not None:
                return flask.render_template('workout.html', workout=workout, user=flask_login.current_user)
        return flask.render_template('404.html'), 404
    @login_required
    def post(self, workout_id):
        """Dispatch on the tunnelled HTTP verb; PUT/DELETE are stubs."""
        method = flask.request.form.get('_method', '')
        if method == "PUT":
            pass
        elif method == "DELETE":
            pass
        else:
            return flask.render_template('404.html'), 404
    @staticmethod
    def edit_workout(workout_id):
        # TODO: not implemented yet.
        pass
    @staticmethod
    def delete_workout(workout_id):
        # TODO: not implemented yet.
        pass
    @staticmethod
    def create_workout():
        """Validate the posted form, persist a Workout with its parts/tags,
        then redirect to the new workout (or back to the form on error)."""
        if WorkoutCRUD.validate_user_data() != 0:
            return flask.redirect(flask.url_for('workout_create'))
        else:
            parts = WorkoutCRUD.collect_parts()
            workout = Workout(flask.request.form['workout_name'])
            workout.post_date = datetime.datetime.strptime(flask.request.form['workout_date'], '%Y-%m-%d').date()
            workout.gym = flask_login.current_user.owns_gym
            for idx in parts:
                part = WorkoutPart(parts[idx]['name'])
                part.order = idx
                part.uom = parts[idx]['uom']
                # Reuse existing tags by name; create any that are new.
                for tag_name in parts[idx]['tags'].split(','):
                    if tag_name != "":
                        tag = Tag.query.filter_by(name=tag_name).first()
                        if tag is None:
                            tag = Tag(tag_name)
                        part.tags.append(tag)
                workout.parts.append(part)
            db.session.add(workout)
            db.session.commit()
            return flask.redirect(flask.url_for('workout', workout_id=workout.id))
    @staticmethod
    def collect_parts():
        """Gather indexed part fields (``[part]``/``[uom]``/``[tags]``) from
        the form into ``{index: {'name', 'uom', 'tags'}}``."""
        f = flask.request.form
        parts = {}
        for key in f.keys():
            if "[part]" in key:
                # The numeric index sits between the first '[' and ']'.
                idx = key[key.find('[')+1:key.find(']')]
                part = {'name': flask.request.form[key], 'uom': flask.request.form[key.replace('[part]', '[uom]')]}
                if key.replace('[part]', '[tags]') in f.keys():
                    # NOTE(review): str(list)[3:-2] strips "['" and "']" --
                    # fragile formatting-based parsing; works for one tag string.
                    part['tags'] = str(f.getlist(key.replace('[part]', '[tags]'))).strip()[3:-2]
                parts[idx] = part
        return parts
    @staticmethod
    def validate_user_data():
        """Flash a message per missing required field; return the error count."""
        error = 0
        if flask.request.form['workout_name'] == "":
            flask.flash("Workout name is required!", "error")
            error += 1
        if flask.request.form['workout_date'] == "":
            flask.flash("Workout date is required!", "error")
            error += 1
        return error
class WorkoutResults(flask.views.MethodView):
    """Read-only page showing the results recorded for a workout."""
    @login_required
    def get(self, workout_id):
        """Render the results page for *workout_id*, or 404 when missing."""
        # Fix: the original issued the same Workout.query.get() twice and
        # checked the result three times; fetch once instead.
        if workout_id is not None:
            workout = Workout.query.get(workout_id)
            if workout is not None:
                return flask.render_template('workout_results.html', workout=workout, user=flask_login.current_user)
        return flask.render_template('404.html'), 404
18,672 | 0e9fe502cdcb0f40c58fe558d357169b69cdc19e | #Created by Joseph Brason
#Blank Pygame
import pygame, sys
from pygame.locals import *
# Window/bookkeeping setup shared by the loop below.
pygame.init()
clock = pygame.time.Clock()  # used by mainLoop() to cap the frame rate
screenX = 1200
screenY = 720
screen = pygame.display.set_mode((screenX ,screenY))
windowTitle = "Blank Pygame window"
pygame.display.set_caption(windowTitle)
#COLOURS
black = pygame.Color( 0, 0, 0)
def keyDetection():
    """Drain the event queue: quit on window close; ESC posts a QUIT event.

    Every KEYDOWN event is also echoed to stdout for debugging.
    """
    for evt in pygame.event.get():
        if evt.type == QUIT:
            pygame.quit()
            sys.exit()
        if evt.type == KEYDOWN:
            if evt.key == K_ESCAPE:
                pygame.event.post(pygame.event.Event(QUIT))
            print(evt)
def mainLoop():
    """Run one frame: poll input, present the frame, clear, cap at 60 FPS."""
    keyDetection()
    pygame.display.flip()
    screen.fill(black)  # clear the back buffer for the next frame
    clock.tick(60)
# Run forever; mainLoop() handles events, drawing and frame pacing.
while True:
    mainLoop()
|
18,673 | 308b544711f2ef11d0295afb1c8841de9ecd7f5b | """
The library to provide common functions of version related, like to compare two versions.
Written in Python 3.6
"""
__version__ = 0.1
def compare(v1="", v2=""):
    """
    .compare(v1, v2)
    Check two versions of the form "major.minor" against each other.
    :param v1: String - the first version to compare, like '1.2'
    :param v2: String - the second version to compare
    :return: A string describing the comparison result
    """
    if any([v1 == "", v2 == ""]):
        return 'One or both versions are not provided.'
    # A version needs a dot, and it must not be the very first character.
    # (Fix: the original raised ValueError when no dot was present at all.)
    if '.' not in v1 or v1.startswith('.'):
        return 'v1 is in wrong format'
    if '.' not in v2 or v2.startswith('.'):
        return 'v2 is in wrong format'
    major1, _, minor1 = v1.partition('.')
    major2, _, minor2 = v2.partition('.')
    # The original treated an absent minor part ("1.") as zero; keep that.
    minor1 = minor1 or '0'
    minor2 = minor2 or '0'
    if not (major1.isdigit() and minor1.isdigit()):
        return 'Invalid input - {}'.format(v1)
    if not (major2.isdigit() and minor2.isdigit()):
        return 'Invalid input - {}'.format(v2)
    # Bug fix: compare numerically, not lexically -- the original string
    # comparison claimed '1.9' was greater than '1.10' and '9.0' > '10.0'.
    key1 = (int(major1), int(minor1))
    key2 = (int(major2), int(minor2))
    if key1 > key2:
        return 'Version {0} is greater than Version {1}'.format(v1, v2)
    elif key1 < key2:
        return 'Version {0} is smaller than Version {1}'.format(v1, v2)
    else:
        return 'Version {0} is equal to Version {1}'.format(v1, v2)
|
18,674 | 732f553f4bfb8c61d4c5c67ab6de7b22589c126d | #return function
def format_name(first_name, last_name):
    """Title-case both names and join them; demonstrates that `return`
    ends the function immediately."""
    full_name = f"{first_name.title()} {last_name.title()}"
    return full_name
    print("hi juli")  # never reached: execution left the function at `return`
print(format_name("juli", "kumari"))
#Functions with Outputs
def format_name(f_name, l_name):
    """Validate and title-case a first/last name pair."""
    if f_name == "" or l_name == "":
        return "You didn't provide valid inputs."
    formated_f_name = f_name.title()
    formated_l_name = l_name.title()
    # Bug fix: the result string was built but never returned, so the
    # function silently returned None for valid input.
    return f"Result: {formated_f_name} {formated_l_name}"
#Storing output in a variable
formatted_name = format_name(input("Your first name: "), input("Your last name: "))
print(formatted_name)
#or printing the output directly, without the intermediate variable
print(format_name(input("What is your first name? "), input("What is your last name? ")))
#len() is itself a function with an output, consumed here.
length = len(formatted_name)
#Return as an early exit
def format_name(f_name, l_name):
    """Take a first and last name and format it
    to return the title case version of the name."""
    # Guard clause: bail out early on empty input (early-exit demo).
    if f_name == "" or l_name == "":
        return "You didn't provide valid inputs."
    return f"Result: {f_name.title()} {l_name.title()}"
#day in months using return function
def is_leap(year):
    """Gregorian leap-year test: divisible by 4, except centuries,
    except multiples of 400."""
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0

def days_in_month(year, month):
    """Return the number of days in *month* (1-12) of *year*, leap-aware.

    Fix: the original was defined with no parameters but invoked as
    ``days_in_month(year, month)``, which raised TypeError at the call site.
    """
    month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if is_leap(year) and month == 2:
        return 29
    return month_days[month - 1]
# Interactive driver: ask for a year and month, then report the day count.
year = int(input("Enter a year: "))
month = int(input("Enter a month: "))
days = days_in_month(year, month)
print(days)
|
18,675 | 4e53760c73f4ffe7acbf8d2efb421500fd7b584a | from django.urls import path
from . import views
# URL routes for the products app.
urlpatterns=[
    path('products',views.products,name='products'),
    path('<int:pro_id>',views.product,name='product'),  # product detail by id
    path('search',views.search,name='search'),
]
18,676 | 369c3d3188ded731be25fd4742fc9c06d7263b9f | import os
import sys
import pygame
import time
from pygame.locals import *
# Window setup; ``screen`` is the module-global surface drawn on by People.
CAPTION = "Rescue Xiaomeng"
SCREEN_SIZE = (800, 600)
pygame.init()
pygame.display.set_caption(CAPTION)
screen = pygame.display.set_mode(SCREEN_SIZE)
def loo():
    """Idle forever, servicing the event queue so the window stays
    responsive and can be closed."""
    while True:
        time.sleep(0.01)
        for evt in pygame.event.get():
            if evt.type == QUIT:
                pygame.quit()
                sys.exit()
class People(object):
    """Player sprite: walks the maze in 5px steps with a 2-frame bob
    animation at 30 FPS.

    Relies on the module-level ``screen`` surface and the ``lesson1/`` assets.
    """

    @staticmethod
    def _load_image(path, size):
        # Load an image with per-pixel alpha and scale it to *size*.
        image = pygame.image.load(path).convert_alpha()
        return pygame.transform.smoothscale(image, size)

    def __init__(self):
        self.x = 380
        self.y = 230
        # Direction sprites (30x40), backgrounds (800x600), markers (50x60).
        self.up = self._load_image('lesson1/up.png', (30, 40))
        self.down = self._load_image('lesson1/down.png', (30, 40))
        self.left = self._load_image('lesson1/left.png', (30, 40))
        self.right = self._load_image('lesson1/right.png', (30, 40))
        self.bac1 = self._load_image('lesson1/back.png', (800, 600))
        self.bac2 = self._load_image('lesson1/map.png', (800, 600))
        self.start = self._load_image('lesson1/start.png', (50, 60))
        self.end = self._load_image('lesson1/end.png', (50, 60))

    def showBac(self):
        """Draw both backgrounds plus the start and end markers."""
        screen.blit(self.bac1, (0, 0))
        screen.blit(self.bac2, (0, 0))
        screen.blit(self.start, (300, 130))
        screen.blit(self.end, (550, 500))

    def show(self):
        """Stand still facing down for 30 frames (~1 second at 30 FPS)."""
        framerate = pygame.time.Clock()
        for n in range(30):
            framerate.tick(30)
            self.showBac()
            screen.blit(self.down, (self.x, self.y))
            pygame.display.update()

    def _animate_move(self, image, dx, dy, num):
        """Shared walk animation: *num* tiles of 5 frames, moving (dx, dy)
        pixels per frame.

        Refactor of four near-identical move* bodies; behaviour unchanged.
        """
        framerate = pygame.time.Clock()
        for n in range(num * 5):
            framerate.tick(30)
            self.showBac()
            if n % 4 == 0:
                screen.blit(image, (self.x, self.y))
            else:
                # NOTE(review): off-frames are drawn 5px higher, even for
                # horizontal moves -- kept from the original; confirm the
                # vertical "bob" is intentional.
                screen.blit(image, (self.x, self.y - 5))
            self.x += dx
            self.y += dy
            pygame.display.update()

    def moveUp(self, num=1):
        self._animate_move(self.up, 0, -5, num)

    def moveDown(self, num=1):
        self._animate_move(self.down, 0, 5, num)

    def moveRight(self, num=1):
        self._animate_move(self.right, 5, 0, num)

    def moveLeft(self, num=1):
        self._animate_move(self.left, -5, 0, num)
# Create the player sprite and show it at the start position.
xm = People()
xm.show()
def run():
    """Scripted walk from the start marker to the end, then idle forever."""
    xm.moveRight(3)
    xm.moveDown(3)
    xm.moveLeft(9)
    xm.moveDown(3)
    xm.moveRight(11)
    xm.moveDown(7)
    loo()  # never returns; keeps the window responsive
run()
|
18,677 | 31b94efe4acfea35149c61e69128d4b18e8a19f7 | class Port(object):
"""
Descriptor check port range,
that must be in 1024-65535 range.
"""
def __set_name__(self, owner, name):
self.prop = name
def __get__(self, instance, cls):
return instance.__dict__.get(self.prop, None)
def __set__(self, instance, value):
"""
Check port range: must be not well known port:
"The Well Known Ports are those from 0 through 1023".
And must be less or qeual than 65535.
"""
if 1023 > value or value > 65535:
raise ValueError("port must contains value from range 1024-65535")
instance.__dict__[self.prop] = value
|
18,678 | 8550095ca4889ee50f93bc9fea1e99b62fd7e83d | from PIL import Image
from io import BytesIO
import boto3
from boto3.dynamodb.conditions import Key, Attr
import base64
# AWS handles and table/bucket configuration.
DDB_RESOURCE = boto3.resource('dynamodb')
S3_CLIENT = boto3.client('s3', region_name="eu-west-1")
ddb_device_table_name = "iowt-devices"
ddb_event_table_name = "iowt-events"
ddb_device_table = DDB_RESOURCE.Table(ddb_device_table_name)
ddb_event_table = DDB_RESOURCE.Table(ddb_event_table_name)
s3_bucket = "iowt-events"
owner = "chrisw"
thumb_size = 128, 128
# Loop through devices and pick all tagged with 'owner'.
# NOTE(review): scan() only returns one page; large tables would need
# LastEvaluatedKey pagination -- confirm table sizes.
things = dict()
response = ddb_device_table.scan(FilterExpression=Attr('owner').eq(owner))
for item in response['Items']:
    things[item['id']] = item
# Loop devices and collect each one's events.
for thing in things.keys():
    response = ddb_event_table.scan(FilterExpression=Attr('device_id').eq(thing))
    things[thing]['events'] = response['Items']
# For every event: write a standalone HTML page embedding the image, and
# upload a 128x128 JPEG thumbnail next to the original in S3.
for i in things:
    for event in things[i]['events']:
        image_name = event['image_id']
        image_object = S3_CLIENT.get_object(Bucket=s3_bucket, Key=image_name)
        image_content = base64.b64encode(image_object['Body'].read())
        image = image_content.decode('utf-8')
        with open(event['id'] + ".html", "w") as f:
            f.write("<html><body>")
            f.write('<img alt="Embedded Image" src="data:image/png;base64,%s"/>' % image)
            # Bug fix: closing tags were emitted in the wrong order
            # ("</html></body>"), producing invalid HTML.
            f.write("</body></html>")
        im_buffer = BytesIO()
        im = Image.open(BytesIO(base64.b64decode(image)))
        im.thumbnail(thumb_size)
        im.save(im_buffer, format="JPEG")
        thumb_content = im_buffer.getvalue()
        S3_CLIENT.put_object(Body=thumb_content,
                             Bucket=s3_bucket,
                             Key=event['id'] + "_thumb.jpg")
|
18,679 | 395546e7a9e25da530034c801520405b43fd3023 | # -*- coding: utf-8 -*-
# The football.csv file contains the results from the English Premier League.
# The columns labeled ‘Goals’ and ‘Goals Allowed’ contain the total number of
# goals scored for and against each team in that season (so Arsenal scored 79 goals
# against opponents, and had 36 goals scored against them). Write a program to read the file,
# then print the name of the team with the smallest difference in ‘for’ and ‘against’ goals.
# The below skeleton is optional. You can use it or you can write the script with an approach of your choice.
import csv
def read_data(filename):
    """Parse the results CSV into ``{team: {'for': int, 'against': int}}``.

    Columns used: 0 = team name, 5 = goals for, 6 = goals against.
    The header row is skipped.
    """
    goals = {}
    # Fix: the original was Python-2-only -- it opened the file in binary
    # mode and called reader.next(); both break under Python 3.
    with open(filename, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for row in reader:
            name, for_, against_ = row[0], row[5], row[6]
            goals[name] = {'for': int(for_), 'against': int(against_)}
    return goals
def get_team_with_min_score_difference(goals):
    """Return ``(team, diff)`` for the team whose |for - against| is smallest.

    Mirrors the original's sentinel behaviour: an empty mapping (or one
    where every difference reaches the 9999 sentinel) yields ``('', 9999)``.
    """
    if not goals:
        return '', 9999
    def difference(team):
        return abs(goals[team]['for'] - goals[team]['against'])
    best = min(goals, key=difference)
    best_diff = difference(best)
    if best_diff >= 9999:
        return '', 9999
    return best, best_diff
# Script entry: load the data file and report the most balanced team.
goals = read_data('football.csv')
min_name, min_diff = get_team_with_min_score_difference(goals)
print("The team with the smallest 'for' vs. 'against' difference is {0}: with a difference of {1}".format(min_name, min_diff))
# Aston_Villa
# 1
|
18,680 | 5801c60e21936c8a6e92fe1bf1481204c7705275 | from .main import hdonly
def autoload():
    """Factory used by CouchPotato's plugin loader."""
    provider = hdonly()
    return provider
# CouchPotato settings-UI definition for the HD-Only torrent provider:
# one 'searcher' tab group with credentials, search and seeding options.
config = [{
    'name': 'hdonly',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'HD-Only',
            'description': 'See <a href="https://hd-only.org/">HD-Only.org</a>',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': 0,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'ignoreyear',
                    'label': 'Ignore Year',
                    'default': 0,
                    'type': 'bool',
                    'description': 'Won\'t use the release year to narrow the search if checked',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 80,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met (72 hours is tracker rules minimum).',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
|
18,681 | b953b3a384f9c443356b32d59d53aadbbab3c461 | """
Solves the 1D advection equation with a Riemann Solver and the Finite Volume
Method using the PyClaw Package.
∂q/∂t + u ∂q/∂x = 0
q : The conserved quantity e.g. a density
u : The advection speed
------
Scenario: A rectangular initial condition is transported to the right
┌────────────────────────────────────────┐
1.00 │⠀⡇⠀⠀⠀⠀⠀⠀⡖⠒⠒⠒⠒⠒⠒⠒⢲⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│ y1
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀ ⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸ ⠀⠀⠀⠀⠀ ⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀ ⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀ ⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀ ⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⢸ ⠀⠀⠀⠀u⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⢸ ⠀⠀-----> ⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⢸ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⢸ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⢸ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⢸ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⢸ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
│⠀⡇⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⢸ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀│
0.00 │⠤⡧⠤⠤⠤⠤⠤⠤⠼ ⠧⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤│
└────────────────────────────────────────┘
⠀ 0.00⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀1.00⠀
-> periodic Boundary conditions on the left and right end
------
Solution strategy
1. Instantiate a ClawPack Solver with an attached Riemann Solver according to
the PDE being solved including the Boundary Conditions.
2. Define the Finite Volume Mesh.
3. Instantiate fields on the Mesh (that save the conserved quantity "q").
4. Prescribe the initial condition.
5. Set Problem-Specific Parameters (in our case the advection speed "u").
6. Instantiate a controller that takes care of the time integration and attach
solution and solver to it.
7. Run the simulation and visualize the results.
"""
from clawpack import pyclaw
from clawpack import riemann
import numpy as np
def main():
    """Advect a square pulse once across a periodic 1-D domain and plot it."""
    # Finite-volume solver with the library's 1-D advection Riemann solver;
    # periodic boundary conditions on both ends.
    solver = pyclaw.ClawSolver1D(riemann.advection_1D)
    solver.bc_lower[0] = pyclaw.BC.periodic
    solver.bc_upper[0] = pyclaw.BC.periodic

    # 100-cell mesh on [0, 1].
    domain = pyclaw.Domain(pyclaw.Dimension(0.0, 1.0, 100))

    # Solution field holding the conserved quantity q.
    solution = pyclaw.Solution(solver.num_eqn, domain,)
    state = solution.state

    # Rectangular initial condition: q = 1 on (0.2, 0.4), 0 elsewhere.
    centers = state.grid.p_centers[0]
    state.q[0, :] = np.where((centers > 0.2) & (centers < 0.4), 1.0, 0.0)

    # Problem parameter: advection speed u.
    state.problem_data["u"] = 1.0

    # The controller integrates in time up to t = 1.
    controller = pyclaw.Controller()
    controller.solution = solution
    controller.solver = solver
    controller.tfinal = 1.0

    controller.run()
    pyclaw.plot.interactive_plot()
if __name__ == "__main__":
main()
|
18,682 | bb150a97bf787318733ad432078e011ccecbabd0 | #!/usr/bin/env python
""" Source code for AI solving maze (labyrinth). Using Dijkstra algorithm
for backtracking and Tremaux's algorithm as backup. Authors:
Adnan Begovic <abe065@post.uit.no>, Ole-Morten Tangen <ota000@post.uit.no>
and Magnus W. Bjoerklund <mbj042@post.uit.no>. October, 5th 2012"""
from adj_list_dict_of_dict import Graph
from labyrinth import labyrinth
from dijkstra_algorithm import Shortest_path
class Find_toby(object):
    """Maze-solving AI (Python 2): explores with a DFS-like walk, disarms
    traps, backtracks to the last open intersection via Dijkstra, and falls
    back to Tremaux's algorithm when no unvisited neighbour remains.

    Coordinates are (x, y) tuples; y grows southward, x grows eastward.
    """
    def __init__(self):
        #Using sets to track visited, visited twice and unvisited nodes...
        self.visitedtwice = set([])
        self.visited = set([])
        self.unvisited = set([])
        self.intersection = []
        self.lab = labyrinth()
        self.graph = Graph()
        self.current = (0,0)
        self.visited.add(self.current)
        #... and stack to track intersection nodes
        if self._find_intersection():
            self.intersection.append(self.current)
    def find_tile(self, neighbour_list):
        """ Check tile types for tiles around the AI player
        and acts on them accordingly.
        toby -> player moves to toby's tile
        trap -> run disarm command in the direction of the trap
        Returns True for a walkable 'tile', False otherwise.
        """
        if neighbour_list[1] == 'tile':
            if neighbour_list[2] == 'trap':
                if neighbour_list[0] == 'north':
                    self.lab.disarm('north')
                    print 'Disarm north trap'
                elif neighbour_list[0] == 'south':
                    self.lab.disarm('south')
                    print 'Disarm south trap'
                elif neighbour_list[0] == 'west':
                    self.lab.disarm('west')
                    print 'Disarm west trap'
                elif neighbour_list[0] == 'east':
                    self.lab.disarm('east')
                    print 'Disarm east trap'
            return True
        else:
            # Walking onto toby's tile ends the search.
            if neighbour_list[1] == 'toby':
                if neighbour_list[0] == 'north':
                    self.moveNorth()
                elif neighbour_list[0] == 'south':
                    self.moveSouth()
                elif neighbour_list[0] == 'west':
                    self.moveWest()
                elif neighbour_list[0] == 'east':
                    self.moveEast()
            return False
    def _add_vertice(self, neighbour_list):
        """ Current is tuple of its x and y position. Neighbour_list is
        the list of neighbours to the current.
        Returns the neighbour's coordinates, or False for non-tiles."""
        if self.find_tile(neighbour_list):
            if neighbour_list[0] == 'north':
                return (self.current[0],self.current[1] - 1)
            elif neighbour_list[0] == 'south':
                return (self.current[0],self.current[1] + 1)
            elif neighbour_list[0] == 'west':
                return (self.current[0] - 1,self.current[1])
            elif neighbour_list[0] == 'east':
                return (self.current[0] + 1,self.current[1])
        else:
            return False
    def _find_intersection(self):
        """ Finds intersection vertices in labyrinth. We are only
        interested in intersections with more than two paths."""
        count = 0
        for each_list in self.lab.look():
            if each_list[1] == 'wall':
                count += 1
        # Fewer than two walls around => more than two open paths.
        if count < 2:
            return True
        else:
            return False
    def _connect_neighbours(self):
        """ Connect neighbouring unvisited vertices and puts them to
        visited set and intersection stack """
        for prev in self.unvisited:
            for next in self.unvisited:
                # Adjacent when exactly one coordinate differs by +1.
                if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):
                    self.graph.addEdge((prev, next))
                    self.visited.add(prev)
                    self.visited.add(next)
                    if self._find_intersection():
                        self.intersection.append(prev)
                        self.intersection.append(next)
    def moveSouth(self):
        """ Move current to south. """
        south = (self.current[0], self.current[1] + 1)
        mv = self.lab.south()
        self.check_grue(mv)
        self.current = south
        self.visited.add(self.current)
    def moveNorth(self):
        """ Move current to north. """
        north = (self.current[0], self.current[1] - 1)
        mv = self.lab.north()
        self.check_grue(mv)
        self.current = north
        self.visited.add(self.current)
    def moveWest(self):
        """ Move current to west. """
        west = (self.current[0] - 1, self.current[1])
        mv = self.lab.west()
        self.check_grue(mv)
        self.current = west
        self.visited.add(self.current)
    def moveEast(self):
        """ Move current to east. """
        east = (self.current[0] + 1, self.current[1])
        mv = self.lab.east()
        self.check_grue(mv)
        self.current = east
        self.visited.add(self.current)
    def move_to_position(self, position):
        """ Moving AI to the given position. Used in backtracking.
        Assumes *position* is orthogonally adjacent to current."""
        if position[0] == self.current[0]:
            y_distance = position[1] - self.current[1]
            if y_distance > 0:
                self.moveSouth()
            else:
                self.moveNorth()
        elif position[1] == self.current[1]:
            x_distance = position[0] - self.current[0]
            if x_distance > 0:
                self.moveEast()
            else:
                self.moveWest()
    def backtrack(self):
        """ Tracking back using Dijkstra algorithm and intersection stack"""
        last_intersection = self.intersection.pop()
        retrace = Shortest_path().shortestPath(self.graph, self.current, last_intersection)
        print retrace
        print "Moving back..."
        # First node of the path is the current position itself.
        self.current = retrace.pop(0)
        if self.current in self.intersection:
            self.intersection.remove(self.current)
        while retrace:
            position = retrace.pop(0)
            self.move_to_position(position)
            if position in self.intersection:
                self.intersection.remove(position)
    def move(self):
        """ Moving. It's here the moving of AI actually happens.
        Prefers unvisited neighbours, then backtracking to a stacked
        intersection, then Tremaux's once-visited fallback."""
        self.build_graph()
        while 1:
            if self.current in self.visited:
                self.build_graph()
            if (self.current[0], self.current[1] + 1) in self.unvisited:
                self.moveSouth()
            elif (self.current[0], self.current[1] - 1) in self.unvisited:
                self.moveNorth()
            elif (self.current[0] - 1, self.current[1]) in self.unvisited:
                self.moveWest()
            elif (self.current[0] + 1, self.current[1]) in self.unvisited:
                self.moveEast()
            elif self.intersection:
                self.backtrack()
            #Using Tremaux's algorithm as backup
            elif (self.current[0] + 1, self.current[1]) in self.visited.difference(self.visitedtwice):
                self.visitedtwice.add(self.current)
                self.moveEast()
            elif (self.current[0] - 1, self.current[1]) in self.visited.difference(self.visitedtwice):
                self.visitedtwice.add(self.current)
                self.moveWest()
            elif (self.current[0], self.current[1] - 1) in self.visited.difference(self.visitedtwice):
                self.visitedtwice.add(self.current)
                self.moveNorth()
            elif (self.current[0], self.current[1] + 1) in self.visited.difference(self.visitedtwice):
                self.visitedtwice.add(self.current)
                self.moveSouth()
            if self._find_intersection() and self.current in self.unvisited:
                self.intersection.append(self.current)
            self.lab.cleanup()
    def check_grue(self, tile):
        """ Check for a grue at the tile the player is standing on.
        """
        if tile[2] == 'grue':
            if self.lab.inventory > 0:
                self.lab.fire()
                print 'Lighted match'
    def build_graph(self):
        """ Build graph around current, update unvisited set and connect neighbours. """
        for each_list in self.lab.look():
            vertice = self._add_vertice(each_list)
            if vertice:
                self.unvisited.add(vertice)
                self.graph.addEdge((self.current, vertice))
        self.unvisited -= self.visited
        self._connect_neighbours()
if __name__ == "__main__":
    # Script entry: build the AI and let it walk the labyrinth.
    toby = Find_toby()
    toby.move()
|
18,683 | 36038c9a7e8ce6bbb98cefa0021ca8e891f44a28 | __author__ = 'Stephen Strickland'
class Group:
    """Plain record describing a group; fields are filled in by callers."""

    def __init__(self):
        # Everything starts out as an empty string.
        self.Id = self.GroupName = self.GroupLocation = ""
|
18,684 | a853eab78711cbeff7f88663569251ffb89309a4 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from scipy.stats import skew
def scoring_csv():
    """
    Load scoring.csv, clean it and one-hot encode the categoricals.
    :return: serial number of observations(Scoring_SN) and features to predict on(X_Scoring)
    """
    scoring_csv = pd.read_csv('scoring.csv')
    scoring_columns = ['S/N', 'Gender', 'Age', 'Location', 'famsize', 'Pstatus', 'Medu', 'Fedu','reason', 'traveltime', 'studytime','failures', 'schoolsup', 'famsup', 'paid', 'activities', 'nursery', 'higher', 'internet', 'famrel', 'freetime', 'health', 'absences',]
    scoring_csv.columns = scoring_columns
    # 'reason' is dropped so the columns match the training features.
    scoring_csv = scoring_csv.drop(['reason'], axis=1)
    categorical_variable_list = ['Gender','Location','famsize','Pstatus','schoolsup','famsup','paid','activities','nursery','higher','internet']
    scoring_csv_cleaned= pd.get_dummies(scoring_csv,columns=categorical_variable_list,drop_first=True)#todo:train both models
    # Disabled skew-correction experiment kept below as a string literal.
    '''numeric_feats = scoring_csv_cleaned.dtypes[scoring_csv_cleaned.dtypes == "int64"].index
    skewed_feats = scoring_csv_cleaned[numeric_feats].apply(lambda x: skew(x.dropna())) # compute skewness
    skewed_feats = skewed_feats[skewed_feats > 0.75]
    skewed_feats = skewed_feats.index
    scoring_csv_cleaned[skewed_feats] = np.log1p(scoring_csv_cleaned[skewed_feats])'''
    scoring_SN = scoring_csv_cleaned['S/N']
    X_scoring = scoring_csv_cleaned.drop(['S/N'],axis=1)
    return scoring_SN,X_scoring
# Module-level: the scoring pipeline runs once at import time (reads scoring.csv).
scoring_SN,X_scoring= scoring_csv()
def write_to_csv(filename,y):
    """Write (serial number, score) pairs to *filename* as S/N,Scores.

    Python 2 code: the print statement and list-returning zip() below
    would both need changes under Python 3.
    """
    jh = np.array(zip(scoring_SN, y))
    new = pd.DataFrame(jh)
    print new.head()
    new.to_csv(filename, sep=',', encoding='UTF-8', header=['S/N', 'Scores'], index=False)
#importing csv
df = pd.read_csv('Train.csv').dropna().drop_duplicates()
#assigning new column names
new_columns = ['S/N','Gender','Age','Location','famsize','Pstatus','Medu','Fedu','traveltime','studytime','failures','schoolsup','famsup',
               'paid','activities','nursery','higher','internet','famrel','freetime','health','absences','Scores']
#todo: paid => did they pay for extra classes
df.columns = new_columns
#converting to categorical (one-hot encode, dropping the first level)
categorical_variable_list = ['Gender','Location','famsize','Pstatus','schoolsup','famsup','paid','activities','nursery','higher','internet']
df_cleaned = pd.get_dummies(df,columns=categorical_variable_list,drop_first=True)#todo:train both models
#creating target and feature variables
y = df_cleaned['Scores'].values.reshape(-1,1)#200 rows,1 column
X = df_cleaned.drop(['Scores','S/N'],axis=1) #200 rows,22 columns
# Disabled skew-correction experiment kept as a string literal.
'''numeric_feats = X.dtypes[X.dtypes == "int64"].index
skewed_feats = X[numeric_feats].apply(lambda x: skew(x.dropna())) # compute skewness
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
X[skewed_feats] = np.log1p(X[skewed_feats])
'''
# 70/30 train/test split with a fixed seed for reproducibility.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=42)
'''print X_train.shape
print X_test.shape
print y_train.shape
print y_test.shape
'''
#print X.columns
if __name__ == '__main__':
    # Fit a normalized linear regression and report RMSE on the test split.
    reg = LinearRegression(normalize=True)
    reg.fit(X_train,y_train)
    y_pred = reg.predict(X_test)
    from sklearn.metrics import mean_squared_error
    print np.sqrt(mean_squared_error(y_test,y_pred)) #todo : this equals to 11.8393014174
    #print reg.intercept_
    #print reg.coef_
#print zip(X.columns,reg.coef_) |
18,685 | 1b53ec444028c02693b3d9d520b28cb855700218 | from day6 import part_one, part_two
class TestDay6:
    """Worked examples from the Advent of Code 2019 day 6 puzzle text."""

    def test_part_one(self):
        # Renamed ``input`` -> ``orbits``: the original shadowed the builtin.
        orbits = ['COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G',
                  'G)H', 'D)I', 'E)J', 'J)K', 'K)L']
        assert part_one(orbits) == 42

    def test_part_two(self):
        orbits = ['COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G',
                  'G)H', 'D)I', 'E)J', 'J)K', 'K)L', 'K)YOU', 'I)SAN']
        assert part_two(orbits) == 4
18,686 | efd833eed608df09afd1134506a3697a21415489 | '''
Import and analyze rheological data
-----------------------------------
'''
from . import models
from . import rheodata
|
18,687 | 71406037718de2e2103229295be08a4942984a4d | from keras.models import Input, Model
from keras.layers import Lambda
from keras_signal import Spectrogram,MelSpectrogram,MFCC,STFT,InverseSTFT
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
# Demo script: exercise each keras_signal layer on a WAV file and plot results.
frame_length = 512
frame_step = frame_length//4  # 75% overlap between frames
window = "hann"

rate,src = wavfile.read("test.wav")
src = src.reshape((1,-1))  # add a batch dimension of 1

# Power spectrogram
x = Input(shape=(None,))
y = Spectrogram(frame_length,frame_step)(x)
model_spec = Model(inputs=x,outputs=y)
spec = model_spec.predict(src)
print("Powerspec shape: ",spec.shape)
plt.imshow(np.log(spec[0].T))  # log scale for visibility
plt.show()

# MelSpectrogram
x = Input(shape=(None,))
y = MelSpectrogram(frame_length,frame_step,num_mel_bins=40,num_spectrogram_bins=frame_length//2+1,sample_rate=rate)(x)
model_melspec = Model(inputs=x,outputs=y)
melspec = model_melspec.predict(src)
print("Melspec shape: ",melspec.shape)
plt.imshow(melspec[0].T)
plt.show()

# MFCC (this section was previously mislabelled "MelSpectrogram")
x = Input(shape=(None,))
y = MFCC(frame_length,frame_step,num_mel_bins=40,num_spectrogram_bins=frame_length//2+1,sample_rate=rate)(x)
model_mfcc = Model(inputs=x,outputs=y)
mfcc = model_mfcc.predict(src)
print("MFCC shape: ",mfcc.shape)
plt.imshow(mfcc[0].T)
plt.show()

# STFT -ISTFT round trip
x = Input(shape=(None,))
y = STFT(frame_length,frame_step,window_fn_type=window)(x)
model_stft = Model(inputs=x,outputs=y)
f = model_stft.predict(src)
print("STFT shape: ",f.shape)

x = Input(shape=(None,frame_length//2+1,2))  # (frames, bins, re/im)
y = InverseSTFT(frame_length,frame_step,window_fn_type=window)(x)
model_istft = Model(inputs=x,outputs=y)
dst = model_istft.predict(f)
print("ISTFT shape: ",dst.shape)

# compare original and reconstructed signals.
plt.plot(src.flatten())
plt.plot(dst.flatten())
plt.show()
18,688 | fa6e092bccd2a612ed696871cc754f3679e5c991 | from django.contrib import admin
from core.admin import LikeAndCommentAbleAdmin
from posts.models import Post
@admin.register(Post)
class PostAdmin(LikeAndCommentAbleAdmin):
    """Admin page for Post; counters and timestamps are display-only."""

    readonly_fields = ('likes_count', 'comments_count', 'created', 'updated')
18,689 | 0ec230309fc2ef7c63b74450dc43b22440f53bd9 | # Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
import yaml
from mistral_lib import actions
from swiftclient import exceptions as swiftexceptions
from tripleo_common.actions import scale
from tripleo_common import constants
from tripleo_common.tests import base
def mock_stack():
    """Build a Mock heat stack named 'My Stack' with ComputeCount == '2'."""
    parameters = {'ComputeCount': '2'}
    stack = mock.Mock()
    stack.name = 'My Stack'
    stack.parameters = parameters
    # to_dict() reports the same parameters object the stack exposes.
    stack.to_dict.return_value = {
        'uuid': 5,
        'name': 'My Stack',
        'parameters': parameters,
    }
    return stack
class ScaleDownActionTest(base.TestCase):
    """Tests for scale.ScaleDownAction (removing nodes from an overcloud)."""

    def setUp(self):
        super(ScaleDownActionTest, self).setUp()
        # Lightweight stand-in type for an image record with only an id.
        self.image = collections.namedtuple('image', ['id'])

    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'cache_delete')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_orchestration_client')
    @mock.patch('heatclient.common.template_utils.'
                'process_multiple_environments_and_files')
    @mock.patch('heatclient.common.template_utils.get_template_contents')
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    def test_run(self, mock_get_object_client,
                 mock_get_template_contents, mock_env_files,
                 mock_get_heat_client, mock_cache):
        """Happy path: scaling down one Compute node validates the stack,
        issues a stack update with the removal blacklist, clears the count
        parameters and invalidates the cached parameters."""
        mock_env_files.return_value = ({}, {})
        heatclient = mock.MagicMock()
        # Two heat resources: the Compute ResourceGroup and one member node
        # whose physical id matches the node being removed below.
        heatclient.resources.list.return_value = [
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/123'}],
                logical_resource_id='logical_id',
                physical_resource_id='resource_id',
                resource_type='OS::Heat::ResourceGroup',
                resource_name='Compute'
            ),
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/124'}],
                logical_resource_id='node0',
                physical_resource_id='123',
                resource_type='OS::TripleO::Compute',
                parent_resource='Compute',
                resource_name='node0',
            )
        ]
        heatclient.stacks.get.return_value = mock_stack()
        heatclient.stacks.validate.return_value = {}
        mock_get_heat_client.return_value = heatclient

        mock_ctx = mock.MagicMock()

        swift = mock.MagicMock(url="http://test.com")
        # Canned plan artifacts served from swift, in the exact order the
        # action fetches them; the final ClientException ends the sequence.
        mock_env = yaml.safe_dump({
            'name': 'overcloud',
            'temp_environment': 'temp_environment',
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}]
        }, default_flow_style=False)
        mock_roles = yaml.safe_dump([{"name": "foo"}])
        mock_network = yaml.safe_dump([{'enabled': False}])
        mock_exclude = yaml.safe_dump({"name": "foo"})
        swift.get_object.side_effect = (
            ({}, mock_env),
            ({}, mock_env),
            ({}, mock_roles),
            ({}, mock_network),
            ({}, mock_exclude),
            ({}, mock_env),
            ({}, mock_env),
            ({}, mock_env),
            ({}, mock_roles),
            ({}, mock_network),
            ({}, mock_exclude),
            ({}, mock_env),
            ({}, mock_env),
            swiftexceptions.ClientException('atest2')
        )

        def return_container_files(*args):
            # Container listing used by the plan's jinja template processing.
            return ('headers', [{'name': 'foo.role.j2.yaml'}])

        swift.get_container = mock.MagicMock(
            side_effect=return_container_files)
        mock_get_object_client.return_value = swift

        # Environment expected to be passed through to heat unchanged.
        env = {
            'resource_registry': {
                'resources': {'*': {'*': {'UpdateDeployment': {'hooks': []}}}}
            }
        }

        mock_get_template_contents.return_value = ({}, {
            'heat_template_version': '2016-04-30'
        })

        # Test
        action = scale.ScaleDownAction(
            constants.STACK_TIMEOUT_DEFAULT, ['resource_id'], 'stack')
        result = action.run(mock_ctx)

        heatclient.stacks.validate.assert_called_once_with(
            environment=env,
            files={},
            show_nested=True,
            template={'heat_template_version': '2016-04-30'}
        )

        # The role's count/removal-policy parameters must be cleared so the
        # next deploy recomputes them.
        clear_list = list(['ComputeCount', 'ComputeRemovalPolicies'])
        _, kwargs = heatclient.stacks.update.call_args
        self.assertEqual(set(kwargs['clear_parameters']), set(clear_list))
        self.assertEqual(kwargs['environment'], env)
        self.assertEqual(kwargs['existing'], True)
        self.assertEqual(kwargs['files'], {})
        mock_cache.assert_called_with(
            mock_ctx,
            "stack",
            "tripleo.parameters.get"
        )
        # A successful run returns None.
        self.assertEqual(None, result)

    @mock.patch('tripleo_common.actions.scale.ScaleDownAction.'
                '_get_removal_params_from_heat')
    @mock.patch('tripleo_common.actions.scale.ScaleDownAction._update_stack')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_orchestration_client')
    def test_run_bad_update(self, mock_get_heat_client,
                            mock__update_stack,
                            mock__get_removal_params_from_heat):
        """When the stack update fails, run() propagates the error Result."""
        mock__update_stack.return_value = actions.Result(error='Update error')
        mock__get_removal_params_from_heat.return_value = {}

        heatclient = mock.MagicMock()
        heatclient.resources.list.return_value = [
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/123'}],
                logical_resource_id='logical_id',
                physical_resource_id='resource_id',
                resource_type='OS::Heat::ResourceGroup',
                resource_name='Compute'
            ),
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/124'}],
                logical_resource_id='node0',
                physical_resource_id='123',
                resource_type='OS::TripleO::Compute',
                parent_resource='Compute',
                resource_name='node0',
            )
        ]
        heatclient.stacks.get.return_value = mock_stack()
        heatclient.stacks.validate.return_value = {}
        mock_get_heat_client.return_value = heatclient

        mock_ctx = mock.MagicMock()

        # Test
        action = scale.ScaleDownAction(
            constants.STACK_TIMEOUT_DEFAULT, ['resource_id'], 'stack')
        result = action.run(mock_ctx)

        self.assertEqual(actions.Result(error='Update error'), result)
|
18,690 | 40c385e196208c76313edcb8d0df90ce81478027 | # coding: utf-8
import re
import spacy
import pandas as pd
import numpy as np
import pickle
import nltk
import time
from tqdm import tqdm
from nlp_module.loader import load_feature_dict, load_rule_dict, nlp
from nlp_module.extractor import ReportExtractor
from nlp_module.extractor_Helper import *
def sort_list(lst):
    """Return the items of *lst* reordered from longest to shortest."""
    by_length = pd.Series([len(entry) for entry in lst], index=lst)
    return by_length.sort_values(ascending=False).index.tolist()
# ### Sentence Processing
def pre_process_sents(findings):
    """Normalise a report paragraph into a flat list of lower-case tokens.

    Returns [] when *findings* is missing (None, or a NaN float as read
    from pandas). Otherwise sentence-tokenises, lower-cases, splits on
    whitespace, strips date patterns, and replaces '/', newlines and '.'
    with spaces.
    """
    # BUG FIX: the original tested `type(findings) == None`, which is never
    # true (type() never returns None), so None inputs fell through to the
    # tokenizer and crashed.
    if findings is None or isinstance(findings, float):
        return []
    sentences = nltk.tokenize.sent_tokenize(findings)
    sentences = [sent.lower() for sent in sentences]
    sentences = [sent.split(" ") for sent in sentences]
    sentences = [sent for sents in sentences for sent in sents]
    # Remove date-like tokens, e.g. 10/18/2017.
    sentences = [re.sub(r'\d+?/\d+?/\d{2,}', '', sent) for sent in sentences]
    sentences = [sent.replace("/", " ").replace("\n", " ") for sent in sentences]
    sentences = [sent.replace(".", " ") for sent in sentences]
    return sentences
def get_sents(df):
    """Return (impression tokens, findings tokens), each de-duplicated,
    collected from the IMPRESSION and FINDINGS columns of *df*."""
    impression_lists, finding_lists = [], []
    for row_idx in range(len(df)):
        record = df.iloc[row_idx]
        impression_lists.append(pre_process_sents(record.IMPRESSION))
        finding_lists.append(pre_process_sents(record.FINDINGS))
    imps = list({token for tokens in impression_lists for token in tokens})
    finds = list({token for tokens in finding_lists for token in tokens})
    return imps, finds
# ### ClauseSplitter
'''Split initial list of sentences into a list of sentences by clause'''
def split_by_clause(sentence, term_rules):
    """Split one sentence into lower-cased clause strings.

    Clause boundaries come from, in order: [TERM] termination rules,
    comma splitting (with list/oxford-comma protection), ';'/':'
    splitting, and - on the no-[TERM] path only - splitting a single
    'and' when both halves contain a noun and a verb per spaCy.
    """
    # print("Sentence:", sentence)
    '''Subfunction to split up sentence if the word AND is present'''
    def split_ands(phrase):
        # Only split a single comma-free 'and' whose two halves each look
        # like a full clause (noun + verb) under the spaCy tagger.
        if phrase.count('and') == 1 and "," not in phrase:
            parts = phrase.split('and')
            pos1 = [token.pos_ for token in nlp(parts[0])]
            pos2 = [token.pos_ for token in nlp(parts[1])]
            if 'NOUN' in pos1 and 'VERB' in pos1 and 'NOUN' in pos2 and 'VERB' in pos2: # maybe also 'ADV'
                return parts
            else:
                return [phrase]
        else:
            return [phrase]
    '''Subfunction to split up sentence into comma-separated phrases'''
    def split_sent_by_comma(sent):
        ## Find all commas
        comma_indices = [c.start() for c in re.finditer(',', sent)]
        ## If commas are more than 5, treat it as a single sentence.
        if len(comma_indices) > 5:
            return [sent]
        ## ------------------------
        ## Find commas not for sents
        ## ------------------------
        oxford_comma_indices = []
        no_oxford_comma_indices = []
        # if oxford comma
        reg_pattern = ', (((\w+\s?){1,3},)\s)+'
        if re.findall(reg_pattern, sent):
            found_indices = [(c.start(), c.end()) for c in re.finditer(reg_pattern, sent)]
            indices_and = [(start, end+3) for (start, end) in found_indices if 'and' == sent[end:end + 3]]
            indices_or = [(start, end+2) for (start, end) in found_indices if 'or' == sent[end:end + 2]]
            oxford_comma_indices += list(set(indices_and) | set(indices_or))
        # if no oxford comma
        reg_pattern = '((\w+\s?){1,3},\s?)+?(\s\w+){1,3}?'
        if not oxford_comma_indices and re.findall(reg_pattern, sent):
            found_indices = [(c.start(), c.end()) for c in re.finditer(reg_pattern, sent)]
            indices = [(start, end + re.search(r'\s(and|or)', sent[end:end+10]).end()) for (start, end) in found_indices if re.findall(r'\s(and|or)', sent[end:end+10])]
            no_oxford_comma_indices += list(indices)
        word_comma_indices = oxford_comma_indices + no_oxford_comma_indices
        # Commas that belong to an enumeration are excluded from splitting.
        for index_tuple in word_comma_indices:
            start_word_comma, end_oxford_comma = index_tuple
            comma_indices = [x for x in comma_indices if x not in range(start_word_comma, end_oxford_comma)]
        # if comma_indices:
        #     print("Sent commas: ", comma_indices)
        ## Split a sentence with sentences comma
        sxns = [sent[i:j] for i, j in zip([0] + comma_indices, comma_indices + [len(sent)])]
        return sxns

    term_pat, clauses = "\[TERM\]", []
    for rule in term_rules:  # check every rule for a clause termination word
        reformatRule = re.sub(r'\s+', '_', rule[0].strip())
        sentence = rule[3].sub(' ' + rule[2].strip()  # add in Negation tag to
                               + reformatRule + rule[2].strip() + ' ', sentence)
    if re.findall(term_pat, sentence, flags=re.IGNORECASE):
        # if termination words exist, split up the phrases by them
        phrases = re.split(term_pat, sentence, flags=re.IGNORECASE)
        phrases = [" ".join([word.strip() for word in phrase.split()]) for phrase in phrases if len(phrase.split()) > 1]
        phrases = [split_sent_by_comma(phrase) for phrase in phrases]  # Split phrases by comma, except in list case
        phrases = sum(phrases, [])  # [phrase for sub_phrase in phrases for phrase in sub_phrase]
        phrases = [re.split(';|:', phrase) for phrase in phrases]
        phrases = sum(phrases, [])  # [phrase for sub_phrase in phrases for phrase in sub_phrase]
        # NOTE(review): this splits each phrase on a single space and then
        # flattens, which yields individual words - confirm intended
        # (possibly a multi-space delimiter was meant).
        phrases = [phrase.split("  ") for phrase in phrases]
        phrases = sum(phrases, [])  # [phrase for sub_phrase in phrases for phrase in sub_phrase]
    else:
        # if no termination words exist, return listicized sentence following other split rules
        phrases = split_sent_by_comma(sentence)
        phrases = [re.split(';|:', phrase) for phrase in phrases]
        phrases = sum(phrases, [])  # [phrase for sub_phrase in phrases for phrase in sub_phrase]
        # NOTE(review): same single-space flattening caveat as above.
        phrases = [phrase.split("  ") for phrase in phrases]
        phrases = sum(phrases, [])  # [phrase for sub_phrase in phrases for phrase in sub_phrase]
        phrases = [split_ands(phrase) for phrase in phrases]
        phrases = sum(phrases, [])  # [phrase for sub_phrase in phrases for phrase in sub_phrase]
    phrases = [phrase.replace(" -,", " - ,") for phrase in phrases]
    for phrase in phrases:
        if len(phrase) != 0:
            clauses.append(phrase.lower())
    return clauses
def remove_double_errors(df):
if 'effusion' in df.findings.values and 'pleural effusion' in df.findings.values:
df = df[df.findings != 'effusion']
if 'effusions' in df.findings.values and 'pleural effusions' in df.findings.values:
df = df[df.findings != 'effusions']
if 'process' in df.findings.values and re.findall('\S+\s+process', ", ".join(df.findings.values)):
df = df[df.findings != 'process']
if 'processes' in df.findings.values and re.findall('\S+\s+processes', ", ".join(df.findings.values)):
df = df[df.findings != 'processes']
if 'disease' in df.findings.values and (re.findall('\S+\s+disease', ", ".join(df.findings.values))):
df = df[df.findings != 'disease']
return df
def remove_submatches(matches_to_search):
    """Collapse substring duplicates, keeping only the longest member of
    each group of related matches.

    NOTE: consumes (mutates) the input list, as the original did.
    """
    kept = []
    while matches_to_search:
        longest = max(matches_to_search, key=len)
        matches_to_search.remove(longest)
        group = [longest] + [m for m in matches_to_search if m in longest]
        kept.append(max(group, key=len))
        for member in group:
            if member in matches_to_search:
                matches_to_search.remove(member)
    return kept
def get_abnormals(df):
    """Return unique abnormal 'current' findings plus 'previous' procedures,
    submatch-deduplicated.

    A finding counts as normal when (with parentheses stripped) it contains
    any term from normal_list or 'stable'/'aerated'/'aerated (well)'.
    """
    normal_markers = normal_list + ["stable", "aerated", "aerated (well)"]
    abnormals = []
    for finding in df[df.statuses == 'current'].findings:
        stripped = finding.replace("(", "").replace(")", "")
        if not any(marker in stripped for marker in normal_markers):
            abnormals.append(finding)
    # Previous procedures always count as reportable abnormalities.
    previous_procedures = df[df.finding_types == 'procedure']
    previous_procedures = previous_procedures[previous_procedures.statuses == 'previous']
    for finding in previous_procedures.findings:
        abnormals.append(finding)
    return list(set(remove_submatches(abnormals)))
def get_abnormals_and_locs(df):
    """Return ((finding, location) pairs, (finding, descriptor) pairs) for
    abnormal 'current' findings and 'previous' procedures."""
    normal_markers = normal_list + ["stable", "aerated", "aerated (well)"]
    location_pairs, descriptor_pairs = [], []
    current_rows = df[df.statuses == 'current']
    for row_idx in range(len(current_rows)):
        row = current_rows.iloc[row_idx]
        stripped = row.findings.replace("(", "").replace(")", "")
        if not any(marker in stripped for marker in normal_markers):
            location_pairs.append((row.findings, row.locations))
            descriptor_pairs.append((row.findings, row.descriptors))
    # Previous procedures are always included.
    previous_procedures = df[df.finding_types == 'procedure']
    previous_procedures = previous_procedures[previous_procedures.statuses == 'previous']
    for row_idx in range(len(previous_procedures)):
        row = previous_procedures.iloc[row_idx]
        location_pairs.append((row.findings, row.locations))
        descriptor_pairs.append((row.findings, row.descriptors))
    return (list(set(remove_submatches(location_pairs))),
            list(set(remove_submatches(descriptor_pairs))))
def get_findings(df):
    """Return unique findings that are neither negated nor previous,
    submatch-deduplicated."""
    for excluded_status in ('negated', 'negated, previous', 'previous'):
        df = df[df.statuses != excluded_status]
    devices = [finding for finding in df.findings]
    return list(set(remove_submatches(devices)))
def get_changes(df):
    """Return unique 'finding, descriptor' change strings from non-negated
    rows; single-word descriptions are dropped."""
    df = df[df.statuses != 'negated']
    df = df[df.statuses != 'negated, previous']
    descriptions = []
    for finding, descriptor in zip(df.findings, df.descriptors):
        description = finding + ", " + descriptor
        if len(description.split()) > 1:
            descriptions.append(description)
    return list(set(remove_submatches(descriptions)))
def get_changes_and_locs(df):
    """Return ((finding, descriptor) pairs, (description, location) pairs)
    for non-negated change rows with a multi-word description."""
    df = df[df.statuses != 'negated']
    df = df[df.statuses != 'negated, previous']
    descriptor_pairs, location_pairs = [], []
    for row_idx in range(len(df)):
        row = df.iloc[row_idx]
        description = row.findings + ", " + row.descriptors
        if len(description.split()) > 1:
            location_pairs.append((description, row.locations))
            descriptor_pairs.append((row.findings, row.descriptors))
    return (list(set(remove_submatches(descriptor_pairs))),
            list(set(remove_submatches(location_pairs))))
def convert_list_to_string(list_items):
    """Join *list_items* into a single '--'-separated string.

    Returns '' for None or an empty list; each item is str()-converted.
    """
    # BUG FIX: the original evaluated len(list_items) before the None check,
    # so passing None raised TypeError instead of returning "".
    if not list_items:
        return ""
    # Debug print removed; it leaked list contents to stdout on every call.
    return "--".join(str(item) for item in list_items)
def merge_paragraphs(list_of_paragraph):
    """Concatenate all non-float paragraphs (pandas NaN cells are floats)
    into one string."""
    return "".join(
        paragraph for paragraph in list_of_paragraph
        if type(paragraph) != float
    )
# skipped 20737
# skipped 20737
def extract_from_rednet_report(radnet):
    """Extract abnormalities/changes/procedures per report row of *radnet*.

    Returns a DataFrame with one row per report and columns
    ['abnorms', 'devices', 'vis_diseases', 'anatomies', 'procedures',
    'changes']; devices/vis_diseases/anatomies are always left empty here.

    NOTE(review): reads the module-level `feature_dict`, which is only
    assigned inside the __main__ guard - confirm callers set it up first.
    """
    all_abnorms, all_devices, all_vis_diseases, all_changes, all_procedures, all_anatomies = [], [], [], [], [], []
    for i in tqdm(range(len(radnet))):
        abnormalities, devices, vis_diseases, changes, procedures, anatomies = [], [], [], [], [], []
        findings = radnet.iloc[i].FINDINGS
        impression = radnet.iloc[i].IMPRESSION
        conclusion = radnet.iloc[i].Conclusion
        # Merge all available narrative sections into one text blob.
        report_interest = merge_paragraphs([findings, impression, conclusion])
        patient_sents = pre_process_sents(report_interest)
        results = get_patient_results(patient_sents, feature_dict)
        if results is not None and len(results) > 0:
            abnormalities = get_abnormals(results[results.finding_types != 'change'])
            changes = get_changes(results[results.finding_types == 'change'])
            procedures = get_findings(results[results.finding_types == 'procedure'])
        all_abnorms.append(abnormalities)
        all_devices.append(devices)
        all_anatomies.append(anatomies)
        all_procedures.append(procedures)
        all_vis_diseases.append(vis_diseases)
        all_changes.append(changes)
    return pd.DataFrame([all_abnorms, all_devices, all_vis_diseases, all_anatomies, all_procedures, all_changes],
                        index=['abnorms', 'devices', 'vis_diseases', 'anatomies', 'procedures', 'changes']).T
def extract_from_calues(clauses, rules_dict, feature_dict):
    """Run ReportExtractor over each clause; return the non-empty result
    DataFrames (one per clause), each tagged with its source clause.

    NOTE(review): the function name looks like a typo for
    'extract_from_clauses' but is referenced elsewhere - keep as-is.
    """
    clause_outputs = []
    for clause in clauses:
        start = time.time()  # NOTE(review): timing captured but never used
        extractor = ReportExtractor(clause=clause, neg_rules=rules_dict["neg_rules"],
                                    prev_rules=rules_dict["prev_rules"],
                                    vis_dis_list=feature_dict["vis_dis_list"],
                                    anatomy_list=feature_dict["anatomy_list"],
                                    procedure_list=feature_dict["procedure_list"],
                                    device_list=feature_dict["device_list"],
                                    change_list=feature_dict["change_list"],
                                    locations_list=feature_dict["locations"],
                                    descriptor_list=sort_list(feature_dict["descriptors"]),
                                    normal_list=feature_dict["normal_list"],
                                    hedge_list=feature_dict["hedge_list"],
                                    post_hedge_list=feature_dict["post_hedge_list"],
                                    hedge_dict=feature_dict['hedge_dict'],
                                    hedge_scores=feature_dict['hedge_scores'],
                                    grab=True)
        clause_output = extractor.run_extractor()
        # Only keep clauses that produced at least one extracted row; the
        # 'clause' column is added after appending (same object, so the
        # stored frame is updated too).
        if clause_output.shape[0]:
            # clause_outputs.append(extractor.run_extractor())
            clause_outputs.append(clause_output)
            clause_output['clause'] = clause
    return clause_outputs
def get_patient_results(sents, feature_dict):
    """Run the clause extractor over each preprocessed sentence.

    Splits every sentence into clauses via the termination rules, extracts
    findings from the clauses, and returns a list with one concatenated
    DataFrame per sentence that produced output.
    """
    rules_dict = feature_dict['rule_dict']
    patient_sent_dfs = []
    for sent in sents:
        clauses = split_by_clause(sent, rules_dict["term_rules"])
        # BUG FIX: extract_from_calues requires rules_dict and feature_dict;
        # the original called it with clauses only, raising TypeError.
        clause_outputs = extract_from_calues(clauses, rules_dict, feature_dict)
        if clause_outputs:
            patient_sent_dfs.append(pd.concat(clause_outputs))
    return patient_sent_dfs
def extract_findings_from_reports(df, rules_dict, feature_dict):
    """Extract findings for every report row of *df*; returns one stacked
    DataFrame of per-sentence results.

    NOTE: hedge_dict/hedge_scores and rules_dict are looked up but not used
    directly here; extraction parameters come from *feature_dict*.
    """
    hedge_dict = feature_dict['hedge_dict']
    hedge_scores = feature_dict['hedge_scores']
    all_results = pd.DataFrame()
    for row_idx in tqdm(range(len(df))):
        record = df.iloc[row_idx]
        accnum = record.accnum
        # Concatenate whichever narrative sections are present (NaN cells
        # read back from pandas are floats and are skipped).
        report_interest = ""
        for paragraph in (record.FINDINGS, record.IMPRESSION, record.Conclusion):
            if type(paragraph) != float:
                report_interest += paragraph
        patient_sents = pre_process_sents(report_interest)
        results = get_patient_results(patient_sents, feature_dict)
        all_results = all_results.append(results, ignore_index=True)
    return all_results
def extract_runner():
    """End-to-end driver: load features, parse the report CSV, extract
    findings and save them to outputs/extracts_all_1.csv.

    NOTE(review): reads the module-level `rule_dict` and `cxr_rpt_dir`,
    which are only assigned inside the __main__ guard - confirm setup.
    NOTE(review): the first extract_findings_from_reports result is
    discarded and overwritten by extract_from_rednet_report below -
    confirm which pipeline is intended.
    """
    feature_dict = load_feature_dict()
    # radnet = pd.read_excel("data/csvs/radnet_norm_parsed.xlsx")
    # df = pd.read_csv("{}/radnet_cxr_100K_reports_parsed.csv".format(cxr_rpt_dir), delimiter="|", dtype=str)
    df = pd.read_csv("{}/sample.csv".format(cxr_rpt_dir), delimiter="|", dtype=str)
    extracted_findings = extract_findings_from_reports(df, rule_dict, feature_dict)
    list_columns = ['accnum', 'findings', 'finding_types', 'certainties', 'statuses', 'descriptors', 'locations',
                    'changed']
    extracted_findings = extracted_findings[list_columns]

    work_df = df
    list_columns = ['accnum', 'abnorms', 'devices', 'vis_diseases', 'anatomies', 'procedures', 'changes']
    # Second extraction pass replaces the first result entirely.
    extracted_findings = extract_from_rednet_report(work_df)
    extracted_findings['accnum'] = pd.Series(list(work_df['accnum']))
    extracted_findings = extracted_findings[list_columns]
    # Carry the raw report text alongside the extractions.
    extracted_findings['FINDINGS'] = pd.Series(list(work_df['FINDINGS']))
    extracted_findings['IMPRESSION'] = pd.Series(list(work_df['IMPRESSION']))
    extracted_findings['CONCLUSION'] = pd.Series(list(work_df['Conclusion']))
    extracted_findings['REPORT'] = pd.Series(list(work_df['report']))

    # Save Extracts
    extracted_findings.to_csv("outputs/extracts_all_1.csv", index=None)
    extracts = pd.read_csv("outputs/extracts_all_1.csv")  # .drop("Unnamed: 0", axis = 1)
def extract_from_paragraph(paragraph, feature_dict, rules_dict):
    """Extract clause-level findings from one free-text paragraph.

    Returns a DataFrame sorted by 'findings', or an empty DataFrame when
    nothing was extracted.
    """
    sents = pre_process_sents(paragraph)
    print("len sent", len(sents))
    clause_frames = []
    for sent in sents:
        sent_clauses = split_by_clause(sent, rules_dict["term_rules"])
        clause_frames += extract_from_calues(sent_clauses, rules_dict, feature_dict)
    if not clause_frames:
        return pd.DataFrame()
    merged = pd.concat(clause_frames, axis=0).reset_index()
    merged = merged.sort_values('findings')
    return merged.drop(columns=['index'])
def extract_from_paragraph_light(paragraph, target_list, rules_dict):
    """Lightweight negation tagging: for each clause containing a target
    term, mark it '-' (negated, per neg_rules) or '+' (affirmed).

    Returns a DataFrame with one row per tagged clause, columns 'clause',
    'sent' and one column per target term.
    """
    outputs = []
    sents = pre_process_sents(paragraph)
    print("len sent", len(sents))
    filler = "_"
    term_rules = rules_dict['term_rules']
    neg_rules = rules_dict['neg_rules']
    output_df = pd.DataFrame()
    for sentence in sents:
        start_time = time.time()
        clauses_in_sent = split_by_clause(sentence, term_rules)
        execution_time = time.time() - start_time
        # Log pathologically slow sentences for later inspection.
        if execution_time > 1000:
            print("clauses", clauses_in_sent, file=open('../data/logs/long_clauses_{}.log'.format(time.time()), "a"))

        def show_neg_with_cla(cla, target):
            # Returns '-' when a negation rule fires in the clause, '+'
            # when the target is present unnegated, '' when absent.
            if target in cla.lower():
                # print(cla, target)
                for rule in neg_rules:
                    reformatRule = re.sub(r'\s+', filler, rule[0].strip())  # rule[0] == not_had
                    cla = rule[3].sub(' ' + rule[2].strip() + reformatRule + rule[2].strip() + ' ', cla)
                    # 'not had' -> ' [PREN]note_had[PREN] '
                if re.findall('\[[A-Za-z]{4}\]', cla):
                    tag = '-'
                else:
                    tag = '+'
                return tag
            return ""

        for cla in clauses_in_sent:
            # print('Len of clauses :', len(cla), cla)
            row = pd.Series()
            if len(cla) < 3:
                continue
            row['clause'] = cla
            row['sent'] = sentence
            result_tag = ""
            for target in target_list:
                tag = show_neg_with_cla(cla, target)
                row[target] = tag
                result_tag += tag
            ## Add only clasue having tag
            if result_tag.strip():
                output_df = output_df.append(row, ignore_index=True)
    return output_df
if __name__ == "__main__":
# extract_runner()
nlp_data_dir = "./rules"
cxr_rpt_dir = "../../data"
feature_dict = load_feature_dict(nlp_data_dir)
rule_dict = load_rule_dict(nlp_data_dir)
# text = "'EXAM: X-RAY CHEST PA AND LATERAL HISTORY: Pneumonia, unspecified organism per script. Cough and congestion for 3 years. Chest pain since sternum injury from car accident on June 2, 2017. COPD. Hypertension controlled by medication. Former smoker for 55 years that quit 5 months ago. TECHNIQUE: 2 views of the chest. COMPARISON: 10/18/2017, 12/8/2014, and 1/16/2014 FINDINGS: Cardiomediastinal silhouette is within normal limits. Atheromatous calcifications of the aorta. Mitral annulus calcifications. Patchy infiltrate in the region of the superior segment of the right lower lobe is slightly decreased since 10/18/2017 and appears new since the 2014 x-rays. Relatively stable patchy density at the right lung base which may be from scarring or atelectasis. The lungs are hyperinflated consistent with chronic obstructive pulmonary disease. The left lung appears grossly clear. Stable probable pleural thickening at the right lung base. Degenerative changes of the spine. The bones appear demineralized. IMPRESSION: Patchy infiltrate in the region of the superior segment of the right lower lobe is slightly decreased since 10/18/2017. Other stable findings as described above.'"
# result_df = extract_from_paragraph(text, feature_dict, rule_dict)
# print(result_df)
# result_df.to_csv('result_sample1.csv', index=False, encoding='utf8')
#
# text = "Exam Number: 17271262 Report Status: Final Type: CTChestWC Date/Time: 08/01/2014 11:22 Exam Code: CTCHW Ordering Provider: Hochberg, Ephraim P MD HISTORY: Lymphadenopathy - Neoplasm - Lymphoma REPORT CT scan of the chest WITH intravenous contrast, using standard protocol. COMPARISON: 8/05/2013, 9/6/2011, 8/16/2010 FINDINGS: Lines/tubes: There is a right chest port, the catheter tip is near the cavoatrial junction. Lungs and Airways: The central airways are patent. There is left lower lobe cylindrical bronchiectasis and thickening of subsegmental bronchi. There is a right upper lobe central 1 cm groundglass opacity on image 47, unchanged since 2010. A left upper lobe noncalcified 2 mm nodule on image 66 is also unchanged since 2010. Pleura: There are partially calcified bilateral pleural plaques. No pleural effusions. Heart and mediastinum: The thyroid gland is unchanged. Mediastinal lymph nodes measure up to 8 mm short axis in the lower right paratracheal station and 5 mm in the AP window, unchanged. No hilar or axillary lymphadenopathy is seen. There is atherosclerotic calcification of the coronary arteries, aortic valve and aorta. There is cardiomegaly. No pericardial effusion. There is distal esophageal wall thickening and a small hiatal hernia. Soft tissues: Normal. Abdomen: Please refer to separately dictated Abdominal CT. Bones: There are new minimally displaced healing fractures of the left lateral eighth and ninth ribs. Again seen are degenerative changes. There are no suspicious lytic or blastic lesions. IMPRESSION: No evidence of lymphoma a the thorax. "
# result_df = extract_from_paragraph(text, feature_dict, rule_dict)
# print(result_df)
# result_df.to_csv('result_sample2.csv', index=False, encoding='utf8')
#
#
# text = " Thoracic Surgery Inpatient Post-Op Check Attending: Dr. Christopher Morse Hospital Day: 0 Post Op Day #0 Procedure: Procedure(s) (LRB): THORACOSCOPY VIDEO ASSISTED LOBECTOMY WITH BRONCHOSCOPY FLEXIBLE (Right) Recent Events: No acute issues, pain relatively well controlled. Vitals: 36.8 \xb0C (98.3 \xb0F) | P 90 | BP 102/60 | RR 18 | SpO2 100 % | 3 | FiO2 | Fluid Balance: I/O 10/21 0000 - 10/21 2359 10/22 0000 - 10/22 2359 I.V. 800 IV Piggyback 100 Total Intake 900 Chest Tube 47 Total Output 47 Net +853 Labs: No results for input(s): WBC, HGB, HCT, PLT, NA, K, CL, CO2, BUN, EGFR, CRE, GLU, MG, PHOS, CA, CRP, ALBUMIN, TP, SGPT, SGOT, ALKPHOS, BILITOT, BILIDIR, AMY, LIPASE in the last 72 hours. No results for input(s): PT, PTT, INR in the last 72 hours. No results for input(s): TROPI in the last 72 hours. Medications: IVF: \u2022 lactated Ringers 75 mL/hr (10/22/18 1458) \u2022 morphine \u2022 sodium chloride Stopped (10/22/18 1100) Scheduled Meds: \u2022 acetaminophen 650 mg Oral Q6H Or \u2022 acetaminophen 650 mg Rectal Q6H \u2022 heparin 5,000 Units Subcutaneous Q8H SCH \u2022 nicotine 1 patch Transdermal Daily PRN Meds: fentaNYL, haloperidol lactate, HYDROmorphone, ipratropium-albuterol, nalOXone, ondansetron **OR** ondansetron, polyethylene glycol, senna, sodium chloride Current Diet: Diet Clear liquid; Thin Advance diet as tolerated Exam: General: appears well, NAD Neuro: alert and conversant, NAD CV: RRR Resp: symmetric chest wall expansion, unlabored breathing, CTAB Abdomen: soft, non-tender, non-distended Extremities: WWP Incisions: CDI, appropriate peri-incisional tenderness Tubes/Drains: CT, thin sanguinous, no AL CXR IMPRESSION: Interval right upper lobectomy with right apical chest tube in place. There is relative lucency at the right apex with equivocal small pneumothorax as detailed above. Attention to this area at follow-up is advised Assessment & Plan: 51 yo F former smoker with RUL nodule, FDG Avid and biopsy proven with atypical cells. 
Morse 10/22: FB, VATS RULobectomy Post Op Day #0 Procedure(s) (LRB): THORACOSCOPY VIDEO ASSISTED LOBECTOMY WITH BRONCHOSCOPY FLEXIBLE (Right) Plan: N:morphine PCA, Tylenol. Can add toradol if chest tube output is low Pulm: CT to H20 seal. Cxr in pacu and tomorrow C: no home meds FEN/GI: Clrs, ADAt, LR@75 GU: DTv Heme/ID: no issues Ppx: HepSC Dispo: Inpatient floor Code Status: Full Code (Presumed)"
# result_df = extract_from_paragraph(text, feature_dict, rule_dict)
# print(result_df)
# result_df.to_csv('result_sample3.csv', index=False, encoding='utf8')
sent = " labs: no results for input(s): wbc, hgb, hct, plt, na, k, cl, co2, bun, egfr, cre, glu, mg, phos, ca, crp, albumin, tp, sgpt, sgot, alkphos, bilitot, bilidir, amy, lipase in the last 72 hours "
split_by_clause(sentence=sent, term_rules=rule_dict['term_rules'])
sent = '''RegExr was created by gskinner.com, and is proudly hosted by Media Temple.
Edit the Expression & Text to see matches. Roll over matches or the expression for details. PCRE & Javascript flavors of RegEx are supported.
The side bar includes a Cheatsheet, full Reference, and Help. You can also Save & Share with the Community, and view patterns you create or favorite in My Patterns.
Explore results with the Tools below. Replace & List output custom results. Details lists capture groups. Explain describes your expression in plain English.
Sentence split sent by comma: labs: no results for input(s): wbc, hgb, hct, plt, na, k, cl, co2, bun, egfr, cre, glu, mg, phos, ca, crp, albumin, tp, sgpt, sgot, alkphos, bilitot, bilidir, amy, lipase, and in the last 72 hours
'''
print(split_by_clause(sentence=sent, term_rules=rule_dict['term_rules']))
sent = '''result letter by dr **** appears he was not concerned pt denies fever,chills,abd pain,dysphagia,odynophagia,brbpr,urinary symptoms,lh,dizziness,pnd,orthopnea,personal or fhx of blood clots,numbness,tingling,weakness in ext,bowel bladder issues,taking ocp ,wt loss etc'''
print(split_by_clause(sentence=sent, term_rules=rule_dict['term_rules'])) |
18,691 | d94f2a5afc18241011281c77aa6585aeadb8b187 | '''
78. Subsets
https://leetcode.com/problems/subsets/
Given an integer array nums, return all possible subsets (the power set).
The solution set must not contain duplicate subsets.
Example 1:
Input: nums = [1,2,3]
Output: [[],[1],[2],[1,2],[3],[1,3],[2,3],[1,2,3]]
Example 2:
Input: nums = [0]
Output: [[],[0]]
'''
def subsets(nums):
    """Return the power set of *nums* as a list of lists.

    Iterative construction: start from the empty subset and, for each
    number, extend every subset collected so far with that number.
    Produces 2**n subsets in O(n * 2**n).
    """
    output = [[]]
    for num in nums:
        # Snapshot the current length so we do not extend the subsets
        # appended during this same pass.
        for i in range(len(output)):  # bug fix: was `len(res)` / `res[i]` (undefined name)
            output.append(output[i] + [num])
    return output
# REQUIRE REVISION
class Solution(object):
    """Power set via preorder DFS: every visited path is one subset."""

    def subsets(self, nums):
        """Return all subsets of *nums* as a list of lists."""
        collected = []
        self.dfs(nums, [], collected)
        return collected

    def dfs(self, nums, path, ret):
        """Record *path*, then branch on each remaining candidate."""
        ret.append(path)
        for idx, value in enumerate(nums):
            # Only elements after `value` remain eligible, so subsets
            # are emitted exactly once, in index order.
            self.dfs(nums[idx + 1:], path + [value], ret)
# REQUIRE REVISION
class Solution(object):
    """Power set via explicit backtracking (append / recurse / pop)."""

    def subsets(self, nums):
        """Return every subset of *nums*, preorder over choice indices."""
        found = []
        self.helper(nums, found, [], 0)
        return found

    def helper(self, nums, sol, curr, index):
        """Record the working subset, then try each element from *index* on."""
        # Copy `curr`: it is mutated in place by the backtracking below.
        sol.append(list(curr))
        i = index
        while i < len(nums):
            curr.append(nums[i])
            self.helper(nums, sol, curr, i + 1)
            curr.pop()
            i += 1
|
18,692 | 0c162767fe173440540f7734c14666cc08eb2001 | import sys
import os
from PIL import Image
# Convert every image in ../<source_folder> to PNG in ../<dest_folder>.
# Usage: script.py <source_folder> <dest_folder>
source_folder = sys.argv[1]
dest_folder = sys.argv[2]

# Bug fix: paths were built as f'../{folder}{image}', which only worked
# when the user remembered a trailing '/'.  os.path.join accepts both.
src_dir = os.path.join('..', source_folder)
dst_dir = os.path.join('..', dest_folder)

# Create the destination folder on first use.
if not os.path.exists(dst_dir):
    os.mkdir(dst_dir)

# Loop over the entire source folder and convert each file.
dir_list = os.listdir(src_dir)
print (dir_list)
for image in dir_list:
    img = Image.open(os.path.join(src_dir, image))
    # Keep the stem, swap the extension.
    im = os.path.splitext(image)[0]
    img.save(os.path.join(dst_dir, f"{im}.png"), 'png')

print('all done baccha')
|
18,693 | 62515af4678ad47133a11451c43f2c7b8937ece1 | import socket
import threading
import os
import os.path
import time
##########################################################
# Key settings
##########################################################
#bind_ip = '0.0.0.0'
#bind_ip = '127.0.0.1'
bind_ip = input('Digite IP del servidor (Remoto: 0.0.0.0. Local: 127.0.0.1): ')
#bind_port = 5005
bind_port = int(input('Digite puerto del servidor (5005): '))
TAM_BUFFER = 1024
MAX_THREADS = 100
threads = []
# Sibling directories of the source tree.
# NOTE(review): assumes the working directory ends in "src" -- confirm.
dir_src = os.getcwd()
dir_data = os.path.join(dir_src[:-(len(os.sep)+len("src"))],"data")
dir_archivos = os.path.join(dir_src[:-(len(os.sep)+len("src"))],"archivos")
# Create the listening socket and bind the server.
servidor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
servidor.bind((bind_ip, bind_port))
servidor.listen(5)
print ('Escuchando en (ip:puerto){}:{}'.format(bind_ip, bind_port))
##########################################################
# Thread functions are declared below
##########################################################
##### Esta funcion es un thread, maneja la transferencia de archivos con un cliente.
##### el cliente entra por parametro
def manejador_conexion(socket__conexion_servidor_cliente, nombre_cliente, puerto_cliente):
    """Per-client worker thread: serves file downloads over one socket.

    Protocol: send the file listing, then repeatedly receive a file name,
    send its size, stream its content in TAM_BUFFER chunks and exchange
    timing info, until the client sends b'TERMINADA'.
    NOTE(review): `peticion` starts as str '' and is compared against the
    bytes literal b'TERMINADA', so the loop always runs at least once --
    presumably intended; confirm against the client implementation.
    """
    # Send the list of served files first (without the python files).
    os.chdir(dir_archivos)
    lista_archivos = [a for a in os.listdir() if os.path.isfile(a)]
    # sendto() needs bytes, so the stringified list is encoded.
    socket__conexion_servidor_cliente.sendto(str(lista_archivos).encode(), (nombre_cliente, puerto_cliente))
    # Receive the request for the file to send.
    peticion = ''
    tiempo_inicio_conexion = time.time()
    while peticion != b'TERMINADA':
        peticion = socket__conexion_servidor_cliente.recv(TAM_BUFFER)
        print ('Pidieron: {}'.format(peticion))
        # If the request does not name an existing file, keep asking the
        # client for a valid request.
        while not os.path.isfile(peticion):
            print('No existe!')
            socket__conexion_servidor_cliente.sendto('No existe'.encode(), (nombre_cliente, puerto_cliente))
            peticion = socket__conexion_servidor_cliente.recv(TAM_BUFFER)
        # The file size is sent first so the client can show progress
        # and transfer the file correctly.
        tam_archivo = os.path.getsize(peticion)
        print('El archivo pedido {} tiene tamanho {}'.format(peticion,tam_archivo))
        socket__conexion_servidor_cliente.sendto(str(tam_archivo).encode(), (nombre_cliente, puerto_cliente))
        # Open the requested file and read it in TAM_BUFFER-sized chunks.
        tiempo_inicial = time.time()
        with open(peticion, 'rb') as f:
            archivo_enviar = f.read(TAM_BUFFER)
            # This loop sends the file chunk by chunk until EOF.
            while archivo_enviar:
                socket__conexion_servidor_cliente.send(archivo_enviar)
                archivo_enviar = f.read(TAM_BUFFER)
        # The client reports its finish time; then the write side of the
        # socket is shut down to avoid odd errors.
        tiempo_final = float(socket__conexion_servidor_cliente.recv(TAM_BUFFER).decode())
        tiempo_transcurrido = tiempo_final - tiempo_inicial
        print('Descarga finalizada con {}:{}. Tiempo transcurrido: {}'.format(nombre_cliente, puerto_cliente, tiempo_transcurrido))
        socket__conexion_servidor_cliente.sendto(str(tiempo_transcurrido).encode(), (nombre_cliente, puerto_cliente))
        socket__conexion_servidor_cliente.shutdown(socket.SHUT_WR)
    # Now the socket is actually closed.
    socket__conexion_servidor_cliente.close()
    tiempo_total =time.time() - tiempo_inicio_conexion
    print('Conexion cerrada con {}:{}. Tiempo transcurrido: {}'.format(nombre_cliente, puerto_cliente, tiempo_total))
#####Esta funcion es un thread para la recepcion de clientes.
def manejador_clientes():
    """Accept loop: spawns one `manejador_conexion` thread per client.

    Runs forever; each accepted connection gets up to 15 seconds of
    activity before being signalled and joined.
    """
    # Mandatory while True so the server always keeps listening.
    while True:
        print('aceptando conexion!')
        # Accept a connection and create the per-client socket.
        socket__conexion_servidor_cliente, direccion = servidor.accept()
        nombre_cliente = direccion[0]
        puerto_cliente = direccion[1]
        print ('Se acepto una conexion desde {}:{}'.format(direccion[0], direccion[1]))
        # Start the communication thread for this single client.
        # NOTE(review): `e` is never passed to the worker thread, so
        # e.set() below has no visible effect -- confirm intent.
        e = threading.Event()
        if len(threads) < MAX_THREADS:
            thread_cliente = threading.Thread(
                target=manejador_conexion,
                args=(socket__conexion_servidor_cliente, nombre_cliente, puerto_cliente,) # trailing comma required (tuple)
                )
            thread_cliente.start()
            threads.append(thread_cliente)
            # Wait 15 seconds for activity on the thread, or shut it down.
            thread_cliente.join(15)
            if thread_cliente.is_alive():
                e.set()
                thread_cliente.join()
##########################################################
# The program actually starts here
##########################################################
# Create the thread that handles incoming clients.
thread_generador_clientes = threading.Thread(
    target=manejador_clientes
    )
thread_generador_clientes.start()
# Keep the main thread alive, reporting the active thread count.
while True:
    print('Numero de threads activos: {}'.format(threading.activeCount()))
    time.sleep(3)
|
18,694 | 53078d56450b0f7f97621cd2fb75baf6e0c3b924 | #coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
import matplotlib.pyplot as plt
import cPickle
from easydict import EasyDict as edict
from matplotlib.pyplot import MultipleLocator
from bfs_group import bfs_clustering
import cv2
import glob
from random import random as rand
from PIL import Image, ImageDraw, ImageFont
import json
import os
# Tunables for lane clustering and shape filtering (Python 2 script).
config = edict()
config.minimum_points = 50          # discard clusters smaller than this
config.max_group = 3
config.max_neighbor_distance = 10   # BFS neighbourhood radius (pixels)
config.resize_factor = 0.5          # downscale factor before clustering
# English attribute label -> Chinese display string (runtime values).
color_map = {'White':'白色', 'Silver_gray': '银灰色', 'Black': '黑色', 'Red': '红色', 'Brown': '棕色', 'Blue': '蓝色',
        'Yellow': '黄色', 'Purple': '紫色', 'Green': '绿色', 'Pink': '粉色', 'Ching': '青色', 'Golden': '金色', 'other': '其他'}
# Valid second characters of a Chinese licence plate.
letter = [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'J', u'K', u'L', u'M',
          u'N', u'P', u'Q', u'R', u'S', u'T', u'U', u'V', u'W', u'X', u'Y', u'Z']
# Valid first characters (province abbreviations) of a licence plate.
province = [u'京', u'津', u'沪', u'渝', u'黑', u'吉', u'辽', u'冀', u'晋', u'鲁', u'豫', u'陕', u'甘', u'青', u'苏', u'浙',
            u'皖', u'鄂', u'湘', u'闽', u'赣', u'川', u'贵', u'云', u'粤', u'琼', u'蒙', u'宁', u'新', u'桂', u'藏']
type_map = {'BigTruck': '货车', 'Bus': '公交车', 'Lorry': '货车', 'MPV': '轿车', 'MiniVan': '轿车', 'MiniBus': '公交车',
            'SUV': '轿车', 'Scooter': '轿车', 'Sedan_Car': '轿车', 'Special_vehicle': '其他', 'Three_Wheeled_Truck':'其他', 'other': '其他', 'Minibus': '公交车'}
def draw_box_v2(img, box, alphaReserve=0.8, color=None):
    """Shade the interior of *box* in-place and tick its four corners.

    box is (x1, y1, x2, y2); a random colour is picked when none given.
    alphaReserve is the weight kept from the original pixels.
    """
    if color is None:
        color = (rand() * 255, rand() * 255, rand() * 255)
    h, w, _ = img.shape
    # Clamp the (possibly float / out-of-range) box to the image.
    x1 = max(0, int(float(box[0])))
    y1 = max(0, int(float(box[1])))
    x2 = min(w - 1, int(float(box[2])))
    y2 = min(h - 1, int(float(box[3])))
    # Alpha-blend the fill colour into the boxed region, channel by channel.
    region = img[y1:y2, x1:x2]
    for channel, value in enumerate(color):
        region[:, :, channel] = region[:, :, channel] * alphaReserve + value * (1 - alphaReserve)
    # Short white ticks along both edges of every corner (7 px long).
    for cy, sy in ((y1, 1), (y2, -1)):
        for cx, sx in ((x1, 1), (x2, -1)):
            cv2.line(img, (cx, cy), (cx + 7 * sx, cy), (255, 255, 255), thickness=1)
            cv2.line(img, (cx, cy), (cx, cy + 7 * sy), (255, 255, 255), thickness=1)
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20, font_path="./LiHeiPro.ttf"):
    """Draw (possibly CJK) text on an OpenCV image via PIL and return it.

    cv2.putText cannot render Chinese glyphs, so the image is round-
    tripped through PIL with a TrueType font.  Python 2 only: relies on
    `unicode` and on *text* being a utf-8 encoded byte string.
    """
    if (isinstance(img, np.ndarray)):
        # OpenCV images are BGR; PIL expects RGB.
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img)
    fontText = ImageFont.truetype(font_path, textSize, encoding="utf-8")
    draw.text((left, top), unicode(text.decode('utf-8')) , textColor, font=fontText)
    # Convert back to an OpenCV BGR array.
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
def draw_history(blend_img, history, history_cnt, history_record, history_platenum):
    """Render the rolling list of violation plate crops onto the frame.

    Entries whose display counter reached zero are dropped; survivors are
    pasted down the right margin with the frame index and, when known,
    the recognised plate number.  Returns the updated image plus the four
    pruned, parallel history lists.
    NOTE(review): reads the module-level frame width `w` set in __main__.
    """
    # Keep only entries with display frames left; decrement survivors.
    history = [_ for i, _ in enumerate(history) if history_cnt[i]>0]
    history_record = [_ for i, _ in enumerate(history_record) if history_cnt[i]>0]
    history_platenum = [_ for i, _ in enumerate(history_platenum) if history_cnt[i]>0]
    history_cnt = [_-1 for i, _ in enumerate(history_cnt) if history_cnt[i]>0]
    for i, plate in enumerate(history):
        ph, pw = plate.shape[:2]
        if 70+50*i+ph >= blend_img.shape[0]:
            # Entry would run off the bottom of the frame -- skip it.
            continue
        # Paste the plate crop 290 px from the right edge, 50 px apart.
        blend_img[70+50*i:70+50*i+ph,w-290:w-290+pw,:] = plate
        text = '违章记录:第%d帧' %history_record[i]
        blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+5, textColor=(0, 0, 0),\
                textSize=20, font_path="./LiHeiPro.ttf")
        if history_platenum[i] != ' ':
            text = '车牌识别:'+ history_platenum[i]
            blend_img = cv2ImgAddText(blend_img, text, w-290+pw+10,70+50*i+25, textColor=(0, 0, 0),\
                    textSize=20, font_path="./LiHeiPro.ttf")
    return blend_img, history, history_cnt, history_record, history_platenum
def cal_iou(box1, box2):
    """Intersection-over-union of two inclusive pixel boxes (x1, y1, x2, y2).

    Returns 0 when the boxes do not overlap (inclusive-pixel convention:
    widths/heights are computed with a +1).
    """
    inter_w = min(box1[2], box2[2]) - max(box1[0], box2[0]) + 1
    inter_h = min(box1[3], box2[3]) - max(box1[1], box2[1]) + 1
    if inter_w <= 0 or inter_h <= 0:
        return 0
    area1 = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
    area2 = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
    inter = inter_w * inter_h
    return inter / float(area1 + area2 - inter)
# judge whether line segment (xc,yc)->(xr,yr) is crossed with infinite line (x1,y1)->(x2,y2)
def is_cross(xc,yc,xr,yr,x1,y1,x2,y2):
    """True iff segment (xc,yc)->(xr,yr) straddles the infinite line
    through (x1,y1) and (x2,y2)."""
    if x1 == x2:
        # Vertical line: crossed iff the endpoints lie on opposite
        # sides of x = x1.
        return (xc - x1) * (xr - x1) < 0
    # Signed residuals of both endpoints against y = slope*(x-x1) + y1;
    # opposite signs mean the segment straddles the line.
    slope = (y2 - y1) / (x2 - x1)
    side_c = slope * (xc - x1) + y1 - yc
    side_r = slope * (xr - x1) + y1 - yr
    return side_c * side_r < 0
def filter_area(boxes, area=50):
    """Return indices of boxes whose pixel area exceeds ``area**2``.

    boxes: (N, 4+) array of (x1, y1, x2, y2, ...) rows.
    area:  minimum side length; a box is kept when width*height > area**2.
    """
    if len(boxes) > 0:
        return np.where((boxes[:,3]-boxes[:,1])*(boxes[:,2]-boxes[:,0]) > area**2)[0]
    else:
        # Bug fix: the `np.int` alias was removed in NumPy 1.24; the
        # builtin `int` is what it always aliased.
        return np.array([], dtype=int)
def indicator(x):
    """Return (sum of squares, sum, least-squares determinant) of *x*.

    The determinant ``n * sum(x^2) - sum(x)^2`` is zero exactly when the
    normal equations of a 1-D linear fit are singular.
    """
    sum_of_squares = np.sum(x ** 2)
    total = np.sum(x)
    determinant = len(x) * sum_of_squares - total ** 2
    return sum_of_squares, total, determinant
def solve_k_b(x, y):
    """Least-squares fit of y = k*x + b.

    Returns (n_used, k, b).  Degenerate inputs (zero determinant) are
    handled by dropping trailing samples until the system is solvable.
    """
    def _stats(v):
        # Inlined `indicator`: (sum of squares, sum, determinant).
        sq = np.sum(v ** 2)
        s = np.sum(v)
        return sq, s, len(v) * sq - s ** 2

    x_square_sum, x_sum, det = _stats(x)
    while det == 0:
        x = x[:-1]
        y = y[:-1]
        x_square_sum, x_sum, det = _stats(x)
    n = len(x)
    # Closed-form solution of the 2x2 normal equations.
    k = np.sum(y * (n * x - x_sum)) / det
    b = np.sum(y * (x_square_sum - x * x_sum)) / det
    return n, k, b
if __name__ == "__main__":
    # --- Wrong-way-driving (逆行) demo pipeline ---
    # Loads per-frame detection attrs (JSON lines) and lane segmentation
    # masks (pickle), overlays boxes/lanes per frame, flags vehicles whose
    # head crosses the central solid line, and writes annotated frames.
    json_path = 'nixing/nixingattrs.json'
    boxes_results = []
    with open(json_path, 'r') as f:
        line = f.readline()
        while line:
            this_img = json.loads(line.strip())
            boxes_results.append(this_img)
            line = f.readline()
    save_dir = 'nixing_v3'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # Clear previous outputs before the run.
    os.system('rm ./*.jpg ./*.png ./%s/*.jpg' %save_dir)
    with open('nixing/nixing_mask_res.pkl', 'rb') as f:
        img_list = cPickle.load(f)['all_seg_results']
        img_list = [_['seg_results'] for _ in img_list]
    img_dir = './nixing/frames'
    num_img = len(os.listdir(img_dir))
    # Parallel lists of recent violations (crop, frames left, frame idx,
    # plate number) consumed by draw_history().
    history = []
    history_cnt = []
    history_record = []
    history_platenum = []
    for cnt in range(num_img):
        print('%d/%d' %(cnt,num_img))
        # if cnt < 110:
        #     continue
        img = img_list[cnt]
        im_path = os.path.join(img_dir, 'nixing.mp4_%06d.jpg' %(cnt+1))
        raw_img = cv2.imread(im_path)
        # Paint lane classes (1 = green, 2 = yellow) over a white canvas,
        # then alpha-blend onto the raw frame.
        lane_img = 255 * np.ones_like(raw_img, dtype=np.uint8)
        lane_img[np.where(img == 1)] = [0,225,0]
        lane_img[np.where(img == 2)] = [0,225,255]
        blend_img = cv2.addWeighted(raw_img, 1.0, lane_img, 0.3, gamma=0)
        # parse the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
        vehicle_boxes = [_['data'] for _ in boxes_results[cnt]['vehicle']]
        vehicle_attrs = [_['attrs'] for _ in boxes_results[cnt]['vehicle']]
        plate_data = boxes_results[cnt]['plate_box']
        if plate_data != []:
            plate_boxes = [_['data'] for _ in plate_data]
            # NOTE(review): `_` here is the loop leftover from the line
            # above, so only ONE plate number is collected even when
            # several plates were detected -- likely meant
            # [p['attrs']['plate_num'] for p in plate_data]; confirm.
            plate_nums = [_['attrs']['plate_num']]
            for i in range(len(plate_nums)):
                # Keep only plausible plates: >= 7 chars, province
                # character first, capital letter second.
                if len(plate_nums[i]) >= 7 and plate_nums[i][0] in province and plate_nums[i][1] in letter:
                    plate_nums.append(plate_nums[i])
                else:
                    plate_nums[i] = ' '
            print(plate_nums[-1])
        else:
            plate_boxes, plate_nums = [], []
        head_box, tail_box = [], []
        for item in boxes_results[cnt]['common_box']:
            if item['attrs']['head'] == 'tail':
                tail_box.append(item['data'])
            elif item['attrs']['head'] == 'head':
                head_box.append(item['data'])
            else:
                raise ValueError('unsupported attr!')
        # draw the boxes (vehicle box, plate box, vehicle head box, vehicle tail box)
        for box, attrs in zip(vehicle_boxes, vehicle_attrs):
            draw_box_v2(blend_img, box, color=(255,0,0), alphaReserve=0.9)
            text = color_map[attrs['color']]
            text += type_map[attrs['type']]
            cv2.rectangle(blend_img, (int(box[0]), int(box[1])-20), (int(box[0])+70, int(box[1])), (128, 128, 128), thickness=-1)
            blend_img = cv2ImgAddText(blend_img, text, int(box[0]), int(box[1]-20), textColor=(255, 255, 255),\
                textSize=15, font_path="./LiHeiPro.ttf")
        for box in plate_boxes:
            draw_box_v2(blend_img, box, color=(0,0,255), alphaReserve=0.7)
        for box in head_box:
            draw_box_v2(blend_img, box, color=(0,0,128), alphaReserve=0.7)
        for box in tail_box:
            draw_box_v2(blend_img, box, color=(0,0,128))
        # cluster the lane points
        neighbor = list(range(1, config.max_neighbor_distance+1))
        neighbor.extend([-i for i in neighbor])
        neighbor.append(0)
        dsize = (int(img.shape[1]*config.resize_factor), int(img.shape[0]*config.resize_factor))
        resized_img = cv2.resize(img, dsize, fx=config.resize_factor,fy=config.resize_factor)
        group_res = bfs_clustering(resized_img, neighbor, ig_cls=0, show=False)
        h, w = img.shape[:2]
        resized_h, resized_w = resized_img.shape[:2]
        # title = '基于X2的"去中心化"违章记录仪'
        # blend_img = cv2ImgAddText(blend_img, title, 20,20, textColor=(0, 0, 0),\
        #     textSize=45, font_path="./LiHeiPro.ttf")
        title = '逆行车辆:'
        blend_img = cv2ImgAddText(blend_img, title, w-200,20, textColor=(255, 0, 0),\
            textSize=25, font_path="./LiHeiPro.ttf")
        # Fit a line y = kx + b through each lane-point cluster.
        lanes = []
        b = []
        for cls in group_res:
            print('----cls %d----' %cls)
            for g in group_res[cls]:
                if len(g) < config.minimum_points:
                    continue
                print('group length: %d' %(len(g)))
                x, y = [], []
                for i, j in g:
                    x.append(j)
                    y.append(resized_h-1-i)
                # Map cluster coordinates back to full resolution.
                x = np.array(x, dtype='float32') / config.resize_factor
                y = np.array(y, dtype='float32') / config.resize_factor
                N_, k_, b_ = solve_k_b(x, y)
                print(N_, k_, b_)
                x1, x2 = np.min(x), np.max(x)
                y1, y2 = k_ * x1 + b_, k_ * x2 + b_
                y1, y2 = h-1-y1, h-1-y2
                if cls == 1:
                    color = (0,225,0)
                else:
                    color = (0,225,225)
                # Keep only clearly sloped lines as lane candidates.
                if k_ > 0.1:
                    lanes.append([x1,y1,x2,y2])
                    b.append(b_)
                # cv2.line(blend_img,(int(x1),int(y1)),(int(x2),int(y2)), color, thickness=3)
        # find the central yellow solid line
        lane = lanes[np.argmax(-1 * np.array(b))]
        # judge whether cross solid lane
        for box in head_box:
            # Ignore small (far-away) heads.
            if (box[2] - box[0] + 1) * (box[3] - box[1] + 1) < 50*50:
                continue
            # Segment from the image origin to the head-box centre.
            ref_line = [0,0,(box[0]+box[2])/2,(box[1]+box[3])/2] # (x1,y2,x2,y2)
            input1 = ref_line + lane
            if is_cross(*input1):
                text = '逆行危险!'
                print(text)
                blend_img = cv2ImgAddText(blend_img, text, int((box[0]+box[2])/2-20),int(box[1]), textColor=(255, 0, 0),\
                    textSize=15, font_path="./LiHeiPro.ttf")
                # Crop the plate that best overlaps the offending head
                # box (padded by 10 px) and push it into the history.
                ious = np.array([cal_iou(_, box) for _ in plate_boxes])
                if ious.size > 0:
                    max_idx = np.argmax(ious)
                    pbox = plate_boxes[max_idx]
                    pnum = plate_nums[max_idx]
                    pbox[0] -= 10
                    pbox[2] += 10
                    pbox[1] -= 10
                    pbox[3] += 10
                    ratio = (pbox[3]-pbox[1]) / (pbox[2]-pbox[0])
                    ph = 50
                    pw = int(ph / ratio)
                    pbox = [int(_) for _ in pbox]
                    plate = raw_img[pbox[1]:pbox[3],pbox[0]:pbox[2],:]
                    plate = cv2.resize(plate, (pw,ph))
                    history.insert(0, plate)
                    history_cnt.insert(0, 1)
                    history_record.insert(0, cnt)
                    history_platenum.insert(0, pnum)
        blend_img, history, history_cnt, history_record, history_platenum = \
            draw_history(blend_img, history, history_cnt, history_record, history_platenum)
        cv2.imwrite('./%s/tmp%d.jpg' %(save_dir,cnt), blend_img)
18,695 | 00d82ebb594812bb5394c8457bd00718e92f7c20 | from flask import *
from documents import *
import unicodedata
from settings import *
import tweepy
import sentiments
from clarifai.client import ClarifaiApi
import os
from flask.ext.cors import CORS
app = Flask(__name__)
CORS(app)
# NOTE(review): hard-coded API credentials checked into source -- move
# them to environment variables / config and rotate these keys.
clarifai_api = ClarifaiApi(
    "-ZFtoURc4GYEORZtUFCacKQ82SrRRV4_IRVn59QL",
    "Gta5B-hl7cjg0FQm6NnJaF7GGOUuHDOoArJJxJNU")
@app.route('/signup', methods=['POST'])
def signup():
    """Create a user (or return the existing one's id) from request args.

    Expects `name`, `email` and `interests`; returns the MongoDB object
    id as a string, or 400.
    NOTE(review): request.args["x"] raises when the key is absent, so the
    `is not None` checks never see a missing key; also, iterating
    `interests` iterates single characters if it arrives as a plain
    string -- confirm the expected payload format.
    """
    if request.args["name"] is not None and request.args[
            "email"] is not None and request.args["interests"] is not None:
        _email = request.args["email"]
        usr = user.objects(email=_email)
        if len(usr) > 0:
            # Account already exists: return its id instead of creating.
            return str(usr[0].id)
        interest_list = request.args["interests"]
        for i in interest_list:
            try:
                # Record each interest globally; duplicates raise and
                # are ignored.
                g = globalInterests()
                g.text = i
                g.save()
            except Exception as e:
                print "already exits"
        u = user()
        u.name = request.args["name"]
        u.email = request.args["email"]
        u.interests = request.args["interests"]
        u.save()
        return str(u.id)
    else:
        abort(400)
@app.route('/getposts', methods=['GET'])
def getposts():
    """Return up to 100 stored tweets tagged with the user's interests."""
    objectId = request.args.get('objectId')
    _users = user.objects(id=objectId)
    interests = []
    for u in _users:
        # An id matches at most one user; take its interests and stop.
        interests = u.interests
        break
    dict = []
    _tweets = tweets.objects[:100](tag__in=interests)
    for t in _tweets:
        temp = {}
        for key in t:
            if key == "id" or key == "createdAt":
                temp[key] = str(t[key])
            else:
                # Strip non-ascii characters so the payload is JSON-safe.
                temp[key] = unicodedata.normalize(
                    'NFKD', t[key]).encode(
                    'ascii', 'ignore')
        dict.append(temp)
    return json.dumps(dict)
@app.route('/getpostsbygeo', methods=['GET'])
def byGeo():
    """Return up to 20 recent tweets within 5 km of (lat, lon)."""
    lat = request.args.get('lat')
    lon = request.args.get('lon')
    # Twitter geocode format: "lat,lon,radius".
    geo_str = lat + "," + lon + ",5km"
    print geo_str
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    dict = []
    for t in tweepy.Cursor(api.search, geocode=geo_str).items(20):
        temp = {}
        text = t.text.lower()
        print text
        name = t.user.name
        profilepicture_url = t.user.profile_image_url
        createdAt = t._json["created_at"]
        # Strip non-ascii characters so the payload is JSON-safe.
        temp["text"] = unicodedata.normalize(
            'NFKD', text).encode(
            'ascii', 'ignore')
        temp["by_name"] = name
        temp["by_profilepicture"] = profilepicture_url
        temp["createdAt"] = createdAt
        dict.append(temp)
    return json.dumps(dict)
@app.route('/getSentiments')
def getSentiments():
    """Average the sentiment of 50 tweets matching `keyword`.

    Returns JSON with mean pos/neg/neutral probabilities as strings.
    NOTE(review): divides by a hard-coded 50 even when the search yields
    fewer tweets -- the averages are then under-scaled; confirm intent.
    """
    keyword = request.args.get('keyword')
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    dict = {}
    positive = 0
    negetive = 0
    neutral = 0
    for t in tweepy.Cursor(api.search, q=keyword).items(50):
        tweet = unicodedata.normalize(
            'NFKD', t.text.lower()).encode(
            'ascii', 'ignore')
        # Accumulate the per-tweet class probabilities.
        _sentiments = sentiments.getSentiments(tweet)
        _sentiments = json.loads(_sentiments)
        positive += float(_sentiments["probability"]["pos"])
        negetive += float(_sentiments["probability"]["neg"])
        neutral += float(_sentiments["probability"]["neutral"])
        print negetive
        print _sentiments
    positive = float(positive) / 50
    negetive = float(negetive) / 50
    neutral = float(neutral) / 50
    dict["pos"] = str(positive)
    dict["neg"] = str(negetive)
    dict["neutral"] = str(neutral)
    return json.dumps(dict)
@app.route('/searchtweetbyimage', methods=['POST'])
def searchtweetbyimage():
    """Tag an uploaded image via Clarifai, then search Twitter with a tag.

    NOTE(review): uses tags[2] unconditionally (IndexError when Clarifai
    returns < 3 tags) and puts a datetime into the payload
    (`t.created_at`), which json.dumps cannot serialise -- confirm both.
    """
    file = request.files['file']
    result = clarifai_api.tag_images(file)
    tags = result["results"][0]["result"]["tag"]["classes"]
    print tags
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    dict = []
    for t in tweepy.Cursor(api.search, q=str(tags[2])).items(20):
        temp = {}
        # Strip non-ascii characters so the text is JSON-safe.
        text = unicodedata.normalize(
            'NFKD', t.text.lower()).encode(
            'ascii', 'ignore')
        name = t.user.name
        profilepicture_url = t.user.profile_image_url
        createdAt = t.created_at
        temp["text"] = text
        temp["by_name"] = name
        temp["by_profilepicture"] = profilepicture_url
        temp["createdAt"] = createdAt
        dict.append(temp)
    return json.dumps(dict)
# Honour the platform-assigned port (e.g. Heroku) with a local default.
port = int(os.environ.get('PORT', 5000))
# NOTE(review): debug=True must not ship to production.
app.run(host="0.0.0.0", debug=True, port=port)
# /home/ubuntu/twags_api_master
18,696 | af2277a0181ff60c3907baa906f81f398dbf50dd | import socket
import os
from glob import glob
# Listen on all interfaces.
# Bug fix: the original used typographic quotes (‘0.0.0.0’), which is a
# SyntaxError in Python -- replaced with ASCII quotes.
HOST = '0.0.0.0'
PORT = 12345
def get_file(filename):
    """Open a writable binary file in ./server_storage for *filename*.

    When a file with the same name already exists, a ``_copyN`` suffix
    is appended, where N is one more than the highest suffix observed.
    Returns an open file object; the caller must close it.
    """
    def get_last_char(el):
        # Copy number is the last character of the stem; 0 when it is
        # not a digit.  NOTE(review): only single-digit suffixes are
        # handled (_copy10 yields 0) -- fine for small collision counts.
        num = el.split('.')[0][-1]
        return int(num) if str.isdigit(num) else 0

    tmp = filename.split('.')
    name, ext = '.'.join(tmp[:-1]), tmp[-1]
    files = [f.split('/')[2] for f in glob(f'./server_storage/{name}*{ext}')]
    if len(files) != 0:
        tmp = max([get_last_char(i) for i in files]) + 1
        return open(f'./server_storage/{name}_copy{tmp}.{ext}', 'wb')
    else:
        # Bug fix: the original opened a literal "(unknown)" placeholder
        # path instead of interpolating the requested file name.
        return open(f'./server_storage/{filename}', 'wb')
def start():
    """Run the TCP upload server forever.

    Per connection: receive the file name, ack with '1', then stream the
    payload into ./server_storage (via get_file, which de-duplicates
    names with a _copyN suffix).
    NOTE(review): the "(unknown)" text in the log f-strings below looks
    like a redacted placeholder (probably {filename}); it only affects
    log output, not stored data.
    """
    if not os.path.exists('./server_storage') or not os.path.isdir('./server_storage'):
        os.mkdir('server_storage')
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((HOST, PORT))
        s.listen()
        print('Created TCP socket')
        print(f'Start listening on {PORT}', end='\n\n')
        while True:
            conn, addr = s.accept()
            print('Connected by', addr)
            # First message is the file name; ack so the client can
            # start sending the payload.
            filename = conn.recv(1024).decode()
            conn.sendall('1'.encode())
            print(f'Recieved filename - (unknown)')
            with get_file(filename) as f:
                # Stream until the client closes the connection.
                while True:
                    data = conn.recv(1024)
                    if not data:
                        break
                    f.write(data)
            print(f'File (unknown) was successfully stored', end='\n\n')

if __name__ == "__main__":
    start()
|
18,697 | 1976a12988104de7a0cda14437ac155d261ed152 | sid = int(input("Enter the student id"))
# Prompt for the remaining fields (`sid` is read just above this line).
name = input("Enter student name")
marks = float(input("enter the marks"))
# Echo the collected record.
print("ID:",sid,"Name:",name,"marks:",marks)
18,698 | 44a732c12d2266ad222166160528160e72c3e6ac | import cv2
import numpy as np
from cv2 import imshow
# Preview window and default webcam capture handle.
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)
def count_non_zero(y,x):
    """Count bright (>10) pixels in the 10x10 patch anchored at (y, x).

    Reads the module globals `h`, `w` (frame size) and `contour`.
    NOTE(review): `contour` is never assigned anywhere in this file, so
    this function appears to be dead/legacy code -- confirm before use.
    """
    global h,w
    counter=0
    # print (h,w)
    for i in range(10):
        for j in range(10):
            # Stay inside the frame bounds.
            if y+i< h and x+j <w and y+i>=0 and x+j>=0:
                if contour[y+i,x+j]>10:
                    counter+=1
    return counter
# Grab one frame up-front just to learn the frame dimensions.
img=video_capture.read()[1]
img = cv2.resize(img,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)
# NOTE(review): `global` at module level is a no-op; h and w are plain
# module globals either way.
global h,w
h,w,_=img.shape
# Corner arrays of detected shapes accumulate here.
tri_info=[]
rect_info=[]
# Phase 1: detect one triangle and four rectangles in the live feed.
# Press 'b' to proceed once the shapes are stable.
while True:
    ##################################### Preprocess the image ####################################
    img=video_capture.read()[1]
    img = cv2.resize(img,None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.adaptiveThreshold(blurred,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,4)
    ################################### Find contours of rect and tri ####################################
    _,contours,_ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )
    # img = cv2.drawContours(img, contours, -1, (0,255,0), 3)
    # imshow('contour',img)
    ################################### Locate tri and rect ###################################
    for cont in contours:
        # Ignore tiny noise and near-full-frame contours.
        if cv2.contourArea(cont)>=300 and cv2.contourArea(cont)<=h*w/4:
            tri=np.array([[0,0],[0,0],[0,0]])
            rect=np.array([[0,0],[0,0],[0,0],[0,0]])
            arc_len = cv2.arcLength(cont,True)
            approx = cv2.approxPolyDP( cont, 0.03 * arc_len, True )
            if (len(approx)==3):
                for i in range(3):
                    tri[i]=approx[i][0]
                # Squared edge lengths of the triangle.
                edge1=(tri[0][0]-tri[1][0])**2+(tri[0][1]-tri[1][1])**2
                edge2=(tri[0][0]-tri[2][0])**2+(tri[0][1]-tri[2][1])**2
                edge3=(tri[1][0]-tri[2][0])**2+(tri[1][1]-tri[2][1])**2
                # print(edge1/edge2,edge1/edge3,edge2/edge3)
                # if (edge1/edge2<=1.2 and edge1/edge2>0.8 and edge1/edge3 <= 1.2 and edge1/edge3 > 0.8 and edge2/edge3 <= 1.2 and edge2/edge3 >0.8):
                # NOTE(review): `/` binds tighter than `+`, so this tests
                # edge1 + edge2 + (edge3/area) -- probably meant
                # (edge1+edge2+edge3)/area; confirm the threshold intent.
                if edge1+edge2+edge3/cv2.contourArea(cont)<5000:
                    if (len(tri_info)==0):
                        tri_info.append(tri)
                    img=cv2.polylines(img,[tri],True,(0,255,0),3)
            if (len(approx)==4):
                for i in range(4):
                    rect[i]=approx[i][0]
                edge1=(rect[0][0]-rect[1][0])**2+(rect[0][1]-rect[1][1])**2# 1 4
                edge2=(rect[1][0]-rect[2][0])**2+(rect[1][1]-rect[2][1])**2# 2 3
                edge3=(rect[2][0]-rect[3][0])**2+(rect[2][1]-rect[3][1])**2
                edge4=(rect[3][0]-rect[0][0])**2+(rect[3][1]-rect[0][1])**2
                diagonal1=(rect[0][0]-rect[2][0])**2+(rect[0][1]-rect[2][1])**2
                diagonal2=(rect[1][0]-rect[3][0])**2+(rect[1][1]-rect[3][1])**2
                # print(edge1+edge2+edge3+edge4/cv2.contourArea(cont))
                # NOTE(review): same precedence concern as the triangle
                # branch above.
                if edge1+edge2+edge3+edge4/cv2.contourArea(cont)<6000:
                    # if ( edge1/edge2<1.2 and edge1/edge3<1.2 and edge1/edge4<1.2 and diagonal1/diagonal2<1.2 and diagonal1/diagonal2>0.8):
                    if (len(rect_info)<4):
                        rect_info.append(rect)
                        img=cv2.polylines(img,[rect],True,(0,0,255),3)
                    # img=cv2.polylines(img,[rect],True,(0,0,255),3)
    # Require exactly four rectangles; otherwise restart the collection.
    if (len(rect_info)<4):
        rect_info=[]
    elif (len(rect_info)==4):
        cv2.putText(img, 'Found 4 rect', (20,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (155,175,131), thickness = 1, lineType = 8)
    imshow('img',img)
    if cv2.waitKey(1) & 0xFF == ord('b'):
        break
# Centroids (mean of corner coordinates) of all detected shapes, used
# below to hit-test the fingertip position.
all_info=rect_info+tri_info
all_center=[]
for fig in all_info:
    center = [sum([fig[i][0] for i in range(len(fig))])/len(fig),sum([fig[i][1] for i in range(len(fig))])/len(fig)]
    all_center.append(center)
# print(all_info)
# print(all_center)
# Phase 2: the user points (a pentagon-approximated "finger" contour) at
# one of the detected shapes to select it.  Press 'l' to lock the choice.
while True:
    img=video_capture.read()[1]
    img = cv2.resize(img,None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.adaptiveThreshold(blurred,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,4)
    _,contours,_ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )
    cv2.putText(img, 'Choosing a fig.', (20,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (155,175,131), thickness = 1, lineType = 8)
    #print the located graphs on screen
    if len(tri_info)!=0:
        for i in range(len(tri_info)):
            img=cv2.polylines(img,[tri_info[i]],True,(0,255,0),3)
    if len(rect_info)!=0:
        for i in range(len(rect_info)):
            img=cv2.polylines(img,[rect_info[i]],True,(0,0,255),3)
    #Find finger
    for cont in contours:
        # print(cv2.contourArea(cont))
        if cv2.contourArea(cont)>=300 and cv2.contourArea(cont)<=h*w/4:
            arc_len = cv2.arcLength(cont,True)
            approx = cv2.approxPolyDP( cont, 0.03 * arc_len, True )
            if (len(approx)==5):
                penta=np.array([[0,0],[0,0],[0,0],[0,0],[0,0]])
                for i in range(5):
                    penta[i]=approx[i][0]
                # Squared lengths of the first three pentagon edges.
                edge=[]
                edge.append((penta[0][0]-penta[1][0])**2+(penta[0][1]-penta[1][1])**2)
                edge.append((penta[1][0]-penta[2][0])**2+(penta[1][1]-penta[2][1])**2)
                edge.append((penta[2][0]-penta[3][0])**2+(penta[2][1]-penta[3][1])**2)
                # edge.append((penta[3][0]-penta[4][0])**2+(penta[3][1]-penta[4][1])**2)
                # edge.append((penta[4][0]-penta[0][0])**2+(penta[4][1]-penta[0][1])**2)
                # Heuristic finger shape: max/min edge ratio in (1.7, 2.3).
                if (max(edge)/min(edge)<2.3 and max(edge)/min(edge)>1.7):
                    img=cv2.polylines(img,[penta],True,(0,100,100),3)
                    # img=cv2.circle(img,(penta[0][0],penta[0][1]),3,(255,255,255),3)
                    # img=cv2.circle(img,(penta[1][0],penta[1][1]),5,(255,255,255),3)
                    # img=cv2.circle(img,(penta[2][0],penta[2][1]),7,(255,255,255),3)
                    # img=cv2.circle(img,(penta[3][0],penta[3][1]),9,(255,255,255),3)
                    # img=cv2.circle(img,(penta[4][0],penta[4][1]),11,(255,255,255),3)
                    distance=[]
                    # Highlight the shape whose centroid is within 50 px
                    # of the fingertip (pentagon vertex 0).
                    for i in range(len(all_center)):
                        center=all_center[i]
                        # distance.append(np.sqrt((center[0]-penta[0][0])**2+(center[1]-penta[0][1])**2))
                        if (np.sqrt((center[0]-penta[0][0])**2+(center[1]-penta[0][1])**2)<50):
                            cv2.putText(img, 'This one?', (20,60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (155,175,131), thickness = 1, lineType = 8)
                            img=cv2.polylines(img,[all_info[i]],True,(255,255,255),3)
                            locked_fig=all_info[i]
                            center[0]=int(center[0])
                            center[1]=int(center[1])
                            original_center = np.array(center)
                            break
    imshow('img',img)
    if cv2.waitKey(1) & 0xFF == ord('l'):
        break
# Phase 3: track the finger; draw a translated copy of the locked shape
# at the fingertip, connected to the original.  Press 'q' to quit.
# NOTE(review): `locked_fig`/`original_center` are only defined if a
# shape was actually selected in phase 2 -- otherwise this raises.
while True:
    img=video_capture.read()[1]
    img = cv2.resize(img,None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.adaptiveThreshold(blurred,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,4)
    _,contours,_ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )
    cv2.putText(img, 'Successfully locking a fig.', (20,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (155,175,131), thickness = 1, lineType = 8)
    img=cv2.polylines(img,[locked_fig],True,(255,255,255),3)
    for cont in contours:
        if cv2.contourArea(cont)>=300 and cv2.contourArea(cont)<=h*w/4:
            arc_len = cv2.arcLength(cont,True)
            approx = cv2.approxPolyDP( cont, 0.03 * arc_len, True )
            if (len(approx)==5):
                penta=np.array([[0,0],[0,0],[0,0],[0,0],[0,0]])
                for i in range(5):
                    penta[i]=approx[i][0]
                edge=[]
                edge.append((penta[0][0]-penta[1][0])**2+(penta[0][1]-penta[1][1])**2)
                edge.append((penta[1][0]-penta[2][0])**2+(penta[1][1]-penta[2][1])**2)
                edge.append((penta[2][0]-penta[3][0])**2+(penta[2][1]-penta[3][1])**2)
                # edge.append((penta[3][0]-penta[4][0])**2+(penta[3][1]-penta[4][1])**2)
                # edge.append((penta[4][0]-penta[0][0])**2+(penta[4][1]-penta[0][1])**2)
                # Same finger heuristic as in the selection loop.
                if (max(edge)/min(edge)<2.3 and max(edge)/min(edge)>1.7):
                    img=cv2.polylines(img,[penta],True,(0,100,100),3)
                    # Translation from the shape's centre to the fingertip.
                    center=np.array([int(penta[0][0]),int(penta[0][1])])
                    vect=center-original_center
                    # print('c,oc,v',center,original_center,vect)
                    new_fig=[]
                    for i in range(len(locked_fig)):
                        new_fig.append(locked_fig[i]+vect)
                    new_fig=np.array(new_fig)
                    for i in range(len(locked_fig)):
                        new_fig[i][0]=int(new_fig[i][0])
                        new_fig[i][1]=int(new_fig[i][1])
                    img=cv2.polylines(img,[new_fig],True,(255,255,255),3)
                    # Connect corresponding corners of the two copies.
                    for i in range(len(locked_fig)):
                        print(locked_fig[i],new_fig[i])
                        img=cv2.polylines(img,[np.array([locked_fig[i],new_fig[i]])],True,(255,255,255),3)
    imshow('img',img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
18,699 | 9fe30d850eaadd7c4b67d4a5bb09e55c0d86c4f3 | from random import shuffle
from random import randint
def grafo(tarefa, n_cidades):
    """Write a random complete directed graph over *n_cidades* cities.

    The file ``tarefa_<tarefa>_<n_cidades>.txt`` gets one ``start in S``
    header line plus one ``u -> v w`` line per ordered pair (u != v);
    weights are drawn without repetition from 1..n_cidades**2.
    Returns the file name.
    """
    fich = "tarefa_" + str(tarefa) + "_" + str(n_cidades) + ".txt"
    lista = list(range(1, (n_cidades)*(n_cidades)+1))
    shuffle(lista)
    print(lista)
    # Random starting city for the traversal task.
    start = randint(0, n_cidades-1)
    # Bug fix: the original incremented `j` only inside `if j != elem`,
    # so the while loop hung forever as soon as j == elem.  A for-loop
    # cannot get stuck; the file is also closed reliably via `with`.
    with open(fich, 'w') as f:
        f.write("start in " + str(start)+'\n')
        for elem in range(n_cidades):
            for j in range(n_cidades):
                if j != elem:  # no self-loops
                    a = lista.pop(0)
                    f.write(str(elem)+" -> "+str(j)+" "+str(a)+ '\n')
    return fich
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.