index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
996,200 | a201be721c3d8897518e621c3bd3db3a2b762a8c | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from .models import Gallery, CONFIG
from .admin import PhotoInline
class GalleryPlugin(CMSPluginBase):
    """django-CMS plugin that renders a photo gallery with inline photo editing."""

    model = Gallery
    name = _('Gallery')
    module = _('Unite gallery')
    inlines = [PhotoInline, ]

    def get_render_template(self, context, instance, placeholder):
        # Prefer the easy-thumbnails template when that backend is enabled.
        if CONFIG['USE_EASYTHUMBNAILS']:
            return 'djangocms_unitegallery/easythumb-gallery.html'
        return 'djangocms_unitegallery/gallery.html'

    def render(self, context, instance, placeholder):
        # Expose the gallery instance and the global plugin config to the template.
        extra = {
            'gallery': instance,
            'placeholder': placeholder,
            'CONFIG': CONFIG,
        }
        context.update(extra)
        return context


plugin_pool.register_plugin(GalleryPlugin)
|
996,201 | 73de3f8c74c25463c3bb35cb493a7d98a6174feb |
'''
print('hello word')
#变量
message="hello python"
print(message)
message="hello python2"
print(message)
#修改字符串大小写
message='adfB'
print(message.title())
print(message.upper())
print(message.lower())
#通过\t 添加空白
print("a\tpython")
#通过\n换行
print("语言包含:\nC#\nPython\nJava")
print("Languages:\n\tC#\n\tPython\n\tJava")
#删除末尾,前面,收尾空白
message=' fdfdff '
message.rstrip()
print(message)
message.lstrip()
print(message)
message.strip()
print(message)
#四则运算
message=1+2
print(message)
message=5-2
print(message)
message=5*2
print(message)
message=6/3
print(message)
#str函数转换类型
age=23
message="i am "+str(age)+"years old"
print(message)
#列表
colors=['red','yellow','green']
print("first one is : "+colors[0])
names=['A','B','C']
message="Welcom "
print(message+names[0],"\n"+message+names[1],"\n"+message+names[2])
#列表中添加元素
colors=[]
colors.append("read")
colors.append("yellow")
colors.append("green")
print(colors)
#列表中插入元素
colors=['yellow','red','green']
colors.insert(2,'white')
print(colors)
#从列表中删除del , 删除后不可再使用
colors=['yellow', 'red', 'white', 'green']
del colors[1]
print(colors)
#使用pop删除元素,删除后还可以在其他表中使用
colors1=['yellow', 'red', 'white', 'green']
colors2=colors1.pop(1)
print(colors2)
#根据值删除元素
colors=['yellow', 'red', 'white', 'green']
colors.remove('white')
print(colors)
#使用sort排序(永久排序)
message=['dd','vv','aa','cc']
message.sort()
print('按照顺序排序后:'+str(message))
#逆序排序(永久排序)
message=['dd','vv','aa','cc']
message.sort(reverse=True)
print('按照逆序排序后:'+str(message))
#临时排序
message=['dd','vv','aa','cc']
print('更改临时顺序'+str(sorted(message)))
print('检查原来排序'+str(message))
#倒着打印列表reverse
message=['dd','vv','aa','cc']
message.reverse()
print(message)
#确定列表长度
message=['dd','vv','aa','cc']
print('列表长度为:'+str(len(message)))
#遍历列表
messages=['aa','bb','cc']
for message in messages:
print(message)
#for 结束后进行一些操作
messages=['aa','bb','cc']
for message in messages:
print(message+',you are very good')
print('I like all of you ')
#使用函数range()打印一系列数字,注意差一行为
for value in range(1,5):
print(value)
#使用range()创建数字列表
value=list(range(1,5))
print(value)
#4-3 数到 数到20 :使用一个for 循环打印数字1~20(含)。
for messages in range(1,21):
print(messages)
#:创建一个列表,其中包含数字1~1 00,再使用一个for 循环将这些数字打印出来
messages=list(range(1,100))
for message in messages:
print(message)
#4-5 计算 计算1~1 000 的总和 的总和 :创建一个列表,其中包含数字1~1 000,再使用min() 和max() 核实该列表确实是从1开始,
# 到1 000 结束的。另外,对这个列表 调用函数sum() ,看看Python将一百万个数字相加需要多长时间。
messages=list(range(1,10001))
print('最小值为:'+str(min(messages)))
print('最大值为:'+str(max(messages)))
messages_sum=sum(messages)
print(messages_sum)
#4-6 奇数 :通过给函数range() 指定第三个参数来创建一个列表,其中包含1~20的奇数;再使用一个for 循环将这些数字都打印出来。
messages=list(range(1,21,2))
for message in messages:
print(message)
#3的倍数:创建一个列表,其中包含3~30内能被3整除的数字;再使用一个for 循环将这个列表中的数字都打印出来。
messages=[]
for message in range(1,11):
messages.append(message*3)
print(messages)
#遍历切片
players = ['charles', 'martina', 'michael', 'florence', 'eli']
print('前三个队员为:')
for player in players[:3]:
print(player.title())
#复制列表
my_food=['aa','bb','cc','dd']
friend_food=my_food[:]
print('我喜欢的食物:'+str(my_food))
print('朋友喜欢的食物:'+str(friend_food))
#元祖 :不可变的列表
Value=(12,22)
print(Value)
for v in Value:
print(v)
#if语句
cars = ['audi', 'bmw', 'subaru', 'toyota']
for car in cars:
if car=='bmw':
print(car.upper())
else:
print(car.title())
# if-elif-else
price=12
if price<10:
a=1
elif price<2:
a=2
else:
a=4
print(a)
#练习
members=['aa','bb','cc','admin']
for member in members:
if member=='admin':
print('Hello admin, would you like to see a status report?' )
else:
print('Hello '+str(member)+' welcom to login')
del members[:]
if len(members)==0:
print('need some users')
#字典:键值对
members={'color':'green','point':1}
print('颜色'+members['color'])
print('分数'+str(members['point']))
#字典:添加键值对
members={'color':'green','point':1}
members['x_point']=1
members['y_point']=2
print(members)
#字典:修改值
members={'color':'green','point':1}
members['point']=2
print(members)
#对一个能够以不同速度移动的外星人的位置进行跟踪。 为此, 我们将存储该外星人的当前速度, 并据此确定该外星人将向右移动多远:
position={'x_point':0,'y_point':0,'speed':'medium'}
if position['speed']=='slow':
new_x_point=1
elif position['speed']=='medium':
new_x_point=2
else:
new_x_point=3
position['new_x_point']=position['x_point']+new_x_point
print('新位置为:'+str(position['new_x_point'])+','+str(position['y_point']))
#遍历字典
users={
'name':'cc',
'age':'12',
'sex':'female',
}
for N,AG in users.items():
print('key:'+str(N))
print('Value:' + str(AG))
#遍历字典中的键
users={
'amy':25,
'sisi':24,
'frank':29
}
for user in users.keys():
print(user)
#按顺序遍历键
users={
'amy':25,
'sisi':24,
'frank':29
}
for user in sorted(users.keys()):
print(user)
#遍历字典中的值
users={
'amy':25,
'sisi':25,
'frank':29
}
for user in users.values():
print(user)
#遍历字典的值,并去重(set()方法)
users={
'amy':25,
'sisi':25,
'frank':29
}
for user in set(users.values()):
print(user)
#嵌套
alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'red', 'points': 15}
aliens=[alien_0,alien_1,alien_2]
print(aliens)
#函数input
message=input('input something')
print(message)
#使用int()获取数值输入
message=input('您的体重')
a=int(message)
if a<=70:
print('太轻了')
elif a>=120:
print('太重了')
else:
print('体重正常')
#while
number=1
while number<=5:
print(number)
number+=1
#
prompt='please enter what you want'
prompt+='\n enter q to exit'
message=''
while message!='q':
message=input(prompt)
if message!='q':
print(message)
#使用标志
prompt='please enter what you want'
prompt+='\nenter q to exit'
active=True
while active:
message=input(prompt)
if message=='q':
active=False
else:
print(message)
#使用break退出循环
prompt='please enter what you want'
prompt+='\nenter q to exit'
while True:
message=input(prompt)
if message=='q':
break
else:
print(message)
#7-4 比萨配料 : 编写一个循环, 提示用户输入一系列的比萨配料, 并在用户输入'quit' 时结束循环。 每当用户输入一种配料后,
# 都打印一条消息, 说我们会在比萨中添加这种配料。
prompt='请输入披萨的配料:'
while True:
message=input(prompt)
if message=='q':
break
else:
print('我们会在披萨中添加'+str(message)+'这种配料')
#7-5 电影票 : 有家电影院根据观众的年龄收取不同的票价: 不到3岁的观众免费; 3~12岁的观众为10美元; 超过12岁的观众为15美元。
# 请编写一个循环, 在其中询问用户的年龄, 并指出其票价。
active=True
while active:
age = input("请输入你的年龄:")
if age=="q":
break
elif int(age)<3:
print("0 Point")
continue
elif int(age)>=3 and int(age)<=12:
print("10 Point")
continue
elif int(age)>12:
print("15 Point")
continue
#在列表之间移动元素
#创建一个未验证用户表和一个已验证用户表
unconfirm_users = ['aa', 'bb', 'cc', 'dd']
confirm_users = []
# 验证每个用户, 直到没有未验证用户为止
# 将每个经过验证的列表都移到已验证用户列表中
while unconfirm_users:
current_users = unconfirm_users.pop()
print('已经确认的用户有:'+current_users)
confirm_users.append(current_users)
print('已经确认的所有用户有:'+str(confirm_users))
for confirm_user in confirm_users:
print(confirm_user)
#删除包含特定值的所有列表元素
users = ['aa', 'bb', 'cc', 'cc', 'cc']
print(users)
while 'cc' in users:
users.remove('cc')
print(users)
#使用用户输入来填充字典
responses={}
active = True
while active:
name=input('whats your name:')
value=input('whitch color you like :')
responses[name]=value
confirm_info=input('if you want ask other person')
if confirm_info=='no':
active = False
for name,value in responses.items():
print('姓名:'+name)
print('最喜欢的颜色:'+value)
#7-8 熟食店 : 创建一个名为sandwich_orders 的列表, 在其中包含各种三明治的名字; 再创建一个名为finished_sandwiches 的空列表。
# 遍历列表sandwich_orders , 对于其中的每种三明治, 都打印一条消息, 如I made your tuna sandwich , 并将其移到列表finished_sandwiches 。
#所有三明治都制作好后, 打印一条消息, 将这些三明治列出来。
sandwitch_orders=['aa', 'bb', 'cc']
finished_sandwiches=[]
for sandwitch_order in sandwitch_orders:
print('I made your '+sandwitch_order+' sandwich')
finished_sandwiches.append(sandwitch_order)
print('所有三明治有:')
for finished_sandwiche in finished_sandwiches:
print(finished_sandwiche)
#7-9 五香烟熏牛肉(pastrami) 卖完了 : 使用为完成练习7-8而创建的列表sandwich_orders , 并确保'pastrami' 在其中至少出现了三次。 在程序开头附近添加
#这样的代码: 打印一条消息, 指出熟食店的五香烟熏牛肉卖完了; 再使用一个while 循环将列表sandwich_orders 中的'pastrami' 都删除。 确认最终的列
#表finished_sandwiches 中不包含'pastrami' 。
sandwitch_orders=['aa', 'bb', 'cc', 'pastrami', 'pastrami', 'pastrami']
print('熟食店的五香烟熏牛肉卖完了')
finished_sandwiches=[]
while 'pastrami'in sandwitch_orders:
sandwitch_orders.remove('pastrami')
for sandwitch_order in sandwitch_orders:
print('I made your ' + sandwitch_order + ' sandwich')
finished_sandwiches.append(sandwitch_order)
print('所有三明治有:')
for finished_sandwiche in finished_sandwiches:
print(finished_sandwiche)
#7-10 梦想的度假胜地 : 编写一个程序, 调查用户梦想的度假胜地。 使用类似于“If you could visit one place in the world, where would you go?”的提示
# , 并编写一个打印调查结果的代码块。
active=True
answers={}
while active:
qustions = input('想回答问题吗?')
if qustions=='no':
active=False
else:
name=input('whats your name:')
Value=input('If you could visit one place in the world, where would you go:')
answers[name]=Value
for name,Value in answers.items():
print('姓名:'+name)
print('最想去的城市'+Value)
#定义函数
def user():
print('hello')
user()
#向函数传递信息
def user(username):
print('hello '+username)
user('cc')
#位置实参(传参是需要注意位置)
def friend(name,age):
print('I like '+name)
print(name+' is '+age+' years old!')
friend('cc',str(12))
#调用函数多次
def friend(name,age):
print('I like '+name)
print(name+' is '+age+' years old!')
friend('cc',str(12))
friend('bb',str(22))
#关键字实参(传参时不要位置)
def friend(name,age):
print('I like '+name)
print(name+' is '+age+' years old!')
friend(name='sisi',age=str(28))
#返回简单值
def name(firstname,lastname):
print('firstname is '+firstname)
print('lastname is '+lastname)
fullname=firstname+' '+lastname
return fullname
full_name=name('cai','ran')
print(full_name.title())
#返回字典
def name(firstname,lastname):
person={'firs':firstname,'last':lastname}
return person
namelist=name('cai','ran')
print(namelist)
#结合使用函数和while 循环
def name(firstname,lastname):
full_name=firstname+' '+lastname
return full_name.title()
while True:
print('input your name:')
first=input('firstname is : ')
if first=='q':
break
last=input('lastname is : ')
if last=='q':
break
inputname=name(first,last)
print('helleo '+ inputname)
#创建专辑
def make_album(singer_name,album_name):
album={singer_name:album_name}
return album
alnum1=make_album('张杰','为了爱')
print(alnum1)
alnum2=make_album('艾薇儿','bad girl')
print(alnum2)
#给函数make_album() 添加一个可选形参, 以便能够存储专辑包含的歌曲数
def make_album(singer_name,album_name,number=''):
album = {singer_name: album_name}
if number:
album['number']=number
return album
alnum1=make_album('张杰','为了爱',number=10)
print(alnum1)
alnum2=make_album('艾薇儿','bad girl')
print(alnum2)
#编写一个while 循环, 让用户输入一个专辑的歌手和名称
def make_album(singer_name,album_name):
album = {singer_name: album_name}
return album
while True:
input_singer = input('input singer name')
if input_singer=='q':
break
input_album = input('input alum name')
if input_album=='q':
break
name=make_album(input_singer,input_album)
print(name)
#传递列表
def user(names):
for name in names:
msg='Hello'+' '+name.title()
print(msg)
users=['aa','bb','cc']
user(users)
#修改列表
#定义未完成和已完成
def print_models(unprinted_designs, completed_models):
"""
模拟打印每个设计, 直到没有未打印的设计为止
打印每个设计后, 都将其移到列表completed_models中
"""
while unprinted_designs:
current_models=unprinted_designs.pop()
print('打印的模板:'+current_models)
completed_models.append(current_models)
def show_completed_models(completed_models):
print('已完成的如下:')
for completed_model in completed_models:
print(completed_model)
unprinted_designs=['aa','bb','cc','dd']
completed_models=[]
print_models(unprinted_designs,completed_models)
show_completed_models(completed_models)
#8-9 魔术师
def show_magicians(names):
for name in names:
print(name)
names=['aa','bb','cc']
show_magicians(names)
#8-10 了不起的魔术师
def show_magicians(names):
for name in names:
print(name)
def make_great(change_names):
for i in range(len(change_names)):
change_names[i]='Great '+change_names[i]
return change_names
names=['aa','bb','cc']
make_great(names)
show_magicians(names)
#不变的魔术师
def show_magicians(names):
for name in names:
print(name)
def make_great(change_names):
for i in range(len(change_names)):
change_names[i]='Great '+change_names[i]
return change_names
names=['aa','bb','cc']
rtn=make_great(names[:])
show_magicians(rtn)
print(names)
#传递任意数量的实参
def make_pizza(*toppings):
print('材料包含:')
for topping in toppings:
print(topping)
make_pizza('aa')
make_pizza('bb','cc','dd')
# 使用任意数量的关键字实参
def build_profil(first,last,**userifo):
profil={}
profil['first_name']=first
profil['last_name']=last
for key,value in userifo.items():
profil[key]=value
return profil
user_profile=build_profil('cai','ran',sex='girl')
print(user_profile)
#三明治
def made_pizza(*profils):
print('材料包含:')
for profil in profils:
print(profil)
made_pizza('aa')
made_pizza('bb','cc')
made_pizza('dd','zz','ff')
'''
# User profile: fold arbitrary keyword info into a single dict.
def build_profil(first, last, **userifo):
    """Return a profile dict with first/last name followed by any extra fields."""
    profil = {'first_name': first, 'last_name': last}
    profil.update(userifo)
    return profil


user_profile = build_profil('cai', 'ran', sex='girl', age=28, school='Neusoft')
print(user_profile)
# Exercise 8-14: build a car-info dict from maker, model and optional extras.
def make_car(made_user, model, **otherinfo):
    """Return a dict describing a car; extra keyword arguments are merged in."""
    car = {'made_user': made_user, 'model': model}
    car.update(otherinfo)
    return car


carinfo = make_car('sisi', 'cairan', color='blue', score=12)
print(carinfo) |
996,202 | 42b64e510cb52daf1295dbbe023c925cb5f858ea | s = 'madam'
# Palindrome check: walk the string comparing characters from both ends.
# `s` is defined above this block.
is_match = True
for left in range(len(s)):
    # Negative index mirrors the position from the right-hand end.
    if s[left] != s[-(left + 1)]:
        is_match = False
        break
if is_match:
    print('Palindrome')
else:
    print('Not palindrome')
|
996,203 | fd170bb0968d71439ef4f11c0a200892cf83fe41 | import warnings
warnings.filterwarnings('ignore')
import sys
import os
from astroquery.skyview import SkyView
from astropy.coordinates import SkyCoord
import astropy.units as u
import aplpy
import matplotlib.pyplot as plt
from astropy.io import fits
import astropy
def coords_from_name(field_name):
    """Resolve an astronomical field name to sky coordinates via astropy.

    Args:
        field_name (str): Field name, e.g. 'M101'

    Returns:
        SkyCoord: the resolved coordinate object.
        NOTE(review): the original docstring claimed an ``(ra, dec)`` tuple
        of floats was returned, but the code returns the SkyCoord itself;
        callers access ``.ra`` / ``.dec`` on it.

    Example:
        >>> coords_from_name('M101')  # doctest: +SKIP
        <SkyCoord (ICRS): (ra, dec) in deg (210.80242917, 54.34875)>
    """
    coords = SkyCoord.from_name(field_name)
    return coords
def call_skyview_simple(survey, source_name, fov=1):
    """Download a FITS cutout of a source from a SkyView survey.

    Args:
        survey (str): name of survey, from https://skyview.gsfc.nasa.gov/current/cgi/survey.pl
        source_name (str): name of astronomical source
        fov (float): field of view in degrees (default 1)

    Returns:
        str: path of the FITS file under ``images/``.
        NOTE(review): the same path is returned even when the downloaded HDU
        failed verification and nothing was written — callers should be
        prepared for a missing file.

    Examples:
        >>> call_skyview_simple('DSS', 'M31', 2.)  # doctest: +SKIP
    """
    coords = coords_from_name(source_name)
    # (Fixed: a duplicate, unused `outname` path string was removed here.)
    images = SkyView.get_images(coords, survey,
                                coordinates='J2000',
                                projection='Car', pixels=500,
                                height=fov*u.deg, width=fov*u.deg)
    fitsname = f'images/{source_name}_{survey}_{fov}d.fits'
    try:
        images[0][0].writeto(fitsname, overwrite=True)
    except astropy.io.fits.verify.VerifyError:
        # SkyView occasionally returns a malformed HDU; report and move on.
        print('Data not available')
    return fitsname
def plot_fits(fits_name, plot_title=None, cmap_name='viridis', colorbar=True, contour=True):
    """Make a PNG plot out of a FITS file.

    Args:
        fits_name (str): path of fits file
        plot_title (str): plot title, default is name of the fits file
        cmap_name (str): name of colormap, default is viridis
        colorbar (bool): include colorbar, default is True
        contour (bool): include contour, default is True
    """
    f = aplpy.FITSFigure(fits_name, figsize=(10, 8))
    if plot_title is None:  # PEP 8: identity comparison with None, not ==
        plot_title = fits_name.replace('.fits', '')
    plt.title(plot_title)
    f.show_colorscale(cmap=cmap_name, stretch='linear')
    f.ticks.set_color('k')
    if colorbar:
        f.add_colorbar()
    # Peek at the primary header for beam info; the context manager closes the
    # file handle (the original left `fits.open(...)` open).
    with fits.open(fits_name) as hdul:
        has_beam = 'BMAJ' in hdul[0].header
    if has_beam:
        f.add_beam()
        print(f'Adding beam for {fits_name}')
    if contour:
        f.show_contour()
    output_name = fits_name.replace('.fits', '.png')
    plt.savefig(output_name, dpi=200, bbox_inches='tight')
def main_func(source_name, survey, fov):
    """Fetch a survey cutout for a source and render it to PNG."""
    # Ensure the output directory exists before downloading.
    os.makedirs('images', exist_ok=True)
    fits_path = call_skyview_simple(survey, source_name, fov)
    plot_fits(fits_path, plot_title=None, cmap_name='viridis', colorbar=True)
if __name__ == '__main__':
    #warnings.filterwarnings('ignore', category=AstropyDeprecationWarning, append=True)
    #warnings.simplefilter('ignore', AstropyDeprecationWarning)
    # CLI usage: python <script> <source_name> <survey> <fov_degrees>
    main_func(sys.argv[1], sys.argv[2], float(sys.argv[3]))
|
996,204 | 562939e275d106ec8186af3406c98443016b92f1 | from cv2 import imread
from cv2 import imshow
from cv2 import VideoWriter
from cv2 import waitKey
from cv2 import destroyAllWindows
from cv2 import VideoWriter_fourcc
# Read a single frame and repeat it into an AVI clip (15 fps x 1000 frames).
frame = imread("tests/hello.png")
print(frame)
# OpenCV images are indexed (rows, cols, channels) == (height, width, layers).
# The original unpacked this as `width, height, layer`, which swapped the
# dimensions handed to VideoWriter and can yield a corrupt/empty video.
height, width, layer = frame.shape
out = VideoWriter("tests/out.avi", VideoWriter_fourcc(*'DIVX'), 15.0, (width, height))
for _ in range(1000):
    out.write(frame)
    imshow("Frame", frame)
    # Allow early exit from the preview window with 'q'.
    if waitKey(1) & 0xFF == ord("q"):
        break
out.release()
# Clean up the preview window (was imported but never called).
destroyAllWindows()
996,205 | c34462d31a31287e3f72d222d739a5949cf69fa2 | # ch20_5.py
import requests, bs4, json
from pprint import pprint
url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=25.0329694,121.5654177&radius=3500&type=school&key=YOUR_API_KEY'
gmap = requests.get(url)
# The Places API responds with plain JSON, so parse the body directly.
# (Fixed: the original round-tripped the JSON text through BeautifulSoup's
# lxml HTML parser, which is unnecessary and can mangle the payload.)
g_info = json.loads(gmap.text)
# Print the name of every school returned within the search radius.
schools = g_info['results']
for data in schools:
    print(data['name'])
|
996,206 | ecf276e9ad6017cd6d559f557c179db352695a8b | from time import sleep
# ATM note breakdown: keep asking until a valid integer amount is entered,
# then report how many notes of each denomination make up the withdrawal.
while True:
    try:
        a = int(input('Qual o valor deseja sacar?\n'))
    except ValueError:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt): only non-numeric input is retried.
        print('Informação inválida, tente novamente.')
        sleep(0.7)
        continue
    # Greedy breakdown: 50s first, then 20s/10s/5s/1s from the remainders.
    print(f'''Serão:
{a//50} notas de 50;
{a%50//20} nota(s) de 20 reais;
{a%50%20//10} nota(s) de 10 reais;
{a%50%20%10//5} nota(s) de 5 reais;
{a%50%20%10%5//1} nota(s) de 1 real.''')
    break
996,207 | 10572a58c8007500dd34586e2c0ea48c9107f611 | import printinbetweenPrimeNumbers_m_less_than
def test_two_even_numbers():
    # No primes lie strictly within this narrow even-bounded range.
    expected = "There is no Prime Numbers exsist"
    actual = printinbetweenPrimeNumbers_m_less_than.inbetweenPrimeNumbers(8, 10)
    assert expected == actual


def test_one_and_two():
    # Only 2 is prime in [1, 2].
    expected = [2]
    actual = printinbetweenPrimeNumbers_m_less_than.inbetweenPrimeNumbers(1, 2)
    assert expected == actual


def test_with_two_and_three():
    # Both bounds are themselves prime.
    expected = [2, 3]
    actual = printinbetweenPrimeNumbers_m_less_than.inbetweenPrimeNumbers(2, 3)
    assert expected == actual


def test_with_seven_and_three():
    # Even values inside the range are skipped.
    expected = [3, 5, 7]
    actual = printinbetweenPrimeNumbers_m_less_than.inbetweenPrimeNumbers(3, 7)
    assert expected == actual


def test_with_seven_and_eleven():
    # 8, 9, 10 are composite; only the bounds qualify.
    expected = [7, 11]
    actual = printinbetweenPrimeNumbers_m_less_than.inbetweenPrimeNumbers(7, 11)
    assert expected == actual


def test_with_three_and_two():
    # Reversed bounds are rejected with an explanatory message.
    expected = "Provide always M less than N"
    actual = printinbetweenPrimeNumbers_m_less_than.inbetweenPrimeNumbers(3, 2)
    assert expected == actual
996,208 | 35e3e8ce9f1013bde5217de5f487c098e9f089a8 | """Pretraining on GPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import math
import json
import time
import numpy as np
from absl import flags
import absl.logging as _logging
import tensorflow as tf
import data_utils
import model_utils
from gpu_utils import assign_to_gpu, average_grads_and_vars
from tensorflow.python.distribute.cross_device_ops import (
AllReduceCrossDeviceOps,
)
import custom_function_builder
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.estimator import Estimator
flags.DEFINE_integer('num_hosts', default = 1, help = 'Number of hosts')
flags.DEFINE_integer(
'num_core_per_host', default = 1, help = 'Number of cores per host'
)
flags.DEFINE_bool(
'use_tpu', default = False, help = 'Whether to use TPUs for training.'
)
flags.DEFINE_integer(
'num_gpu_cores',
2,
'Only used if `use_gpu` is True. Total number of GPU cores to use.',
)
# Experiment (data/checkpoint/directory) config
flags.DEFINE_integer(
'num_passes', default = 1, help = 'Number of passed used for training.'
)
flags.DEFINE_string(
'record_info_dir',
default = None,
help = 'Path to local directory containing `record_info-lm.json`.',
)
flags.DEFINE_string('model_dir', default = None, help = 'Estimator model_dir.')
flags.DEFINE_string(
'init_checkpoint',
default = None,
help = 'checkpoint path for initializing the model.',
)
# Optimization config
flags.DEFINE_float(
'learning_rate', default = 1e-4, help = 'Maximum learning rate.'
)
flags.DEFINE_float('clip', default = 1.0, help = 'Gradient clipping value.')
# for cosine decay
flags.DEFINE_float(
'min_lr_ratio', default = 0.001, help = 'Minimum ratio learning rate.'
)
flags.DEFINE_integer(
'warmup_steps', default = 0, help = 'Number of steps for linear lr warmup.'
)
flags.DEFINE_float('adam_epsilon', default = 1e-8, help = 'Adam epsilon')
flags.DEFINE_string('decay_method', default = 'poly', help = 'poly or cos')
flags.DEFINE_float('weight_decay', default = 0.0, help = 'weight decay')
# Training config
flags.DEFINE_integer(
'train_batch_size', default = 16, help = 'Size of train batch.'
)
flags.DEFINE_integer(
'train_steps', default = 100000, help = 'Total number of training steps.'
)
flags.DEFINE_integer(
'iterations', default = 1000, help = 'Number of iterations per repeat loop.'
)
flags.DEFINE_integer(
'save_steps',
default = None,
help = 'number of steps for model checkpointing.',
)
# Data config
flags.DEFINE_integer(
'seq_len', default = 0, help = 'Sequence length for pretraining.'
)
flags.DEFINE_integer(
'reuse_len',
default = 0,
help = 'How many tokens to be reused in the next batch. '
'Could be half of seq_len',
)
flags.DEFINE_bool(
'bi_data',
default = True,
help = 'Use bidirectional data streams, i.e., forward & backward.',
)
flags.DEFINE_integer(
'mask_alpha', default = 6, help = 'How many tokens to form a group.'
)
flags.DEFINE_integer(
'mask_beta',
default = 1,
help = 'How many tokens to mask within each group.',
)
flags.DEFINE_integer(
'num_predict',
default = None,
help = 'Number of tokens to predict in partial prediction.',
)
flags.DEFINE_integer('perm_size', default = None, help = 'perm size.')
flags.DEFINE_bool('uncased', False, help = 'Use uncased inputs or not.')
flags.DEFINE_integer('n_token', 32000, help = 'Vocab size')
# Model config
flags.DEFINE_integer('mem_len', default = 0, help = 'Number of steps to cache')
flags.DEFINE_bool(
'same_length', default = False, help = 'Same length attention'
)
flags.DEFINE_integer('clamp_len', default = -1, help = 'Clamp length')
flags.DEFINE_integer('n_layer', default = 6, help = 'Number of layers.')
flags.DEFINE_integer('d_model', default = 32, help = 'Dimension of the model.')
flags.DEFINE_integer(
'd_embed', default = 32, help = 'Dimension of the embeddings.'
)
flags.DEFINE_integer('n_head', default = 4, help = 'Number of attention heads.')
flags.DEFINE_integer(
'd_head', default = 8, help = 'Dimension of each attention head.'
)
flags.DEFINE_integer(
'd_inner',
default = 32,
help = 'Dimension of inner hidden size in positionwise feed-forward.',
)
flags.DEFINE_float('dropout', default = 0.0, help = 'Dropout rate.')
flags.DEFINE_float('dropatt', default = 0.0, help = 'Attention dropout rate.')
flags.DEFINE_bool(
'untie_r', default = False, help = 'Untie r_w_bias and r_r_bias'
)
flags.DEFINE_string(
'summary_type',
default = 'last',
help = 'Method used to summarize a sequence into a compact vector.',
)
flags.DEFINE_string(
'ff_activation',
default = 'relu',
help = 'Activation type used in position-wise feed-forward.',
)
flags.DEFINE_bool('use_bfloat16', False, help = 'Whether to use bfloat16.')
# Parameter initialization
flags.DEFINE_enum(
'init',
default = 'normal',
enum_values = ['normal', 'uniform'],
help = 'Initialization method.',
)
flags.DEFINE_float(
'init_std', default = 0.02, help = 'Initialization std when init is normal.'
)
flags.DEFINE_float(
'init_range',
default = 0.1,
help = 'Initialization std when init is uniform.',
)
FLAGS = flags.FLAGS
def per_device_batch_size(batch_size, num_gpus):
    """Split a global batch size evenly across GPUs.

    For multi-GPU runs the global batch size must be a multiple of the
    number of GPUs; single-device (or CPU-only) runs pass through
    unchanged. This should eventually be handled by DistributionStrategies
    directly; multi-GPU support is currently experimental.

    Args:
        batch_size: Global batch size to be divided among devices. This should
            be num_gpus times the single-GPU batch size for multi-gpu training.
        num_gpus: How many GPUs are used with DistributionStrategies.

    Returns:
        Batch size per device.

    Raises:
        ValueError: if batch_size is not divisible by the number of devices.
    """
    if num_gpus <= 1:
        return batch_size

    leftover = batch_size % num_gpus
    if leftover:
        raise ValueError(
            (
                'When running with multiple GPUs, batch size '
                'must be a multiple of the number of available GPUs. Found {} '
                'GPUs with a batch size of {}; try --batch_size={} instead.'
            ).format(num_gpus, batch_size, batch_size - leftover)
        )
    return int(batch_size / num_gpus)
def get_model_fn():
    """Build an Estimator ``model_fn`` closure over the module-level FLAGS.

    NOTE(review): despite wiring up a train_op, the inner function asserts
    ``mode == EVAL``, so this model_fn only works with Estimator.evaluate().
    """
    def model_fn(features, labels, mode, params):
        """Estimator model function (evaluation mode only)."""
        #### Training or Evaluation
        is_eval = mode == tf.estimator.ModeKeys.EVAL
        assert is_eval
        #### Retrieve `mems` from `params["cache"]`
        mems = {}
        idx = 0  # NOTE(review): unused in this function
        if FLAGS.mem_len > 0:
            mems['mems'] = params['cache']
        #### Get loss from inputs
        total_loss, total_accuracy, new_mems, monitor_dict = custom_function_builder.get_loss(
            FLAGS, features, labels, mems, False
        )
        #### Turn `new_mems` into `new_cache`
        new_cache = []
        if FLAGS.mem_len > 0:
            new_cache += new_mems['mems']
        #### Check model parameters
        num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
        tf.logging.info('#params: {}'.format(num_params))
        #### Configuring the optimizer
        train_op, learning_rate, gnorm = model_utils.get_train_op(
            FLAGS, total_loss
        )
        monitor_dict['lr'] = learning_rate
        monitor_dict['gnorm'] = gnorm
        #### Customized initial checkpoint
        scaffold_fn = model_utils.init_from_checkpoint(
            FLAGS, global_vars = True
        )
        # def metric_fn(accuracy):
        #     return
        #
        # eval_metrics = (metric_fn, [total_accuracy])
        output_spec = tf.estimator.EstimatorSpec(
            mode = mode,
            loss = total_loss,
            train_op = train_op,
            eval_metric_ops = {'accuracy': total_accuracy},
            scaffold = scaffold_fn,
        )
        return output_spec
    return model_fn
def get_input_fn(split, batch_size):
    """Create the training input_fn and its record-info dict.

    Thin wrapper over ``data_utils.get_input_fn`` that fills most arguments
    from FLAGS; only the 'train' split is supported (enforced by the assert).
    Returns the (input_fn, record_info_dict) pair from data_utils unchanged.
    """
    assert split == 'train'
    input_fn, record_info_dict = data_utils.get_input_fn(
        tfrecord_dir = FLAGS.record_info_dir,
        split = split,
        bsz_per_host = batch_size,
        seq_len = FLAGS.seq_len,
        reuse_len = FLAGS.reuse_len,
        bi_data = FLAGS.bi_data,
        num_hosts = 1,
        num_core_per_host = 1,
        perm_size = FLAGS.perm_size,
        mask_alpha = FLAGS.mask_alpha,
        mask_beta = FLAGS.mask_beta,
        uncased = FLAGS.uncased,
        num_passes = FLAGS.num_passes,
        use_bfloat16 = FLAGS.use_bfloat16,
        num_predict = FLAGS.num_predict,
    )
    return input_fn, record_info_dict
def get_cache_fn(mem_len, batch_size):
    """Return a factory for zero-initialized memory tensors, or None.

    The returned ``cache_fn`` builds one [mem_len, batch_size, d_model]
    zeros tensor per layer; ``None`` is returned when ``mem_len`` is 0.
    NOTE(review): the closure gates on ``FLAGS.mem_len`` while the outer
    conditional uses the ``mem_len`` argument — confirm they always agree.
    """
    tf_float = tf.bfloat16 if FLAGS.use_bfloat16 else tf.float32
    def cache_fn():
        mems = []
        if FLAGS.mem_len > 0:
            for _ in range(FLAGS.n_layer):
                zeros = tf.zeros(
                    [mem_len, batch_size, FLAGS.d_model], dtype = tf_float
                )
                mems.append(zeros)
        return mems
    if mem_len > 0:
        return cache_fn
    else:
        return None
def main(_):
    """Entry point: build the Estimator and run evaluation on the train split."""
    tf.logging.set_verbosity(tf.logging.INFO)
    # Get corpus info
    FLAGS.n_token = data_utils.VOCAB_SIZE
    tf.logging.info('n_token {}'.format(FLAGS.n_token))
    if not tf.gfile.Exists(FLAGS.model_dir):
        tf.gfile.MakeDirs(FLAGS.model_dir)
    bsz_per_core = FLAGS.train_batch_size
    train_input_fn, train_record_info_dict = get_input_fn('train', bsz_per_core)
    tf.logging.info(
        'num of batches {}'.format(train_record_info_dict['num_batch'])
    )
    # NOTE(review): built and logged, but never handed to the Estimator —
    # the params below pass 'cache': None regardless.
    train_cache_fn = get_cache_fn(FLAGS.mem_len, bsz_per_core)
    tf.logging.info(train_cache_fn)
    log_every_n_steps = 10
    run_config = RunConfig(
        log_step_count_steps = log_every_n_steps,
        model_dir = FLAGS.model_dir,
        save_checkpoints_steps = FLAGS.save_steps,
        save_summary_steps = None,
    )
    model_fn = get_model_fn()
    tf.logging.info('Use normal Estimator')
    estimator = Estimator(
        model_fn = model_fn,
        params = {'batch_size': bsz_per_core, 'cache': None},
        config = run_config,
    )
    tf.logging.info('***** Running evaluation *****')
    tf.logging.info(' Batch size = %d', FLAGS.train_batch_size)
    estimator.evaluate(input_fn = train_input_fn, steps = 100)
if __name__ == '__main__':
    # absl/TF app entry point: parses FLAGS, then invokes main().
    tf.app.run()
|
996,209 | f4ffa0d46f866a650672e7ba23f9d4a57dd846da | from __future__ import unicode_literals
from django.utils import timezone
from datetime import datetime
from django.db import transaction
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin, BaseUserManager)
from django.db import models
import pytz
utc=pytz.UTC
# timezone.localtime(timezone.now())
class Proyecto(models.Model):
    """A project, uniquely identified by its cost-center code."""
    centro_de_coste = models.CharField(max_length = 20, unique=True)  # cost-center code (unique key, used as str repr)
    nombre_proyecto = models.CharField(max_length = 100)
    ubicacion = models.CharField(max_length = 100)
    cliente = models.CharField(max_length = 100)
    rut_cliente = models.CharField(max_length = 20)
    mandante = models.CharField(max_length = 100)
    rut_mandante = models.CharField(max_length = 20)
    mandante_final = models.CharField(max_length = 100)
    cantidad_voucher_imprimir = models.IntegerField(blank=True, default=1)  # number of voucher copies to print
    available = models.BooleanField(default=True)  # soft-delete / active flag

    def __str__(self):
        return self.centro_de_coste
class Subcontratista(models.Model):
    """A subcontractor attached to a project, with its contact person."""
    proyecto = models.ForeignKey(Proyecto, on_delete=models.CASCADE)
    rut = models.CharField(max_length = 20, unique=True)  # tax id (unique)
    razon_social = models.CharField(max_length = 100)  # legal name, used as str repr
    nombre_subcontratista = models.CharField(max_length = 100)
    nombre_contacto = models.CharField(max_length = 50)
    apellido_contacto = models.CharField(max_length = 50)
    email_contacto = models.CharField(max_length = 100, blank=True, default='')
    telefono_contacto = models.CharField(max_length = 20)
    available = models.BooleanField(default=True)  # soft-delete / active flag

    def __str__(self):
        return self.razon_social
def get_upload_path_camion(instance, filename):
    """Build the date-partitioned upload path for a truck photo."""
    today = timezone.now()
    date_part = today.strftime('%Y/%m/%d')
    return 'fotoscamiones/{}/subcontratista_{}/{}'.format(
        date_part, instance.subcontratista.id, filename)
class Camion(models.Model):
    """A subcontractor's truck, with capacity and main-driver details."""
    # Capacity units: cubic meters or tonnes.
    UNIDADES = [
        ('m3','m3'),
        ('ton','ton')
    ]
    subcontratista = models.ForeignKey(Subcontratista, on_delete=models.CASCADE)
    patente_camion = models.CharField(max_length = 20, unique=True)  # license plate (unique)
    marca_camion = models.CharField(max_length = 20)
    modelo_camion = models.CharField(max_length = 20)
    capacidad_camion = models.CharField(max_length = 20)
    unidad_medida = models.CharField(max_length = 5, choices=UNIDADES)
    nombre_conductor_principal = models.CharField(max_length = 50)
    apellido_conductor_principal = models.CharField(max_length = 50)
    telefono_conductor_principal = models.CharField(max_length = 20)
    descripcion = models.CharField(max_length = 20, blank=True)
    numero_ejes = models.CharField(max_length = 20, blank=True)
    color_camion = models.CharField(max_length = 20, blank=True)
    foto_camion = models.FileField(upload_to=get_upload_path_camion, blank=True, null=True)
    available = models.BooleanField(default=True)  # soft-delete / active flag

    def __str__(self):
        return self.patente_camion+" "+self.marca_camion+" "+self.modelo_camion

    class Meta:
        verbose_name_plural = "Camiones"
class Origen(models.Model):
    """Load/pickup point of a project, with optional address fields and
    mandatory coordinates."""
    proyecto = models.ForeignKey(Proyecto, on_delete=models.CASCADE)
    nombre_origen = models.CharField(max_length = 100)
    comuna = models.CharField(max_length = 50,blank=True)
    calle = models.CharField(max_length = 50,blank=True)
    numero = models.IntegerField(blank=True,null=True)
    # Coordinates kept as free-form strings, not decimal fields
    latitud = models.CharField(max_length = 20)
    longitud = models.CharField(max_length = 20)
    available = models.BooleanField(default=True)

    def __str__(self):
        return self.nombre_origen

    class Meta:
        verbose_name_plural = "Origenes"
class Suborigen(models.Model):
    """Named subdivision of an Origen."""
    origen = models.ForeignKey(Origen, on_delete=models.CASCADE)
    nombre_suborigen = models.CharField(max_length = 100)
    # NOTE(review): this model uses ``activo`` while sibling models use
    # ``available`` for the same concept — consider unifying the naming.
    activo = models.BooleanField(default=True)

    def __str__(self):
        return self.nombre_suborigen+" perteneciente al origen: "+str(self.origen)

    class Meta:
        verbose_name_plural = "Sub-Origenes"
class Destino(models.Model):
    """Delivery/dump point of a project, including the site owner's
    identification."""
    proyecto = models.ForeignKey(Proyecto, on_delete=models.CASCADE)
    nombre_destino = models.CharField(max_length = 100)
    nombre_propietario = models.CharField(max_length = 100)
    rut_propietario = models.CharField(max_length = 20)
    comuna = models.CharField(max_length = 50,blank=True)
    calle = models.CharField(max_length = 50,blank=True)
    numero = models.IntegerField(blank=True,null=True)
    # Coordinates as strings; declared longitude-first, unlike Origen
    longitud = models.CharField(max_length = 20)
    latitud = models.CharField(max_length = 20)
    available = models.BooleanField(default=True)

    def __str__(self):
        return str(self.id)+" "+self.nombre_destino
class Material(models.Model):
    """Material accepted at a given destination."""
    destino = models.ForeignKey(Destino, on_delete=models.CASCADE)
    material = models.CharField(max_length = 100)
    available = models.BooleanField(default=True)

    def __str__(self):
        return str(self.id)+" "+self.material

    class Meta:
        verbose_name_plural = "Materiales"
class CodigoQR(models.Model):
    """QR code assigned to a truck; at most one code per truck is active."""
    camion = models.ForeignKey(Camion, on_delete=models.CASCADE)
    activo = models.BooleanField(default=True)

    def __str__(self):
        return str(self.id)+" "+str(self.activo)

    class Meta:
        verbose_name_plural = "Codigos QR"

    def save(self, *args, **kwargs):
        """Persist the code.

        Saving an *active* code atomically deactivates every other active
        code of the same truck, enforcing the one-active-code invariant.
        Inactive codes are saved without touching siblings.
        """
        if not self.activo:
            return super(CodigoQR, self).save(*args, **kwargs)
        with transaction.atomic():
            # Deactivate competitors and save inside one transaction so a
            # failure cannot leave the truck with zero or two active codes.
            CodigoQR.objects.filter(activo=True,camion=self.camion).update(activo=False)
            return super(CodigoQR, self).save(*args, **kwargs)
##### Usuarios #####
class UserManager(BaseUserManager):
    """Manager for the custom ``User`` model (authenticates by RUT).

    NOTE(review): ``AdminManager`` and ``DespManager`` below are identical
    copies of this class — consider sharing a single base manager.
    """
    use_in_migrations = True

    def _create_user(self, rut, password, **extra_fields):
        # Create and persist a user identified by ``rut``; password is hashed
        # via set_password before saving.
        if not rut:
            raise ValueError('The given rut must be set')
        try:
            # Atomic so a failed save leaves no partial user row
            with transaction.atomic():
                user = self.model(rut=rut, **extra_fields)
                user.set_password(password)
                # print("proceso01")
                user.save(using=self._db)
                # print("proceso02")
                return user
        except:
            # NOTE(review): bare re-raise adds nothing; the try/except could
            # be removed without changing behavior.
            raise

    def create_user(self, rut, password=None, **extra_fields):
        # Regular (non-staff, non-superuser) account
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(rut, password, **extra_fields)

    def create_superuser(self, rut, password, **extra_fields):
        # Staff + superuser account
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        return self._create_user(rut, password=password, **extra_fields)
class AdminManager(BaseUserManager):
    """Manager for ``Administrador``; identical in behavior to ``UserManager``
    (see the duplication note there)."""
    use_in_migrations = True

    def _create_user(self, rut, password, **extra_fields):
        # Create and persist an administrator identified by ``rut``.
        if not rut:
            raise ValueError('The given rut must be set')
        try:
            # Atomic so a failed save leaves no partial row
            with transaction.atomic():
                user = self.model(rut=rut, **extra_fields)
                user.set_password(password)
                user.save(using=self._db)
                return user
        except:
            # NOTE(review): bare re-raise — the try/except is redundant.
            raise

    def create_user(self, rut, password=None, **extra_fields):
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(rut, password, **extra_fields)

    def create_superuser(self, rut, password, **extra_fields):
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        return self._create_user(rut, password=password, **extra_fields)
class DespManager(BaseUserManager):
    """Manager for ``Despachador``; identical in behavior to ``UserManager``
    (see the duplication note there)."""
    use_in_migrations = True

    def _create_user(self, rut, password, **extra_fields):
        # Create and persist a dispatcher identified by ``rut``.
        if not rut:
            raise ValueError('The given rut must be set')
        try:
            # Atomic so a failed save leaves no partial row
            with transaction.atomic():
                user = self.model(rut=rut, **extra_fields)
                user.set_password(password)
                user.save(using=self._db)
                return user
        except:
            # NOTE(review): bare re-raise — the try/except is redundant.
            raise

    def create_user(self, rut, password=None, **extra_fields):
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(rut, password, **extra_fields)

    def create_superuser(self, rut, password, **extra_fields):
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        return self._create_user(rut, password=password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
    """Custom authentication user identified by RUT instead of a username."""
    rut = models.CharField(max_length=15, unique=True)
    nombre = models.CharField(max_length=30)
    apellido = models.CharField(max_length=30)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    date_joined = models.DateTimeField(default=timezone.now)

    objects = UserManager()

    USERNAME_FIELD = 'rut'
    REQUIRED_FIELDS = ['nombre', 'apellido']

    def __str__(self):
        return self.rut

    def save(self, *args, **kwargs):
        # NOTE(review): returning ``self`` is non-standard (model save()
        # conventionally returns None) but harmless; kept for existing callers.
        super(User, self).save(*args, **kwargs)
        return self
class Administrador(User, PermissionsMixin):
    """Administrator account (inherits the concrete ``User`` model) that can
    be attached to several projects.

    NOTE(review): ``PermissionsMixin`` is inherited twice — ``User`` already
    mixes it in — so the repeat here is redundant.
    """
    email = models.CharField(max_length=100, unique=True)
    cargo = models.CharField(max_length=100, blank=True)
    # NOTE(review): related_name='proyecto' reads oddly from the Proyecto
    # side (proyecto.proyecto); a plural like 'administradores' would be clearer.
    proyecto = models.ManyToManyField(Proyecto, related_name='proyecto', blank=True)

    objects = AdminManager()

    USERNAME_FIELD = 'rut'
    REQUIRED_FIELDS = ['nombre', 'apellido']

    def __str__(self):
        return self.nombre

    # def save(self, *args, **kwargs):
    #     Administrador.objects.create_superuser(Administrador ,rut, password, *args, **kwargs)
    #     super(Administrador, self).save(*args, **kwargs)
    #     return self

    class Meta:
        verbose_name_plural = "Administradores"
class Despachador(User, PermissionsMixin):
    """Dispatcher account tied to a single project.

    NOTE(review): ``PermissionsMixin`` is already provided by ``User``.
    """
    telefono = models.CharField(max_length=30, blank=True)
    # Plain integer rather than a ForeignKey to Origen — presumably
    # intentional (cf. OrigenTemporal.id_origen); confirm.
    origen_asignado = models.IntegerField(blank=True, null=True)
    # NOTE(review): blank=True without null=True still requires a value at
    # the database level — forms may pass validation and then fail to save.
    proyecto = models.ForeignKey(Proyecto, on_delete=models.CASCADE, blank=True)

    objects = DespManager()

    USERNAME_FIELD = 'rut'
    REQUIRED_FIELDS = ['nombre', 'apellido']

    def __str__(self):
        return self.nombre

    # def save(self, *args, **kwargs):
    #     super(Despachador, self).save(*args, **kwargs)
    #     return self

    class Meta:
        verbose_name_plural = "Despachadores"
##### fin usuarios #####
def fin_origen_temporal():
    """Default expiry for a temporary origin assignment: 12 hours from now."""
    twelve_hours = timezone.timedelta(hours=12)
    return timezone.now() + twelve_hours
class OrigenTemporal(models.Model):
    """Temporary assignment of a dispatcher to an origin, valid for a
    limited number of hours."""
    despachador = models.ForeignKey(Despachador, on_delete=models.CASCADE)
    # Raw Origen id rather than a ForeignKey — presumably intentional; confirm.
    id_origen = models.IntegerField()
    timestamp_inicio = models.DateTimeField(default=timezone.now)
    # Duration in hours (matches the 12-hour default of fin_origen_temporal)
    duracion = models.IntegerField(default=12)
    activo = models.BooleanField(default=True)

    def __str__(self):
        return str(self.id_origen)

    class Meta:
        # NOTE(review): same plural as the Origen model ("Origenes") —
        # probably meant something like "Origenes temporales".
        verbose_name_plural = "Origenes"
def get_upload_path_patente(instance, filename):
    """Upload path for a licence-plate photo: date-bucketed per dispatcher,
    with the full timestamp prefixed to the file name."""
    now = timezone.now()
    folder = 'fotospatentes/{}/{}/{}/user_{}'.format(
        now.strftime('%Y'), now.strftime('%m'), now.strftime('%d'),
        instance.despachador.id)
    return '{}/{}_{}'.format(folder, now, filename)
class Voucher(models.Model):
    """Printable dispatch receipt.

    Client, subcontractor, driver, truck, origin, destination and material
    appear to be denormalized into plain text fields, so the voucher stays a
    snapshot even if the source rows change later — confirm this intent.
    """
    despachador = models.ForeignKey(Despachador, on_delete=models.CASCADE)
    proyecto = models.CharField(max_length = 100)
    nombre_cliente = models.CharField(max_length = 100)
    rut_cliente = models.CharField(max_length = 20)
    nombre_subcontratista = models.CharField(max_length = 100)
    rut_subcontratista = models.CharField(max_length = 20)
    nombre_conductor_principal = models.CharField(max_length = 50)
    apellido_conductor_principal = models.CharField(max_length = 50)
    # Server-side capture time vs. the client-reported fecha/hora below
    fecha_servidor = models.DateField(auto_now_add=True)
    hora_servidor = models.TimeField(auto_now_add=True)
    fecha = models.DateField()
    hora = models.TimeField()
    patente = models.CharField(max_length = 20)
    foto_patente = models.FileField(upload_to=get_upload_path_patente)
    # foto_patente = models.FileField(upload_to='fotospatentes/%Y/%m/%d/', blank=True)
    volumen = models.CharField(max_length = 20)
    tipo_material = models.CharField(max_length = 50)
    punto_origen = models.CharField(max_length = 100)
    punto_suborigen = models.CharField(max_length = 100, blank=True)
    punto_destino = models.CharField(max_length = 100)
    contador_impresiones = models.IntegerField()
    id_qr = models.CharField(max_length = 255, blank=True)

    def __str__(self):
        cadena = "voucher_"+str(self.id)+" "+str(self.despachador)
        return cadena

# TODO (translated from Spanish): the voucher should also keep the ids of the
# related rows that are needed for the dispatch.
996,210 | 84b416028543a1586fe8cd4fc3b22562da0e1b17 | from .rfc3414_key_derivation import snmpv3_key_from_password, derive_intermediate_key, localize_intermediate_key
__all__ = [snmpv3_key_from_password, derive_intermediate_key, localize_intermediate_key]
|
996,211 | a18266f1853e36443d85ac1b4e286eb1fb07b53a | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from .views import *
# Named URL routes mapping to the views imported above (auth + post CRUD).
urlpatterns = [
    url(r'^register$', User_Register.as_view(), name='register'),
    url(r'^login$', User_Login.as_view(), name='login'),
    url(r'^logout/$', user_logout, name='logout'),
    # Detail/delete/update take the post primary key as <pk>
    url(r'^detail/(?P<pk>[0-9]+)$', Post_Detail.as_view(), name='detail'),
    url(r'^create$', Create_Post.as_view(), name='create'),
    url(r'^delete/(?P<pk>[0-9]+)$', Delete_Post.as_view(), name='delete'),
    url(r'^update/(?P<pk>[0-9]+)$', Update_Post.as_view(), name='update'),
    # Site root: the post list
    url(r'^$', Post_List.as_view(), name='list')
]
|
996,212 | 0fc67cace663793a6f823e771cbcdea5d770980e | """
Quantitative MRI
=======================================
This example shows how to build quantitative maps of R1 and R2* and semi-quantitative
PD from a MP2RAGEME dataset by performing the following steps:
1. Download a downsampled MP2RAGEME dataset using
:func:`nighres.data.download_MP2RAGEME_testdata` [1]_
2. Denoise the MP2RAGEME data using
:func:`nighres.intensity.lcpca_denoising` [2]_
3. Build quantitative maps using
:func:`nighres.intensity.mp2rage_t1_mapping`,
:func:`nighres.intensity.flash_t2s_fitting`,
:func:`nighres.intensity.mp2rageme_pd_mapping` [3]_
4. Remove the skull and create a brain mask using
:func:`nighres.brain.intensity_based_skullstripping` [4]_
"""
############################################################################
# Import and download
# -------------------
# First we import ``nighres`` and the ``os`` module to set the output directory
# Make sure to run this file in a directory you have write access to, or
# change the ``out_dir`` variable below.
import nighres
import os
# All inputs are downloaded under, and all outputs written under, the
# current working directory.
in_dir = os.path.join(os.getcwd(), 'nighres_testing/data_sets')
out_dir = os.path.join(os.getcwd(), 'nighres_testing/quantitative_mri')
os.makedirs(out_dir, exist_ok=True)
############################################################################
# We import the ``ants`` modules for inhomogeneity correction
# As ANTspy is a depencency for nighres, it should be installed already
import ants
############################################################################
# We also import the ``numpy`` and ``nibabel`` modules to perform basic image
# operations like masking, intensity scaling, or reorientation
import nibabel
import numpy
############################################################################
# We also try to import Nilearn plotting functions. If Nilearn is not
# installed, plotting will be skipped.
# Plotting is optional: degrade gracefully when Nilearn is not installed.
skip_plots = False
try:
    from nilearn import plotting
except ImportError:
    skip_plots = True
    print('Nilearn could not be imported, plotting will be skipped')
############################################################################
# Now we download an example MP2RAGEME dataset, which consists of a MPRAGE sequence
# with two inversions interleaved and multiple echoes on the second inversion [1]_.
dataset = nighres.data.download_MP2RAGEME_testdata(in_dir)
############################################################################
# Denoising
# ----------------
# First we perform some denoising. Quantitative MRI sequences combine multiple
# acquisitions (in this case 10, counting magnitude and phase) which can be efficiently
# denoised with a local PCA approach without compromising spatial resolution [2]_.
# Joint local-PCA denoising of all magnitude + phase volumes (see [2]_);
# phase images are unwrapped and rescaled before denoising.
denoising_results = nighres.intensity.lcpca_denoising(
    image_list=dataset['mp2rageme_mag'],
    phase_list=dataset['mp2rageme_phs'],
    unwrap=True, rescale_phs=True,
    save_data=True,
    output_dir=out_dir)
############################################################################
# .. tip:: in Nighres functions that have several outputs return a
# dictionary storing the different outputs. You can find the keys in the
# docstring by typing ``nighres.intensity.lcpca_denoising?`` or list
# them with ``denoising_results.keys()``
#
# .. tip: in Nighres modules check whether computations have been run or
# not, and skip them if the output files exist. You can force overwriting
# with the input option ``overwrite=True``
#
# .. tip: file names given to modules serve as base names for the output
# unless a specific name in provided by ``file_name=`` or ``file_names=[]``
# and the module always adds suffixes related to its outputs.
#
# To check if the denoising worked well we plot one of the original images
# and the corresponding denoised result. You can also open the images stored
# in ``out_dir`` in your favourite interactive viewer and scroll through the volume.
#
# Like Nilearn, we use Nibabel SpatialImage objects to pass data internally.
# Therefore, we can directly plot the outputs using `Nilearn plotting functions
# <http://nilearn.github.io/plotting/index.html#different-plotting-functions>`_
# .
# Compare one original volume against its denoised counterpart.
if not skip_plots:
    plotting.plot_anat(dataset['mp2rageme_mag'][0], vmax=100000.0, cmap='gray',cut_coords=[-75.0,90.0,-30.0])
    plotting.plot_anat(denoising_results['denoised'][0], vmax=100000.0, cmap='gray',cut_coords=[-75.0,90.0,-30.0])
############################################################################
# T1 quantitative mapping
# ----------------
# Now we can generate a T1 and R1 map (with R1=1/T1).
# Note that we could skip the denoising and use the original data directly.
# The T1 mapping requires several of the imaging parameters, which are often
# available from headers and/or json files generated when exporting images
# from the scanner into a standard format like Nifti.
# By default Nighres does not extract them automatically, they have to be
# explicitly provided.
#
# Note also that quantitative T1, T2* parameters have units: here we use
# seconds (and Hertz) as the basis. Sometimes people prefer milliseconds,
# so a x1000 scaling factor is expected.
# T1 mapping uses the first and second inversion, first echo, both magnitude and phase
# Denoised outputs presumably keep the input ordering: indices 0-4 are the
# magnitude volumes, 5-9 the phase volumes — confirm against
# lcpca_denoising's documentation.
inv1m = denoising_results['denoised'][0]
inv1p = denoising_results['denoised'][5]
inv2e1m = denoising_results['denoised'][1]
inv2e1p = denoising_results['denoised'][6]
# Sequence timing parameters are in seconds, flip angles in degrees.
t1mapping_results = nighres.intensity.mp2rage_t1_mapping(
    first_inversion=[inv1m,inv1p],
    second_inversion=[inv2e1m,inv2e1p],
    excitation_TR=[0.0062,0.0314],
    flip_angles=[7.0,6.0],
    inversion_TR=6.720,
    inversion_times=[0.607,3.855],
    N_excitations=150,
    save_data=True,
    output_dir=out_dir)
############################################################################
# Quantitative T2* fitting
# ----------------
# The relevant images for T2* fitting are the 4 echoes from the second inversion.
# As with T1 mapping, echo times (TE) are explicitly provided
# The remaining magnitude echoes of the second inversion; together with
# inv2e1m these are the four echoes used for the T2*/R2* fit.
inv2e2m = denoising_results['denoised'][2]
inv2e3m = denoising_results['denoised'][3]
inv2e4m = denoising_results['denoised'][4]
t2sfitting_results = nighres.intensity.flash_t2s_fitting(
    image_list=[inv2e1m,inv2e2m,inv2e3m,inv2e4m],
    te_list=[0.0030,0.0115,0.0200,0.0285],
    save_data=True,
    output_dir=out_dir)
############################################################################
# Semi-quantitative PD mapping
# ----------------
# PD mapping combines information from T1 and T2* mapping with the data from
# the first and second inversion. PD estimates are not normalized to a specific
# region value (e.g. ventricles, white matter...)
# NOTE(review): inversion_times (0.670, 3.85) and flip_angles (4.0, 4.0)
# here differ from the T1-mapping call above (0.607, 3.855 / 7.0, 6.0) —
# confirm which set matches the actual acquisition protocol.
pdmapping_results = nighres.intensity.mp2rageme_pd_mapping(
    first_inversion=[inv1m,inv1p],
    second_inversion=[inv2e1m,inv2e1p],
    t1map=t1mapping_results['t1'],
    r2smap=t2sfitting_results['r2s'],
    echo_times=[0.0030,0.0115,0.0200,0.0285],
    inversion_times=[0.670, 3.85],
    flip_angles=[4.0, 4.0],
    inversion_TR=6.72,
    excitation_TR=[0.0062, 0.0314],
    N_excitations=150,
    save_data=True,
    output_dir=out_dir)
############################################################################
# Quantitative susceptibility mapping (QSM)
# ----------------
# Note that this data can also be used to obtain QSM, using the phase data
# from the second inversion. Nighres does not include a QSM reconstruction
# technique, but we have used successfully TGV-QSM, which has the advantage
# of being a python-based software tool (other methods may be superior, but
# few run as standalone or python scripts).
############################################################################
# Skull stripping
# ----------------
# Finally, we perform skull stripping, and apply it to all the quantitative maps.
# Only the second inversion, first echo image is required to calculate the brain mask.
# But if we input the T1map and/or T1w image as well, they will help refine the CSF
# boundary.
# Brain mask from the second-inversion image; the T1 map is passed as an
# extra contrast to help refine the CSF boundary.
skullstripping_results = nighres.brain.intensity_based_skullstripping(
    main_image=inv2e1m,
    extra_image=t1mapping_results['t1'],
    save_data=True,
    output_dir=out_dir)
############################################################################
# Masking, Thresholding, and Reorientation
# ----------------
# Here we use nibabel and numpy routines to perform these simple steps,
# rather than having a dedicated nighres module
# Note that thresholds have been set for 7T qMRI values, and would need
# to be updated for other field strengths.
# Note also that the PD map is normalized to the mean, after inhomogeneity correction
brainmask_file = skullstripping_results['brain_mask']
brainmask = nighres.io.load_volume(brainmask_file).get_fdata()
# R1: mask, clip at 3.0 (units of 1/s; ceiling chosen for 7T data),
# reorient to canonical, and save once (skipped if the file exists).
r1strip_file = t1mapping_results['r1'].replace('.nii','_brain.nii')
if not os.path.isfile(r1strip_file):
    print("Mask qR1")
    r1 = nighres.io.load_volume(t1mapping_results['r1'])
    r1strip = nibabel.Nifti1Image(numpy.minimum(3.0,brainmask*r1.get_fdata()), r1.affine, r1.header)
    r1strip = nibabel.as_closest_canonical(r1strip)
    nighres.io.save_volume(r1strip_file, r1strip)
# R2*: same treatment, clamped to the [0, 200] 1/s range.
r2strip_file = t2sfitting_results['r2s'].replace('.nii','_brain.nii')
if not os.path.isfile(r2strip_file):
    print("Mask qR2s")
    r2 = nighres.io.load_volume(t2sfitting_results['r2s'])
    r2strip = nibabel.Nifti1Image(numpy.maximum(0.0,numpy.minimum(200.0,brainmask*r2.get_fdata())), r2.affine, r2.header)
    r2strip = nibabel.as_closest_canonical(r2strip)
    nighres.io.save_volume(r2strip_file, r2strip)
# for PD, we also need to run some inhomogeneity correction with N4
pdn4_file = pdmapping_results['pd'].replace('.nii','_n4.nii')
if not os.path.isfile(pdn4_file):
    print("Correct inhomogeneities for PD")
    img = ants.image_read(pdmapping_results['pd'])
    pd_n4 = ants.n4_bias_field_correction(img, mask=brainmask_file)
    ants.image_write(pd_n4, pdn4_file)
# PD: normalize to the mean over positive voxels, then mask and clip at 8x.
pdstrip_file = pdn4_file.replace('.nii','_brain.nii')
if not os.path.isfile(pdstrip_file):
    print("Mask PD")
    pd = nighres.io.load_volume(pdn4_file)
    pddata = pd.get_fdata()
    pdmean = numpy.mean(pddata[pddata>0])
    pdstrip = nibabel.Nifti1Image(numpy.minimum(8.0,brainmask*pddata/pdmean), pd.affine, pd.header)
    pdstrip = nibabel.as_closest_canonical(pdstrip)
    nighres.io.save_volume(pdstrip_file, pdstrip)
############################################################################
# And we are done! Let's have a look at the final maps:
# Display the three masked quantitative maps.
if not skip_plots:
    plotting.plot_anat(r1strip_file, vmax=2.0, cut_coords=[-75.0,90.0,-30.0], cmap='gray')
    plotting.plot_anat(r2strip_file, vmax=100.0, cut_coords=[-75.0,90.0,-30.0], cmap='gray')
    plotting.plot_anat(pdstrip_file, vmax=5.0, cut_coords=[-75.0,90.0,-30.0], cmap='gray')
#############################################################################
# If the example is not run in a jupyter notebook, render the plots:
if not skip_plots:
    plotting.show()
#############################################################################
# References
# -----------
# .. [1] Alkemade, A., Mulder, M.J., Groot, J.M., Isaacs, B.R., van Berendonk, N., Lute, N.,
# Isherwood, S.J., Bazin, P.-L., Forstmann, B.U., 2020. The Amsterdam Ultra-high field
# adult lifespan database (AHEAD): A freely available multimodal 7 Tesla submillimeter
# magnetic resonance imaging database. NeuroImage 221, 117200.
# https://doi.org/10.1016/j.neuroimage.2020.117200
# .. [2] Bazin, P.-L., Alkemade, A., van der Zwaag, W., Caan, M., Mulder, M., Forstmann,
# B.U., 2019. Denoising High-Field Multi-Dimensional MRI With Local Complex PCA.
# Frontiers in Neuroscience 13. https://doi.org/10.3389/fnins.2019.01066
# .. [3] Caan, M.W.A., Bazin, P., Marques, J.P., Hollander, G., Dumoulin, S.O., Zwaag, W., 2019.
# MP2RAGEME: T1, T2*, and QSM mapping in one sequence at 7 tesla.
# Human Brain Mapping 40, 1786–1798. https://doi.org/10.1002/hbm.24490
# .. [4] Bazin, P.-L., Weiss, M., Dinse, J., Schäfer, A., Trampel, R., Turner, R., 2014.
# A computational framework for ultra-high resolution cortical segmentation at 7Tesla.
# NeuroImage 93, 201–209. https://doi.org/10.1016/j.neuroimage.2013.03.077
|
996,213 | 16775618c9a7f80a355a1293528fed7541b4b81a | #
# Solved problems in Geostatistics
#
# ------------------------------------------------
# Script for lesson 4.1
# "Impact of the central limit theorem"
# ------------------------------------------------
from numpy import *
from geo import *
from matplotlib import *
from scipy import *
from pylab import *
import sys
sys.path.append(r'../shared')
from statistics import *
import matplotlib.pyplot
# ------------------------------------------------
# Problem:
#
# (1) Verify that the sum of independent random variables tends toward a normal distribuition as predicted by the CLT. For this problem, consider 10 random variables, Xi, i = 1, ..., n, with a uniform probability distrubution between 0 and 1, and create a RV, S, that is the sum of these 10 uniform RVs.
# Generate several realizations of S and calculate its mean and variance
# ------------------------------------------------
# Function to generate random uniform values
# Function to generate random uniform values
def get_random_uniform_values(n):
    """Return a length-n float array of i.i.d. samples from Uniform(0, 1).

    Fixed: the loop used ``xrange``, which only exists on Python 2;
    ``range`` behaves identically here and works on both Python 2 and 3.
    """
    x = zeros((n), order='F', dtype=float)
    for i in range(n):
        x[i] = uniform(0, 1)
    return x
# Since RVs are independent we can calculate variance simplier
def calc_var_sum(std_dev, n):
    """Square the given standard deviation and scale it by 1/n.

    This is the normalization the script applies to its spread estimate of
    the summed realizations.
    """
    variance = std_dev * std_dev
    return variance / n
# NOTE(review): this script is Python 2 (print statements, xrange), and
# ``calc_quadr_var`` comes from the shared ``statistics`` module on
# sys.path, which shadows the stdlib module of the same name — confirm.
# Number of random variables summed into each realization of S
n = 10
# Number of S realizations
S_num = 100
# Generate one realization of random variables and check mean/var
random_variables = get_random_uniform_values(n)
print "---------------------------------------------------"
rand_mean = random_variables.mean()
rand_var = random_variables.var()
print "One random realization mean is: ", rand_mean
print "One random realization var is: ", rand_var
print "---------------------------------------------------"
# Calculating summary mean/var for S realizations
# Vector with S realizations: summary_vec[j] is the sum of n fresh uniforms
summary_vec = zeros( (S_num), order = 'F', dtype = float)
for j in xrange(S_num):
    random_variables = get_random_uniform_values(n)
    for i in xrange(n):
        summary_vec[j] = summary_vec[j] + random_variables[i]
sum_mean = summary_vec.mean()
sum_var = calc_var_sum(calc_quadr_var(summary_vec, sum_mean),n)
print "Summary mean is: ", sum_mean
print "Summary variance is:", sum_var
print "---------------------------------------------------"
print "plotting histograms..."
# Histogram of random variables (one uniform realization)
figure()
hist(random_variables)
xlabel("Random variables")
ylabel("Number")
title("Histogram of random variables")
# Histogram of the summed realizations: by the CLT this should look normal
figure()
hist(summary_vec)
xlabel("Summary random variables")
ylabel("Number")
title("Histogram of summary random variables statistics")
show()
|
996,214 | ef4953bc2e368c188a200bb41b78b905460b9ef9 | Name = 'FlipImageDataAxii'
# Metadata consumed, presumably, by a ParaView python-filter plugin
# generator (one module-level variable per plugin attribute) — confirm.
Label = 'Flip ImageData Axii'
FilterCategory = 'CSM Geophysics Filters'
Help = 'This filter will flip ImageData on any of the three cartesian axii. A checkbox is provided for each axis on which you may desire to flip the data.'
NumberOfInputs = 1
InputDataType = 'vtkImageData'
OutputDataType = 'vtkImageData'
ExtraXml = ''
# One checkbox per cartesian axis, exposed as filter properties
Properties = dict(
    reverse_x_dir=False,
    reverse_y_dir=False,
    reverse_z_dir=False,
)
PropertiesHelp = dict(
    reverse_x_dir="Reverse all data along the X-axis",
    reverse_y_dir="Reverse all data along the Y-axis",
    reverse_z_dir="Reverse all data along the Z-axis",
)
def RequestData():
    # Entry point executed by ParaView; ``self``, ``vtk`` and the Properties
    # above (reverse_*_dir) are injected into this scope by the
    # programmable-filter wrapper — they are not defined in this file.
    pdi = self.GetInput() # vtkImageData
    image = self.GetOutput() # vtkImageData
    # Make user selection iterable
    dirs = [reverse_x_dir, reverse_y_dir, reverse_z_dir]
    # Copy over input to output to be flipped around
    # Deep copy keeps us from messing with the input data
    image.DeepCopy(pdi)
    # Iterate over all array in the PointData
    for j in range(image.GetPointData().GetNumberOfArrays()):
        # Swap Scalars with all Arrays in PointData so that all data gets filtered
        # NOTE(review): after this loop the active-scalars designation may
        # differ from the input's — confirm that is acceptable downstream.
        scal = image.GetPointData().GetScalars()
        arr = pdi.GetPointData().GetArray(j)
        image.GetPointData().SetScalars(arr)
        image.GetPointData().AddArray(scal)
    for i in range(3):
        # Rotate ImageData on each axis if needed
        # Go through each axis and rotate if needed
        # Note: ShallowCopy is necessary!!
        if dirs[i]:
            flipper = vtk.vtkImageFlip()
            flipper.SetInputData(image)
            flipper.SetFilteredAxis(i)
            flipper.Update()
            flipper.UpdateWholeExtent()
            image.ShallowCopy(flipper.GetOutput())
|
996,215 | cc03da02349f49ec8d295701660536697ec29bd7 | #Creating Node class for doubly linked list
class DNode():
    """Node of a doubly linked list: a payload plus west/east neighbour links."""

    def __init__(self, data, west=None, east=None):
        # Store payload and both neighbour links in one shot.
        self.data, self.west, self.east = data, west, east
class TrainRoute:
    """A train line modelled as a doubly linked list of station nodes.

    Nodes are chained west-ward from ``head`` (first station added) to
    ``tail`` (last station added); ``stations`` keeps the names in insertion
    order for display.  The class also drives a small console dialogue that
    tells a rider which train to take between two stations.

    Fixed: ``inquire`` previously called ``route.routeSuggestion`` — relying
    on a module-level global named ``route`` — instead of ``self``.
    """

    def __init__(self):
        self.head = None
        self.tail = None
        # Record all the station names on this route
        self.stations = []

    def add(self, station):
        """Append ``station`` (a DNode-like object) at the western end."""
        if self.head is None:
            # First station becomes both endpoints, linked to itself.
            # NOTE(review): these self-links are overwritten by later adds;
            # a one-station route would loop forever in the search methods.
            self.head = station
            self.tail = station
            self.head.west = self.tail
            self.tail.east = self.head
        else:
            self.tail.west = station
            station.east = self.tail
            self.tail = station
        self.stations.append(station.data)

    def printRoute(self):
        """List all stations available for the user to take."""
        print('------------------------------------------------')
        print('---'.join(self.stations))
        print('------------------------------------------------')

    def inquire(self):
        """Console loop: ask for origin and destination, print a suggestion,
        and repeat for as long as the user answers 'y'."""
        fromStation = self.currentStation()
        toStation = self.desStation()
        # The route suggestion will be printed to the console
        self.routeSuggestion(fromStation, toStation)
        # Ask user if he/she wishes to make another inquiry
        toContinue = input("Do you want to continue[y/n]? ")
        # letter 'y' is case-insensitive here
        if toContinue == 'y' or toContinue == 'Y':
            self.inquire()

    def currentStation(self):
        """Prompt until a valid station name is entered; return its 1-based
        position counted from the head of the route."""
        currStation = input("Which station are you in now? (Enter 'help' to view all the stations) \n")
        if currStation == 'help':
            self.printRoute()
            return self.currentStation()
        if currStation == '':
            return self.currentStation()
        position = 1
        current = self.head
        if self.head is None:
            print("The route is empty!")
            return
        while current is not None:
            if current.data == currStation:
                return position
            current = current.west
            position = position + 1
        # Keep prompting the user until a valid station name is entered
        print("\nThe station is not in the train route, please enter a valid station name:")
        return self.currentStation()

    def desStation(self):
        """Prompt until a valid destination name is entered; return its
        1-based position counted from the head of the route.

        NOTE(review): the prompt advertises 'X' to quit, but no quit
        handling is implemented.
        """
        desStation = input("Which station do you want to go? (Enter 'help' to view all the stations, or Enter 'X' to quit the program): \n")
        if desStation == 'help':
            self.printRoute()
            return self.desStation()
        if desStation == '':
            return self.desStation()
        position = 1
        current = self.head
        if self.head is None:
            print("The route is empty")
            return
        while current is not None:
            if current.data == desStation:
                return position
            current = current.west
            position = position + 1
        # Keep prompting the user until a valid station name is entered
        print("\nThe station is not in the train route, please enter a valid station name:")
        return self.desStation()

    def routeSuggestion(self, fromStation, toStation):
        """Print which bound to take and how many stations to skip; both
        arguments are 1-based positions as returned by the prompts above."""
        if fromStation > toStation:
            print('\nGo by West Bound Train, skip {} stations and get off at Station {}.'.format(fromStation - toStation - 1, fromStation - toStation))
        elif fromStation < toStation:
            print('\nGo by East Bound Train, skip {} stations and get off at Station {}.'.format(toStation - fromStation - 1, toStation - fromStation))
        else:
            print('\nYou\'re already at your destination.')
# Allows code to run as a standalone program
# instead of through application
if __name__ == "__main__":
    # Initialize a train route
    route = TrainRoute()
    # Add all the stations along the route, west to east (presumably the
    # London Underground Central line — confirm)
    route.add(DNode('Ealing Broadway'))
    route.add(DNode('West Acton'))
    route.add(DNode('North Acton'))
    route.add(DNode('East Acton'))
    route.add(DNode('White City'))
    route.add(DNode('Shepherds Bush'))
    route.add(DNode('Holland Park'))
    route.add(DNode('Notting Hill Gate'))
    route.add(DNode('Queensway'))
    route.add(DNode('Lancaster'))
    route.add(DNode('Marble Arch'))
    route.add(DNode('Bond Street'))
    route.add(DNode('Oxford Circus'))
    route.add(DNode('Tottenham Court Road'))
    route.add(DNode('Holborn'))
    route.add(DNode('Chancery Lane'))
    route.add(DNode('St Pauls'))
    route.add(DNode('Bank'))
    route.add(DNode('Liverpool Street'))
    route.add(DNode('Bethnal Green'))
    route.add(DNode('Mile End'))
    # Start the interactive origin/destination dialogue
    route.inquire()
|
996,216 | 304628cfa8626e7c94693008cc2afdc741e29212 | import networkx as nx
import matplotlib.pyplot as plt
import random
'''
第三章 小世界理论
生成小世界模型 及 各种中心度量
'''
def generate_regular_network(n, k):
    """Build the ring lattice that seeds the Watts-Strogatz model.

    Each of the ``n`` nodes is connected to its ``k`` nearest neighbours
    (``k // 2`` on each side of the ring).

    Fixed: the outer loop was ``range(20)``, silently ignoring the ``n``
    parameter; it now iterates over all ``n`` nodes (identical output for
    the existing n=20 call).
    """
    half_k = k // 2
    edges = []
    for i in range(n):
        for j in range(i - half_k, i):
            if j < 0:
                # Wrap around the ring for the first nodes
                edges.append((i, j + n))
            else:
                edges.append((i, j))
    G = nx.Graph()
    G.add_edges_from(edges)
    return G
    # nx.draw(G)
    # plt.show()
#nx.draw(G)
#plt.show()
def generate_WS_Model(n,k,p):
    """Watts-Strogatz small-world model: start from a regular ring lattice
    and rewire each edge with probability ``p``, then print the degree,
    closeness and betweenness centrality rankings of the result."""
    G = generate_regular_network(n,k)
    # All candidate edges: no self-loops; undirected, so (a, b) == (b, a)
    all_possible_edges = []
    for a in range(n):
        for b in range(a):
            all_possible_edges.append((b,a))
    edges = list(G.edges())
    for idx in range(len(edges)):
        # Rewire this edge with probability p to a random edge not yet used
        if random.random()<p:
            new_edge = all_possible_edges[random.randint(0,len(all_possible_edges)-1)]
            # NOTE(review): membership compares ordered tuples only; a
            # duplicate of an edge stored as (b, a) would slip through
            # (nx.Graph would then silently merge it).
            while new_edge in edges:
                new_edge = all_possible_edges[random.randint(0,len(all_possible_edges)-1)]
            edges[idx] = new_edge
    G_new = nx.Graph()
    G_new.add_edges_from(edges)
    # Centrality measures, printed from most to least central
    centralityDict = nx.degree_centrality(G_new)
    closenessDict = nx.closeness_centrality(G_new)
    betweennessDict = nx.betweenness_centrality(G_new)
    print(sorted(centralityDict.items(), key=lambda x: x[1], reverse=True))
    print(sorted(closenessDict.items(), key=lambda x: x[1], reverse=True))
    print(sorted(betweennessDict.items(), key=lambda x: x[1], reverse=True))
    #nx.draw(G_new,with_labels=True)
    #plt.show()

# Demo run: 20 nodes, 4 neighbours each, rewiring probability 0.8
generate_WS_Model(20,4,0.8)
|
996,217 | be28de2a7031628ec7544aad26715a0c8e154a4d | from django.shortcuts import render, redirect
from authy.forms import SignupForm, ChangePasswordForm
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth import update_session_auth_hash
# Create your views here.
def Signup(request):
    """Register a new user from SignupForm and redirect to 'index'.

    GET renders an empty form; a valid POST creates the account via
    ``create_user``.  On an invalid POST the bound form (with its errors)
    is re-rendered, since ``form`` keeps the POST data.
    """
    if request.method == 'POST':
        form = SignupForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            email = form.cleaned_data.get('email')
            password = form.cleaned_data.get('password')
            User.objects.create_user(username=username, email=email, password=password)
            return redirect('index')
    else:
        form = SignupForm()
    context = {
        'form':form,
    }
    return render(request, 'signup.html', context)
@login_required
def PasswordChange(request):
    """Let the logged-in user set a new password without being logged out."""
    user = request.user
    if request.method == 'POST':
        form = ChangePasswordForm(request.POST)
        if form.is_valid():
            new_password = form.cleaned_data.get('new_password')
            user.set_password(new_password)
            user.save()
            # Keep the current session valid after the password change
            update_session_auth_hash(request, user)
            return redirect('change_password_done')
    else:
        # NOTE(review): ``instance=`` is a ModelForm argument — confirm
        # ChangePasswordForm is a ModelForm, otherwise this raises TypeError.
        form = ChangePasswordForm(instance=user)
    context = {
        'form':form,
    }
    return render(request, 'change_password.html', context)
def PasswordChangeDone(request):
    """Render the static 'password changed' confirmation page."""
    return render(request, 'change_password_done.html')
###
|
996,218 | 9232c4e80fb3d084c74c1cef6eb5d49a5948a0e7 | import math
import numpy as np
from numpy.random import normal
from hyperspy.misc.utils import isiterable
from atomap.sublattice import Sublattice
from atomap.atom_position import Atom_Position
from atomap.atom_lattice import Atom_Lattice
class MakeTestData(object):
    def __init__(self, image_x, image_y, sublattice_generate_image=True):
        """
        Class for generating test datasets of atomic resolution
        STEM images.
        Parameters
        ----------
        image_x, image_y : int
            Size of the image data.
        sublattice_generate_image : bool, default True
            When generating sublattices, a raster image is generated to
            complement the atom position objects (found in sublattice.image).
            For large amounts of atom positions, this can take a very long
            time. If sublattice_generate_image is False, this image will not
            be generated. Useful for generating sublattice objects for testing
            quicker, when only the atom positions themselves are needed.
        Attributes
        ----------
        signal : HyperSpy 2D Signal
        sublattice : Atomap Sublattice
        atom_lattice : Atomap Atom_Lattice
        gaussian_list : list of 2D Gaussians objects
        Examples
        --------
        >>> from atomap.testing_tools import MakeTestData
        >>> test_data = MakeTestData(200, 200)
        >>> test_data.add_atom(x=10, y=20)
        >>> test_data.signal.plot()
        Adding many atoms
        >>> test_data = MakeTestData(200, 200)
        >>> import numpy as np
        >>> x, y = np.mgrid[0:200:10j, 0:200:10j]
        >>> x, y = x.flatten(), y.flatten()
        >>> test_data.add_atom_list(x, y)
        >>> test_data.signal.plot()
        Adding many atoms with different parameters
        >>> test_data = MakeTestData(200, 200)
        >>> x, y = np.mgrid[0:200:10j, 0:200:10j]
        >>> x, y = x.flatten(), y.flatten()
        >>> sx, sy = np.random.random(len(x)), np.random.random(len(x))
        >>> A, r = np.random.random(len(x))*10, np.random.random(len(x))*3.14
        >>> test_data.add_atom_list(x, y, sigma_x=sx, sigma_y=sy,
        ...     amplitude=A, rotation=r)
        >>> test_data.signal.plot()
        The class also generates a sublattice object
        >>> test_data = MakeTestData(200, 200)
        >>> import numpy as np
        >>> x, y = np.mgrid[0:200:10j, 0:200:10j]
        >>> x, y = x.flatten(), y.flatten()
        >>> test_data.add_atom_list(x, y)
        >>> test_data.sublattice.plot()
        Also Atom_Lattice objects
        >>> atom_lattice = test_data.atom_lattice
        >>> atom_lattice.plot()
        Generating a sublattice with 22500 atoms quickly, by not
        generating the image
        >>> test_data = MakeTestData(200, 200, sublattice_generate_image=False)
        >>> import numpy as np
        >>> x, y = np.mgrid[0:1000:150j, 0:1000:150j]
        >>> x, y = x.flatten(), y.flatten()
        >>> test_data.add_atom_list(x, y)
        >>> sublattice = test_data.sublattice
        """
        self.data_extent = (image_x, image_y)
        self._image_noise = False
        self._sublattice_generate_image = sublattice_generate_image
        # Internal scratch sublattice; added atoms accumulate in its atom_list
        # and the public properties build fresh objects from it on demand.
        self.__sublattice = Sublattice([], np.zeros((2, 2)))
        self.__sublattice.atom_list = []
    @property
    def signal(self):
        # Rasterize all atoms into a model image; overlay stored noise if set.
        signal = self.__sublattice.get_model_image(
                image_shape=self.data_extent, show_progressbar=False)
        if self._image_noise is not False:
            signal.data += self._image_noise
        return signal
    @property
    def gaussian_list(self):
        # One 2D Gaussian component per added atom.
        gaussian_list = []
        for atom in self.__sublattice.atom_list:
            gaussian_list.append(atom.as_gaussian())
        return gaussian_list
    @property
    def sublattice(self):
        # Build a fresh Sublattice with copies of the atoms, so callers cannot
        # mutate this object's internal state through the returned sublattice.
        atom_list = []
        for atom in self.__sublattice.atom_list:
            new_atom = Atom_Position(
                    x=atom.pixel_x, y=atom.pixel_y,
                    sigma_x=atom.sigma_x, sigma_y=atom.sigma_y,
                    rotation=atom.rotation, amplitude=atom.amplitude_gaussian)
            atom_list.append(new_atom)
        if self._sublattice_generate_image:
            image = self.signal.data
        else:
            # Skip the (slow) rasterization; keep a correctly shaped blank image.
            image = np.zeros(self.data_extent[::-1])
        sublattice = Sublattice([], image)
        sublattice.atom_list = atom_list
        return sublattice
    @property
    def atom_lattice(self):
        # Wrap the generated sublattice in a single-sublattice Atom_Lattice.
        sublattice = self.sublattice
        atom_lattice = Atom_Lattice(image=sublattice.image,
                sublattice_list=[sublattice])
        return atom_lattice
    def add_atom(self, x, y, sigma_x=1, sigma_y=1, amplitude=1, rotation=0):
        """
        Add a single atom to the test data.
        Parameters
        ----------
        x, y : numbers
            Position of the atom.
        sigma_x, sigma_y : numbers, default 1
        amplitude : number, default 1
        rotation : number, default 0
        Examples
        --------
        >>> from atomap.testing_tools import MakeTestData
        >>> test_data = MakeTestData(200, 200)
        >>> test_data.add_atom(x=10, y=20)
        >>> test_data.signal.plot()
        """
        atom = Atom_Position(
                x=x, y=y, sigma_x=sigma_x, sigma_y=sigma_y,
                rotation=rotation, amplitude=amplitude)
        self.__sublattice.atom_list.append(atom)
    def add_atom_list(
            self, x, y, sigma_x=1, sigma_y=1, amplitude=1, rotation=0):
        """
        Add several atoms to the test data.
        Parameters
        ----------
        x, y : iterable
            Position of the atoms. Must be iterable, and have the same size.
        sigma_x, sigma_y : number or iterable, default 1
            If number: all the atoms will have the same sigma.
            Use iterable for setting different sigmas for different atoms.
            If iterable: must be same length as x and y iterables.
        amplitude : number or iterable, default 1
            If number: all the atoms will have the same amplitude.
            Use iterable for setting different amplitude for different atoms.
            If iterable: must be same length as x and y iterables.
        rotation : number or iterable, default 0
            If number: all the atoms will have the same rotation.
            Use iterable for setting different rotation for different atoms.
            If iterable: must be same length as x and y iterables.
        Examples
        --------
        >>> from atomap.testing_tools import MakeTestData
        >>> test_data = MakeTestData(200, 200)
        >>> import numpy as np
        >>> x, y = np.mgrid[0:200:10j, 0:200:10j]
        >>> x, y = x.flatten(), y.flatten()
        >>> test_data.add_atom_list(x, y)
        >>> test_data.signal.plot()
        """
        if len(x) != len(y):
            raise ValueError("x and y needs to have the same length")
        # Broadcast each scalar parameter to a per-atom list; validate lengths
        # for parameters that were passed as iterables.
        if isiterable(sigma_x):
            if len(sigma_x) != len(x):
                raise ValueError("sigma_x and x needs to have the same length")
        else:
            sigma_x = [sigma_x]*len(x)
        if isiterable(sigma_y):
            if len(sigma_y) != len(y):
                raise ValueError("sigma_y and x needs to have the same length")
        else:
            sigma_y = [sigma_y]*len(x)
        if isiterable(amplitude):
            if len(amplitude) != len(x):
                raise ValueError(
                        "amplitude and x needs to have the same length")
        else:
            amplitude = [amplitude]*len(x)
        if isiterable(rotation):
            if len(rotation) != len(x):
                raise ValueError(
                        "rotation and x needs to have the same length")
        else:
            rotation = [rotation]*len(x)
        iterator = zip(x, y, sigma_x, sigma_y, amplitude, rotation)
        for tx, ty, tsigma_x, tsigma_y, tamplitude, trotation in iterator:
            self.add_atom(tx, ty, tsigma_x, tsigma_y, tamplitude, trotation)
    def add_image_noise(
            self, mu=0, sigma=0.005, only_positive=False, random_seed=None):
        """
        Add white noise to the image signal. The noise component is Gaussian
        distributed, with a default expectation value at 0, and a sigma of
        0.005. If only_positive is set to True, the absolute value of the
        noise is added to the signal. This can be useful for avoiding negative
        values in the image signal.
        Parameters
        ----------
        mu : int, float
            The expectation value of the Gaussian distribution, default is 0
        sigma : int, float
            The standard deviation of the Gaussian distribution, default
            is 0.005.
        only_positive : bool
            Default is False. If True, the absolute value of the noise is added
            to the image signal.
        random_seed : int, optional
            Set the random seed of the noise, which gives the same image
            noise each time. Useful for testing and comparing images.
        Example
        -------
        >>> from atomap.testing_tools import MakeTestData
        >>> test_data = MakeTestData(300, 300)
        >>> import numpy as np
        >>> x, y = np.mgrid[10:290:15j, 10:290:15j]
        >>> test_data.add_atom_list(x.flatten(), y.flatten(), sigma_x=3,
        ...     sigma_y=3)
        >>> test_data.add_image_noise()
        >>> test_data.signal.plot()
        Using a specific random seed
        >>> test_data.add_image_noise(random_seed=0)
        """
        if random_seed is not None:
            np.random.seed(random_seed)
        # The noise array is stored (not applied); the signal property adds it
        # to every image it renders from now on.
        shape = self.signal.axes_manager.shape
        noise = normal(mu, sigma, shape)
        if only_positive:
            self._image_noise = np.absolute(noise)
        else:
            self._image_noise = noise
def make_vector_test_gaussian(x, y, standard_deviation=1, n=30):
    """Draw n points from an isotropic 2D Gaussian centred on (x, y).

    Returns an (n, 2) numpy array of [x, y] samples.
    """
    samples = [[normal(x, scale=standard_deviation),
                normal(y, scale=standard_deviation)]
               for _ in range(n)]
    return np.array(samples)
def make_nn_test_dataset(xN=3, yN=3, xS=9, yS=9, std=0.3, n=50):
    """Scatter Gaussian clusters on every grid cell around (0, 0).

    One cluster of n points is placed at each (ix*xS, iy*yS) for ix in
    [-xN, xN] and iy in [-yN, yN], excluding the origin cell. Returns a
    stacked (num_clusters * n, 2) array.
    """
    point_list = np.array([[], []]).T
    for ix in range(-xN, xN + 1):
        for iy in range(-yN, yN + 1):
            if ix == 0 and iy == 0:
                continue
            cluster = make_vector_test_gaussian(
                ix * xS, iy * yS, standard_deviation=std, n=n)
            point_list = np.vstack((point_list, cluster))
    return point_list
def find_atom_position_match(component_list, atom_list, delta=3, scale=1.):
    """Pair each atom with the first component lying within delta of it.

    Atom pixel coordinates are multiplied by scale before comparison.
    Returns a list of [component, atom] pairs; atoms with no component
    inside the delta radius are omitted.
    """
    matches = []
    for atom in atom_list:
        ax = atom.pixel_x * scale
        ay = atom.pixel_y * scale
        for component in component_list:
            distance = math.hypot(ax - component.centre_x.value,
                                  ay - component.centre_y.value)
            if distance < delta:
                matches.append([component, atom])
                break  # first match wins; move on to the next atom
    return matches
def get_fit_miss_array(match_list):
    """Return per-match fit residuals as an array of (dx, dy, distance) rows.

    Each entry of match_list is a (component, atom) pair; dx/dy are the
    component-centre minus atom-pixel offsets.
    """
    rows = []
    for component, atom in match_list:
        dx = component.centre_x.value - atom.pixel_x
        dy = component.centre_y.value - atom.pixel_y
        rows.append([dx, dy, math.hypot(dx, dy)])
    return np.array(rows)
|
996,219 | ed53b4a0c85a80ed5d122dba00289cf2d69fd3f1 | class User:
def __init__(self, name, email):
self.accounts = []
self.name = name
self.email = email
def make_new_account(self, int_rate=0.02, balance=0):
new_acc = BankAccount(int_rate, balance)
self.accounts.append(new_acc)
def transfer_money(self, other_user, amount):
self.account.balance -= amount
other_user.account.balance += amount
class BankAccount:
    """A simple interest-bearing account.

    Every instance is recorded in the class-level all_inst registry, and
    mutator methods return self so calls can be chained.
    """
    all_inst = []

    def __init__(self, int_rate=0.01, balance=0):
        self.int_rate = int_rate
        self.balance = balance
        BankAccount.all_inst.append(self)

    def deposit(self, amount):
        """Add amount to the balance; chainable."""
        self.balance = self.balance + amount
        return self

    def withdraw(self, amount):
        """Subtract amount, charging a $5 fee first when funds are short; chainable."""
        if amount > self.balance:
            self.balance = self.balance - 5  # overdraft penalty
        self.balance = self.balance - amount
        return self

    def display_account_info(self):
        """Print the current balance; chainable."""
        print(f"Balance: ${self.balance}")
        return self

    def yield_interest(self):
        """Apply one interest period to a positive balance; chainable."""
        if self.balance > 0:
            self.balance = self.balance * (1 + self.int_rate)
        return self

    @classmethod
    def inst_print(cls):
        """Print the balance of every account ever created."""
        for account in cls.all_inst:
            print(account.balance)
# Demo: open two accounts for one user, then deposit into the second and
# print its balance (methods chain because they return self).
Joe = User("Joe","joemama")
Joe.make_new_account(0.1,100)
Joe.make_new_account(.2,2000)
print(Joe.accounts)
Joe.accounts[1].deposit(100).display_account_info()
# self.account = BankAccount(int_rate=0.02,balance=0)
# def make_deposit(self, amount): # takes an argument that is the amount of the deposit
# self.account.balance += amount # the specific user's account increases by the amount of the value received
# def make_withdrawal(self, amount):
# self.account.balance -= amount
# def display_user_balance(self):
# print(f"User: {self.name}, Balance: ${self.account.balance}")
# acc1 = BankAccount()
# acc2 = BankAccount()
# acc1.deposit(100).deposit(50).deposit(25).withdraw(100).display_account_info()
# acc2.deposit(600).deposit(50).withdraw(100).withdraw(100).withdraw(100).withdraw(100).display_account_info()
# # NINJA BONUS: use a classmethod to print all instances of a Bank Account's info
# BankAccount.inst_print() |
996,220 | 1224a059c2f9125571564bf94870b250eb3203d6 | #!/usr/bin/python
#coding: utf-8
import httplib
import MySQLdb
import time
import sys
import copy
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.utils import COMMASPACE,formatdate
import traceback
execfile("/mnt/xvdb/scripts/send_mail.py")
stock_dict = {}
yes_stock_dict = {}
quick_check_dict = {}
zhen_dict = {}
def buildDict():
    """Populate the module-level lookup dicts from the scan log files.

    zhen_dict maps a stock id (first token) to its full line from zhen.log;
    every stock id in da_ban.log (lines starting with 'sz' or 'sh') is
    flagged in both yes_stock_dict and quick_check_dict.
    """
    # with-blocks close the files; the original leaked both handles and read
    # whole files into memory via readlines().
    with open('/mnt/xvdb/scripts/zhen.log') as zhen_file:
        for line in zhen_file:
            line = line.strip()
            sp = line.split(' ')
            zhen_dict[sp[0]] = line
    with open('/mnt/xvdb/scripts/da_ban.log') as daban_file:
        for line in daban_file:
            line = line.strip()
            if line.startswith('sz') or line.startswith('sh'):
                yes_stock_dict[line] = 1
                quick_check_dict[line] = 1
    # (A previously commented-out variant additionally filtered da_ban ids by
    # the real_ban value looked up in zhen_dict; removed as dead code.)
def isBan(yesPrice, curPrice):
    """Return True when curPrice sits in the tolerance band around the +10% limit-up price.

    The band is (hi - 0.005, hi + 0.006) where hi = yesPrice * 1.1; a zero
    yesterday-price always yields False.
    """
    if yesPrice == 0:
        return False
    diff = curPrice - yesPrice * 1.1
    return -0.005 < diff < 0.006
def quickCheck():
    """Poll Sina quotes for every watched stock and mail an alert on a >7% rise.

    Python 2 code. Queries hq.sinajs.cn in batches of at most 900 ids per
    request, parses each 'var hq_str_<id>="col0,col1,..."' line, and for any
    stock up more than 7% versus yesterday's close sends a mail and removes
    the id from quick_check_dict so it only alerts once.
    """
    conn=httplib.HTTPConnection('hq.sinajs.cn')
    stocks = ""
    stock_ids = quick_check_dict.keys()
    for i in range(0, len(stock_ids)):
        stocks = stocks + stock_ids[i] + ","
        # Flush a request every 900 ids (server-side URL limit) and at the end.
        if (i+1) % 900 == 0 or i == len(stock_ids) - 1:
            conn.request('GET','/?list=' + stocks)
            stocks = ""
            result = conn.getresponse()
            resultStatus = result.status
            if resultStatus != 200:
                sys.stderr.write("Error sina server!\n")
                time.sleep(1)
                continue
            # Sina serves GBK; re-encode to UTF-8 before splitting per stock.
            contents = result.read().decode('GBK').encode("utf-8").split("\n")
            for content in contents:
                content = content.strip()
                kv = content.split("=")
                if (len(kv) < 2):
                    continue
                if len(kv[1]) < 10:
                    continue
                kv[1] = kv[1].replace('"', '')
                kv[1] = kv[1].replace(';', '')
                allCol = kv[1].split(",")
                stock_id = kv[0].split("_")[2]
                try:
                    # allCol layout: [0]=name, [2]=yesterday close, [3]=current price,
                    # [8]=volume, [10]/[12]=buy1/buy2 quantity (lots of 100).
                    closed_price = float(allCol[2])
                    cur_price = float(allCol[3])
                    if closed_price == 0:
                        continue
                    cur_total_quantity = long(allCol[8])
                    buy1_quantity = long(allCol[10]) / 100
                    buy2_quantity = long(allCol[12]) / 100
                    if (cur_price > 0 and closed_price > 0 and ( cur_price - closed_price) / closed_price > 0.07):
                        subject = "daban " + stock_id + " " + allCol[0] + " " + str((cur_price - closed_price) * 100 / closed_price) + "%"
                        txt = ""
                        print subject
                        if zhen_dict.has_key(stock_id):
                            print zhen_dict[stock_id]
                            txt = zhen_dict[stock_id]
                        else:
                            print "0"
                            txt = "0"
                        sys.stdout.flush()
                        # Alert only once per stock per run.
                        quick_check_dict.pop(stock_id)
                        sendMail(subject, txt)
                except Exception,e:
                    print traceback.format_exc()
                    # NOTE(review): "str + e" concatenates a string with an
                    # exception object and will itself raise TypeError -- confirm.
                    sys.stderr.write("send maill error. " + e + "\n")
    conn.close()
if __name__ == "__main__":
    buildDict()
    last_time = "08-00"
    # Poll every 15 s until 15:15, skipping the 11:40-12:50 lunch break;
    # times are compared lexicographically as "HH-MM" strings.
    while last_time < "15-15":
        now_time = time.strftime("%H-%M")
        if now_time > "11-40" and now_time < "12-50":
            time.sleep(1)
            continue
        if now_time != last_time:
            # Log once per minute so progress is visible in the output.
            print now_time
            last_time = now_time
            sys.stdout.flush()
            sys.stderr.flush()
        try:
            quickCheck()
        except Exception,e:
            sys.stderr.write("quick Check error %s\n" % e)
        time.sleep(15)
|
996,221 | 50ccc36af5f33cee9f87a92cf9b4aa49f53debf3 | def merge(A, B, size_a):
a_index = size_a - 1
b_index = len(B) - 1
merge_index = size_a + len(B) - 1
while a_index >= 0 and b_index >= 0:
if A[a_index] > B[b_index]:
A[merge_index] = A[a_index]
a_index -= 1
else:
A[merge_index] = B[b_index]
b_index -= 1
merge_index -= 1
while a_index >= 0:
A[merge_index] = A[a_index]
merge_index -= 1
a_index -= 1
while b_index >= 0:
A[merge_index] = B[b_index]
merge_index -= 1
b_index -= 1
# Demo: A holds 3 sorted values plus room for B's 3; after the in-place
# merge A is fully sorted. (Python 2 print statement.)
A = [0, 4, 7, 0, 0, 0]
B = [2, 6, 10]
merge(A, B, 3)
print A
|
996,222 | 568a21c14eb116b2dc77ba7202c26100e5493d1d | # ====================================
# @Project : Python_Demo
# @Author : fengjm
# @Time : 2018/1/24 10:55
# ====================================
# # 参数不固定的使用方式: *args 会把多传入的参数变成一个元组形式, **kwargs 会把多传入的参数变成一个dict形式
# def stu_register(name,age,*args): # *args 会把多传入的参数变成一个元组形式
# print(name,age,args)
#
# stu_register("Alex",22)
# stu_register("fengjm",30,"IT","mars")
# stu_register("Jack",32,"CN","Python")
#
#
# def stu_register2(name,age,*args,**kwargs): # **kwargs 会把多传入的参数变成一个dict形式
# print(name,age,args,kwargs)
#
# stu_register2("Alex",22)
# #输出
# #Alex 22 () {}#后面这个{}就是kwargs,只是因为没传值,所以为空
#
# stu_register2("Jack",32,"CN","Python",sex="Male",province="ShanDong")
# #输出
# # Jack 32 ('CN', 'Python') {'province': 'ShanDong', 'sex': 'Male'}
#
#
# # 嵌套函数
# name = "Alex"
#
# def change_name():
# name = "Alex2"
#
# def change_name2():
# name = "Alex3"
# print("第3层打印",name)
#
# change_name2() #调用内层函数
# print("第2层打印",name)
#
# change_name()
# print("最外层打印",name)
# # 递归函数实际应用案例,二分查找
# data = [1, 3, 6, 7, 9, 12, 14, 16, 17, 18, 20, 21, 22, 23, 30, 32, 33, 35]
#
#
# def binary_search(dataset,find_num):
# print(dataset)
#
# if len(dataset) >1:
# mid = int(len(dataset)/2)
# if dataset[mid] == find_num: #find it
# print("找到数字",dataset[mid])
# elif dataset[mid] > find_num :# 找的数在mid左面
# print("\033[31;1m找的数在mid[%s]左面\033[0m" % dataset[mid])
# return binary_search(dataset[0:mid], find_num)
# else:# 找的数在mid右面
# print("\033[32;1m找的数在mid[%s]右面\033[0m" % dataset[mid])
# return binary_search(dataset[mid+1:],find_num)
# else:
# if dataset[0] == find_num: #find it
# print("找到数字啦",dataset[0])
# else:
# print("没的分了,要找的数字[%s]不在列表里" % find_num)
#
# binary_search(data,66)
# Anonymous (lambda) functions
def calc(n):
    """Return n raised to the n-th power."""
    return n**n
print(calc(10))
# The same function rewritten as a lambda expression bound to a name
calc2 = lambda n:n**n
print(calc2(10))
996,223 | 5f78c1b352f147fb11fdfb76c2051d44b3ca6389 | from tkinter import *
class TopMenu(object):
    """Builds the top menu bar for a Tk root frame."""

    def __init__(self, frame):
        self._root = frame

    def main_func(self):
        """Attach a File menu with a single 'view last 30 days' entry."""
        self.menuone = Menu(self._root)
        self.filemenu = Menu(self.menuone, tearoff=False)
        self.filemenu.add_command(label="查看最近三十天的数据", command=self.read30data)
        self.menuone.add_cascade(label="文件", font=("华康少女字体", 30), menu=self.filemenu)
        self._root.config(menu=self.menuone)

    def read30data(self):
        """Placeholder command handler; just reports that it was invoked."""
        print('被调用')
996,224 | ec7156eb2ba38fcf5aa491a41b1f90577fe9e252 | #!/usr/bin/env python
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Game Servers sample for updating a game server cluster.
Example usage:
python update_cluster.py --project-id <project-id> --location <location> --realm-id <realm-id> --cluster-id <cluster-id>
"""
import argparse
from google.cloud import gaming
from google.cloud.gaming_v1.types import game_server_clusters
from google.protobuf import field_mask_pb2 as field_mask
# [START cloud_game_servers_cluster_update]
def update_cluster(project_id, location, realm_id, cluster_id):
    """Updates a game server cluster.

    Replaces the cluster's labels with two fixed sample labels (the
    update_mask restricts the change to the labels field) and blocks until
    the long-running operation completes.
    """
    client = gaming.GameServerClustersServiceClient()
    request = game_server_clusters.UpdateGameServerClusterRequest(
        game_server_cluster=game_server_clusters.GameServerCluster(
            name=f"projects/{project_id}/locations/{location}/realms/{realm_id}/gameServerClusters/{cluster_id}",
            labels={"label-key-1": "label-value-1", "label-key-2": "label-value-2"},
        ),
        update_mask=field_mask.FieldMask(paths=["labels"]),
    )
    operation = client.update_game_server_cluster(request)
    print(f"Update cluster operation: {operation.operation.name}")
    # Wait for the LRO to finish; raises on failure or after 120 s.
    operation.result(timeout=120)
# [END cloud_game_servers_cluster_update]
if __name__ == "__main__":
    # CLI entry point: all four resource identifiers are required.
    parser = argparse.ArgumentParser()
    parser.add_argument('--project-id', help='Your cloud project ID.', required=True)
    parser.add_argument('--location', help='Your realm location.', required=True)
    parser.add_argument('--realm-id', help='Your realm ID.', required=True)
    parser.add_argument('--cluster-id', help='Your game server cluster ID.', required=True)
    args = parser.parse_args()
    update_cluster(args.project_id, args.location, args.realm_id, args.cluster_id)
|
996,225 | 3b3907c37886bfd5016e39544896abcc6d184a21 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.fields.related import ManyToManyField
class Tag(models.Model):
    """A reusable label for bookmarks; tag names are globally unique."""
    name = models.CharField(max_length=255, unique=True)
    def __str__(self):
        return self.name
class Collection(models.Model):
    """A named, per-user grouping of bookmarks; deleted with its owner."""
    name = models.CharField(max_length=255)
    owner = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name='collection')
    def __str__(self):
        return self.name
class Bookmark(models.Model):
    """A saved URL owned by a user, optionally filed into collections and tagged."""
    title = models.CharField(max_length=255)
    url = models.TextField()
    collection = models.ManyToManyField(Collection, blank=True)
    tags = models.ManyToManyField(Tag, blank=True)
    owner = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name='bookmark')
    def __str__(self):
        return self.title
class Shortcut(models.Model):
    """A quick-access link owned by a user; deleted with its owner."""
    title = models.CharField(max_length=255)
    url = models.TextField()
    owner = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name='shortcut')
    def __str__(self):
        return self.title
|
996,226 | 5321cf1e4bdc9d0193d1ae208bb35918bbb29c98 | from nltk.classify.naivebayes import NaiveBayesClassifier
import os
def extract_features(sentence):
    """Map a sentence to a NaiveBayes feature dict.

    One 'contains-word(w)' flag per lowercased token, plus a
    'first-word(w)' marker for the leading token (this marker improved the
    error rate from 5.33 to 2.76).

    Bug fix: an empty or whitespace-only sentence used to raise IndexError
    on words[0]; it now returns an empty feature dict.
    """
    words = sentence.lower().split()
    featureset = dict([('contains-word(%s)' % w, True) for w in words])
    #featureset['contains-phrase(%s %s)'% (words[0],words[1])] = True
    if words:  # guard: no first word in an empty sentence
        featureset['first-word(%s)'%words[0]] = True # improvement from 5.33 to 2.76
    #featureset['last-word(%s)'%words[-1]] = True # no improvement
    return featureset
def train(filename):
    """Train a NaiveBayesClassifier from ' ,,, '-separated (sentence, category) lines.

    Python 2 code. Each line of the file is "<sentence> ,,, <category>";
    only the first marker is split on, so sentences may contain commas.
    """
    print 'Reading data from the file ' + filename
    labeled_featuresets = []
    with open(filename) as f:
        for line in f:
            sentence, category = line.split(' ,,, ', 1)
            labeled_featuresets.append((extract_features(sentence), category.strip()))
    print 'Training started'
    classifier = NaiveBayesClassifier.train(labeled_featuresets)
    print 'Training completed\n'
    return classifier
def print_classified_probs(prbs):
    # Print each candidate label with its probability as a percentage
    # rounded to two decimals. (Python 2 print statement.)
    print dict([(s, round(100*prbs.prob(s), 2)) for s in prbs.samples()])
def main():
    """Train on the bundled labelled data, then classify one sentence from stdin."""
    training_file = os.path.dirname(__file__) + '/data/labelled_data.txt'
    classifier = train(training_file)
    sentence = raw_input("Enter test string: ")
    featurevector = extract_features(sentence)
    #print featurevector
    question_types = classifier.prob_classify(featurevector)
    print_classified_probs(question_types)
    print '\nQuestion Type: '+classifier.classify(featurevector)+'\n\n'
def test():
    """Resubstitution check: classify the training set itself and report the error rate.

    Prints each misclassified sentence with its labelled vs classified
    category, then the overall percentage error.
    """
    training_file = os.path.dirname(__file__) + '/data/labelled_data.txt'
    classifier = train(training_file)
    with open(training_file) as f:
        total=0
        errors=0
        for line in f:
            sentence, category = line.split(' ,,, ', 1)
            classified_category = classifier.classify(extract_features(sentence))
            labelled_category = category.strip()
            if labelled_category != classified_category:
                print 'Sentence: '+sentence
                print 'Labelled: '+labelled_category + '\tClassified: '+classified_category
                errors += 1
            total += 1
        print 'Total error = %.2f %%' % (errors*100.0/total)
if __name__ == '__main__':
    main()
    #test()
|
996,227 | c525448eafcb8f3875e65d3932bf64b9f8d237b2 |
from xai.brain.wordbase.nouns._length import _LENGTH
# class header
class _LENGTHS(_LENGTH, ):
    """Plural noun entry 'lengths', derived from the singular base '_LENGTH'."""
    def __init__(self,):
        _LENGTH.__init__(self)
        self.name = "LENGTHS"
        self.specie = 'nouns'
        self.basic = "length"  # singular base form this entry derives from
        self.jsondata = {}
996,228 | 75a6754b4ec4b7cae099e164cb90a73e1bc62d1e | # -*- coding: utf-8 -*-
# @Time : 2019/7/25 10:47 AM
# @Author : nJcx
# @Email : njcx86@gmail.com
# @File : observer_pattern_dev.py
from test import get
from test import html_detail
# HTTP headers for the ThreatBook API: JSON content plus a desktop Chrome UA.
header = {'content-type': 'application/json',
          'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
                        'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.15 Safari/537.36'
          }
api_url= "https://api.threatbook.cn/v3/scene/dns"
# NOTE(review): placeholder API key committed in source -- move to config/env.
api_key = '1111111111111111111111111111111'
class ThreatBook(object):
    """Thin client for the ThreatBook scene/dns lookup API."""
    def __init__(self, ):
        # Default query payload; 'resource' is overwritten on every lookup.
        self.query = {
            "apikey": api_key,
            "resource": "zzv.no-ip.info"
        }
    def ip_detect(self, ip):
        """Query the API for the given domain/IP and render the response."""
        self.query["resource"] = ip
        r = get(url=api_url, header=header, data=self.query)
        html_detail(r)
if __name__ == '__main__':
    test = ThreatBook()
    test.ip_detect(ip='zzv.no-ip.info')
|
996,229 | 8f8aefd1759ccc41b99e1155ba0058b413d1a9fb | #!/usr/bin/env python3
# Imports
from aoc import AdventOfCode
# Input Parse
# Input Parse
puzzle = AdventOfCode(year=2019, day=2)
puzzle_input = puzzle.get_input()
# Actual Code: run the Intcode program (Advent of Code 2019, day 2 part 1).
program = [int(code) for code in puzzle_input.split(",")]
# Restore the "1202 program alarm" state required by the puzzle.
program[1] = 12
program[2] = 2
idx = 0
while program[idx] != 99:  # opcode 99 halts the program
    assert program[idx] in {1, 2}
    # Operands are addressed indirectly: cells 1 and 2 hold the positions.
    val1 = program[program[idx + 1]]
    val2 = program[program[idx + 2]]
    if program[idx] == 1:  # opcode 1: add
        program[program[idx + 3]] = val1 + val2
    elif program[idx] == 2:  # opcode 2: multiply
        program[program[idx + 3]] = val1 * val2
    idx += 4  # every instruction occupies 4 cells
# Result
print(program[0])
996,230 | 9ca2fdf26d6dd5f20cd4473725e94201a343bf18 | def show():
for i in range(1,1000):
print i,2,i,1
print 1998
show()
show()
|
996,231 | 4d9657011d3911c538613d51d56853f213d1b903 | """
Upload files to jupyter server location or to Python callback using jp_proxy.
"""
import jp_proxy_widget
from jp_proxy_widget import hex_codec
from IPython.display import display
from traitlets import Unicode, HasTraits
js_files = ["js/simple_upload_button.js"]
def _load_required_js(widget):
    # Load the upload-button javascript support files into the proxy widget.
    widget.load_js_files(filenames=js_files)
def from_hex_iterator(hexcontent):
    """Yield each byte of a hex string as a length-1 bytes object.

    Optimized per the old "xxxx try to optimize" note: the whole string is
    decoded in one C-level pass with bytes.fromhex instead of int()-parsing
    every two-character slice. Note this is stricter on malformed input:
    an odd-length hex string now raises ValueError up front.
    """
    for value in bytes.fromhex(hexcontent):
        yield bytes((value,))
class JavaScriptError(Exception):
    "Exception sent from javascript; file_info is attached by the raiser."
class UnicodeUploader(HasTraits):
    # Human-readable progress/status string, observable as a traitlet.
    status = Unicode("")
    # Set to the destination path after a successful default file write.
    uploaded_filename = None
    segmented = None # no segmentation -- use chunk size instead
    def __init__(self, html_title=None, content_callback=None, to_filename=None, size_limit=None,
            chunk_size=1000000):
        """Create an upload button widget delivering text file content.

        Exactly one of content_callback(widget, name, content) or
        to_filename (a path, or True to use the uploaded name) must be
        given; chunks arriving from javascript are accumulated and handed
        over in one piece once the final chunk is received.
        """
        # by default segment files into chunks to avoid message size limits
        self.chunk_size = chunk_size
        assert content_callback is None or to_filename is None, (
            "content_callback and to_filename are mutually exclusive, please do not provide both. "
            + repr((content_callback, to_filename))
        )
        assert content_callback is not None or to_filename is not None, (
            "one of content_callback or to_filename must be specified."
        )
        self.size_limit = size_limit
        self.to_filename = to_filename
        self.content_callback = content_callback
        w = self.widget = jp_proxy_widget.JSProxyWidget()
        _load_required_js(w)
        element = w.element
        if html_title is not None:
            element.html(html_title)
        level = 2
        options = self.upload_options()
        options["size_limit"] = size_limit
        options["chunk_size"] = chunk_size
        #proxy_callback = w.callback(self.widget_callback_handler, data="upload click", level=level,
        #    segmented=self.segmented)
        #element = w.element()
        #upload_button = element.simple_upload_button(proxy_callback, options)
        # Wire the javascript upload button to handle_chunk_wrapper; string
        # content arrives as data.content, binary as data.hexcontent.
        w.js_init("""
            var upload_callback = function(data) {
                var content = data.content;
                if (!($.type(content) === "string")) {
                    content = data.hexcontent;
                }
                handle_chunk(data.status, data.name, content, data);
            }
            var upload_button = element.simple_upload_button(upload_callback, options);
            element.append(upload_button);
        """, handle_chunk=self.handle_chunk_wrapper, options=options)
        #w(element.append(upload_button))
        #w.flush()
        self.chunk_collector = []
        self.status = "initialized"
    def show(self):
        """Display the upload widget in the notebook."""
        self.status = "displayed"
        display(self.widget)
    def default_content_callback(self, widget, name, content):
        # Used when to_filename was given: write the whole content to disk.
        to_filename = self.to_filename
        if to_filename == True:
            # use the name sent as the filename
            to_filename = name
        self.status = "writing " + repr(len(content)) + " to " + repr(to_filename)
        f = self.open_for_write(to_filename)
        f.write(content)
        f.close()
        self.status = "wrote " + repr(len(content)) + " to " + repr(to_filename)
        self.uploaded_filename = to_filename
    """
    def widget_callback_handler(self, data, results):
        self.status = "upload callback called."
        try:
            file_info = results["0"]
            name = file_info["name"]
            status = file_info["status"]
            content = self.get_content(file_info)
            content_callback = self.content_callback
            return self.handle_chunk(status, name, content, file_info)
        except Exception as e:
            self.status = "callback exception: " + repr(e)
            raise"""
    # Optional output-redirect context (e.g. an ipywidgets Output) for debugging.
    output = None
    def handle_chunk_wrapper(self, status, name, content, file_info):
        """wrapper to allow output redirects for handle_chunk."""
        out = self.output
        if out is not None:
            with out:
                print("handling chunk " + repr(type(content)))
                self.handle_chunk(status, name, content, file_info)
        else:
            self.handle_chunk(status, name, content, file_info)
    def handle_chunk(self, status, name, content, file_info):
        "Handle one chunk of the file. Override this method for piecewise delivery or error handling."
        if status == "error":
            msg = repr(file_info.get("message"))
            exc = JavaScriptError(msg)
            exc.file_info = file_info
            self.status = "Javascript sent exception " + msg
            self.chunk_collector = []
            raise exc
        if status == "more":
            # Intermediate chunk: accumulate and report progress.
            self.chunk_collector.append(content)
            self.progress_callback(self.chunk_collector, file_info)
        else:
            assert status == "done", "Unknown status " + repr(status)
            self.save_chunks = self.chunk_collector
            self.chunk_collector.append(content)
            # Final chunk: assemble the full content and deliver it.
            all_content = self.combine_chunks(self.chunk_collector)
            self.chunk_collector = []
            content_callback = self.content_callback
            if content_callback is None:
                content_callback = self.default_content_callback
            self.status = "calling " + repr(content_callback)
            try:
                content_callback(self.widget, name, all_content)
            except Exception as e:
                self.status += "\n" + repr(content_callback) + " raised " + repr(e)
                raise
    # Payload-size multiplier relative to the raw file size (hex doubles it).
    encoding_factor = 1
    def progress_callback(self, chunks, file_info):
        # Update status with bytes received so far versus the expected total.
        size = file_info["size"] * self.encoding_factor
        got = 0
        for c in chunks:
            got += len(c)
        pct = int((got * 100)/size)
        self.status = "received %s of %s (%s%%)" % (got, size, pct)
    def combine_chunks(self, chunk_list):
        # Text upload: chunks are unicode strings.
        return u"".join(chunk_list)
    def upload_options(self):
        "options for jquery upload plugin -- unicode, not hex"
        return {"hexidecimal": False}
    def open_for_write(self, filename):
        "open unicode file for write"
        return open(filename, "w")
    def get_content(self, file_info):
        "get unicode content from file_info"
        content = file_info.get("content")
        return content
class BinaryUploader(UnicodeUploader):
    """Upload variant for binary files: content travels hex-encoded from javascript."""
    # Hex encoding doubles the transferred payload relative to the file size.
    encoding_factor = 2
    def upload_options(self):
        return {"hexidecimal": True}
    def open_for_write(self, filename):
        # Binary mode: content delivered to the callback is bytes.
        return open(filename, "wb")
    def get_content(self, file_info):
        return file_info.get("hexcontent")
    def combine_chunks(self, chunk_list):
        # Decode the concatenated hex chunks into one bytes object.
        all_hex_content = "".join(chunk_list)
        #return b"".join(from_hex_iterator(all_hex_content))
        ba = hex_codec.hex_to_bytearray(all_hex_content)
        return bytes(ba)
|
996,232 | 7a51dd06843cab2bea4b69ce42114490493777f4 | import unittest
from .common import JinsiTestCase
class JinsiConditionals(JinsiTestCase):
    """Exercises Jinsi's conditional constructs: ::when/::then/::else,
    ::case lookup tables, and their interaction with ::let bindings.

    NOTE(review): the YAML payloads are fixtures whose indentation is
    significant to the parser -- confirm against the repository copy.
    """

    def test_conditional_with_let(self):
        # A top-level ::let binding is visible inside the ::when condition.
        doc = """\
::let:
  a: 1
::when:
  ::get: a == 1
::then:
  foo: one
::else:
  bar: two
"""
        expected = {'foo': 'one'}
        self.check(expected, doc)

    def test_when_with_let(self):
        # ::let may also be declared inside the ::when node itself.
        doc = """\
::when:
  ::let:
    a: 1
  ::get: a == 1
::then:
  foo: one
::else:
  bar: two
"""
        expected = {'foo': 'one'}
        self.check(expected, doc)

    def test_case(self):
        # ::case evaluates each expression key and picks the first match.
        doc = """\
value:
  ::let:
    x: 3
  ::case:
    x == 1: one
    x == 2: two
    x == 3: three
    x == 4: four
"""
        expected = {'value': 'three'}
        self.check(expected, doc)

    def test_case_default_ellipsis(self):
        # An '...' key acts as the fall-through default branch.
        doc = """\
value:
  ::let:
    x: 17
  ::case:
    x == 1: one
    x == 2: two
    x == 3: three
    x == 4: four
    ...: more than four
"""
        expected = {'value': 'more than four'}
        self.check(expected, doc)

    def test_case_default_underscore(self):
        # '_' is the alternate spelling for the default branch.
        doc = """\
value:
  ::let:
    x: 17
  ::case:
    x == 1: one
    x == 2: two
    x == 3: three
    x == 4: four
    _: more than four
"""
        expected = {'value': 'more than four'}
        self.check(expected, doc)

    def test_case_when_without_get(self):
        # ::when accepts a bare expression string, no explicit ::get needed.
        doc = """\
value:
  ::let:
    x: 1
  ::when: x == 1
  ::then: one
  ::else: two
"""
        expected = {'value': 'one'}
        self.check(expected, doc)
# Allow running this test module directly (python path/to/file.py).
if __name__ == '__main__':
    unittest.main()
|
996,233 | 05d3a0eb8c18a3bcaaed724da769b007681a9519 | list=[1,2,3,4,5,6,7,8,9,"damien","ivy"]
print("the list of the objects")
print("\n After removing 5")
list.remove(5)
print(list)
print('\n After removing damien')
list.remove("damien")
print(list)
print("\n After removing ivy")
list.remove("ivy")
print(list)
print("\n After removing 9")
list.remove(9)
print(list)
list1=[1,2,3,4,5,6,7,8,9,"damien","ivy"]
print("\n")
print(list1)
print("\n")
print(list1.pop(10))
print(list1)
print("\n")
print(list1.pop(2))
print(list1)
print("\n")
print(list1.index(4))
print("\n")
print(list1.index("damien"))
print("\n")
list2=[1,2,3,4,5,6,7,8,9,"damien","ivy"]
print(list2)
list3=["whoami","where","whatis","andthen"]
list2.append(list3)
print(list2)
print("\n")
list4=["whoami","where","whatis","andthen"]
list2.extend(list4)
print(list2)
print("\n")
list5=[1,2,3,4,5,6,7,8,9,"damien","ivy"]
print(list5)
list5+=list3
print(list5)
print("\n")
list6=[1,2,3,4,5,6,7,8,9,10]
print(list6)
print("\n")
list5.insert(3,list6)
print(list5)
list5.insert(0,100)
list5.insert(1,500)
print("\n")
print(list5)
count=list5.count(5)
print(count)
print("\n")
list5.reverse()
print(list5)
list1.reverse()
print(list1)
print("\n")
list7=[1,2,3,5,4,2,5,7,9,6,4,6,8,5,3,3,4,6,8,5,4,3,4,5,6,7,6,4,3,3,5,6,7,5,4,3]
list7.sort()
print(list7)
list7.copy()
print(list7)
list7.clear()
print(list7)
print("\n")
list7.insert(0,1)
print(list7)
list7.del() |
996,234 | 7db333ece83a9771cfd8aa3ecefeec24e3143d02 | import math
from typing import List, Tuple, Callable
Vector = List[float]
Matrix = List[List[float]]
def add(v: Vector, w: Vector) -> Vector:
    """Componentwise sum of two vectors.

    Arguments:
        v {Vector} -- a vector of floats of length n
        w {Vector} -- another vector of floats of the same length n

    Returns:
        Vector -- the componentwise sum of the inputs
    """
    assert len(v) == len(w), 'both vectors must have the same length'
    return [a + b for a, b in zip(v, w)]
def subtract(v: Vector, w: Vector) -> Vector:
    """Componentwise difference v - w of two vectors.

    Arguments:
        v {Vector} -- a vector of floats of length n
        w {Vector} -- another vector of floats of the same length n

    Returns:
        Vector -- the componentwise difference of the inputs
    """
    assert len(v) == len(w), 'both vectors must have the same length'
    return [a - b for a, b in zip(v, w)]
def vector_sum(vectors: List[Vector]) -> Vector:
    """Componentwise sum across a list of equal-length vectors: the i-th
    element of the result is the sum of every vector's i-th element.

    Arguments:
        vectors {List[Vector]} -- a non-empty list of same-length vectors

    Returns:
        Vector -- the componentwise totals
    """
    assert vectors, 'no vectors provided'
    width = len(vectors[0])
    assert all(len(vec) == width for vec in vectors), 'vectors must be the same length'
    # zip(*vectors) transposes, yielding one tuple per component position.
    return [sum(column) for column in zip(*vectors)]
def scalar_multiply(s: float, v: Vector) -> Vector:
    """Scale every component of *v* by the scalar *s*.

    Arguments:
        s {float} -- the scaling factor
        v {Vector} -- vector of any length

    Returns:
        Vector -- a new vector with each component multiplied by *s*
    """
    return [s * component for component in v]
def vector_mean(vectors: List[Vector]) -> Vector:
    """Componentwise arithmetic mean of a list of same-sized vectors:
    sum them with vector_sum, then scale by 1/len(vectors).

    Arguments:
        vectors {List[Vector]} -- a non-empty list of same-length vectors

    Returns:
        Vector -- the componentwise mean
    """
    count = len(vectors)
    return scalar_multiply(1 / count, vector_sum(vectors))
def dot(v: Vector, w: Vector) -> float:
    """Dot product: the sum of the componentwise products of v and w.

    Arguments:
        v {Vector} -- a vector of floats of length n
        w {Vector} -- another vector of floats of the same length n

    Returns:
        float -- the dot product of the inputs
    """
    assert len(v) == len(w), 'vectors must be the same length'
    total = 0
    for a, b in zip(v, w):
        total += a * b
    return total
def sum_of_squares(v: Vector) -> float:
    """Sum of the squares of every component of *v* (i.e. v . v).

    Arguments:
        v {Vector} -- a vector of any length

    Returns:
        float -- the sum of each component squared
    """
    # Equivalent to dot(v, v), written out directly.
    return sum(component * component for component in v)
def magnitude(v: Vector) -> float:
    """Euclidean length (magnitude) of vector *v*.

    Arguments:
        v {Vector} -- a vector of any length

    Returns:
        float -- sqrt of the sum of squared components
    """
    return math.sqrt(sum(component * component for component in v))
def distance(v: Vector, w: Vector) -> float:
    """Euclidean distance between two vectors.

    Arguments:
        v {Vector} -- a vector of any length
        w {Vector} -- a vector of the same length as *v*

    Returns:
        float -- the magnitude of v - w
    """
    assert len(v) == len(w), 'both vectors must have the same length'
    squared_diffs = ((a - b) * (a - b) for a, b in zip(v, w))
    return math.sqrt(sum(squared_diffs))
def shape(A: Matrix) -> Tuple[int, int]:
    """Shape of a matrix as (n_rows, n_cols).

    Arguments:
        A {Matrix} -- a matrix of any size

    Returns:
        Tuple[int, int] -- (number of rows, number of columns); an empty
        matrix reports zero columns
    """
    rows = len(A)
    cols = len(A[0]) if rows else 0
    return rows, cols
def get_row(A: Matrix, i: int) -> Vector:
    """Row *i* of matrix *A* as a vector (the row list itself, not a copy).

    Arguments:
        A {Matrix} -- a matrix of any size
        i {int} -- index of the row to return

    Returns:
        Vector -- the i-th row of *A*
    """
    return A[i]
def get_column(A: Matrix, j: int) -> Vector:
    """Column *j* of matrix *A* as a new vector.

    Arguments:
        A {Matrix} -- a matrix of any size
        j {int} -- index of the column to return

    Returns:
        Vector -- the j-th entry of every row, top to bottom
    """
    return [row[j] for row in A]
def make_matrix(num_rows: int, num_cols: int, entry_fn: Callable[[int, int], float]) -> Matrix:
    """Build a num_rows x num_cols matrix whose (i, j) entry is entry_fn(i, j).

    Arguments:
        num_rows {int} -- number of rows in the result
        num_cols {int} -- number of columns in the result
        entry_fn {Callable[[int, int], float]} -- produces the (i, j) value

    Returns:
        Matrix -- the generated matrix
    """
    rows = []
    for i in range(num_rows):
        rows.append([entry_fn(i, j) for j in range(num_cols)])
    return rows
def identity_matrix(n: int) -> Matrix:
    """n x n identity matrix: 1s on the diagonal, 0s everywhere else.

    Arguments:
        n {int} -- number of rows and columns

    Returns:
        Matrix -- the n x n identity matrix
    """
    return [[1 if r == c else 0 for c in range(n)] for r in range(n)]
# Self-checks: exercised only when the module is run directly.
if __name__ == '__main__':
    # Vector operations
    assert add([1, 2, 3], [4, 5, 6]) == [5, 7, 9]
    assert subtract([5, 7, 9], [4, 5, 6]) == [1, 2, 3]
    assert vector_sum([[1, 2], [3, 4], [5, 6], [7, 8]]) == [16, 20]
    assert scalar_multiply(2, [1, 2, 3]) == [2, 4, 6]
    assert vector_mean([[1, 2], [3, 4], [5, 6]]) == [3, 4]
    assert dot([1, 2, 3], [4, 5, 6]) == 32
    assert sum_of_squares([1, 2, 3]) == 14
    assert magnitude([3, 4]) == 5
    assert distance([2, 3, 4, 2], [1, -2, 1, 3]) == 6
    # Matrices
    assert shape([[1, 2, 3], [4, 5, 6]]) == (2, 3)
    assert get_row([[1, 2, 3], [4, 5, 6]], 0) == [1, 2, 3]
    assert get_column([[1, 2, 3], [4, 5, 6]], 0) == [1, 4]
    assert identity_matrix(5) == [[1, 0, 0, 0, 0],
                                  [0, 1, 0, 0, 0],
                                  [0, 0, 1, 0, 0],
                                  [0, 0, 0, 1, 0],
                                  [0, 0, 0, 0, 1]]
|
996,235 | 2ccad80d6d79d15a707a36dbe95c835f017c7e21 | #!/usr/bin/python
# docco-husky cannot parse CoffeeScript block comments so we have to manually
# transform them to single line ones while preserving the tab space
import sys
from os import walk
def isComment(line):
    """True when *line* contains a CoffeeScript block-comment marker (###)."""
    return line.find("###") != -1
def main(argv):
    """Rewrite every file under argv[0], converting CoffeeScript block
    comments (### ... ###) into single-line '# ' comments while keeping
    each line's leading indentation; the ### delimiter lines are dropped.

    Fixes vs. original: file handles are now closed deterministically via
    context managers (the originals were left open), and the outer `path`
    argument is no longer shadowed by the os.walk loop variable.
    """
    root = argv[0]
    for dirpath, _dirnames, filenames in walk(root):
        for filename in filenames:
            filepath = dirpath + '/' + filename
            out_lines = []
            in_block = False
            with open(filepath, 'r') as src:
                for line in src:
                    if "###" in line:
                        # Block-comment delimiter: toggle state, drop line.
                        in_block = not in_block
                        continue
                    if in_block and line.strip():
                        # Prefix '# ' after the existing indentation.
                        indent = len(line) - len(line.lstrip())
                        line = line[:indent] + "# " + line[indent:]
                    out_lines.append(line)
            with open(filepath, 'w') as dst:
                dst.writelines(out_lines)
# Entry point: transform every file under the directory given as argv[1].
if __name__ == "__main__":
    main(sys.argv[1:])
|
996,236 | 178b3a0db35b7abcf7730ec1f1334969d5c8e4f0 | # Generated by Django 3.2.6 on 2021-08-29 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax three CharFields (allow blank) with explicit max lengths:
    Company.url, Contact.phone and Opportunity.role_list_url."""

    dependencies = [
        ('bloom', '0016_auto_20210827_1720'),
    ]

    operations = [
        # Company.url: optional, up to 150 chars.
        migrations.AlterField(
            model_name='company',
            name='url',
            field=models.CharField(blank=True, max_length=150),
        ),
        # Contact.phone: optional, up to 35 chars.
        migrations.AlterField(
            model_name='contact',
            name='phone',
            field=models.CharField(blank=True, max_length=35),
        ),
        # Opportunity.role_list_url: optional, up to 150 chars.
        migrations.AlterField(
            model_name='opportunity',
            name='role_list_url',
            field=models.CharField(blank=True, max_length=150),
        ),
    ]
|
996,237 | 3ddc482976aa181df8c63e6fa1a606abd89132b7 | """empty message
Revision ID: d24d61a0c751
Revises: d7371b87f023
Create Date: 2021-11-08 09:52:07.690171
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd24d61a0c751'
down_revision = 'd7371b87f023'
branch_labels = None
depends_on = None
def upgrade():
    """Schema upgrade: drop the 'img' column from the 'item' table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('item', 'img')
    # ### end Alembic commands ###
def downgrade():
    """Schema downgrade: restore the nullable VARCHAR 'img' column on 'item'.
    Column values dropped by upgrade() are not recoverable."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('item', sa.Column('img', sa.VARCHAR(), autoincrement=False, nullable=True))
    # ### end Alembic commands ###
|
996,238 | 7cb6dfb4dde80445d8feaea50994a4a043af89ef | import flask
from flask import Flask, url_for, render_template, request, g
import model
from model import Movie
from model import User
from model import Rating
import movies
app = Flask(__name__)
@app.before_request
def before_request():
    # Open a database connection for each request and stash it on flask.g
    # so view functions can reach it.
    # NOTE(review): no matching teardown_request closes g.db -- confirm the
    # connection is released elsewhere.
    g.db = movies.connect()
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("movie_web.html")
@app.route("/movie", methods=["POST"])
def movie_details():
    """Look up the movie whose id was posted in the form and render its
    detail page.  Raises ValueError if 'movie_id' is not an integer."""
    movie_id = request.form['movie_id']
    movie = Movie.get(int(movie_id))
    return render_template("movie_details.html", movie=movie)
@app.route("/user", methods=["POST"])
def user_details():
    """Look up the user whose id was posted in the form and render their
    detail page.  Raises ValueError if 'user_id' is not an integer."""
    user_id = request.form['user_id']
    user = User.get(int(user_id))
    return render_template("user_details.html", user=user)
@app.route("/rating", methods=["POST"])
def average_rating():
    """Render the average rating for the movie id posted in the form.

    Fix vs. original: an unreachable `pass` after the return was removed.
    """
    movie_id = request.form['movie_id']
    # NOTE(review): sibling views convert ids with int(); str() here looks
    # inconsistent -- confirm the key type Movie.get_average expects.
    average = Movie.get_average(str(movie_id))
    return render_template("average_rating.html", average=average)
# Development entry point; debug=True must not be enabled in production.
if __name__ == '__main__':
    app.run(debug=True)
|
996,239 | 90ec16899d3c32ec3fcfd50aa9fcf1126a0b4151 |
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: # coding=utf-8
2: __doc__ = "range builtin is invoked, but a class is used instead of an instance"
3:
4: if __name__ == '__main__':
5: # Call options
6: # (Integer) -> <built-in function range>
7: # (Overloads__trunc__) -> <built-in function range>
8: # (Integer, Integer) -> <built-in function range>
9: # (Overloads__trunc__, Integer) -> <built-in function range>
10: # (Integer, Overloads__trunc__) -> <built-in function range>
11: # (Overloads__trunc__, Overloads__trunc__) -> <built-in function range>
12: # (Integer, Integer, Integer) -> <built-in function range>
13: # (Overloads__trunc__, Integer, Integer) -> <built-in function range>
14: # (Integer, Overloads__trunc__, Integer) -> <built-in function range>
15: # (Integer, Integer, Overloads__trunc__) -> <built-in function range>
16: # (Integer, Overloads__trunc__, Overloads__trunc__) -> <built-in function range>
17: # (Overloads__trunc__, Overloads__trunc__, Integer) -> <built-in function range>
18: # (Overloads__trunc__, Integer, Overloads__trunc__) -> <built-in function range>
19: # (Overloads__trunc__, Overloads__trunc__, Overloads__trunc__) -> <built-in function range>
20:
21:
22:
23: class Sample:
24: def __trunc__(self):
25: return 4
26:
27:
28: # Type error
29: ret = range(int, int)
30: # Type error
31: ret = range(Sample, Sample)
32: # Type error
33: ret = range(Sample, Sample, 4)
34:
"""
# Import the stypy library necessary elements
from stypy.type_inference_programs.type_inference_programs_imports import *
# NOTE(review): machine-generated type-inference program produced by stypy
# from the source shown in the module docstring.  Do not hand-edit the
# generated statements; regenerate from the original program instead.
# Create the module type store
module_type_store = Context(None, __file__)

# ################# Begin of the type inference program ##################

# Assigning a Str to a Name (line 2):
str_1 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 2, 10), 'str', 'range builtin is invoked, but a class is used instead of an instance')
# Assigning a type to the variable '__doc__' (line 2)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 2, 0), '__doc__', str_1)

if (__name__ == '__main__'):
    # Declaration of the 'Sample' class
    class Sample:

        @norecursion
        def __trunc__(type_of_self, localization, *varargs, **kwargs):
            global module_type_store
            # Assign values to the parameters with defaults
            defaults = []
            # Create a new context for function '__trunc__'
            module_type_store = module_type_store.open_function_context('__trunc__', 24, 8, False)
            # Assigning a type to the variable 'self' (line 25)
            module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 25, 8), 'self', type_of_self)
            # Passed parameters checking function
            Sample.__trunc__.__dict__.__setitem__('stypy_localization', localization)
            Sample.__trunc__.__dict__.__setitem__('stypy_type_of_self', type_of_self)
            Sample.__trunc__.__dict__.__setitem__('stypy_type_store', module_type_store)
            Sample.__trunc__.__dict__.__setitem__('stypy_function_name', 'Sample.__trunc__')
            Sample.__trunc__.__dict__.__setitem__('stypy_param_names_list', [])
            Sample.__trunc__.__dict__.__setitem__('stypy_varargs_param_name', None)
            Sample.__trunc__.__dict__.__setitem__('stypy_kwargs_param_name', None)
            Sample.__trunc__.__dict__.__setitem__('stypy_call_defaults', defaults)
            Sample.__trunc__.__dict__.__setitem__('stypy_call_varargs', varargs)
            Sample.__trunc__.__dict__.__setitem__('stypy_call_kwargs', kwargs)
            Sample.__trunc__.__dict__.__setitem__('stypy_declared_arg_number', 1)
            arguments = process_argument_values(localization, type_of_self, module_type_store, 'Sample.__trunc__', [], None, None, defaults, varargs, kwargs)
            if is_error_type(arguments):
                # Destroy the current context
                module_type_store = module_type_store.close_function_context()
                return arguments
            # Initialize method data
            init_call_information(module_type_store, '__trunc__', localization, [], arguments)
            # Default return type storage variable (SSA)
            # Assigning a type to the variable 'stypy_return_type'
            module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
            # ################# Begin of '__trunc__(...)' code ##################
            int_2 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 19), 'int')
            # Assigning a type to the variable 'stypy_return_type' (line 25)
            module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 25, 12), 'stypy_return_type', int_2)
            # ################# End of '__trunc__(...)' code ##################
            # Teardown call information
            teardown_call_information(localization, arguments)
            # Storing the return type of function '__trunc__' in the type store
            # Getting the type of 'stypy_return_type' (line 24)
            stypy_return_type_3 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 24, 8), 'stypy_return_type')
            module_type_store.store_return_type_of_current_context(stypy_return_type_3)
            # Destroy the current context
            module_type_store = module_type_store.close_function_context()
            # Return type of the function '__trunc__'
            return stypy_return_type_3

        @norecursion
        def __init__(type_of_self, localization, *varargs, **kwargs):
            global module_type_store
            # Assign values to the parameters with defaults
            defaults = []
            # Create a new context for function '__init__'
            module_type_store = module_type_store.open_function_context('__init__', 23, 4, False)
            # Assigning a type to the variable 'self' (line 24)
            module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 24, 4), 'self', type_of_self)
            # Passed parameters checking function
            arguments = process_argument_values(localization, type_of_self, module_type_store, 'Sample.__init__', [], None, None, defaults, varargs, kwargs)
            if is_error_type(arguments):
                # Destroy the current context
                module_type_store = module_type_store.close_function_context()
                return
            # Initialize method data
            init_call_information(module_type_store, '__init__', localization, [], arguments)
            # Default return type storage variable (SSA)
            # Assigning a type to the variable 'stypy_return_type'
            module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
            # ################# Begin of '__init__(...)' code ##################
            pass
            # ################# End of '__init__(...)' code ##################
            # Teardown call information
            teardown_call_information(localization, arguments)
            # Destroy the current context
            module_type_store = module_type_store.close_function_context()

    # Assigning a type to the variable 'Sample' (line 23)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 23, 4), 'Sample', Sample)

    # Assigning a Call to a Name (line 29):
    # Call to range(...): (line 29)
    # Processing the call arguments (line 29)
    # Getting the type of 'int' (line 29)
    int_5 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 29, 16), 'int', False)
    # Getting the type of 'int' (line 29)
    int_6 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 29, 21), 'int', False)
    # Processing the call keyword arguments (line 29)
    kwargs_7 = {}
    # Getting the type of 'range' (line 29)
    range_4 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 29, 10), 'range', False)
    # Calling range(args, kwargs) (line 29)
    range_call_result_8 = invoke(stypy.reporting.localization.Localization(__file__, 29, 10), range_4, *[int_5, int_6], **kwargs_7)
    # Assigning a type to the variable 'ret' (line 29)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 29, 4), 'ret', range_call_result_8)

    # Assigning a Call to a Name (line 31):
    # Call to range(...): (line 31)
    # Processing the call arguments (line 31)
    # Getting the type of 'Sample' (line 31)
    Sample_10 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 31, 16), 'Sample', False)
    # Getting the type of 'Sample' (line 31)
    Sample_11 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 31, 24), 'Sample', False)
    # Processing the call keyword arguments (line 31)
    kwargs_12 = {}
    # Getting the type of 'range' (line 31)
    range_9 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 31, 10), 'range', False)
    # Calling range(args, kwargs) (line 31)
    range_call_result_13 = invoke(stypy.reporting.localization.Localization(__file__, 31, 10), range_9, *[Sample_10, Sample_11], **kwargs_12)
    # Assigning a type to the variable 'ret' (line 31)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 31, 4), 'ret', range_call_result_13)

    # Assigning a Call to a Name (line 33):
    # Call to range(...): (line 33)
    # Processing the call arguments (line 33)
    # Getting the type of 'Sample' (line 33)
    Sample_15 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 33, 16), 'Sample', False)
    # Getting the type of 'Sample' (line 33)
    Sample_16 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 33, 24), 'Sample', False)
    int_17 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 33, 32), 'int')
    # Processing the call keyword arguments (line 33)
    kwargs_18 = {}
    # Getting the type of 'range' (line 33)
    range_14 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 33, 10), 'range', False)
    # Calling range(args, kwargs) (line 33)
    range_call_result_19 = invoke(stypy.reporting.localization.Localization(__file__, 33, 10), range_14, *[Sample_15, Sample_16, int_17], **kwargs_18)
    # Assigning a type to the variable 'ret' (line 33)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 33, 4), 'ret', range_call_result_19)

# ################# End of the type inference program ##################
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
|
996,240 | 62a3efe51366effbf3468b77c580d7fe52e91d6f | m = len(matrix)
n = len(matrix[0])
zombies = deque([])
for i in range(m):
for j in range(n):
if matrix[i][j] == 1:
zombies.append((i, j))
if len(zombies) == 0:
return -1
visited = set()
days = 0
while zombies:
p = len(zombies)
for _ in range(p):
i, j = zombies.popleft()
visited.add((i, j))
for k, v in [(0, 1), (1, 0), (0, -1), (-1, 0)]:
newi = i + k
newj = j + v
if newi < m and newj < n and newi >= 0 and newj >= 0 and matrix[newi][newj] == 0 and (
newi, newj) not in visited:
matrix[newi][newj] = 1
zombies.append((newi, newj))
if zombies:
days += 1
return days |
from itertools import product

# Print every code combination B{1-2} C{1-3} S{1-2} P{1-3} as one list.
print([
    'B%dC%dS%dP%d' % (b, c, s, p)
    for b, c, s, p in product(range(1, 3), range(1, 4), range(1, 3), range(1, 4))
])
#!/usr/bin/python2
# CGI endpoint that provisions a Hadoop cluster: it generates per-role
# installer scripts (namenode, jobtracker, datanode, tasktracker, client,
# secondary namenode), copies them to target hosts with sshpass/scp and
# runs them remotely.
# NOTE(review): Python 2 only -- print statements and the `commands`
# module (removed in Python 3; subprocess is the replacement).
# NOTE(review): form values (nn, jt, client) are interpolated directly
# into shell commands -- command injection for a CGI script; validate the
# inputs or rewrite the calls with subprocess argument lists.
import commands
import cgi
import cgitb
cgitb.enable()
print "content-type: text/html"
print
data=cgi.FormContent()
NNIP=data['nn'][0]      # namenode IP from the submitted form
JTIP=data['jt'][0]      # jobtracker IP from the submitted form
u=data['client'][0]     # client host from the submitted form
network=commands.getoutput('route -n | grep 255 |cut -d" " -f1')
host1 = commands.getoutput("arp-scan --interface=eth0 %s /24 |grep '192.168.*' |cut -f1" %network)
print "Do not use the first IP(network IP)"
print host1
# NOTE(review): the filename below starts with a space (' namenode.py'),
# but the chmod/scp commands later reference 'namenode.py' -- they will
# not see the file that is actually written.  Confirm and align the names.
o=open(' namenode.py',"w")
o.write('''#!/usr/bin/python2
import commands
commands.getoutput('yum install hadoop -y')
commands.getoutput('yum install jdk -y')
f=open('/etc/hadoop/hdfs-site.xml',"w")
f1=open('/etc/hadoop/core-site.xml',"w")
f.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.name.dir</name>
<value>/Name</value>
</property>
</configuration>""")
f1.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://%s:9001</value>
</property>
</configuration>""" )
f.close()
f1.close()
commands.getoutput('hadoop namenode -format')
commands.getoutput('PATH=/usr/java/jdk1.7.0_51/bin/:$PATH')
commands.getoutput('hadoop-daemon.sh start namenode')
commands.getoutput('jps')
raw_input()'''% NNIP)
o.close()
commands.getoutput('sudo chmod 755 namenode.py')
# Generate the jobtracker installer (mapred + core site configs).
m=open('jobtracker.py',"w")
m.write('''#!/usr/bin/python2
import commands
commands.getoutput('yum install hadoop -y')
commands.getoutput('yum install jdk -y')
f=open('/etc/hadoop/mapred-site.xml',"w")
f1=open('/etc/hadoop/core-site.xml',"w")
f.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>%s:9002</value>
</property>
</configuration>""" )
f1.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://%s:9001</value>
</property>
</configuration>""")
f.close()
f1.close()
commands.getoutput('PATH=/usr/java/jdk1.7.0_51/bin/:$PATH')
commands.getoutput('hadoop-daemon.sh start jobtracker')
commands.getoutput('jps')
raw_input()'''%(JTIP, NNIP))
m.close()
commands.getoutput('sudo chmod 755 jobtracker.py')
# Generate the datanode installer (hdfs data dir + core site config).
n=open('datanode.py',"w")
n.write('''#!/usr/bin/python2
import commands
commands.getoutput('yum install hadoop -y')
commands.getoutput('yum install jdk -y')
f=open('/etc/hadoop/hdfs-site.xml',"w")
f1=open('/etc/hadoop/core-site.xml',"w")
f.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.data.dir</name>
<value>/Data</value>
</property>
</configuration>""")
f1.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://%s:9001</value>
</property>
</configuration>""" )
f.close()
f1.close()
commands.getoutput('PATH=/usr/java/jdk1.7.0_51/bin/:$PATH')
commands.getoutput('hadoop-daemon.sh start datanode')
commands.getoutput('jps')
raw_input()'''% NNIP)
n.close()
commands.getoutput('sudo chmod 755 datanode.py')
# Generate the tasktracker installer (mapred site config only).
p=open('tasktracker.py',"w")
p.write('''#!/usr/bin/python2
import commands
commands.getoutput('yum install hadoop')
commands.getoutput('yum install jdk')
host = raw_input("input the IP of jobtracker")
f=open('/etc/hadoop/mapred-site.xml',"w")
f.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>%s:9002</value>
</property>
</configuration>""")
f.close()
commands.getoutput('PATH=/usr/java/jdk1.7.0_51/bin/:$PATH')
commands.getoutput('hadoop-daemon.sh start tasktracker')
commands.getoutput('jps')
raw_input()''' % JTIP)
p.close()
commands.getoutput('sudo chmod 755 tasktracker.py')
# Generate the client installer (points at both jobtracker and namenode).
q=open('client.py',"w")
q.write('''#!/usr/bin/python2
import commands
commands.getoutput('yum install hadoop -y')
commands.getoutput('yum install jdk -y')
f=open('/etc/hadoop/mapred-site.xml',"w")
f1=open('/etc/hadoop/core-site.xml',"w")
f.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>%s:9002</value>
</property>
</configuration>""")
f1.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://%s:9001</value>
</property>
</configuration>""")
f.close()
f1.close()''' %(JTIP, NNIP))
commands.getoutput('sudo chmod 755 client.py')
q.close()
# Generate the secondary-namenode installer.
# BUG(review): SNIP is interpolated below but never defined in this script
# (only NNIP/JTIP/u are read from the form) -- NameError at runtime.
# Additionally the snn heredoc contains FOUR %s placeholders (dfs.http,
# dfs.secondary.http, fs.default.name, and the ping command) but only
# three arguments are supplied -- a TypeError once SNIP is defined.
e=open('snn.py',"w")
e.write('''#!/usr/bin/python2
import commands
commands.getoutput('sudo yum install hadoop -y')
commands.getoutput('sudo yum install jdk -y')
e1=open('/etc/hadoop/hdfs-site.xml',"w")
e2=open('/etc/hadoop/core-site.xml',"w")
e1.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.http.address</name>
<value>%s:50070</value>
</property>
<property>
<name>dfs.secondary.http.address</name>
<value>%s:50090</value>
</property>
<property>
<name>fs.checkpoints.edits.dir</name>
<value>/old</value>
</property>
<property>
<name>fs.checkpoint.dir</name>
<value>/new</value>
</property>
</configuration>""")
e2.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://%s:9001</value>
</property>
</configuration>""" )
e1.close()
e2.close()
x=commands.getoutput("ping -c 1 %s | grep received | cut -f4 -d' ' ")
if x!=1:
    commands.getoutput("sudo iptable -F")
    commands.getoutput("sudo hadoop-daemon.sh stop namenode")
    commands.getoutput("sudo hadoop-daemon.sh start namenode")
r=open("/etc/crontab","a+")
r.write("\n 59 * * * * hadoop secondarynamenode -checkpoint force")
r.close()
commands.getoutput('PATH=/usr/java/jdk1.7.0_51/bin/:$PATH')
'''%(NNIP,SNIP,NNIP))
e.close()
commands.getoutput('sudo chmod 755 snn.py')
# Distribute and execute the generated role scripts on their hosts.
# NOTE(review): datanode.py and tasktracker.py are copied to JTIP (the
# jobtracker host) rather than dedicated worker hosts -- confirm intended.
commands.getoutput('sudo sshpass -predhat scp namenode.py %s:/root/' % NNIP)
commands.getoutput('sudo sshpass -predhat ssh %s /python2 namenode.py' % NNIP)
commands.getoutput('sudo sshpass -predhat ssh %s rm -rfv namenode.py' % NNIP)
commands.getoutput('sudo sshpass -predhat scp jobtracker.py %s:/root/' % JTIP)
commands.getoutput('sudo sshpass -predhat ssh %s /python2 jobtracker.py' % JTIP)
commands.getoutput('sudo sshpass -predhat ssh %s rm -rfv jobtracker.py' % JTIP)
commands.getoutput('sudo sshpass -predhat scp client.py %s:/root/' %u)
commands.getoutput('sudo sshpass -predhat ssh %s python2 client.py' % u)
commands.getoutput('sudo sshpass -predhat ssh %s rm -rfv client.py' % u)
commands.getoutput('sudo sshpass -predhat scp datanode.py %s:/root/' % JTIP)
commands.getoutput('sudo sshpass -predhat ssh %s /root/datanode.py' % JTIP)
commands.getoutput('sudo sshpass -predhat ssh %s rm -rfv /root/datanode.py' % JTIP)
commands.getoutput('sudo sshpass -predhat scp tasktracker.py %s:/root/' % JTIP)
commands.getoutput('sudo sshpass -predhat ssh %s python2 tasktracker.py' % JTIP)
commands.getoutput('sudo sshpass -predhat ssh %s rm -rfv tasktracker.py' % JTIP)
print "Cluster formed successfully.Please go back one page to continue"
|
996,243 | 9023a092e33b1d0a4c43019a2bc19c51c1d8c0f3 | import torch as pt
import numpy as np
from model.PFSeg import PFSeg3D
from medpy.metric.binary import jc,hd95
from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D
# from loss.FALoss3D import FALoss3D
import cv2
from loss.TaskFusionLoss import TaskFusionLoss
from loss.DiceLoss import BinaryDiceLoss
from config import config
import argparse
from tqdm import tqdm
# from tensorboardX import SummaryWriter
# ----- configuration and CLI arguments ---------------------------------
crop_size=config.crop_size        # patch size used for sliding-window crops
size=crop_size[2]
img_size=config.input_img_size    # full input volume size
parser = argparse.ArgumentParser(description='Patch-free 3D Medical Image Segmentation.')
parser.add_argument('-dataset_path',type=str,default='/newdata/why/BraTS20',help='path to dataset')
parser.add_argument('-model_save_to',type=str,default='.',help='path to output')
parser.add_argument('-bs', type=int, default=1, help='input batch size')
parser.add_argument('-epoch', type=int, default=100, help='number of epochs')
parser.add_argument('-lr', type=float, default=0.0001, help='learning rate')
parser.add_argument('-w_sr', type=float, default=0.5, help='w_sr of the lossfunc')
parser.add_argument('-w_tf', type=float, default=0.5, help='w_tf of the lossfunc')
parser.add_argument('-load_pretrained',type=str,default='',help='load a pretrained model')
parser.add_argument('-v', help="increase output verbosity", action="store_true")
args = parser.parse_args()
dataset_path=args.dataset_path
lr=args.lr
epoch=args.epoch
batch_size=args.bs
model_path=args.model_save_to
w_sr=args.w_sr
w_tf=args.w_tf
pretrained_model=args.load_pretrained
print(args)
# ----- model -----------------------------------------------------------
model=PFSeg3D(in_channels=1,out_channels=1).cuda()
# With multiple GPUs, DataParallel needs batch_size >= number of GPUs.
if pt.cuda.device_count()>1:
    if batch_size<pt.cuda.device_count():
        batch_size=pt.cuda.device_count()
        print('Batch size has to be larger than GPU#. Set to {:d} instead.'.format(batch_size))
    model=pt.nn.DataParallel(model)
if not pretrained_model=='':
    model.load_state_dict(pt.load(pretrained_model,map_location = 'cpu'))
# ----- data ------------------------------------------------------------
trainset=GuidedBraTSDataset3D(dataset_path,mode='train')
valset=GuidedBraTSDataset3D(dataset_path,mode='val')
testset=GuidedBraTSDataset3D(dataset_path,mode='test')
train_dataset=pt.utils.data.DataLoader(trainset,batch_size=batch_size,shuffle=True,drop_last=True)
val_dataset=pt.utils.data.DataLoader(valset,batch_size=1,shuffle=True,drop_last=True)
test_dataset=pt.utils.data.DataLoader(testset,batch_size=1,shuffle=True,drop_last=True)
# ----- losses, optimizer, LR schedule ----------------------------------
lossfunc_sr=pt.nn.MSELoss()
lossfunc_seg=pt.nn.BCELoss()
lossfunc_dice=BinaryDiceLoss()
lossfunc_pf=TaskFusionLoss()
optimizer = pt.optim.Adam(model.parameters(), lr=lr)
# # scheduler = pt.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
# mode='max': the monitored metric is a score (higher is better).
scheduler=pt.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='max',patience=20)
def ValModel():
    """Run sliding-window validation over the val set.

    Overlapping crops (50% stride) are predicted independently; a
    precomputed weight map averages the overlapping predictions, which
    are then thresholded at 0.5 and scored with Dice, Jaccard and the
    95% Hausdorff distance.

    Reads module-level globals: model, val_dataset, img_size, crop_size, args.

    Returns
    -------
    float
        Mean Dice over the validation set.
    """
    model.eval()
    dice_sum=0
    hd_sum=0
    jc_sum=0
    # Count how many overlapping crops touch each output voxel, then
    # invert so that accumulated predictions become an average.
    weight_map=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
    for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
        for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
            for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
                weight_map[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=1
    weight_map=1./weight_map
    for i,data in enumerate(val_dataset):
        # Output is at 2x the input resolution (patch-free super-resolution).
        output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
        label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
        (inputs,labels,_,guidance,mask)=data
        labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
        guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
        mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
        for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
            for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
                for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
                    inputs3D = pt.autograd.Variable(inputs[:,a:(a+crop_size[0]),b:(b+crop_size[1]),c:(c+crop_size[2])]).type(pt.FloatTensor).cuda().unsqueeze(1)
                    with pt.no_grad():
                        outputs3D,_ = model(inputs3D,guidance)
                    outputs3D=np.array(outputs3D.cpu().data.numpy())
                    output_list[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=outputs3D
        label_list=np.array(labels3D.cpu().data.numpy())
        # Average the overlaps, then binarize at 0.5.
        output_list=np.array(output_list)*weight_map
        output_list[output_list<0.5]=0
        output_list[output_list>=0.5]=1
        pr_sum = output_list.sum()
        gt_sum = label_list.sum()
        pr_gt_sum = np.sum(output_list[label_list == 1])
        dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
        dice_sum += dice
        if args.v:
            # Side-by-side dump of a middle slice (prediction | label).
            final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
            final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
            final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
            cv2.imwrite('ValPhase_BraTS.png',final_img)
            print("dice:",dice)
        hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
        jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
        hd_sum+=hausdorff
        jc_sum+=jaccard
    print("Finished. Total dice: ",dice_sum/len(val_dataset),'\n')
    print("Finished. Avg Jaccard: ",jc_sum/len(val_dataset))
    print("Finished. Avg hausdorff: ",hd_sum/len(val_dataset))
    return dice_sum/len(val_dataset)
def TestModel():
    """Run sliding-window evaluation over the test set.

    Same overlapping-crop scheme as ValModel, but iterates test_dataset,
    always writes a middle-slice preview image, and labels its prints
    as test metrics.

    Reads module-level globals: model, test_dataset, img_size, crop_size.

    Returns
    -------
    float
        Mean Dice over the test set.
    """
    model.eval()
    dice_sum=0
    hd_sum=0
    jc_sum=0
    # Per-voxel overlap count -> averaging weights (see ValModel).
    weight_map=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
    for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
        for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
            for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
                weight_map[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=1
    weight_map=1./weight_map
    for i,data in enumerate(test_dataset):
        output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
        label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
        (inputs,labels,_,guidance,mask)=data
        labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
        guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
        mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
        for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
            for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
                for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
                    inputs3D = pt.autograd.Variable(inputs[:,a:(a+crop_size[0]),b:(b+crop_size[1]),c:(c+crop_size[2])]).type(pt.FloatTensor).cuda().unsqueeze(1)
                    with pt.no_grad():
                        outputs3D,_ = model(inputs3D,guidance)
                    outputs3D=np.array(outputs3D.cpu().data.numpy())
                    output_list[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=outputs3D
        label_list=np.array(labels3D.cpu().data.numpy())
        # Average the overlaps, then binarize at 0.5.
        output_list=np.array(output_list)*weight_map
        output_list[output_list<0.5]=0
        output_list[output_list>=0.5]=1
        # Middle-slice preview (prediction | label), overwritten per case.
        final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
        final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
        final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
        cv2.imwrite('TestPhase_BraTS.png',final_img)
        pr_sum = output_list.sum()
        gt_sum = label_list.sum()
        pr_gt_sum = np.sum(output_list[label_list == 1])
        dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
        dice_sum += dice
        hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
        jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
        hd_sum+=hausdorff
        jc_sum+=jaccard
    print("Finished. Test Total dice: ",dice_sum/len(test_dataset),'\n')
    print("Finished. Test Avg Jaccard: ",jc_sum/len(test_dataset))
    print("Finished. Test Avg hausdorff: ",hd_sum/len(test_dataset))
    return dice_sum/len(test_dataset)
# Main training loop: train one epoch, validate, checkpoint on best Dice,
# then run the test pass for reporting.
best_dice=0
iterator=tqdm(train_dataset, ncols=100)
for x in range(epoch):
    model.train()
    loss_sum=0
    print('\n==>Epoch',x,': lr=',optimizer.param_groups[0]['lr'],'==>\n')
    for data in iterator:
        (inputs,labels_seg,labels_sr,guidance,mask)=data
        optimizer.zero_grad()
        inputs = pt.autograd.Variable(inputs).type(pt.FloatTensor).cuda().unsqueeze(1)
        guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
        mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
        labels_seg = pt.autograd.Variable(labels_seg).type(pt.FloatTensor).cuda().unsqueeze(1)
        labels_sr = pt.autograd.Variable(labels_sr).type(pt.FloatTensor).cuda().unsqueeze(1)
        outputs_seg,outputs_sr = model(inputs,guidance)
        # Combined objective: Dice + BCE segmentation loss, w_sr-weighted
        # super-resolution MSE (plus guidance-masked MSE), and w_tf-weighted
        # task-fusion loss coupling the two heads.
        loss_seg = lossfunc_seg(outputs_seg, labels_seg)
        loss_sr = lossfunc_sr(outputs_sr, labels_sr)
        loss_pf = lossfunc_pf(outputs_seg,outputs_sr,labels_seg*labels_sr)
        loss_guide=lossfunc_sr(mask*outputs_sr,mask*labels_sr)
        loss=lossfunc_dice(outputs_seg,labels_seg)+loss_seg+w_sr*(loss_sr+loss_guide)+w_tf*loss_pf
        loss.backward()
        optimizer.step()
        loss_sum+=loss.item()
        if args.v:
            # NOTE(review): `size` is not defined anywhere in this chunk --
            # presumably a crop dimension set upstream; confirm before
            # running with -v.
            final_img=np.zeros(shape=(2*size,2*size*5))
            iterator.set_postfix(loss=loss.item(),loss_seg=loss_seg.item(),loss_sr=loss_sr.item())
            final_img[:,0:(2*size)]=outputs_seg.cpu().data.numpy()[0,0,size//2,:,:]*255
            final_img[:,(2*size):(4*size)]=outputs_sr.cpu().data.numpy()[0,0,size//2,:,:]*255
            final_img[:,(4*size):(6*size)]=labels_seg.cpu().data.numpy()[0,0,size//2,:,:]*255
            final_img[:,(6*size):(8*size)]=labels_sr.cpu().data.numpy()[0,0,size//2,:,:]*255
            final_img[:,(8*size):]=cv2.resize(inputs.cpu().data.numpy()[0,0,size//4,:,:],((2*size),(2*size)))*255
            cv2.imwrite('combine.png',final_img)
    print('==>End of epoch',x,'==>\n')
    print('===VAL===>')
    dice=ValModel()
    # ReduceLROnPlateau in 'max' mode: step with the metric being maximized.
    scheduler.step(dice)
    if dice>best_dice:
        best_dice=dice
        print('New best dice! Model saved to',model_path+'/PFSeg_3D_BraTS_patch-free_bs'+str(batch_size)+'_best.pt')
        pt.save(model.state_dict(), model_path+'/PFSeg_3D_BraTS_patch-free_bs'+str(batch_size)+'_best.pt')
    print('===TEST===>')
    TestModel()
print('\nBest Dice:',best_dice) |
996,244 | c5401efd0be6401c95fbbef61723894f01909381 | # 161. One Edit Distance
from functools import lru_cache
class Solution:
    def isOneEditDistance(self, s: str, t: str) -> bool:
        """Return True iff s can be turned into t with exactly one edit.

        An edit is a single insertion, deletion, or substitution.
        Identical strings (including two empty strings) return False.

        Runs in O(n) time and O(1) extra space, replacing the original
        memoized recursion, which allocated an O(len(s)*len(t)) cache and
        could hit Python's recursion limit on long inputs.
        """
        # Normalize so s is never the longer string.
        if len(s) > len(t):
            s, t = t, s
        sN, tN = len(s), len(t)
        if tN - sN > 1:
            return False
        for i in range(sN):
            if s[i] != t[i]:
                if sN == tN:
                    # Substitution: the remainders must match exactly.
                    return s[i + 1:] == t[i + 1:]
                # Insertion into s (deletion from t): skip one char of t.
                return s[i:] == t[i + 1:]
        # No mismatch in the common prefix: one edit iff t is one longer.
        return tN - sN == 1
|
996,245 | 91022c6a6006403e91dc954751981f7e10121637 | # -*- coding: utf-8 -*-
"""
__title__="chekc"
__author__="ngc7293"
__mtime__="2020/9/20"
"""
import os
import numpy as np
import pandas as pd
import time
from tqdm import tqdm
from gensim.scripts.glove2word2vec import glove2word2vec
from gensim.models import KeyedVectors, Word2Vec
glove_file = "../data/pre/glove.42B.300d.zip"
word2vec_file = "../data/pre/glove.42B.300d.word2vec.txt"
#
start_time = time.time()
if not os.path.exists(word2vec_file):
(count, dim) = glove2word2vec(glove_file,word2vec_file)
print(time.time()-start_time)
#
# glove_model = KeyedVectors.load_word2vec_format(word2vec_file, binary=False)
# print(time.time()-start_time)
#
# start_time = time.time()
# cat_vec = glove_model['csfkjaf']
#
# print(cat_vec)
#
# print(time.time()-start_time)
# df = pd.read_csv("../data/train.tsv",sep="\t")
#
# corpus = []
# for i in tqdm(range(len(df))):
# corpus.append(df.Phrase[i].lower().split())
#
# word2vec_model = Word2Vec(corpus,size=100)
# word2vec_wv = word2vec_model.wv
#
# word2vec_wv.save_word2vec_format("../data/pre/word2vec.100d.word2vec.txt",binary=False) |
def rules(x, y, serial_number):
    """Fuel-cell power level (Advent of Code 2018, day 11).

    Power is the hundreds digit of ((x+10)*y + serial_number)*(x+10),
    minus 5.

    Parameters
    ----------
    x, y : int
        1-based grid coordinates.
    serial_number : int
        Puzzle input.

    Returns
    -------
    int
        Power level in [-5, 4].
    """
    rack_id = x + 10
    power_level = (rack_id * y + serial_number) * rack_id
    # Pure integer arithmetic for the hundreds digit; the original's
    # int(p/1000) float round trip loses precision for large values.
    return (power_level // 100) % 10 - 5
assert rules(3,5,8) == 4
assert rules(122,79,57) == -5
assert rules(217,196,39) == 0
assert rules(101,153,71) == 4
def get_grid(n, serial_number):
    """Build the n x n power grid.

    Parameters
    ----------
    n : int
        Grid side length (coordinates are 1-based).
    serial_number : int
        Puzzle input passed through to rules().

    Returns
    -------
    dict
        Maps "x,y" strings to rules(x, y, serial_number).
    """
    # Dict comprehension replaces the original manually-reset while loops.
    return {
        f"{x},{y}": rules(x, y, serial_number)
        for x in range(1, n + 1)
        for y in range(1, n + 1)
    }
def calc_power(x, y, n, grid):
    """Total power of the n x n square whose top-left corner is (x, y).

    `grid` maps "i,j" coordinate strings to per-cell power values.
    """
    cells = (
        grid[f"{i},{j}"]
        for i in range(x, x + n)
        for j in range(y, y + n)
    )
    return sum(cells)
def calc_power_pp2(x, y, n, grid):
    """Incrementally compute and memoize the power of the n x n square at (x, y).

    Builds on the memoized (n-1)-square at the same corner plus the new
    right column and bottom row, so each square costs O(n) instead of
    O(n^2). The result is stored back into `grid` under key "x,y,n".

    Requires sizes 1..n-1 to have been computed already for this grid
    (callers run find_cell for increasing sizes).
    """
    if n > 1:
        # Memoized (n-1)-square at the same top-left corner...
        new_power = grid[f"{x},{y},{int(n - 1)}"]
        # ...plus the new rightmost column (excluding the corner cell)...
        for j in range(0, n - 1):
            new_power += grid[f"{x + n - 1},{y + j}"]
        # ...plus the full new bottom row.
        for i in range(0, n):
            new_power += grid[f"{x + i},{y + n - 1}"]
    else:
        new_power = calc_power(x, y, n, grid)
    # NOTE: the original also summed the four (n/2)-quadrants for even n
    # and then discarded that value on the very next branch; that dead
    # computation (and its extra grid lookups) is removed here.
    grid[f"{x},{y},{n}"] = new_power
    return new_power
def find_cell(n, size, grid, it=False):
    """Scan every size x size square inside the n x n grid.

    Returns ([x, y], power) for the square with the highest total power;
    ties keep the first square found in (x outer, y inner) scan order.
    When `it` is True, the memoizing calc_power_pp2 is used instead of
    the direct calc_power sum.
    """
    scorer = calc_power_pp2 if it else calc_power
    best_points = [1, 1]
    best_power = 0
    for col in range(1, n - size + 1):
        for row in range(1, n - size + 1):
            power = scorer(col, row, size, grid)
            if power > best_power:
                best_power = power
                best_points = [col, row]
    return (best_points, best_power)
assert find_cell(300, 3, get_grid(300, 18)) == ([33, 45], 29)
assert find_cell(300, 3, get_grid(300, 42)) == ([21, 61], 30)
grid = get_grid(300, 4151)
print(find_cell(300, 3, grid))
grid1 = get_grid(300, 18)
find_cell(300, 1, grid1, it=True)
find_cell(300, 2, grid1, it=True)
print(find_cell(300, 3, grid1, it=True))
assert find_cell(300, 3, grid1, it=True) == ([33, 45], 29)
grid2 = get_grid(300, 42)
find_cell(300, 1, grid2, it=True)
find_cell(300, 2, grid2, it=True)
print(find_cell(300, 3, grid2, it=True))
assert find_cell(300, 3, grid2, it=True) == ([21, 61], 30)
def incrementor(grid):
    """Try square sizes 1..19 and return the best (corner, size, power).

    With larger squares the average tends toward zero, since each cell is
    roughly uniform over [-5, 4], so sizes beyond ~20 are not searched.
    """
    best_corner = [1, 1]
    best_power = 0
    best_size = 1
    for side in range(1, 20):
        (corner, power) = find_cell(300, side, grid, it=True)
        print(f"n: {side}, Power: {power}")
        if power > best_power:
            best_power = power
            best_corner = corner
            best_size = side
    return (best_corner, best_size, best_power)
print(incrementor(get_grid(300, 4151))) |
996,247 | 850a182622eb0d6c5fa4e32b8af7dd6fbd1ab638 | A = b'ABC'
B = b'DEF'
C = [A, B]
C.append(3)
print(C)
print(type(C))
for i in range(2):
C.append(A)
print(C) |
996,248 | ea8e8ab4e55922a8068b60d5b5180f3e91e38c84 | from zipfile import ZipFile
with ZipFile('input.zip') as myzip:
for z in myzip.filelist:
name = z.filename
# print(name)
if name[-1] == '/': # каталог
print(' ' * (name.count('/') - 1) + z.orig_filename.split('/')[-2])
else:
print(' ' * (name.count('/')) + z.orig_filename.split('/')[-1]) |
996,249 | 19c6f792a3b100dfb8e8e5289fb88e00923c663f | """ Reading and writing of spectra
"""
from __future__ import print_function, absolute_import, division, unicode_literals
from six import itervalues
try: # Python 2 & 3 compatibility
basestring
except NameError:
basestring = str
# Import libraries
import numpy as np
import warnings
import os, pdb
import json
from six import itervalues
from astropy.io import fits
from astropy import units as u
from astropy.table import Table, Column
from astropy.io.fits.hdu.table import BinTableHDU
from .xspectrum1d import XSpectrum1D
def readspec(specfil, inflg=None, efil=None, verbose=False, multi_ivar=False,
format='ascii', exten=None, head_exten=0, debug=False, select=0,
**kwargs):
""" Read a FITS file (or astropy Table or ASCII file) into a
XSpectrum1D class
Parameters
----------
specfil : str or Table or XSpectrum1D
Input file. If str:
* FITS file are detected by searching for '.fit' in their filename.
* ASCII must either have a proper Table format or be 3 (WAVE,
FLUX, ERROR) or 4 (WAVE, FLUX, ERROR, CONTINUUM) columns. If
the file has more than 4 columns with no header it will raise an error.
efil : string, optional
A filename for Error array, if it's in a separate file to the
flux. The code will attempt to find this file on its own.
multi_ivar : bool, optional
If True, assume BOSS format of flux, ivar, log10(wave) in a
multi-extension FITS.
format : str, optional
Format for ASCII table input. Default 'ascii'.
exten : int, optional
FITS extension (mainly for multiple binary FITS tables)
select : int, optional
Selected spectrum (for sets of 1D spectra, e.g. DESI brick)
head_exten : int, optional
Extension for header to ingest
**kwargs : optional
Passed to XSpectrum1D object
Returns
-------
An XSpectrum1D class
"""
# Initialize
if inflg is None:
inflg = 0
# Check specfil type
if isinstance(specfil, Table):
datfil = 'None'
# Dummy hdulist
hdulist = [fits.PrimaryHDU(), specfil]
elif isinstance(specfil, basestring):
datfil = specfil.strip()
flg_fits = False
for ext in ['.fit']:
if ext in specfil:
flg_fits = True
if flg_fits: # FITS
# Read header
datfil, chk = chk_for_gz(specfil.strip())
if chk == 0:
raise IOError('File does not exist {}'.format(specfil))
hdulist = fits.open(os.path.expanduser(datfil), **kwargs)
elif '.hdf5' in specfil: # HDF5
return parse_hdf5(specfil, **kwargs)
else: #ASCII
tbl = Table.read(specfil,format=format)
# No header?
if tbl.colnames[0] == 'col1':
if len(tbl.colnames) > 4:
raise IOError('No header found in ASCII file {}, \
and has more than four columns. Please check its format.'.format(specfil))
names = ['WAVE', 'FLUX', 'ERROR', 'CONTINUUM']
for i, name in enumerate(tbl.colnames):
tbl[name].name = names[i]
warnings.warn('No header found in ASCII file {}, assuming columns to be: {}'.format(specfil, names[:len(tbl.colnames)]))
# import pdb; pdb.set_trace()
hdulist = [fits.PrimaryHDU(), tbl]
elif isinstance(specfil, XSpectrum1D):
return specfil
else:
raise IOError('readspec: Bad spectra input.')
head0 = hdulist[0].header
if is_UVES_popler(head0):
if debug:
print('linetools.spectra.io.readspec(): Reading UVES popler format')
xspec1d = parse_UVES_popler(hdulist, **kwargs)
elif head0['NAXIS'] == 0:
# Binary FITS table
if debug:
print('linetools.spectra.io.readspec(): Assuming binary fits table')
xspec1d = parse_FITS_binary_table(hdulist, exten=exten, **kwargs)
elif head0['NAXIS'] == 1: # Data in the zero extension
# How many entries?
if len(hdulist) == 1: # Old school (one file per flux, error)
if debug:
print(
'linetools.spectra.io.readspec(): Assuming flux and err in separate files')
xspec1d = parse_two_file_format(specfil, hdulist, efil=efil, **kwargs)
elif hdulist[0].name == 'FLUX':
if debug:
print(
'linetools.spectra.io.readspec(): Assuming separate flux and err files.')
xspec1d = parse_linetools_spectrum_format(hdulist, **kwargs)
else: # ASSUMING MULTI-EXTENSION
co=None
if debug:
print(
'linetools.spectra.io.readspec(): Assuming multi-extension')
if len(hdulist) <= 2:
raise RuntimeError('No wavelength info but only 2 extensions!')
fx = hdulist[0].data.flatten()
try:
sig = hdulist[1].data.flatten()
except AttributeError: # Error array is "None"
sig = None
wave = hdulist[2].data.flatten()
# BOSS/SDSS?
try:
multi_ivar = head0['TELESCOP'][0:4] in ['SDSS']
except KeyError:
pass
#
if multi_ivar is True:
tmpsig = np.zeros(len(sig))
gdp = np.where(sig > 0.)[0]
tmpsig[gdp] = np.sqrt(1./sig[gdp])
sig = tmpsig
wave = 10.**wave
# Look for co
if len(hdulist) == 4:
data = hdulist[3].data
if 'float' in data.dtype.name: # This can be an int mask (e.g. BOSS)
co = data
wave = give_wv_units(wave)
xspec1d = XSpectrum1D.from_tuple((wave, fx, sig, co), **kwargs)
elif head0['NAXIS'] == 2:
if (hdulist[0].name == 'FLUX') and (hdulist[2].name == 'WAVELENGTH'): # DESI
if debug:
print('linetools.spectra.io.readspec(): Assuming DESI brick')
xspec1d = parse_DESI_brick(hdulist, select=select)
else: # SDSS
if debug:
print('linetools.spectra.io.readspec(): Assuming SDSS format')
fx = hdulist[0].data[0, :].flatten()
sig = hdulist[0].data[2, :].flatten()
wave = setwave(head0)
xspec1d = XSpectrum1D.from_tuple(
(give_wv_units(wave), fx, sig, None))
else: # Should not be here
print('Not sure what has been input. Send to JXP.')
return
# Check for bad wavelengths
if np.any(np.isnan(xspec1d.wavelength)):
warnings.warn('WARNING: Some wavelengths are NaN')
# Filename
xspec1d.filename = datfil
if not xspec1d.co_is_set:
# Final check for continuum in a separate file
if isinstance(specfil, basestring) and (specfil.endswith('.fits') or specfil.endswith('.fits.gz')):
co_filename = specfil.replace('.fits', '_c.fits')
if os.path.exists(co_filename):
tmpco = fits.getdata(co_filename)
if tmpco.size != xspec1d.totpix:
warnings.warn("Continuum size does not match native spectrum")
warnings.warn("Continuing under the assumption that this is due to a masked array")
gdp = ~xspec1d.data['flux'][xspec1d.select].mask
xspec1d.data['co'][xspec1d.select][gdp] = tmpco
else:
xspec1d.data['co'][xspec1d.select] = tmpco
# Mask
xspec1d.data['co'][xspec1d.select].mask = xspec1d.data['flux'][xspec1d.select].mask
# Add in the header
if head_exten == 0:
xspec1d.meta['headers'][0] = head0
else:
head = hdulist[head_exten].header
xspec1d.meta['headers'][0] = head
if xspec1d.nspec > 1:
warnings.warn("Read in only 1 header (into meta['headers'][0]")
# Return
return xspec1d
#### ###############################
# Grab values from the Binary FITS Table or Table
def get_table_column(tags, hdulist, idx=None):
    """ Find a column in a FITS binary table

    Used to return flux/error/wave values from a binary FITS table
    from a list of tags.

    Parameters
    ----------
    tags : list
        List of string tag names, tried in order; the first match wins.
    hdulist : fits header data unit list
    idx : int, optional
        Index of list for Table input; defaults to extension 1.

    Returns
    -------
    dat, tag : float array (or None), str
        Flattened data for the first matching tag, and that tag's name.
        Returns (None, 'NONE') if no tag matches.
    """
    if idx is None:
        idx = 1
    dat = None
    # Use Table
    if isinstance(hdulist[idx],BinTableHDU):
        tab = Table(hdulist[idx].data)
    else:
        # Already an astropy Table (readspec wraps plain Tables this way)
        tab = hdulist[idx]
    # Grab
    names = set(tab.dtype.names)
    for tag in tags:
        if tag in names:
            dat = np.array(tab[tag])
            break # Break with first hit
    # Return
    if dat is not None:
        return dat.flatten(), tag
    else:
        return dat, 'NONE'
def get_wave_unit(tag, hdulist, idx=None):
""" Attempt to pull wavelength unit from the Table
Parameters
----------
tag : str
Tag used for wavelengths
hdulist : fits header data unit list
idx : int, optional
Index of list for Table input
Returns
-------
unit : astropy Unit
Defaults to None
"""
from astropy.units import Unit
if idx is None:
idx = 1
# Use Table
if isinstance(hdulist[idx],BinTableHDU):
tab = Table(hdulist[idx].data)
header = hdulist[idx].header
else:
# NEED HEADER INFO
return None
# Try table header (following VLT/X-Shooter here)
keys = list(header) # Python 3
values = list(itervalues(header)) # Python 3
hidx = values.index(tag)
if keys[hidx][0:5] == 'TTYPE':
try:
tunit = header[keys[hidx].replace('TYPE','UNIT')]
except KeyError:
return None
else:
if tunit in ['Angstroem', 'Angstroms', 'ANGSTROMS']:
tunit = 'Angstrom'
unit = Unit(tunit)
return unit
else:
return None
#### ###############################
# Set wavelength array using Header cards
def setwave(hdr):
    """Build the wavelength array described by a FITS header.

    Combines NAXIS1, CRPIX1 (defaulting to 1) and CRVAL1 with the step
    from get_cdelt_dcflag; a dc_flag of 1 marks a log10 scale, which is
    exponentiated before returning.

    Parameters
    ----------
    hdr : FITS header

    Returns
    -------
    wave : ndarray
        Wavelength values (no units attached).
    """
    n_pix = hdr['NAXIS1']
    ref_pix = hdr['CRPIX1'] if 'CRPIX1' in hdr else 1.
    ref_val = hdr['CRVAL1']
    step, dc_flag = get_cdelt_dcflag(hdr)
    # Pixel indices are 1-based relative to the reference pixel.
    wave = ref_val + step * (np.arange(n_pix) + 1. - ref_pix)
    return 10.**wave if dc_flag == 1 else wave
def get_cdelt_dcflag(hd):
    """ Find the wavelength stepsize and dcflag from a fits header.

    Parameters
    ----------
    hd : astropy.io.fits header instance

    Returns
    -------
    cdelt1, dc_flag : float, int
        Wavelength stepsize and dcflag (1 if log-linear scale, 0 if linear).

    Raises
    ------
    KeyError
        If the header carries neither CDELT1 nor CD1_1. (The original
        code left `cdelt1` unbound here and crashed with a NameError.)
    """
    if 'CDELT1' in hd:
        cdelt1 = hd['CDELT1']
    elif 'CD1_1' in hd:
        cdelt1 = hd['CD1_1']  # SDSS style
    else:
        raise KeyError('Neither CDELT1 nor CD1_1 found in header')
    dc_flag = 0
    if 'DC-FLAG' in hd:
        dc_flag = hd['DC-FLAG']
    elif cdelt1 < 1e-4:
        # Heuristic: a tiny step strongly suggests log10(wavelength).
        warnings.warn('WARNING: CDELT1 < 1e-4, Assuming log wavelength scale')
        dc_flag = 1
    return cdelt1, dc_flag
#### ###############################
# Deal with .gz extensions, usually on FITS files
# See if filenm exists, if so pass it back
#
def chk_for_gz(filenm):
    """Resolve a filename, also trying a .gz-compressed variant.

    The '~' prefix is expanded first. Results, in priority order:

    * ``(path, True)`` -- the file exists as given
    * ``(path, 0)`` -- the name already contains '.gz'; no check performed
    * ``(path + '.gz', True)`` -- only the gzipped variant exists
    * ``(None, False)`` -- nothing found

    Parameters
    ----------
    filenm : string
        Filename to query

    Returns
    -------
    tuple of (str or None, bool or int)
        Resolved path (or None) and the existence flag described above.
    """
    path = os.path.expanduser(filenm)
    if os.path.lexists(path):
        return path, True
    # The caller already asked for a .gz name that is missing; flag
    # "no check performed" rather than appending a second .gz.
    if path.find('.gz') > 0:
        return path, 0
    gz_path = path + '.gz'
    if os.path.lexists(gz_path):
        return gz_path, True
    return None, False
def give_wv_units(wave):
    """Ensure a wavelength array carries units, defaulting to Angstroms.

    Parameters
    ----------
    wave : array or Quantity
        Input wavelength array

    Returns
    -------
    uwave : Quantity
        Wavelengths in Angstroms if the input was unitless or had a None
        unit; otherwise the same values with their original unit.
    """
    missing_unit = (not hasattr(wave, 'unit')) or (wave.unit is None)
    if missing_unit:
        return u.Quantity(wave, unit=u.AA)
    return u.Quantity(wave)
def is_UVES_popler(hd):
    """Return True if this header was produced by UVES_popler.

    Detection relies on the pipeline's signature line appearing in the
    HISTORY cards.

    Parameters
    ----------
    hd : FITS header
    """
    if 'history' not in hd:
        return False
    signature = 'UVES POst Pipeline Echelle Reduction'
    return any(signature in row for row in hd['history'])
def parse_UVES_popler(hdulist, **kwargs):
""" Read a spectrum from a UVES_popler-style fits file.
Parameters
----------
hdulist : FITS HDU list
Returns
-------
xspec1d : XSpectrum1D
Parsed spectrum
"""
from linetools.spectra.xspectrum1d import XSpectrum1D
hd = hdulist[0].header
uwave = setwave(hd) * u.Angstrom
co = hdulist[0].data[3]
fx = hdulist[0].data[0] * co # Flux
sig = hdulist[0].data[1] * co
xspec1d = XSpectrum1D.from_tuple((uwave, fx, sig, co), **kwargs)
return xspec1d
def parse_FITS_binary_table(hdulist, exten=None, wave_tag=None, flux_tag=None,
sig_tag=None, co_tag=None, var_tag=None,
ivar_tag=None, **kwargs):
""" Read a spectrum from a FITS binary table
Parameters
----------
hdulist : FITS HDU list
exten : int, optional
Extension for the binary table.
wave_tag : str, optional
flux_tag : str, optional
sig_tag : str, optional
co_tag : str, optional
var_tag : str, optional
Returns
-------
xspec1d : XSpectrum1D
Parsed spectrum
"""
# Flux
if flux_tag is None:
flux_tags = ['SPEC', 'FLUX', 'FLAM', 'FX', 'FNORM',
'FLUXSTIS', 'FLUX_OPT', 'fl', 'flux', 'counts',
'COUNTS', 'OPT_FLAM']
else:
flux_tags = [flux_tag]
fx, fx_tag = get_table_column(flux_tags, hdulist, idx=exten)
if fx is None:
print('Binary FITS Table but no Flux tag. Searched fo these tags:\n',
flux_tags)
return
# Error
if sig_tag is None:
sig_tags = ['ERROR','ERR','SIGMA_FLUX','ERR_FLUX', 'ENORM', 'FLAM_SIG',
'SIGMA_UP','ERRSTIS', 'FLUXERR', 'SIGMA', 'sigma',
'sigma_flux','er', 'err', 'error', 'sig', 'fluxerror',
'FLUX_ERROR','flux_error', 'OPT_FLAM_SIG']
else:
sig_tags = [sig_tag]
sig, sig_tag = get_table_column(sig_tags, hdulist, idx=exten)
if sig is None:
if ivar_tag is None:
ivar_tags = ['IVAR', 'IVAR_OPT', 'ivar', 'FLUX_IVAR']
else:
ivar_tags = [ivar_tag]
ivar, ivar_tag = get_table_column(ivar_tags, hdulist, idx=exten)
if ivar is None:
if var_tag is None:
var_tags = ['VAR', 'var']
else:
var_tags = [var_tag]
var, var_tag = get_table_column(var_tags, hdulist, idx=exten)
if var is None:
warnings.warn('No error tag found. Searched for these tags:\n'+ str(sig_tags + ivar_tags + var_tags))
else:
sig = np.sqrt(var)
else:
sig = np.zeros(ivar.size)
gdi = np.where( ivar > 0.)[0]
sig[gdi] = np.sqrt(1./ivar[gdi])
# Wavelength
if wave_tag is None:
wave_tags = ['WAVE','WAVELENGTH','LAMBDA','LOGLAM',
'WAVESTIS', 'WAVE_OPT', 'wa', 'wave', 'loglam','wl',
'wavelength', 'OPT_WAVE']
else:
wave_tags = [wave_tag]
wave, wave_tag = get_table_column(wave_tags, hdulist, idx=exten)
if wave_tag in ['LOGLAM','loglam']:
wave = 10.**wave
# Try for unit
wv_unit = get_wave_unit(wave_tag, hdulist, idx=exten)
if wv_unit is not None:
wave = wave * wv_unit
if wave is None:
print('Binary FITS Table but no wavelength tag. Searched for these tags:\n',
wave_tags)
return
if co_tag is None:
co_tags = ['CONT', 'CO', 'CONTINUUM', 'co', 'cont', 'continuum']
else:
co_tags = [co_tag]
co, co_tag = get_table_column(co_tags, hdulist, idx=exten)
# Finish
xspec1d = XSpectrum1D.from_tuple((give_wv_units(wave), fx, sig, co), **kwargs)
if 'METADATA' in hdulist[0].header:
xspec1d.meta.update(json.loads(hdulist[0].header['METADATA']))
return xspec1d
def parse_linetools_spectrum_format(hdulist, **kwargs):
    """ Parse an old linetools-format spectrum from an hdulist

    Expects named HDUs WAVELENGTH and FLUX, with optional ERROR and
    CONTINUUM extensions. JSON metadata stored in the primary header's
    METADATA card is merged into the spectrum's meta.

    Parameters
    ----------
    hdulist : FITS HDU list

    Returns
    -------
    xspec1d : XSpectrum1D
        Parsed spectrum

    Raises
    ------
    IOError
        If no WAVELENGTH extension is present. (The original code
        dropped into pdb here and then crashed on an undefined name.)
    """
    if 'WAVELENGTH' not in hdulist:
        raise IOError('No WAVELENGTH extension; not a linetools-format spectrum')
    wave = hdulist['WAVELENGTH'].data * u.AA
    fx = hdulist['FLUX'].data
    # Error array is optional
    if 'ERROR' in hdulist:
        sig = hdulist['ERROR'].data
    else:
        sig = None
    if 'CONTINUUM' in hdulist:
        co = hdulist['CONTINUUM'].data
    else:
        co = None
    xspec1d = XSpectrum1D.from_tuple((wave, fx, sig, co), **kwargs)
    if 'METADATA' in hdulist[0].header:
        # Trim trailing junk after the final '}' (bug fix of sorts)
        metas = hdulist[0].header['METADATA']
        ipos = metas.rfind('}')
        try:
            xspec1d.meta.update(json.loads(metas[:ipos+1]))
        except ValueError:
            # Malformed JSON: keep the spectrum, drop the extra metadata.
            print("Bad METADATA; proceeding without")
    return xspec1d
def parse_hdf5(inp, close=True, **kwargs):
""" Read a spectrum from HDF5 written in XSpectrum1D format
Expects: meta, data, units
Parameters
----------
inp : str or hdf5
Returns
-------
"""
import json
import h5py
# Path
path = kwargs.pop('path', '/')
# Open
if isinstance(inp, basestring):
hdf5 = h5py.File(inp, 'r')
else:
hdf5 = inp
# Data
data = hdf5[path+'data'][()]
# Meta
if 'meta' in hdf5[path].keys():
meta = json.loads(hdf5[path+'meta'][()])
# Headers
for jj,heads in enumerate(meta['headers']):
try:
meta['headers'][jj] = fits.Header.fromstring(meta['headers'][jj])
except TypeError: # dict
if not isinstance(meta['headers'][jj], dict):
raise IOError("Bad meta type")
else:
meta = None
# Units
units = json.loads(hdf5[path+'units'][()])
for key,item in units.items():
if item == 'dimensionless_unit':
units[key] = u.dimensionless_unscaled
else:
units[key] = getattr(u, item)
# Other arrays
try:
sig = data['sig']
except (NameError, IndexError):
sig = None
try:
co = data['co']
except (NameError, IndexError):
co = None
# Finish
if close:
hdf5.close()
return XSpectrum1D(data['wave'], data['flux'], sig=sig, co=co,
meta=meta, units=units, **kwargs)
def parse_DESI_brick(hdulist, select=0, **kwargs):
    """ Read a spectrum from a DESI brick format HDU list

    Expected layout: HDU0 = flux (nspec x npix), HDU1 = error ('ERROR' /
    'SIG') or inverse variance, HDU2 = wavelength (shared vector or one
    row per spectrum).

    Parameters
    ----------
    hdulist : FITS HDU list
    select : int, optional
        Spectrum selected. Default is 0

    Returns
    -------
    xspec1d : XSpectrum1D
        Parsed spectrum
    """
    fx = hdulist[0].data
    # Sig
    if hdulist[1].name in ['ERROR', 'SIG']:
        sig = hdulist[1].data
    else:
        # HDU1 holds inverse variance; convert, leaving sig=0 where ivar<=0
        ivar = hdulist[1].data
        sig = np.zeros_like(ivar)
        gdi = ivar > 0.
        sig[gdi] = np.sqrt(1./ivar[gdi])
    # Wave
    wave = hdulist[2].data
    wave = give_wv_units(wave)
    # A single shared wavelength vector is tiled to one row per spectrum
    if wave.shape != fx.shape:
        wave = np.tile(wave, (fx.shape[0],1))
    # Finish
    xspec1d = XSpectrum1D(wave, fx, sig, select=select, **kwargs)
    return xspec1d
def parse_two_file_format(specfil, hdulist, efil=None, **kwargs):
""" Parse old two file format (one for flux, another for error).
Parameters
----------
specfil : str
Flux filename
hdulist : FITS HDU list
efil : str, optional
Error filename. By default this is inferred from the flux
filename.
Returns
-------
xspec1d : XSpectrum1D
Parsed spectrum
"""
head0 = hdulist[0].header
# Error
if efil is None:
ipos = max(specfil.find('F.fits'),
specfil.find('f.fits'), specfil.find('flx.fits'))
if ipos < 0:
# Becker XShooter style
ipos = specfil.find('.fits')
efil,chk = chk_for_gz(specfil[0:ipos]+'e.fits')
else:
if specfil.find('F.fits') > 0:
efil,chk = chk_for_gz(specfil[0:ipos]+'E.fits')
else:
efil,chk = chk_for_gz(specfil[0:ipos]+'e.fits')
if efil is None:
efil,chk = chk_for_gz(specfil[0:ipos]+'err.fits')
# Error file
if efil is not None:
efil = os.path.expanduser(efil)
sighdu = fits.open(efil, **kwargs)
sig = sighdu[0].data
else:
sig = None
#Log-Linear?
try:
dc_flag = head0['DC-FLAG']
except KeyError:
# The following is necessary for Becker's XShooter output
cdelt1, dc_flag = get_cdelt_dcflag(head0)
# Read
if dc_flag in [0,1]:
# Data
fx = hdulist[0].data
# Generate wave
wave = setwave(head0)
else:
raise ValueError('DC-FLAG has unusual value {:d}'.format(dc_flag))
# Finish
xspec1d = XSpectrum1D.from_tuple((wave, fx, sig, None), **kwargs)
return xspec1d
|
996,250 | 75e9ef5735ca09bd771fd5a3d02aee50babf947f | import os
import socket
import time
import contextlib
from threading import Thread
from threading import Event
from threading import Lock
import json
import subprocess
from contextlib import contextmanager
import pytest
import mock
import requests
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from chalice import app
from chalice.awsclient import TypedAWSClient
from chalice.deploy.models import LambdaFunction
from chalice.deploy.packager import LambdaDeploymentPackager
from chalice.deploy.packager import LayerDeploymentPackager
from chalice.docker import LambdaImageBuilder
from chalice.local import create_local_server, DockerPackager
from chalice.local import ContainerProxyResourceManager
from chalice.local import LambdaLayerDownloader
from chalice.config import Config
from chalice.utils import OSUtils, UI
APPS_DIR = os.path.dirname(os.path.abspath(__file__))
ENV_APP_DIR = os.path.join(APPS_DIR, 'envapp')
BASIC_APP = os.path.join(APPS_DIR, 'basicapp')
NEW_APP_VERSION = """
from chalice import Chalice
app = Chalice(app_name='basicapp')
@app.route('/')
def index():
return {'version': 'reloaded'}
"""
@contextmanager
def cd(path):
    """Temporarily chdir into `path`, restoring the previous cwd on exit."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
@pytest.fixture()
def basic_app(tmpdir):
tmpdir = str(tmpdir.mkdir('basicapp'))
OSUtils().copytree(BASIC_APP, tmpdir)
return tmpdir
class ThreadedLocalServer(Thread):
    """Run a chalice local server on a background thread for tests."""
    def __init__(self, port, host='localhost'):
        super(ThreadedLocalServer, self).__init__()
        self._app_object = None
        self._config = None
        self._host = host
        self._port = port
        self._server = None
        # Set once serve_forever() is about to start; callers block on it.
        self._server_ready = Event()
    def wait_for_server_ready(self):
        """Block until run() has created the server."""
        self._server_ready.wait()
    def configure(self, app_object, config):
        """Store the app and config to serve; call before start()."""
        self._app_object = app_object
        self._config = config
    def run(self):
        self._server = create_local_server(
            self._app_object, self._config, self._host, self._port)
        self._server_ready.set()
        self._server.serve_forever()
    def make_call(self, method, path, port, timeout=0.5):
        """Invoke a requests-style `method` against the server at `port`.

        Note the explicit `port` argument is used in the URL, not the
        port the server was constructed with.
        """
        self._server_ready.wait()
        return method('http://{host}:{port}{path}'.format(
            path=path, host=self._host, port=port), timeout=timeout)
    def shutdown(self):
        """Stop the server loop if it was ever started."""
        if self._server is not None:
            self._server.server.shutdown()
@pytest.fixture
def config():
    """Default (empty) chalice Config."""
    return Config()
@pytest.fixture()
def unused_tcp_port():
    """Bind an ephemeral localhost port and return its number."""
    with socket.socket() as sock:
        sock.bind(('127.0.0.1', 0))
        return sock.getsockname()[1]
@pytest.fixture()
def http_session():
    """A requests session that retries until the local server is reachable."""
    session = requests.Session()
    retry = Retry(
        # How many connection-related errors to retry on.
        connect=10,
        # A backoff factor to apply between attempts after the second try.
        backoff_factor=2,
        # NOTE(review): `method_whitelist` was renamed `allowed_methods` in
        # urllib3 1.26 and removed in 2.0 -- confirm the pinned urllib3 version.
        method_whitelist=['GET', 'POST', 'PUT'],
    )
    session.mount('http://', HTTPAdapter(max_retries=retry))
    return HTTPFetcher(session)
class HTTPFetcher(object):
    """Thin wrapper that GETs a URL and decodes the JSON body."""

    def __init__(self, session):
        self.session = session

    def json_get(self, url):
        """GET *url*, raise on HTTP errors, return the parsed JSON."""
        resp = self.session.get(url)
        resp.raise_for_status()
        body = resp.content
        return json.loads(body)
@pytest.fixture()
def local_server_factory(unused_tcp_port):
    """Yield a factory that starts a ThreadedLocalServer on a free port.

    At most one server per test; it is shut down during fixture teardown.
    """
    threaded_server = ThreadedLocalServer(unused_tcp_port)

    def create_server(app_object, config):
        threaded_server.configure(app_object, config)
        threaded_server.start()
        return threaded_server, unused_tcp_port

    try:
        yield create_server
    finally:
        threaded_server.shutdown()
@pytest.fixture
def sample_app():
    """Chalice app exposing the routes used across the server tests."""
    demo = app.Chalice('demo-app')
    # Shared state exercised by the thread-safety test.
    thread_safety_check = []
    lock = Lock()

    @demo.route('/', methods=['GET'])
    def index():
        return {'hello': 'world'}

    @demo.route('/test-cors', methods=['POST'], cors=True)
    def test_cors():
        return {'hello': 'world'}

    @demo.route('/count', methods=['POST'])
    def record_counter():
        # An extra delay helps ensure we consistently fail if we're
        # not thread safe.
        time.sleep(0.001)
        count = int(demo.current_request.json_body['counter'])
        with lock:
            thread_safety_check.append(count)

    @demo.route('/count', methods=['GET'])
    def get_record_counter():
        # Return a copy so callers can't mutate the shared list.
        return thread_safety_check[:]

    return demo
def test_has_thread_safe_current_request(config, sample_app,
                                         local_server_factory):
    """Concurrent POSTs must each see their own current_request."""
    local_server, port = local_server_factory(sample_app, config)
    local_server.wait_for_server_ready()
    num_requests = 25
    num_threads = 5

    # The idea here is that each requests.post() has a unique 'counter'
    # integer. If the current request is thread safe we should see a number
    # for each 0 - (num_requests * num_threads). If it's not thread safe
    # we'll see missing numbers and/or duplicates.
    def make_requests(counter_start):
        for i in range(counter_start * num_requests,
                       (counter_start + 1) * num_requests):
            # We're slowing the sending rate down a bit. The threaded
            # http server is good, but not great. You can still overwhelm
            # it pretty easily.
            time.sleep(0.001)
            requests.post(
                'http://localhost:%s/count' % port, json={'counter': i})

    threads = []
    for i in range(num_threads):
        threads.append(Thread(target=make_requests, args=(i,)))
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    response = requests.get('http://localhost:%s/count' % port)
    # Every counter value must appear exactly once.
    assert len(response.json()) == len(range(num_requests * num_threads))
    assert sorted(response.json()) == list(range(num_requests * num_threads))
def test_can_accept_get_request(config, sample_app, local_server_factory):
    """A plain GET / returns the JSON body from the index route."""
    server, port = local_server_factory(sample_app, config)
    response = server.make_call(requests.get, '/', port)
    assert response.status_code == 200
    assert response.text == '{"hello":"world"}'
def test_can_get_unicode_string_content_length(
        config, local_server_factory):
    """Content-Length must count UTF-8 bytes, not characters."""
    demo = app.Chalice('app-name')

    @demo.route('/')
    def index_view():
        return u'\u2713'

    local_server, port = local_server_factory(demo, config)
    response = local_server.make_call(requests.get, '/', port)
    # U+2713 (check mark) encodes to 3 bytes in UTF-8.
    assert response.headers['Content-Length'] == '3'
def test_can_accept_options_request(config, sample_app, local_server_factory):
    """A CORS preflight OPTIONS gets an empty body plus the allowed methods."""
    server, port = local_server_factory(sample_app, config)
    resp = server.make_call(requests.options, '/test-cors', port)
    assert resp.text == ''
    assert resp.headers['Content-Length'] == '0'
    assert resp.headers['Access-Control-Allow-Methods'] == 'POST,OPTIONS'
def test_can_accept_multiple_options_request(config, sample_app,
                                             local_server_factory):
    """The preflight response stays identical on repeated OPTIONS calls."""
    server, port = local_server_factory(sample_app, config)
    for _ in range(2):
        resp = server.make_call(requests.options, '/test-cors', port)
        assert resp.headers['Content-Length'] == '0'
        assert resp.headers['Access-Control-Allow-Methods'] == 'POST,OPTIONS'
        assert resp.text == ''
def test_can_accept_multiple_connections(config, sample_app,
                                         local_server_factory):
    """An idle keep-alive connection must not block other requests."""
    # When a GET request is made to Chalice from a browser, it will send the
    # connection keep-alive header in order to hold the connection open and
    # reuse it for subsequent requests. If the conncetion close header is sent
    # back by the server the connection will be closed, but the browser will
    # reopen a new connection just in order to have it ready when needed.
    # In this case, since it does not send any content we do not have the
    # opportunity to send a connection close header back in a response to
    # force it to close the socket.
    # This is an issue in Chalice since the single threaded local server will
    # now be blocked waiting for IO from the browser socket. If a request from
    # any other source is made it will be blocked until the browser sends
    # another request through, giving us a chance to read from another socket.
    local_server, port = local_server_factory(sample_app, config)
    local_server.wait_for_server_ready()
    # We create a socket here to emulate a browser's open connection and then
    # make a request. The request should succeed.
    socket.create_connection(('localhost', port), timeout=1)
    try:
        response = local_server.make_call(requests.get, '/', port)
    except requests.exceptions.ReadTimeout:
        assert False, (
            'Read timeout occurred, the socket is blocking the next request '
            'from going though.'
        )
    assert response.status_code == 200
    assert response.text == '{"hello":"world"}'
def test_can_import_env_vars(unused_tcp_port, http_session):
    """`chalice local` should load env vars from the envapp project config."""
    with cd(ENV_APP_DIR):
        p = subprocess.Popen(['chalice', 'local', '--port',
                              str(unused_tcp_port)],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        _wait_for_server_ready(p)
        try:
            _assert_env_var_loaded(unused_tcp_port, http_session)
        finally:
            # Always kill the subprocess, even on assertion failure.
            p.terminate()
def _wait_for_server_ready(process):
if process.poll() is not None:
raise AssertionError(
'Local server immediately exited with rc: %s' % process.poll()
)
def _assert_env_var_loaded(port_number, http_session):
response = http_session.json_get('http://localhost:%s/' % port_number)
assert response == {'hello': 'bar'}
def test_can_reload_server(unused_tcp_port, basic_app, http_session):
    """Rewriting app.py while `chalice local` runs triggers a live reload."""
    with cd(basic_app):
        p = subprocess.Popen(['chalice', 'local', '--port',
                              str(unused_tcp_port)],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        _wait_for_server_ready(p)
        url = 'http://localhost:%s/' % unused_tcp_port
        try:
            assert http_session.json_get(url) == {'version': 'original'}
            # Updating the app should trigger a reload.
            with open(os.path.join(basic_app, 'app.py'), 'w') as f:
                f.write(NEW_APP_VERSION)
            # Give the file watcher time to restart the server.
            time.sleep(2)
            assert http_session.json_get(url) == {'version': 'reloaded'}
        finally:
            p.terminate()
def test_container_proxy_resource_manager_build(basic_app, config):
    """build_resources packages the app/layers and returns one entry per function."""
    class DummyLambda(LambdaFunction):
        # Minimal stand-in carrying only the attributes the manager reads.
        def __init__(self, handler, function_name):
            self.handler = handler
            self.function_name = function_name
            self.resource_name = function_name

    ui = mock.Mock(spec=UI)
    osutils = mock.Mock(spec=OSUtils)
    packager = mock.Mock(spec=DockerPackager)
    image_builder = mock.Mock(spec=LambdaImageBuilder)
    packager.package_layers.return_value = {
        "a": "/path/a",
        "b": "/path/b"
    }
    with cd(basic_app):
        resource_manager = ContainerProxyResourceManager(
            config, ui, osutils, packager, image_builder)
        lambda_functions = [DummyLambda("1", "a"), DummyLambda("2", "b")]
        containers = resource_manager.build_resources(lambda_functions)
        packager.package_app.assert_called_with()
        packager.package_layers.assert_called_with(lambda_functions)
        image_builder.build.assert_called_with(config.lambda_python_version)
        # One container handle per lambda function, keyed by resource name.
        assert len(containers) == 2
        assert 'a' in containers
        assert 'b' in containers
def test_container_proxy_resource_manager_cleanup_nothing_no_errors():
    """cleanup() on a manager that never built anything must not raise."""
    config = Config(config_from_disk={"project_dir": "path"})
    osutils = mock.Mock(spec=OSUtils)
    resource_manager = ContainerProxyResourceManager(
        config, None, osutils, None, None
    )
    resource_manager.cleanup()
class TestLambdaLayerDownloader(object):
    """Unit tests for LambdaLayerDownloader; AWS and HTTP access are mocked."""

    @pytest.fixture
    def lambda_client(self):
        # get_layer_version returns the minimal payload the downloader
        # needs: a pre-signed download location.
        client = mock.Mock(spec=TypedAWSClient)
        client.get_layer_version.return_value = {
            "Content": {
                "Location": "uri"
            }
        }
        return client

    @pytest.fixture
    def osutils(self):
        osutils = mock.Mock(spec=OSUtils)
        # Delegate path joining to the real os module.
        osutils.joinpath = os.path.join
        # Pretend nothing is cached yet so downloads actually happen.
        osutils.file_exists.return_value = False
        return osutils

    @pytest.fixture
    def session(self):
        # The streamed download yields no chunks; an empty file is written.
        session = mock.Mock(spec=requests.Session)
        session.get.return_value.iter_content.return_value = []
        return session

    @pytest.fixture
    def layer_downloader(self, config, lambda_client, osutils, session):
        ui = mock.Mock(spec=UI)
        layer_downloader = LambdaLayerDownloader(config, ui, lambda_client,
                                                 osutils, session)
        return layer_downloader

    def test_layer_downloader_download_all(self, osutils, lambda_client,
                                           session, layer_downloader,
                                           basic_app, config):
        """Each requested arn produces one versioned zip in the cache dir."""
        layer_arns = {"arn1", "arn2", "arn3"}
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, "cache")
            os.mkdir(cache_dir)
            paths = layer_downloader.download_all(layer_arns, cache_dir)
            files = os.listdir(cache_dir)
            for file in files:
                # Cache entries are named layer-<...>-<pyversion>.zip.
                assert file.startswith("layer-")
                python_version = config.lambda_python_version
                assert file.endswith("-" + python_version + ".zip")
                assert os.path.join(cache_dir, file) in paths
            assert len(files) == len(layer_arns)
            assert osutils.file_exists.call_count == len(layer_arns)
            assert lambda_client.get_layer_version.call_count == len(layer_arns)
            assert session.get.call_count == len(layer_arns)
            assert len(paths) == len(layer_arns)

    def test_layer_downloader_download_one(self, osutils, lambda_client,
                                           session, layer_downloader,
                                           basic_app, config):
        """A single download writes exactly one cache file and returns its path."""
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, "cache")
            os.mkdir(cache_dir)
            path = layer_downloader.download("layer", cache_dir)
            files = os.listdir(cache_dir)
            assert len(files) == 1
            file = files[0]
            assert file.startswith("layer-")
            python_version = config.lambda_python_version
            assert file.endswith("-" + python_version + ".zip")
            assert os.path.join(cache_dir, file) == path
            osutils.file_exists.assert_called_once()
            lambda_client.get_layer_version.assert_called_once()
            session.get.assert_called_once()

    def test_layer_downloader_ignores_cached(self, osutils, lambda_client,
                                             session, layer_downloader,
                                             basic_app):
        """A cache hit skips both the AWS lookup and the HTTP download."""
        osutils.file_exists.return_value = True
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, "cache")
            os.mkdir(cache_dir)
            osutils.file_exists.return_value = True
            layer_downloader.download("hello", cache_dir)
            files = os.listdir(cache_dir)
            assert len(files) == 0
            osutils.file_exists.assert_called_once()
            lambda_client.get_layer_version.assert_not_called()
            session.get.assert_not_called()

    def test_layer_downloader_download_invalid_arn_raises_error(
            self, lambda_client, layer_downloader, basic_app):
        """A get_layer_version response without Content is rejected."""
        lambda_client.get_layer_version.return_value = {}
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, "cache")
            os.mkdir(cache_dir)
            with pytest.raises(ValueError) as e:
                layer_downloader.download("hello", cache_dir)
            # Nothing may be written for a failed download.
            files = os.listdir(cache_dir)
            assert len(files) == 0
            assert "Invalid layer arn" in str(e.value)
class TestDockerPackager(object):
    """Unit tests for DockerPackager; packagers, downloader and OS are mocked."""

    @pytest.fixture
    def config(self, basic_app):
        config = Config(
            config_from_disk={
                'project_dir': basic_app,
                'layers': ['hello', 'world', 'layers']
            }
        )

        # Any stage/function scope resolves to this same config.
        def dummy_scope(stage, function):
            return config
        config.scope = dummy_scope
        return config

    @pytest.fixture
    def autolayer_config(self, basic_app):
        # Same as ``config`` but with automatic layer packaging enabled.
        config = Config(
            config_from_disk={
                'project_dir': basic_app,
                'layers': ['hello', 'world', 'layers'],
                'automatic_layer': True
            }
        )

        def dummy_scope(stage, function):
            return config
        config.scope = dummy_scope
        return config

    @pytest.fixture
    def layer_downloader(self):
        layer_downloader = mock.Mock(spec=LambdaLayerDownloader)
        layer_downloader.download_all.return_value = [
            'hello.zip', 'world.zip', 'layers.zip'
        ]
        return layer_downloader

    @pytest.fixture
    def app_packager(self):
        app_packager = mock.Mock(spec=LambdaDeploymentPackager)
        app_packager.create_deployment_package.return_value = "app.zip"
        return app_packager

    @pytest.fixture
    def layer_packager(self):
        layer_packager = mock.Mock(spec=LayerDeploymentPackager)
        layer_packager.create_deployment_package.return_value = "layer.zip"
        return layer_packager

    @pytest.fixture
    def docker_packager(self, config, osutils, app_packager,
                        layer_packager, layer_downloader):
        return DockerPackager(config, osutils, app_packager,
                              layer_packager, layer_downloader)

    @pytest.fixture
    def osutils(self):
        osutils = mock.Mock(spec=OSUtils)
        # Delegate path manipulation to the real os module.
        osutils.joinpath = os.path.join
        osutils.makedirs = os.makedirs
        osutils.directory_exists.return_value = False
        osutils.file_exists.return_value = False
        return osutils

    class DummyLambda(LambdaFunction):
        # Minimal stand-in carrying only the attributes DockerPackager reads.
        def __init__(self, handler, function_name):
            self.handler = handler
            self.function_name = function_name
            self.resource_name = function_name

    def test_package_app_not_existing(self, basic_app, osutils, config,
                                      app_packager, docker_packager):
        """package_app builds app.zip and extracts it into the cache dir."""
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, ".chalice", "deployments")
            path = docker_packager.package_app()
            files = os.listdir(cache_dir)
            assert "app" in files
            expected_path = os.path.join(cache_dir, "app")
            assert path == expected_path
            # BUGFIX: the original `assert ...extract_zipfile.called_with(...)`
            # was a no-op -- `called_with` is just an auto-created, always
            # truthy Mock attribute, so the args were never checked.  Assert
            # that the zip produced by the app packager was extracted there.
            osutils.extract_zipfile.assert_called_with(
                app_packager.create_deployment_package.return_value,
                expected_path)
            python_version = config.lambda_python_version
            app_packager.create_deployment_package.assert_called_with(
                basic_app, python_version)

    def test_package_app_already_exists(self, basic_app, osutils, config,
                                        app_packager, docker_packager):
        """An existing unpacked app is reused without re-extraction."""
        osutils.directory_exists.return_value = True
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, ".chalice", "deployments")
            os.makedirs(cache_dir)
            path = docker_packager.package_app()
            files = os.listdir(cache_dir)
            assert len(files) == 0
            expected_path = os.path.join(cache_dir, "app")
            assert path == expected_path
            osutils.extract_zipfile.assert_not_called()

    def test_package_layers_no_auto_layer(self, basic_app, osutils, config,
                                          layer_packager, docker_packager):
        """Without automatic_layer, all functions share one layers dir."""
        osutils.directory_exists = os.path.isdir
        with cd(basic_app):
            prefix = os.path.join(basic_app, ".chalice",
                                  "deployments", "layers-")
            lambdas = [
                self.DummyLambda("1", "a"),
                self.DummyLambda("2", "b"),
                self.DummyLambda("3", "c"),
            ]
            path_map = docker_packager.package_layers(lambdas)
            assert len(path_map) == 3
            assert path_map["a"] == path_map["b"] == path_map["c"]
            assert path_map["a"].startswith(prefix)
            python_version = config.lambda_python_version
            assert path_map["a"].endswith("-" + python_version)
            layer_packager.create_deployment_package.assert_not_called()

    def test_package_layers_with_auto_layer(self, basic_app, osutils,
                                            autolayer_config, app_packager,
                                            layer_packager, layer_downloader):
        """With automatic_layer, the layer packager builds a dependency layer."""
        docker_packager = DockerPackager(autolayer_config, osutils,
                                         app_packager, layer_packager,
                                         layer_downloader)
        with cd(basic_app):
            docker_packager.package_layers([self.DummyLambda("1", "a")])
            python_version = autolayer_config.lambda_python_version
            layer_packager.create_deployment_package.assert_called_with(
                basic_app, python_version)

    def test_create_layer_directory_not_existing(self, basic_app, config,
                                                 docker_packager, osutils,
                                                 layer_downloader):
        """All downloaded layer zips plus the app layer are extracted."""
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, ".chalice", "deployments")
            layer_arns = ["arn1", "arn2", "arn3"]
            path = docker_packager.create_layer_directory(layer_arns, "/path")
            files = os.listdir(cache_dir)
            assert len(files) == 1
            assert files[0].startswith("layers")
            python_version = config.lambda_python_version
            assert files[0].endswith("-" + python_version)
            expected_path = os.path.join(cache_dir, files[0])
            assert path == expected_path
            unzip_calls = [
                mock.call("/path", path),
                mock.call("hello.zip", path),
                mock.call("world.zip", path),
                mock.call("layers.zip", path)
            ]
            osutils.extract_zipfile.assert_has_calls(unzip_calls)
            assert osutils.extract_zipfile.call_count == 4
            layer_downloader.download_all.assert_called_with(layer_arns,
                                                             cache_dir)

    def test_create_layer_directory_already_exists(self, basic_app, config,
                                                   docker_packager, osutils,
                                                   layer_downloader):
        """An already-built layer directory is reused without extraction."""
        osutils.directory_exists.return_value = True
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, ".chalice", "deployments")
            os.makedirs(cache_dir)
            layer_arns = ["arn1", "arn2", "arn3"]
            path = docker_packager.create_layer_directory(layer_arns, "/path")
            files = os.listdir(cache_dir)
            assert len(files) == 0
            expected_prefix = os.path.join(cache_dir, "layers-")
            assert path.startswith(expected_prefix)
            python_version = config.lambda_python_version
            assert path.endswith("-" + python_version)
            osutils.extract_zipfile.assert_not_called()

    def test_create_layer_directory_no_autolayer(self, basic_app, config,
                                                 docker_packager, osutils,
                                                 layer_downloader):
        """An empty app-layer path extracts only the downloaded zips."""
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, ".chalice", "deployments")
            layer_arns = ["arn1", "arn2", "arn3"]
            path = docker_packager.create_layer_directory(layer_arns, "")
            files = os.listdir(cache_dir)
            assert len(files) == 1
            assert files[0].startswith("layers")
            python_version = config.lambda_python_version
            assert files[0].endswith("-" + python_version)
            expected_path = os.path.join(cache_dir, files[0])
            assert path == expected_path
            unzip_calls = [
                mock.call("hello.zip", path),
                mock.call("world.zip", path),
                mock.call("layers.zip", path)
            ]
            osutils.extract_zipfile.assert_has_calls(unzip_calls)
            assert osutils.extract_zipfile.call_count == 3
            layer_downloader.download_all.assert_called_with(layer_arns,
                                                             cache_dir)

    def test_create_layer_directory_different_output_on_autolayer_mismatch(
            self, basic_app, docker_packager, osutils):
        """The cache key must include the app-layer path, not just the arns."""
        osutils.directory_exists = os.path.isdir
        with cd(basic_app):
            layer_arns = ["arn1", "arn2", "arn3"]
            path1 = docker_packager.create_layer_directory(layer_arns, "")
            path2 = docker_packager.create_layer_directory(layer_arns, "path")
            assert path1 != path2

    def test_create_layer_directory_does_not_raise_filename_too_long(
            self, basic_app, layer_downloader, docker_packager, osutils):
        """Very long arn lists must be hashed/truncated into a valid dir name."""
        with cd(basic_app):
            cache_dir = os.path.join(basic_app, ".chalice", "deployments")
            filename = "zip" * 25
            layer_arns = [filename, filename, filename, filename, filename]
            docker_packager.create_layer_directory(layer_arns, "/path")
            files = os.listdir(cache_dir)
            assert len(files) == 1
            assert files[0].startswith("layers-")

    def test_creates_cache_dir_if_nonexistent(
            self, osutils, docker_packager, basic_app):
        """package_app creates .chalice/deployments when it is missing."""
        osutils.directory_exists.return_value = False
        with cd(basic_app):
            docker_packager.package_app()
            chalice_dir = os.path.join(basic_app, ".chalice")
            assert 'deployments' in os.listdir(chalice_dir)
|
996,251 | 71bc9672bea10c99bae8c5a3c5b8401f646b441c | import streamlit as st
import pandas as pd
import numpy as np
import simplejson as json
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Rectangle, Arc
st.title("Shot Chart Visualization")
st.subheader("Shot charts for all All Stars for the 2019-2020 season plus the top 3 draft picks")


# Cache the CSV load across streamlit reruns so the app stays responsive.
@st.cache(persist=True)
def load_data(csv, nrows):
    """Read at most *nrows* rows of shot data from *csv*."""
    data = pd.read_csv(csv, nrows=nrows)
    return data


raw = load_data('raw_new.csv', 200000)
@st.cache
def fill(df, player, make_or_all, time_remaining, win_or_lose, up_or_down):
    """Filter shots for one player/situation; optionally keep only makes."""
    mask = (
        (df['PLAYER_NAME'] == player)
        & (df['clutch'] == time_remaining)
        & (df['win'] == win_or_lose)
        & (df['status'] == up_or_down)
    )
    selected = df[mask]
    if make_or_all != 'FGA':
        # Anything other than 'FGA' means "made shots only".
        selected = selected[selected['SHOT_MADE_FLAG'] == 1]
    return selected
# ---- Sidebar controls ---------------------------------------------------
player = st.sidebar.selectbox(
    "What Player would you like to look at?",
    ("LeBron James", "Giannis Antetokounmpo", "James Harden", "Devin Booker", "Donovan Mitchell", "Russell Westbrook", "Damian Lillard", "Luka Doncic", "Nikola Jokic", "Anthony Davis", "Trae Young", "Jayson Tatum", "Kawhi Leonard", "Brandon Ingram", "Pascal Siakam", "Khris Middleton", "Domantas Sabonis", "Bam Adebayo", "Ja Morant", "Chris Paul", "Rudy Gobert", "Joel Embiid", "Ben Simmons", "Kemba Walker", "Jimmy Butler", "Kyle Lowry", "RJ Barrett", "Zion Williamson")
)
make = st.sidebar.selectbox(
    "Do you want to see all shots or just makes?",
    ("All", "Just Makes")
)
time = st.sidebar.selectbox(
    "Do you want to see how your player shoots in the clutch?",
    ("No", "Last 5 Minutes", "Last 3 Minutes")
)
win = st.sidebar.selectbox(
    "Do you want to see all how your player shot in a win?",
    ("All", "W", "L")
)
up = st.sidebar.selectbox(
    "Do you want to see how your player shot ahead or trailing?",
    ("All", "Ahead", "Trailing")
)
# ---- Map the UI labels onto the values used in the dataframe ------------
if make == 'All':
    make = 'FGA'
else:
    make = 'PTS'
if time == 'No':
    time = 'none'
else:
    time = time  # label already matches the data encoding
if win == 'All':
    win = 'none'
else:
    win = win  # label already matches the data encoding
if up == 'All':
    up = 'none'
elif up == 'Ahead':
    up = 'Ahead or Tied'
else:
    up = 'Behind or Tied'
viz = st.button('Create Visualization Now!')
if viz:
    with st.spinner('Creating Visualization...'):
        df = fill(raw, player, make, time, win, up)
    # Summary stats for the filtered selection.
    st.subheader('Percentage Made based on Criteria: ' + str(round(len(df[df['SHOT_MADE_FLAG'] == 1])/len(df)*100, 2)) + '%')
    st.subheader('Percentage of 2 PT shots taken: ' + str(round(len(df[df['SHOT_TYPE'] == '2PT Field Goal'])/len(df)*100, 2)) + '%')
    st.subheader('Percentage of 3 PT shots taken: ' + str(round(len(df[df['SHOT_TYPE'] == '3PT Field Goal'])/len(df)*100, 2)) + '%')
    # NOTE(review): positional x/y args to jointplot are deprecated in newer
    # seaborn releases (use data=/x=/y= keywords) -- confirm pinned version.
    joint_shot_chart = sns.jointplot(df.LOC_X, df.LOC_Y,
                                     kind='scatter', space=0, alpha=0.5)
    joint_shot_chart.fig.set_size_inches(11, 11)
    ax = joint_shot_chart.ax_joint
    # Court geometry below is in tenths of feet, hoop-centred coordinates.
    hoop = Circle((0, 0), radius=7.5, linewidth=2, color='black', fill=False)
    # Create backboard
    backboard = Rectangle((-30, -7.5), 60, -1, linewidth=2, color='black')
    # The paint
    # Create the outer box 0f the paint, width=16ft, height=19ft
    outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=2, color='black',
                          fill=False)
    # Create the inner box of the paint, widt=12ft, height=19ft
    inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=2, color='black',
                          fill=False)
    # Create free throw top arc
    top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180,
                         linewidth=2, color='black', fill=False)
    # Create free throw bottom arc
    bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0,
                            linewidth=2, color='black', linestyle='dashed')
    # Restricted Zone, it is an arc with 4ft radius from center of the hoop
    restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=2,
                     color='black')
    # Three point line
    # Create the side 3pt lines, they are 14ft long before they begin to arc
    corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=2,
                               color='black')
    corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=2, color='black')
    # 3pt arc - center of arc will be the hoop, arc is 23'9" away from hoop
    # I just played around with the theta values until they lined up with the
    # threes
    three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=2,
                    color='black')
    # Center Court
    center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0,
                           linewidth=2, color='black')
    center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0,
                           linewidth=2, color='black')
    court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw,
                      bottom_free_throw, restricted, corner_three_a,
                      corner_three_b, three_arc, center_outer_arc,
                      center_inner_arc]
    for element in court_elements:
        ax.add_patch(element)
    # Flip the y axis so the hoop is at the top, half-court at the bottom.
    ax.set_xlim(-250, 250)
    ax.set_ylim(422.5, -47.5)
    ax.set_xlabel('')
    ax.set_ylabel('')
    ax.tick_params(labelbottom=False, labelleft=False)
    st.pyplot(joint_shot_chart)
|
996,252 | c4ef7e007b8d86f0c6562eb1b910d0e7fcdf9293 | from io import StringIO
# Demonstrate in-memory text I/O with io.StringIO.
f = StringIO()
f.write('hello')
f.write(' ')
f.write('world!')
print(f.getvalue())

t = StringIO('hello\nworld!')
# Iterate the stream line by line (replaces the manual readline loop,
# which also shadowed the builtin name `str`); iteration stops at EOF.
for line in t:
    print(line.strip())
996,253 | d524919ca80e228805f622d76db00912086be1fe | #!/usr/bin/python3
"""
This init imports fib.py only
fib.py contains the function fib(a,b=None)
"""
from .fib import fib
|
996,254 | 7f4837aa1698477bcb580e82c814a80df4a95352 | from django.conf.urls import include, url
from django.contrib import admin
from swautocheckin import views
urlpatterns = [
    # Landing page: collect the passenger's email address.
    url(r'^$', views.email_view, name='email'),
    # Create a check-in reservation for an existing passenger.
    url(r'^passenger/(?P<passenger_uuid>[^/]+)/create-reservation$', views.reservation_view, name='reservation'),
    # Confirmation page after a reservation is created.
    url(r'^reservation/(?P<reservation_uuid>[^/]+)/success$', views.success_view, name='success'),
    # Deliberately raises, to exercise the 500 handler / error reporting.
    url(r'^error$', views.force_error_view),
    url(r'^admin/', include(admin.site.urls)),
    # url(r'^sentry/', include('sentry.web.urls')),
]

# Custom error views so failures render app-specific pages.
handler404 = 'swautocheckin.views.handler404'
handler500 = 'swautocheckin.views.handler500'
handler400 = 'swautocheckin.views.handler400'
996,255 | 8a4a833124f1e2b127866ede5a839806d9b31156 | set24 = {"1", "2", "3", "4", "6", "8", "12", "24"}
# Factors of 36.  BUGFIX: the original literal `{"1"",2", ...}` concatenated
# adjacent strings into the single element "1,2", silently dropping "2".
set36 = {"1", "2", "3", "4", "6", "9", "12", "18", "36"}
set24_1 = {"24", "48", "72", "96", "120"}
# Multiples of 36.  BUGFIX: 36*4=144 and 36*5=180 (were 142 / 178).
set36_1 = {"36", "72", "108", "144", "180"}
# key=int compares numerically; plain max/min on digit strings is
# lexicographic (e.g. "6" > "12") and would report the wrong GCD.
print("24和36的最大公约数为:%s" % max(set24.intersection(set36), key=int))
print("24和36的最小公倍数为:%s" % min(set24_1.intersection(set36_1), key=int))
996,256 | a1d4427d54b500203ae2190c32d5d75896b55791 | from os.path import isfile, join
from os import listdir
import allVariables
# Root folder of the training corpus, configured centrally in allVariables.
myDirectory = allVariables.pathToTrain


# Recursive helper that appends its category label to every training file.
def fichier_rec(myDirectory):
    """Recursively append each file's category (its parent folder name) to it.

    The label is repeated 50 times, space-prefixed, matching the original
    training-data format.
    """
    for f in listdir(myDirectory):
        chemin = join(myDirectory, f)
        if isfile(chemin):
            # The current category is the last path component of the
            # directory.  NOTE(review): splitting on "\\" is Windows-only --
            # confirm, or switch to os.path.basename for portability.
            loc = myDirectory.split("\\")
            # BUGFIX: use a context manager so the handle is closed even if
            # the write raises (the original leaked on failure).
            with open(chemin, "a") as s:
                s.write((" " + loc[-1]) * 50)
        else:
            # Not a file: it is a sub-directory, so descend into it.
            print(chemin)
            fichier_rec(chemin)
fichier_rec(myDirectory) |
996,257 | 06722fcea789ea111b87a575534f767187bea904 | import os
import datetime
import pickle
import json
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import Pipeline
from zenitai.utils.metrics import get_roc_curves
class Experiment:
    """Run a classification experiment and persist its artifacts to disk.

    Each run creates a timestamped sub-directory under
    ``<proj_dir>/<subfolder>/<name>/`` and saves there: a JSON file with
    metrics, a ROC-curve PNG, the fitted estimator and the experiment object
    as pickles, and the training split (predictors and target) as pickles.

    The class tries to locate a ``.env`` file and read the ``PROJECT_DIR``
    variable from it -- creating such a file at the project root is
    recommended.  The main entry point is :meth:`run`, but the model's
    ``fit``/``predict`` are also exposed.

    Note
    ----
    Only classification algorithms are supported.

    .. todo::
        Create subclasses for regression OR a dedicated class for metric
        persistence.

    Parameters
    ----------
    name : str
        Experiment name; becomes the parent folder for the artifacts.
    target_column : str
        Name of the target column -- recorded in the metrics file.
    estimator : Generic
        Model applied to the data.  Best tested with
        ``sklearn.pipeline.Pipeline``, but any class supporting
        ``fit``/``predict`` should work.
    proj_dir : str
        Project root directory.  When omitted, the ``PROJECT_DIR`` variable
        from ``.env`` is used, falling back to the current directory.
    subfolder : str
        Name of the folder holding all experiments (default ``"models"``),
        created inside ``proj_dir``.
    params : dict
        Unused.  Reserved for loading ``estimator`` hyper-parameters.
    random_seed : int
        Seed for the random number generator.
    """

    def __init__(
        self,
        name,
        target_column,
        estimator,
        proj_dir=None,
        subfolder=None,
        params=None,
        random_seed=0,
    ):
        load_dotenv(find_dotenv())
        self.name = name
        self.target_column = target_column
        self.est = estimator
        self.params = params
        self.seed = random_seed
        # BUGFIX: an explicitly passed `proj_dir` used to be silently
        # ignored, leaving `self.proj_dir` unset and crashing later with
        # AttributeError.  Honor the argument when given.
        if proj_dir is None:
            self.proj_dir = Path(os.environ.get("PROJECT_DIR", "."))
        else:
            self.proj_dir = Path(proj_dir)
        self.subfolder = "models" if subfolder is None else subfolder

    def run(self, X, y, X_valid=None, y_valid=None, save_to_disk=True, **fit_params):
        """Fit on (X, y), optionally score a validation set, save artifacts."""
        if X_valid is not None and y_valid is not None:
            self.X_valid, self.y_valid = X_valid, y_valid
        self.fit(X, y, **fit_params)
        if save_to_disk:
            self._create_exp_directory()
            self._dump_all(self.exp_dirname)
            self.save_roc_curve(self.exp_dirname)

    def fit(self, X, y, **fit_params):
        """Split (X, y) into train/test and fit the estimator on train."""
        np.random.seed(self.seed)
        self._get_current_time_str()
        self._generate_exp_dirname()
        self._split_data(X, y)
        self.est.fit(self.X_train, self.y_train, **fit_params)
        self._get_model()
        return self

    def predict(self, X, y=None):
        """Return positive-class probabilities for X."""
        res = self._predict(X, y)
        return res

    def get_metrics(self):
        """Compute (if needed) and return the metrics dict."""
        self._get_metrics()
        return self.metrics

    def _get_current_time_str(self):
        # e.g. '20240131_235959' -- used as the run directory name.
        self.cur_time = str(datetime.datetime.now())[:-7]
        self.cur_time = (
            self.cur_time.replace("-", "").replace(":", "").replace(" ", "_")
        )

    def _generate_exp_dirname(self):
        self.exp_dirname = self.proj_dir / self.subfolder / self.name / self.cur_time

    def _create_exp_directory(self):
        self.exp_dirname.mkdir(parents=True, exist_ok=True)

    def _split_data(self, X, y):
        # Stratified split keeps the class balance in both partitions.
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            X, y, stratify=y, random_state=self.seed
        )

    def _predict(self, X, y=None):
        return self.est.predict_proba(X)[:, 1]

    def _get_gini_score(self, y_true, y_score):
        """Gini = 2*AUC - 1; None when the score cannot be computed."""
        try:
            roc_auc = roc_auc_score(y_true, y_score)
            gini = np.round(2 * roc_auc - 1, 7)
        except Exception:
            gini = None
        return gini

    def _get_predictions_and_scores(self):
        self.preds_train = self._predict(self.X_train)
        self.preds_test = self._predict(self.X_test)
        self.gini_train = self._get_gini_score(self.y_train, self.preds_train)
        self.gini_test = self._get_gini_score(self.y_test, self.preds_test)
        # The validation set is optional (set in run()).
        if hasattr(self, "X_valid") and hasattr(self, "y_valid"):
            self.preds_valid = self._predict(self.X_valid)
            self.gini_valid = self._get_gini_score(self.y_valid, self.preds_valid)
        else:
            self.preds_valid = None
            self.gini_valid = None

    def _get_model(self):
        """Unwrap search objects / pipelines down to the final estimator."""
        if hasattr(self.est, "best_estimator_"):
            pipe = self.est.best_estimator_
        else:
            pipe = self.est
        if hasattr(pipe, "steps"):
            self.model = pipe.steps[-1][1]
        else:
            self.model = pipe

    def _get_metrics(self):
        self._get_predictions_and_scores()
        try:
            params = self.model.get_params()
        except AttributeError:
            params = None
        self.metrics = {
            "seed": self.seed,
            "target_column": self.target_column,
            "gini_train": self.gini_train,
            "gini_test": self.gini_test,
            "gini_valid": self.gini_valid,
            "est_algorithm_params": params,
        }

    def save_roc_curve(self, path):
        """Render the ROC curves and save them as PNG under *path*."""
        fname = "roc_curve.png"
        self._get_roc_curve()
        plt.savefig(path / fname)
        plt.close()

    def roc_curve(self):
        self._get_roc_curve()

    def _get_roc_curve(self):
        if not hasattr(self, "metrics"):
            self._get_metrics()
        plt.figure(figsize=(8, 8))
        if hasattr(self, "y_valid") and hasattr(self, "preds_valid"):
            facts = [self.y_train, self.y_test, self.y_valid]
            preds = [self.preds_train, self.preds_test, self.preds_valid]
            labels = ["train", "test", "valid"]
        else:
            facts = [self.y_train, self.y_test]
            preds = [self.preds_train, self.preds_test]
            labels = ["train", "test"]
        self.roc_plot = get_roc_curves(
            facts=facts,
            preds=preds,
            labels=labels,
        )

    def _dump_all(self, path):
        self._dump_metrics(path)
        self._dump_train_data(path)
        self._dump_estimator(path)
        self._dump_self(path)

    def _dump_metrics(self, path):
        if not hasattr(self, "metrics"):
            self._get_metrics()
        p = path / "metrics.json"
        with open(p, "w") as file:
            json.dump(self.metrics, file, ensure_ascii=True, indent=4)

    def _dump_train_data(self, path):
        for n, d in zip(["X_train", "y_train"], [self.X_train, self.y_train]):
            p = path / (n + ".pkl")
            with open(p, "wb") as file:
                pickle.dump(d, file)

    def _dump_estimator(self, path):
        p = path / ("estimator" + ".pkl")
        with open(p, "wb") as file:
            pickle.dump(self.est, file)

    def _dump_self(self, path):
        p = path / (self.name + "_experiment.pkl")
        with open(p, "wb") as file:
            pickle.dump(self, file)
class ExperimentCatboost(Experiment):
    """Experiment specialisation for CatBoost[Classifier] estimators."""

    def fit(self, X, y, save_to_disk=True, **fit_params):
        """Mirror the parent ``fit`` flow, additionally wiring the held-out
        test split into CatBoost's ``eval_set`` fit parameter.
        """
        final_step_name = self.est.steps[-1][0]
        self._get_current_time_str()
        self._generate_exp_dirname()
        self._split_data(X, y)
        # Route eval_set to the pipeline's final (CatBoost) step.
        fit_params[f"{final_step_name}__eval_set"] = (self.X_test, self.y_test)
        self.est.fit(self.X_train, self.y_train, **fit_params)
        return self
|
996,258 | 89fe6e137826fe24d8bd745409caf0e27b6ae673 | import unittest
import graph
from test_data import nations_of_the_world
class testAStar(unittest.TestCase):
    """Basic sanity checks on the graph module using the nations fixture."""

    def setUp(self):
        # A fresh graph populated with the shared test data before each test.
        self.G = graph.graph()
        nations_of_the_world(self.G)

    def testBasic1(self):
        """All node identifiers returned by nodes() are unique."""
        all_nodes = self.G.nodes()
        unique_nodes = set(all_nodes)
        assert len(all_nodes) == len(unique_nodes)
|
996,259 | 08f67a7783d8c5a9f83930da6dfbfa655c8fa8a0 | import unittest
import pandas as pd
import operator as op
import pandas.util.testing as pdt
from data import DrinkData
class TestCase(unittest.TestCase):
    """Unit tests for DrinkData against the small fixture file testing_data.csv.

    The fixture is loaded once at class-definition time and shared read-only
    across all tests.
    """
    # Create instance with test data
    test_isd = DrinkData("testing_data.csv")
    def test_search_criteria(self):
        """Test for search_criteria."""
        # Boolean mask over the fixture rows where Parent Brand == "Monster".
        expected = pd.Series([True, False, False, False, False, False, True,
                              True, False, False, False, False, False, False,
                              False, False, True, True, False, False, False, True,
                              True, False], name='Parent Brand', dtype='bool')
        actual = self.test_isd.search_criteria("Parent Brand", "Monster", op.eq)
        pdt.assert_series_equal(expected, actual)
    def test_get_total_units(self):
        """Test for get_total_units."""
        self.assertEqual(self.test_isd.get_total_units(), 40)
        self.assertEqual(self.test_isd.get_total_units("Monster"), 11)
        self.assertEqual(self.test_isd.get_total_units(retailer="Walmart"), 9)
        self.assertEqual(self.test_isd.get_total_units("Monster", "Walmart"), 5)
        # "Water" does not appear in the fixture, so totals are zero.
        self.assertEqual(self.test_isd.get_total_units("Water"), 0)
    def test_calc_affinity(self):
        """Test for calc_affinity."""
        self.assertEqual(self.test_isd.calc_affinity("Monster", "Walmart"), 2.02)
        self.assertEqual(self.test_isd.calc_affinity("Monster", "Subway"), 0)
        self.assertEqual(self.test_isd.calc_affinity("Water", "Walmart"), 0)
        self.assertEqual(self.test_isd.calc_affinity("Water", "Subway"), 0)
    def test_get_retailer_affinity_values(self):
        """Test for get_retailer_affinity_values."""
        expected_for_monster = {'CVS': 0, 'Costco': 0, 'Kroger': 0.64, 'Publix': 0,
                                'Target': 2.73, 'Walgreens': 0, 'Walmart': 2.02}
        expected_for_water = {'CVS': 0, 'Costco': 0, 'Kroger': 0, 'Publix': 0,
                              'Target': 0, 'Walgreens': 0, 'Walmart': 0}
        self.assertEqual(self.test_isd.get_retailer_affinity_values("Monster"), expected_for_monster)
        self.assertEqual(self.test_isd.get_retailer_affinity_values("Water"), expected_for_water)
    def test_get_dict_max(self):
        """Test for get_dict_max."""
        test_dict1 = {'apple': 5, 'berry': 78, 'cherry': 39, 'durian': 20}
        # test_dict2 has a tie — both maxima must be returned.
        test_dict2 = {'apple': 5, 'berry': 30, 'cherry': 19, 'durian': 30}
        td2_results = self.test_isd.get_dict_max(test_dict2)
        # Sort to make the comparison order-independent.
        td2_results.sort()
        self.assertEqual(self.test_isd.get_dict_max(test_dict1), ['berry'])
        self.assertEqual(td2_results, ['berry', 'durian'])
    def test_retailer_affinity(self):
        """Test for retailer_affinity."""
        rockstar_results = self.test_isd.retailer_affinity("Rockstar")
        rockstar_results.sort()
        self.assertEqual(self.test_isd.retailer_affinity("Monster"), ['Target'])
        self.assertEqual(rockstar_results, ['CVS', 'Costco'])
        # Unknown brand yields no affinity result at all.
        self.assertEqual(self.test_isd.retailer_affinity("Water"), None)
    def test_count_hhs(self):
        """Test for count_hhs."""
        self.assertEqual(self.test_isd.count_hhs(start_date="1/3/14"), 5)
        self.assertEqual(self.test_isd.count_hhs(start_date="1/3/14", end_date="1/1/15"), 3)
        self.assertEqual(self.test_isd.count_hhs("Monster"), 6)
        self.assertEqual(self.test_isd.count_hhs(retailer="Walmart"), 6)
        self.assertEqual(self.test_isd.count_hhs("Monster", "Walmart"), 2)
        self.assertEqual(self.test_isd.count_hhs("Red Bull", "Kroger", "1/1/14"), 1)
        self.assertEqual(self.test_isd.count_hhs("Water"), 0)
    def test_calc_buy_rate(self):
        """Test for calc_buy_rate."""
        self.assertEqual(self.test_isd.calc_buy_rate("Monster"), 4.5)
        self.assertEqual(self.test_isd.calc_buy_rate("Water"), 0)
    def test_get_buy_rate_values(self):
        """Test for get_buy_rate_values."""
        expected = {'5 Hour Energy': 7.0, 'Monster': 4.5, 'Red Bull': 4.14, 'Rockstar': 24.0}
        self.assertEqual(self.test_isd.get_buy_rate_values(), expected)
    def test_top_buying_brand(self):
        """Test for top_buying_brand."""
        self.assertEqual(self.test_isd.top_buying_brand(), ['Rockstar'])
# Run the suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
996,260 | 167ef20099fcaf5b9a24fde6ea635339e4a15dc9 | from setuptools import setup
setup(
    name='cffi-lz4frame',
    version='0.0.0',
    author='nathants',
    author_email='me@nathants.com',
    url='http://github.com/nathants/cffi-lz4frame/',
    packages=['lz4frame'],
    install_requires=['cffi>=1.0.0'],
    # Build hook: the ffibuilder object in lz4frame/__init__.py compiles the
    # CFFI extension module at build time.
    cffi_modules=["lz4frame/__init__.py:ffibuilder"],
    # cffi is also needed at setup time to execute the cffi_modules builder.
    setup_requires=['cffi>=1.0.0'],
)
|
def verifica_velocidade(velocidade):
    """Check a speed reading against the 70 km/h limit.

    Above the limit, one point is earned per full 5 km/h of excess speed and
    the point count is printed; more than 12 points suspends the licence.
    At or under the threshold, "ok" is printed.

    velocidade: speed in km/h (int).
    """
    if velocidade >= 70:
        excesso = velocidade - 70
        pontos = int(excesso / 5)
        print("Pontos: " + str(pontos))
        if pontos > 12:
            print("Licença suspensa")
        else:
            print("ok")
    else:
        # Bug fix: legal speeds previously produced no output at all.
        print("ok")


# Guard the interactive prompt so importing this module has no side effects.
if __name__ == "__main__":
    verifica_velocidade(int(input("Digite a velocidade em km/h: ")))
|
# Demonstration of Python list basics.
# Lists can hold mixed types, including nested lists.
demo_list = [1, 'hello', 1.34, True, [1, 2, 3]]
colors = ['red', 'green', 'blue']
numbers_list = list((1, 2, 3, 4, 5, 6))
print(list(range(1, 100)))
print(dir(colors))
print(len(demo_list))
print(colors[1])
print('green' in colors)
print(8 in colors)
colors.append('black')
colors.extend(('violet', 'yellow'))
colors.remove('green')
# Bug fix: the final line carried a stray "|" token that made the whole
# script a SyntaxError.
print(colors)
996,263 | f4c83dbc632e5aa9efe6fa4d3af6b3d254801656 | import unittest
from src.rock_paper_scissors import RockPaperScissorsGame
class TestGame(unittest.TestCase):
    """Outcome-table tests for RockPaperScissorsGame.beats().

    Uses unittest's assertTrue/assertFalse instead of bare ``assert x == True``
    comparisons, which are non-idiomatic and lose unittest's failure reporting.
    """

    def setUp(self):
        # A fresh game instance before every test.
        self.game = RockPaperScissorsGame()

    def test_rock_beats_scissors(self):
        self.assertTrue(self.game.beats("rock", "scissors"))

    def test_scissors_does_not_beat_rock(self):
        self.assertFalse(self.game.beats("scissors", "rock"))

    def test_rock_does_not_beat_rock(self):
        self.assertFalse(self.game.beats("rock", "rock"))

    def test_scissors_beats_papers(self):
        self.assertTrue(self.game.beats("scissors", "paper"))

    def test_paper_beats_rock(self):
        self.assertTrue(self.game.beats("paper", "rock"))

    def test_paper_does_not_beat_scissors(self):
        self.assertFalse(self.game.beats("paper", "scissors"))

    def test_lizard_beats_scissors(self):
        self.assertTrue(self.game.beats("lizard", "scissors"))
|
996,264 | a543cff1122402336755ca2c6454aa92670c81bb | import logging
from typing import Dict, List, Tuple, Set
from meltano.core.permissions.utils.snowflake_connector import SnowflakeConnector
# SQL statement templates; identifiers are normalized via
# SnowflakeConnector.snowflaky() before being interpolated with .format().
GRANT_ROLE_TEMPLATE = "GRANT ROLE {role_name} TO {type} {entity_name}"
GRANT_PRIVILEGES_TEMPLATE = (
    "GRANT {privileges} ON {resource_type} {resource_name} TO ROLE {role}"
)
# Bulk grant over every existing table/view in a schema.
GRANT_ALL_PRIVILEGES_TEMPLATE = "GRANT {privileges} ON ALL {resource_type}S IN SCHEMA {resource_name} TO ROLE {role}"
# Grant that also applies to objects created in the schema later on.
GRANT_FUTURE_PRIVILEGES_TEMPLATE = "GRANT {privileges} ON FUTURE {resource_type}S IN SCHEMA {resource_name} TO ROLE {role}"
ALTER_USER_TEMPLATE = "ALTER USER {user_name} SET {privileges}"
# COPY CURRENT GRANTS keeps existing privileges when ownership moves.
GRANT_OWNERSHIP_TEMPLATE = (
    "GRANT OWNERSHIP"
    " ON {resource_type} {resource_name}"
    " TO ROLE {role_name} COPY CURRENT GRANTS"
)
class SnowflakeGrantsGenerator:
    """Generates GRANT/ALTER SQL statements from a permissions spec.

    Statements for privileges that are already present (according to the
    snapshots passed to the constructor) are emitted with
    ``already_granted=True`` so callers can skip re-applying them.
    """
    def __init__(self, grants_to_role: Dict, roles_granted_to_user: Dict) -> None:
        # grants_to_role: {role: {privilege: {entity_type: [ENTITY_NAME, ...]}}}
        # roles_granted_to_user: {user: [role_name, ...]}
        self.grants_to_role = grants_to_role
        self.roles_granted_to_user = roles_granted_to_user
def check_grant_to_role(
self, role: str, privilege: str, entity_type: str, entity_name: str
) -> bool:
"""
Check if <role> has been granted the privilege <privilege> on entity type
<entity_type> with name <entity_name>.
For example:
check_grant_to_role('reporter', 'USAGE', 'DATABASE', 'ANALYTICS') -> True
means that role reported has been granted the privilege to use the
Database ANALYTICS on the Snowflake server.
"""
if SnowflakeConnector.snowflaky(entity_name).upper() in self.grants_to_role.get(
role, {}
).get(privilege, {}).get(entity_type, []):
return True
else:
return False
def generate_grant_roles(
self, entity_type: str, entity: str, config: str
) -> List[Dict]:
"""
Generate the GRANT statements for both roles and users.
entity_type: "USER" or "ROLE"
entity: the name of the entity (e.g. "yannis" or "REPORTER")
config: the subtree for the entity as specified in the spec
Returns the SQL commands generated as a list
"""
sql_commands = []
try:
for member_role in config["member_of"]:
granted_role = SnowflakeConnector.snowflaky(member_role).upper()
already_granted = False
if (
entity_type == "USER"
and granted_role in self.roles_granted_to_user[entity]
) or (
entity_type == "ROLE"
and self.check_grant_to_role(entity, "USAGE", "ROLE", member_role)
):
already_granted = True
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_ROLE_TEMPLATE.format(
role_name=SnowflakeConnector.snowflaky(member_role),
type=entity_type,
entity_name=SnowflakeConnector.snowflaky(entity),
),
}
)
except KeyError:
logging.debug(
"`member_of` not found for {}, skipping generation of GRANT ROLE statements.".format(
entity
)
)
return sql_commands
def generate_grant_privileges_to_role(
self, role: str, config: str, shared_dbs: Set
) -> List[Dict]:
"""
Generate all the privilege granting statements for a role.
Most of the SQL command that will be generated are privileges granted to
roles and this function orchestrates the whole process
role: the name of the role (e.g. "LOADER" or "REPORTER") the privileges
are GRANTed to
config: the subtree for the role as specified in the spec
shared_dbs: a set of all the shared databases defined in the spec.
Used down the road by generate_database_grants() to also grant
"IMPORTED PRIVILEGES" when access is granted to a shared DB.
Returns the SQL commands generated as a list
"""
sql_commands = []
# Track all the DBs and schemas that have been given access to (GRANT USAGE)
# the given role. Used in order to recursively grant the required access
# to DBs or schemas implicetly reference when permissions are GRANTED for a
# child Schema or Table.
# Example: A role is given read access to table MY_DB.MY_SCHEMA.MY_TABLE
# 1. In order to access MY_TABLE, the role has to be able to access MY_DB.MY_SCHEMA
# 2. The script checks if USAGE on MY_DB has been granted to the role and
# assigns it to the role if not (and adds the DB to usage_granted["databases"])
# 3. The same for the schema MY_SCHEMA
# 4. Finaly the requested permissions are GRANTED to role for MY_TABLE
usage_granted = {"databases": set(), "schemas": set()}
try:
for warehouse in config["warehouses"]:
new_commands = self.generate_warehouse_grants(
role=role, warehouse=warehouse
)
sql_commands.extend(new_commands)
except KeyError:
logging.debug(
"`warehouses` not found for role {}, skipping generation of Warehouse GRANT statements.".format(
role
)
)
try:
for database in config["privileges"]["databases"]["read"]:
new_commands, usage_granted = self.generate_database_grants(
role=role,
database=database,
grant_type="read",
usage_granted=usage_granted,
shared_dbs=shared_dbs,
)
sql_commands.extend(new_commands)
except KeyError:
logging.debug(
"`privileges.databases.read` not found for role {}, skipping generation of DATABASE read level GRANT statements.".format(
role
)
)
try:
for database in config["privileges"]["databases"]["write"]:
new_commands, usage_granted = self.generate_database_grants(
role=role,
database=database,
grant_type="write",
usage_granted=usage_granted,
shared_dbs=shared_dbs,
)
sql_commands.extend(new_commands)
except KeyError:
logging.debug(
"`privileges.databases.write` not found for role {}, skipping generation of DATABASE write level GRANT statements.".format(
role
)
)
try:
for schema in config["privileges"]["schemas"]["read"]:
new_commands, usage_granted = self.generate_schema_grants(
role=role,
schema=schema,
grant_type="read",
usage_granted=usage_granted,
shared_dbs=shared_dbs,
)
sql_commands.extend(new_commands)
except KeyError:
logging.debug(
"`privileges.schemas.read` not found for role {}, skipping generation of SCHEMA read level GRANT statements.".format(
role
)
)
try:
for schema in config["privileges"]["schemas"]["write"]:
new_commands, usage_granted = self.generate_schema_grants(
role=role,
schema=schema,
grant_type="write",
usage_granted=usage_granted,
shared_dbs=shared_dbs,
)
sql_commands.extend(new_commands)
except KeyError:
logging.debug(
"`privileges.schemas.write` not found for role {}, skipping generation of SCHEMA write level GRANT statements.".format(
role
)
)
try:
for table in config["privileges"]["tables"]["read"]:
new_commands, usage_granted = self.generate_table_and_view_grants(
role=role,
table=table,
grant_type="read",
usage_granted=usage_granted,
shared_dbs=shared_dbs,
)
sql_commands.extend(new_commands)
except KeyError:
logging.debug(
"`privileges.tables.read` not found for role {}, skipping generation of TABLE read level GRANT statements.".format(
role
)
)
try:
for table in config["privileges"]["tables"]["write"]:
new_commands, usage_granted = self.generate_table_and_view_grants(
role=role,
table=table,
grant_type="write",
usage_granted=usage_granted,
shared_dbs=shared_dbs,
)
sql_commands.extend(new_commands)
except KeyError:
logging.debug(
"`privileges.tables.write` not found for role {}, skipping generation of TABLE write level GRANT statements.".format(
role
)
)
return sql_commands
def generate_warehouse_grants(self, role: str, warehouse: str) -> List[str]:
"""
Generate the GRANT statements for Warehouse usage and operation.
role: the name of the role the privileges are GRANTed to
warehouse: the name of the warehouse (e.g. "transforming")
Returns the SQL command generated
"""
sql_commands = []
if self.check_grant_to_role(role, "USAGE", "WAREHOUSE", warehouse):
already_granted = True
else:
already_granted = False
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges="USAGE",
resource_type="WAREHOUSE",
resource_name=SnowflakeConnector.snowflaky(warehouse),
role=SnowflakeConnector.snowflaky(role),
),
}
)
if self.check_grant_to_role(role, "OPERATE", "WAREHOUSE", warehouse):
already_granted = True
else:
already_granted = False
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges="OPERATE",
resource_type="WAREHOUSE",
resource_name=SnowflakeConnector.snowflaky(warehouse),
role=SnowflakeConnector.snowflaky(role),
),
}
)
return sql_commands
def generate_database_grants(
self,
role: str,
database: str,
grant_type: str,
usage_granted: Dict,
shared_dbs: Set,
) -> Tuple[List[str], Dict]:
"""
Generate the GRANT statements for Databases.
role: the name of the role the privileges are GRANTed to
database: the name of the database (e.g. "RAW")
grant_type: What type of privileges are granted? One of {"read", "write"}
usage_granted: Passed by generate_grant_privileges_to_role() to track all
all the entities a role has been granted access (USAGE) to.
shared_dbs: a set of all the shared databases defined in the spec.
Returns the SQL commands generated and the updated usage_granted as a Tuple
"""
sql_commands = []
usage_granted["databases"].add(database.upper())
already_granted = False
if grant_type == "read":
privileges = "USAGE"
if self.check_grant_to_role(role, "USAGE", "DATABASE", database):
already_granted = True
elif grant_type == "write":
privileges = "USAGE, MONITOR, CREATE SCHEMA"
if (
self.check_grant_to_role(role, "USAGE", "DATABASE", database)
and self.check_grant_to_role(role, "MONITOR", "DATABASE", database)
and self.check_grant_to_role(
role, "CREATE SCHEMA", "DATABASE", database
)
):
already_granted = True
else:
raise SpecLoadingError(
f"Wrong grant_type {spec_path} provided to generate_database_grants()"
)
# If this is a shared database, we have to grant the "IMPORTED PRIVILEGES"
# privilege to the user and skip granting the specific permissions as
# "Granting individual privileges on imported databases is not allowed."
if database in shared_dbs:
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges="IMPORTED PRIVILEGES",
resource_type="DATABASE",
resource_name=SnowflakeConnector.snowflaky(database),
role=SnowflakeConnector.snowflaky(role),
),
}
)
return (sql_commands, usage_granted)
# And then grant privileges to the database
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=privileges,
resource_type="DATABASE",
resource_name=SnowflakeConnector.snowflaky(database),
role=SnowflakeConnector.snowflaky(role),
),
}
)
return (sql_commands, usage_granted)
def generate_schema_grants(
self,
role: str,
schema: str,
grant_type: str,
usage_granted: Dict,
shared_dbs: Set,
) -> Tuple[List[str], Dict]:
"""
Generate the GRANT statements for SCHEMAs.
role: the name of the role the privileges are GRANTed to
schema: the name of the Schema (e.g. "RAW.PUBLIC")
grant_type: What type of privileges are granted? One of {"read", "write"}
usage_granted: Passed by generate_grant_privileges_to_role() to track all
all the entities a role has been granted access (USAGE) to.
shared_dbs: a set of all the shared databases defined in the spec.
Returns the SQL commands generated and the updated usage_granted as a Tuple
"""
sql_commands = []
# Split the schema identifier into parts {DB_NAME}.{SCHEMA_NAME}
# so that we can check and use each one
name_parts = schema.split(".")
# Do nothing if this is a schema inside a shared database:
# "Granting individual privileges on imported databases is not allowed."
if name_parts[0] in shared_dbs:
return (sql_commands, usage_granted)
if grant_type == "read":
privileges = "USAGE"
elif grant_type == "write":
privileges = (
"USAGE, MONITOR, CREATE TABLE,"
" CREATE VIEW, CREATE STAGE, CREATE FILE FORMAT,"
" CREATE SEQUENCE, CREATE FUNCTION, CREATE PIPE"
)
else:
raise SpecLoadingError(
f"Wrong grant_type {spec_path} provided to generate_schema_grants()"
)
# Before assigning privileges to a schema, check if
# usage to the database has been granted and
# if not grant usage first to the database
if name_parts[0].upper() not in usage_granted["databases"]:
new_commands, usage_granted = self.generate_database_grants(
role=role,
database=name_parts[0],
grant_type="read",
usage_granted=usage_granted,
shared_dbs=shared_dbs,
)
sql_commands.extend(new_commands)
# Generate the INFORMATION_SCHEMA identifier for that database
# in order to be able to filter it out
info_schema = f"{name_parts[0].upper()}.INFORMATION_SCHEMA"
schemas = []
if name_parts[1] == "*":
# If {DB_NAME}.* was provided as the schema identifier, we have to fetch
# each schema in database DB_NAME, so that we can grant privileges
# for each schema seperatelly.
# We could GRANT {privileges} TO ALL SCHEMAS IN DATABASE
# but that would not allow us to know if a specific privilege has
# been already granted or not
conn = SnowflakeConnector()
db_schemas = conn.show_schemas(name_parts[0])
for db_schema in db_schemas:
if db_schema != info_schema:
schemas.append(db_schema)
elif "*" in name_parts[1]:
conn = SnowflakeConnector()
db_schemas = conn.show_schemas(name_parts[0])
for db_schema in db_schemas:
schema_name = db_schema.split(".", 1)[1].lower()
if schema_name.startswith(name_parts[1].split("*", 1)[0]):
schemas.append(db_schema)
else:
schemas = [schema]
for db_schema in schemas:
already_granted = False
if (
grant_type == "read"
and self.check_grant_to_role(role, "USAGE", "SCHEMA", db_schema)
) or (
grant_type == "write"
and self.check_grant_to_role(role, "USAGE", "SCHEMA", db_schema)
and self.check_grant_to_role(role, "MONITOR", "SCHEMA", db_schema)
and self.check_grant_to_role(role, "CREATE TABLE", "SCHEMA", db_schema)
and self.check_grant_to_role(role, "CREATE VIEW", "SCHEMA", db_schema)
and self.check_grant_to_role(role, "CREATE STAGE", "SCHEMA", db_schema)
and self.check_grant_to_role(
role, "CREATE FILE FORMAT", "SCHEMA", db_schema
)
and self.check_grant_to_role(
role, "CREATE SEQUENCE", "SCHEMA", db_schema
)
and self.check_grant_to_role(
role, "CREATE FUNCTION", "SCHEMA", db_schema
)
and self.check_grant_to_role(role, "CREATE PIPE", "SCHEMA", db_schema)
):
already_granted = True
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=privileges,
resource_type="SCHEMA",
resource_name=SnowflakeConnector.snowflaky(db_schema),
role=SnowflakeConnector.snowflaky(role),
),
}
)
usage_granted["schemas"].add(schema.upper())
return (sql_commands, usage_granted)
def generate_table_and_view_grants(
self,
role: str,
table: str,
grant_type: str,
usage_granted: Dict,
shared_dbs: Set,
) -> Tuple[List[str], Dict]:
"""
Generate the GRANT statements for TABLEs and VIEWs.
role: the name of the role the privileges are GRANTed to
table: the name of the TABLE/VIEW (e.g. "RAW.PUBLIC.MY_TABLE")
grant_type: What type of privileges are granted? One of {"read", "write"}
usage_granted: Passed by generate_grant_privileges_to_role() to track all
all the entities a role has been granted access (USAGE) to.
shared_dbs: a set of all the shared databases defined in the spec.
Returns the SQL commands generated and the updated usage_granted as a Tuple
"""
sql_commands = []
# Split the table identifier into parts {DB_NAME}.{SCHEMA_NAME}.{TABLE_NAME}
# so that we can check and use each one
name_parts = table.split(".")
# Do nothing if this is a table inside a shared database:
# "Granting individual privileges on imported databases is not allowed."
if name_parts[0] in shared_dbs:
return (sql_commands, usage_granted)
if grant_type == "read":
privileges = "SELECT"
elif grant_type == "write":
privileges = "SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES"
else:
raise SpecLoadingError(
f"Wrong grant_type {spec_path} provided to generate_table_and_view_grants()"
)
# Generate the INFORMATION_SCHEMA identifier for that database
# in order to be able to filter it out
info_schema = f"{name_parts[0].upper()}.INFORMATION_SCHEMA"
# Before assigning privileges to a Table, check if
# usage to the database has been granted and
# if not grant usage first to the database
# Schemas will be checked as we generate them
if name_parts[0].upper() not in usage_granted["databases"]:
new_commands, usage_granted = self.generate_database_grants(
role=role,
database=name_parts[0],
grant_type="read",
usage_granted=usage_granted,
shared_dbs=shared_dbs,
)
sql_commands.extend(new_commands)
# Gather the tables/views that privileges will be granted to
tables = []
views = []
# List of all tables/views in schema
table_list = []
view_list = []
schemas = []
conn = SnowflakeConnector()
if name_parts[1] == "*":
# If {DB_NAME}.*.* was provided as the identifier, we have to fetch
# each schema in database DB_NAME, so that we can grant privileges
# for all tables in that schema
# (You can not GRANT to all table with a wild card for the schema name)
db_schemas = conn.show_schemas(name_parts[0])
for schema in db_schemas:
if schema != info_schema:
schemas.append(schema)
elif "*" in name_parts[1]:
conn = SnowflakeConnector()
db_schemas = conn.show_schemas(name_parts[0])
for db_schema in db_schemas:
schema_name = db_schema.split(".", 1)[1].lower()
if schema_name.startswith(name_parts[1].split("*", 1)[0]):
schemas.append(db_schema)
else:
schemas = [f"{name_parts[0]}.{name_parts[1]}"]
for schema in schemas:
# first check if the role has been granted usage on the schema
# either directly or indirectly (by granting to DB.*)
if (
schema.upper() not in usage_granted["schemas"]
and f"{name_parts[0].upper()}.*" not in usage_granted["schemas"]
):
new_commands, usage_granted = self.generate_schema_grants(
role=role,
schema=schema,
grant_type="read",
usage_granted=usage_granted,
shared_dbs=shared_dbs,
)
sql_commands.extend(new_commands)
# And then add the tables for that schema to the tables[] and views[]
# that will be granted the permissions
table_list.extend(conn.show_tables(schema=schema))
view_list.extend(conn.show_views(schema=schema))
if name_parts[2] == "*":
# If *.* then you can grant all, grant future, and exit
for schema in schemas:
# Grant on ALL tables
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_ALL_PRIVILEGES_TEMPLATE.format(
privileges=privileges,
resource_type="TABLE",
resource_name=SnowflakeConnector.snowflaky(schema),
role=SnowflakeConnector.snowflaky(role),
),
}
)
# Grant on ALL views
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_ALL_PRIVILEGES_TEMPLATE.format(
privileges=privileges,
resource_type="VIEW",
resource_name=SnowflakeConnector.snowflaky(schema),
role=SnowflakeConnector.snowflaky(role),
),
}
)
# Grant future on all tables
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=privileges,
resource_type="TABLE",
resource_name=SnowflakeConnector.snowflaky(schema),
role=SnowflakeConnector.snowflaky(role),
),
}
)
# Grant future on all views
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=privileges,
resource_type="VIEW",
resource_name=SnowflakeConnector.snowflaky(schema),
role=SnowflakeConnector.snowflaky(role),
),
}
)
return (sql_commands, usage_granted)
else:
# Only one table/view to be granted permissions to
if table.upper() in table_list:
tables = [table]
if table.upper() in view_list:
views = [table]
# And then grant privileges to all tables in that schema
for db_table in tables:
already_granted = False
if (
grant_type == "read"
and self.check_grant_to_role(role, "SELECT", "TABLE", db_table)
) or (
grant_type == "write"
and self.check_grant_to_role(role, "SELECT", "TABLE", db_table)
and self.check_grant_to_role(role, "INSERT", "TABLE", db_table)
and self.check_grant_to_role(role, "UPDATE", "TABLE", db_table)
and self.check_grant_to_role(role, "DELETE", "TABLE", db_table)
and self.check_grant_to_role(role, "TRUNCATE", "TABLE", db_table)
and self.check_grant_to_role(role, "REFERENCES", "TABLE", db_table)
):
already_granted = True
# And then grant privileges to the db_table
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=privileges,
resource_type="TABLE",
resource_name=SnowflakeConnector.snowflaky(db_table),
role=SnowflakeConnector.snowflaky(role),
),
}
)
for db_view in views:
already_granted = False
if (
grant_type == "read"
and self.check_grant_to_role(role, "SELECT", "VIEW", db_view)
) or (
grant_type == "write"
and self.check_grant_to_role(role, "SELECT", "VIEW", db_view)
and self.check_grant_to_role(role, "INSERT", "VIEW", db_view)
and self.check_grant_to_role(role, "UPDATE", "VIEW", db_view)
and self.check_grant_to_role(role, "DELETE", "VIEW", db_view)
and self.check_grant_to_role(role, "TRUNCATE", "VIEW", db_view)
and self.check_grant_to_role(role, "REFERENCES", "VIEW", db_view)
):
already_granted = True
# And then grant privileges to the db_view
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=privileges,
resource_type="VIEW",
resource_name=SnowflakeConnector.snowflaky(db_view),
role=SnowflakeConnector.snowflaky(role),
),
}
)
return (sql_commands, usage_granted)
def generate_alter_user(self, user: str, config: str) -> List[Dict]:
"""
Generate the ALTER statements for USERs.
user: the name of the USER
config: the subtree for the user as specified in the spec
Returns the SQL commands generated as a List
"""
sql_commands = []
alter_privileges = []
if "can_login" in config:
if config["can_login"]:
alter_privileges.append("DISABLED = FALSE")
else:
alter_privileges.append("DISABLED = TRUE")
if alter_privileges:
sql_commands.append(
{
"already_granted": False,
"sql": ALTER_USER_TEMPLATE.format(
user_name=SnowflakeConnector.snowflaky(user),
privileges=", ".join(alter_privileges),
),
}
)
return sql_commands
def generate_grant_ownership(self, role: str, config: str) -> List[Dict]:
"""
Generate the GRANT OWNERSHIP statements for DATABASEs, SCHEMAs and TABLEs.
role: the name of the role (e.g. "LOADER") OWNERSHIP will be GRANTed to
config: the subtree for the role as specified in the spec
Returns the SQL commands generated as a List
"""
sql_commands = []
try:
for database in config["owns"]["databases"]:
if self.check_grant_to_role(role, "OWNERSHIP", "DATABASE", database):
already_granted = True
else:
already_granted = False
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_OWNERSHIP_TEMPLATE.format(
resource_type="DATABASE",
resource_name=SnowflakeConnector.snowflaky(database),
role_name=SnowflakeConnector.snowflaky(role),
),
}
)
except KeyError:
logging.debug(
"`owns.databases` not found for role {}, skipping generation of DATABASE OWNERSHIP statements.".format(
role
)
)
try:
for schema in config["owns"]["schemas"]:
name_parts = schema.split(".")
info_schema = f"{name_parts[0].upper()}.INFORMATION_SCHEMA"
schemas = []
if name_parts[1] == "*":
conn = SnowflakeConnector()
db_schemas = conn.show_schemas(name_parts[0])
for db_schema in db_schemas:
if db_schema != info_schema:
schemas.append(db_schema)
else:
schemas = [schema]
for db_schema in schemas:
if self.check_grant_to_role(role, "OWNERSHIP", "SCHEMA", db_schema):
already_granted = True
else:
already_granted = False
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_OWNERSHIP_TEMPLATE.format(
resource_type="SCHEMA",
resource_name=SnowflakeConnector.snowflaky(db_schema),
role_name=SnowflakeConnector.snowflaky(role),
),
}
)
except KeyError:
logging.debug(
"`owns.schemas` not found for role {}, skipping generation of SCHEMA OWNERSHIP statements.".format(
role
)
)
try:
# Gather the tables that OWNERSHIP will be granted to
tables = []
for table in config["owns"]["tables"]:
name_parts = table.split(".")
info_schema = f"{name_parts[0].upper()}.INFORMATION_SCHEMA"
if name_parts[2] == "*":
schemas = []
conn = SnowflakeConnector()
if name_parts[1] == "*":
db_schemas = conn.show_schemas(name_parts[0])
for schema in db_schemas:
if schema != info_schema:
schemas.append(schema)
else:
schemas = [f"{name_parts[0]}.{name_parts[1]}"]
for schema in schemas:
tables.extend(conn.show_tables(schema=schema))
else:
tables = [table]
# And then grant OWNERSHIP to all tables
for db_table in tables:
if self.check_grant_to_role(role, "OWNERSHIP", "TABLE", db_table):
already_granted = True
else:
already_granted = False
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_OWNERSHIP_TEMPLATE.format(
resource_type="TABLE",
resource_name=SnowflakeConnector.snowflaky(db_table),
role_name=SnowflakeConnector.snowflaky(role),
),
}
)
except KeyError:
logging.debug(
"`owns.tables` not found for role {}, skipping generation of TABLE OWNERSHIP statements.".format(
role
)
)
return sql_commands
|
996,265 | e1edba78e5647a73bf221cc45f6bb49be72a2d38 | from gevent import monkey; monkey.patch_all()
from webrecorder.utils import load_wr_config, init_logging
from webrecorder.rec.webrecrecorder import WebRecRecorder
import gevent
try:
import uwsgi
from uwsgidecorators import postfork
except:
postfork = None
pass
# =============================================================================
def init():
    """Build and return the recorder WSGI app.

    Spawns the recorder's message listen loop in a greenlet: under uWSGI
    (postfork available) only in mule 0, so a single process consumes
    messages; otherwise unconditionally.
    """
    init_logging()
    config = load_wr_config()
    wr = WebRecRecorder(config)
    if postfork:
        @postfork
        def listen_loop():
            # Only the first mule runs the listener when under uWSGI.
            if uwsgi.mule_id() == 0:
                gevent.spawn(wr.msg_listen_loop)
    else:
        gevent.spawn(wr.msg_listen_loop)
    wr.init_app(None)
    # Expose the recorder on the app object for access from request handlers.
    wr.app.wr = wr
    return wr.app
|
996,266 | e9f6df0f18357cd4ff7c4f6e9a8cf89d2d3ce262 | import turtle
# Draw an expanding rectangular spiral with a turtle cursor.
turtle.shape("turtle")
turtle.speed(10)
turtle.pencolor("blue")
turtle.width(2)
for i in range(30):
    # Each pair of sides grows by 10 px per iteration, producing the spiral.
    turtle.forward(10 * i)
    turtle.left(90)
    turtle.forward(10 * i)
    turtle.left(90)
# Keep the window open until it is clicked.
turtle.exitonclick()
# Weird Pyramid (Cool)
#
# for i in range(100):
#     # turtle.left(60)
#     turtle.forward(5 * i)
#     turtle.left(90)
#     turtle.forward(5 * i)
#     turtle.left(90)
#     turtle.forward(5 * i)
#     turtle.left(90)
996,267 | 544c65dd49e3e5fef86d547ed8a05932ac20d6f7 | import argparse
from collections import defaultdict
from operator import itemgetter
import pickle
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
from actiondatasets.gteagazeplus import GTEAGazePlus
def get_confmat(path):
    """Load and return the pickled confusion-matrix object stored at *path*."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def plot_conf_mat(confmat, title=None, labels=None, epoch=None):
    """Display one confusion matrix taken from a per-epoch stack.

    Args:
        confmat: indexable collection of confusion matrices, one per epoch.
        title: optional figure title.
        labels: optional class labels (possibly nested lists/tuples),
            flattened with stringify() and used on both axes.
        epoch: index of the matrix to show; defaults to the last one.

    Raises:
        ValueError: if *epoch* is out of range for *confmat*.
    """
    # Validate BEFORE indexing.  The original checked `epoch > mat.shape[0]`
    # after already doing confmat[epoch] (so an out-of-range epoch raised
    # IndexError first), compared against the matrix dimension instead of
    # the number of epochs, raised TypeError on Python 3 when epoch was
    # None, and its message had two format args but one placeholder.
    if epoch is None:
        mat = confmat[-1]
    else:
        if epoch >= len(confmat):
            raise ValueError(
                'Epoch {} should be below {}'.format(epoch, len(confmat)))
        mat = confmat[epoch]
    # Plot confmat
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.imshow(mat, cmap='viridis')
    fig.colorbar(cax)
    if title is not None:
        ax.set_title(title)
    if labels is not None:
        str_labels = [stringify(label) for label in labels]
        ax.set_xticklabels(str_labels, rotation=90)
        ax.set_xticks(range(len(str_labels)))
        ax.set_yticklabels(str_labels)
        ax.set_yticks(range(len(str_labels)))
    plt.tight_layout()
    plt.show()
def stringify(nested):
    """Flatten arbitrarily nested lists/tuples of strings into one
    underscore-joined string."""
    if isinstance(nested, str):
        return nested
    if isinstance(nested, (tuple, list)) and len(nested) == 1:
        return stringify(nested[0])
    # Recurse on head and tail, joining with '_'.
    return stringify(nested[0]) + '_' + stringify(nested[1:])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoints',
nargs='+',
type=str,
help='path to checkpoints folders')
parser.add_argument(
'--vis', action='store_true', help='Whether to plot the log curves')
parser.add_argument(
'--epoch', type=int, help='What itertation use to average results')
parser.add_argument(
'--prefixes',
nargs='+',
type=str,
help='Descriptions of run for labels, one per checkpoint')
parser.add_argument('--gteagazeplus', action='store_true')
opt = parser.parse_args()
if opt.prefixes is not None:
assert len(opt.prefixes) == len(opt.checkpoints), \
'Should have as many prefixes as checkpoints but '\
'got {} and {}'.format(opt.prefixes, opt.checkpoints)
if opt.gteagazeplus:
all_subjects = [
'Ahmad', 'Alireza', 'Carlos', 'Rahul', 'Yin', 'Shaghayegh'
]
for checkpoint in opt.checkpoints:
dataset = GTEAGazePlus(original_labels=True, seqs=all_subjects)
train_conf_template = os.path.join(
checkpoint, 'gtea_lo_{}/train_conf_mat.pickle')
val_conf_template = os.path.join(checkpoint,
'gtea_lo_{}/val_conf_mat.pickle')
for leave_out in range(6):
print(leave_out)
train_conf_path = train_conf_template.format(str(leave_out))
val_conf_path = val_conf_template.format(str(leave_out))
train_confmat = get_confmat(train_conf_path)
val_confmat = get_confmat(val_conf_path)
if opt.vis:
plot_conf_mat(
train_confmat,
title='Train conf mat',
epoch=opt.epoch,
labels=dataset.classes)
plot_conf_mat(
val_confmat,
title='Val conf mat',
epoch=opt.epoch,
labels=dataset.classes)
|
996,268 | 91c46843aadf746f73020f85db9e4c945a400fe6 | from django.test import TestCase
from django.test import Client
from django.contrib.auth import login
from linkup.models import Profile,Event,Poll
from django.contrib.auth.models import User
class CsrTest(TestCase):
    """Checks that the login page embeds a CSRF token."""

    # Bug fix: the method was named `login`, so Django's test runner never
    # discovered or ran it; test methods must start with `test_`.
    def test_login(self):
        client = Client()
        response1 = client.get('/login/')
        self.assertEqual(response1.status_code, 200)
        self.assertContains(response1, 'csrfmiddlewaretoken')
# Tests user login with sample username and password data
class linkup(TestCase):
    # Bug fix: renamed from `checkuserlogin` -- Django only runs methods
    # whose names start with `test_`.
    def test_checkuserlogin(self):
        client = Client()
        u = 'testuser'
        p = 'password'
        e = 'testemail@gmail.com'
        # NOTE(review): objects.create() stores the raw password string, not
        # a hash, so authentication against it may fail; create_user() is
        # likely intended -- confirm against the login view's behaviour.
        User.objects.create(username=u, password=p, email=e)
        query = {
            'username': u,
            'password': p,
            'email': e,
        }
        response2 = client.post('/login/', query)
        self.assertEqual(response2.status_code, 302)
|
996,269 | 4c95527cecf8db01d4d5151a34863f68f14a6e8c | from gm2 import np
class Spikes(object):
    """Detects single-sample spikes in a frequency series and replaces them
    by interpolation from their neighbours, recording what was changed.
    """
    def __init__(self, phis=None, freqs=None, th=None):
        # th: spike threshold; phis/freqs: sample positions and frequencies.
        self.debug = True
        if (freqs is not None):
            self.init(phis, freqs, th)
    def init(self, phis, freqs, th):
        """Scan *freqs*, replacing points that deviate from both neighbours
        by more than *th* (in the same direction) with an interpolated value.

        Side effects: self.freq holds the cleaned copy, self.rm flags
        replaced samples, self.outl holds each sample's residual (NaN at
        the edges, which are never examined).
        """
        self.th = th
        # NOTE(review): rm is sized by freqs.shape[-1] but indexed by `event`
        # over range(freqs.shape[0]) below -- only consistent for 1-D input;
        # confirm intended shape.
        self.rm = np.zeros([freqs.shape[-1]])
        self.freq = freqs.copy()
        self.outl = np.full([freqs.shape[0]], np.nan)
        def outlier(event, th):
            # Examine one sample; after a replacement, recurses backwards so
            # the previous point is re-checked against the corrected series.
            n = 1
            nn = 0
            if(event-n >=0)&(event+n<freqs.shape[0]):
                #for probe in range(17):
                mean = (self.freq[event-1] + self.freq[event+1])/2.0
                dphi = phis[event + 1] - phis[event - 1]
                dfreq = (self.freq[event+1] - self.freq[event -1])
                if(abs(dphi) > 0.1e-4): # use interpolation if dphi is large enough ...
                    mean = self.freq[event-1] + dfreq * (phis[event] - phis[event-1]) / dphi;
                self.outl[event] = self.freq[event] - mean
                d_pre = self.freq[event] - self.freq[event-1]
                d_post = self.freq[event] - self.freq[event+1]
                # A spike deviates from BOTH neighbours by more than th and
                # in the same direction (d_pre * d_post > 0); a step does not.
                if ((np.abs(self.outl[event]) > th)&(np.abs(d_pre) > th)&(np.abs(d_post) > th)&((d_pre * d_post)> 0)):
                    self.freq[event] = mean
                    self.rm[event] = 1
                    nn += outlier(event-1, th)
            return nn
        for event in range(freqs.shape[0]):
            outlier(event, th)
    def isOutl(self):
        # Boolean mask of samples that were replaced.
        return self.rm > 0
    def isOk(self):
        # Samples that were neither replaced nor skipped (edges stay NaN).
        return (self.rm == 0)&(np.isnan(self.outl) == False)
|
996,270 | 27a24bef73ea98b1613f650567545c06602c8b13 | import datetime
import os
import psycopg2
from flask import Flask, render_template
app = Flask(__name__)
app.secret_key = os.environ['APP_SECRET_KEY']
@app.route("/", methods=('GET', 'POST'))
def index():
    """Render success-rate statistics for 'local' and 'remote' web logs.

    Connects to Postgres using credentials from the environment, counts
    total and 2xx requests per source, and renders index.html with a
    per-source dict of {'total', 'success', 'rate'}.
    """
    # Connect to database (credentials come from the environment).
    conn = psycopg2.connect(host=os.environ['POSTGRES_HOST'], database=os.environ['POSTGRES_DB'], user=os.environ['POSTGRES_USER'], password=os.environ['POSTGRES_PASSWORD'])
    cur = conn.cursor()
    try:
        # The original duplicated the query/count/rate logic for each source
        # and never closed the cursor or connection.
        stats = {source: _source_stats(cur, source) for source in ('local', 'remote')}
    finally:
        cur.close()
        conn.close()
    return render_template('index.html', rate=stats)


def _source_stats(cur, source):
    """Return {'total', 'success', 'rate'} for one weblog source."""
    # Parameterized queries: never interpolate `source` into the SQL string.
    cur.execute("SELECT COUNT(*) FROM weblogs WHERE source = %s;", (source,))
    total = cur.fetchone()[0]
    cur.execute("SELECT COUNT(*) FROM weblogs WHERE source = %s AND status LIKE '2__';", (source,))
    success = cur.fetchone()[0]
    # Guard against division by zero when there are no requests yet.
    rate = str(success / total) if total else "No entries yet!"
    return {'total': total, 'success': success, 'rate': rate}
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
996,271 | 63dd569b40c411677b4c98481f25f8ec150b1fe1 | import numpy as np
import numpy
from PIL import Image
from numpy import genfromtxt
training_n = 5
image_width=7
image_height = 9
# Cantidad de input units
input_n = image_width * image_height
# Cantidad de output units
output_n = 7
threshold = 0
b = np.zeros(output_n)
w = np.zeros((input_n, output_n))
t = np.zeros((output_n, output_n))
t.fill(-1)
for i in range(0, output_n):
t[i, i] = 1
def blockshaped(arr, nrows, ncols):
    """Split a 2-D array into (nrows x ncols) tiles, in row-major tile order.

    arr.shape must be divisible by (nrows, ncols).
    """
    h, w = arr.shape
    tiles = arr.reshape(h // nrows, nrows, -1, ncols)
    return tiles.swapaxes(1, 2).reshape(-1, nrows, ncols)
#Creado por unutbu de Stack Overflow
def imageToArray(image):
    """Convert a grayscale image to a bipolar array: white (255) -> -1,
    anything darker -> +1.  Always returns a fresh int array."""
    pixels = np.array(image, dtype=int)
    ink = pixels < 255
    blank = pixels == 255
    pixels[ink] = 1
    pixels[blank] = -1
    return pixels
def activation(y_in, threshold):
    """Bipolar step activation with a dead zone of width 2*threshold:
    +1 above threshold, 0 within [-threshold, threshold], -1 below."""
    if y_in > threshold:
        return 1
    if y_in >= -threshold:
        return 0
    return -1
def interpretResult(result):
    """Print the candidate letter for every output unit that fired (+1).

    Args:
        result: 1-D numpy array of activations, one per output class, in
            the training order A, B, C, D, E, J, K.
    """
    # Index -> letter lookup replaces the original if/elif ladder, and the
    # prints use call syntax so the function also runs on Python 3
    # (single-argument print is equivalent on Python 2).
    letters = ('A', 'B', 'C', 'D', 'E', 'J', 'K')
    for i in range(result.size):
        if result[i] == 1:
            print("Puede ser " + letters[i])
def train( input, output ):
    """Train the perceptron on one sample (Python 2 module: print statements).

    Args:
        input: flattened bipolar pixel vector (length input_n).
        output: integer class index; t[output] is the bipolar target vector.

    Side effects: updates the module-level weights `w` and biases `b` in
    place, looping until an epoch makes no weight change.
    """
    x = input
    print "Training starts"
    stopping_condition = False
    while(stopping_condition == False):
        stopping_condition = True
        # NOTE(review): the loop variable `i` is unused, so the same update
        # is recomputed input_n times per epoch -- possibly unintended.
        for i in range(0, input_n):
            y_in = np.zeros(output_n)
            y = np.zeros(output_n)
            for j in range(0, output_n):
                y_in[j] = b[j] + np.dot(x, w[:,j])
                y[j] = activation(y_in[j], threshold)
            for j in range(0, output_n):
                # Only units whose output disagrees with the target learn.
                if t[output][j] != y[j]:
                    b[j] = b[j] + t[output][j]
                    for i2 in range(0, input_n):
                        old_w = w[i2][j]
                        w[i2][j] = w[i2][j] + t[output][j]*x[i2]
                        if old_w != w[i2][j]:
                            stopping_condition = False
        print "Epoch"
    print "Training complete"
def classify(input):
    """Compute the bipolar activation vector for one flattened input
    pattern, using the module-level weights `w`, biases `b` and threshold."""
    activations = np.zeros(output_n)
    for j in range(output_n):
        net = b[j] + np.dot(input, w[:, j])
        activations[j] = activation(net, threshold)
    return activations
training_data_image = Image.open("input.png").convert("L")
training_data_array = imageToArray(training_data_image)
training_data_array = blockshaped(training_data_array, image_height, image_width)
output_goal = 0
for input in training_data_array:
train(input.flatten(), output_goal)
output_goal += 1
if output_goal == 7:
output_goal = 0
character_image = Image.open("test/j_1.png").convert("L")
#character_image.show()
character_array = imageToArray(character_image)
character_result = classify(character_array.flatten())
#print(character_array)
#print(character_result)
interpretResult(character_result) |
996,272 | aaa376f81743ef85262d438edc3c7929450d4c3c | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 5 20:55:24 2019
@author: Erdo
"""
# %% libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Bug fix: confusion_matrix() is called throughout this script but was
# never imported, raising NameError on first use.
from sklearn.metrics import confusion_matrix
# %%
""" Data Import """
data = pd.read_excel('Iris.xls')
data.head()
#%%
x_data = data.iloc[:,0:4].values
y_data = data.iloc[:,-1:].values
#%%
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x_data)
#%%
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y_data, test_size = 0.33 , random_state = 0)
scores =[]
methods =[]
#%%
#------------------------------Logistic Regression-----------------------------
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(x_train,y_train)
y_lr_pred = lr.predict(x_test)
print("Logistic regression score:",lr.score(x_test,y_test))
cm0 = confusion_matrix(y_test,y_lr_pred)
print(cm0)
scores.append(lr.score(x_test,y_test))
methods.append("lr")
#%%
#------------------------------KNN Classifier----------------------------------
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=4, metric='minkowski')
knn.fit(x_train,y_train)
y_knn_pred = knn.predict(x_test)
print("KNN score :",knn.score(x_test,y_test))
cm1 =confusion_matrix(y_test,y_knn_pred)
print(cm1)
scores.append(knn.score(x_test,y_test))
methods.append("knn")
#%%
# ------------------------------Decision tree classifier-----------------------
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier( criterion = 'gini')
dt.fit(x_train,y_train)
y_dt_pred = dt.predict(x_test)
score = dt.score(x_test,y_test)
print("Decision tree score:",score)
cm2 = confusion_matrix(y_test,y_dt_pred)
print(cm2)
scores.append(dt.score(x_test,y_test))
methods.append("Dt")
#%%
#---------------------------- Random Forest Classifier-------------------------
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=5, criterion = 'gini')
rfc.fit(x_train,y_train)
y_rfc_pred = rfc.predict(x_test)
cm3 =confusion_matrix(y_test,y_rfc_pred)
print("Random forest score:",rfc.score(x_test,y_test))
print(cm3)
scores.append(rfc.score(x_test,y_test))
methods.append("rfc")
#%%
#---------------------------SVM Classification---------------------------------
from sklearn.svm import SVC
svc = SVC(kernel='rbf')
svc.fit(x_train,y_train)
y_svc_pred = svc.predict(x_test)
print("SVM score:",svc.score(x_test,y_test))
cm4 = confusion_matrix(y_test,y_svc_pred)
print(cm4)
scores.append(svc.score(x_test,y_test))
methods.append("SVM")
#%%
#------------------------Naive Bayes Classification----------------------------
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(x_train,y_train)
y_gnb_pred = gnb.predict(x_test)
print("Gaussian Naive bayes:",gnb.score(x_test,y_test))
cm5 = confusion_matrix(y_test,y_gnb_pred)
print(cm5)
scores.append(gnb.score(x_test,y_test))
methods.append("Gnb")
#%%
#------------------------------------------------------------------------------
# Visualization of Scores
# Sort the (method, score) pairs together so each x-label stays aligned
# with its accuracy.  The original called `scores.sort()` (whose None
# return was bound to an unused `s`), sorting the scores in place while
# `methods` kept its old order -- mislabelling every point.
ranked = sorted(zip(methods, scores), key=lambda pair: pair[1])
methods, scores = zip(*ranked)
plt.figure(figsize =(12,6))
plt.plot(methods, scores)
plt.xlabel("Classification Methods")
plt.ylabel("Accuracy Scores")
plt.show()
996,273 | 4e379cb2f26bd97a87f5eeb960b4cc8be631b775 | import random
pri = input('Digite a primeira pessoa: ')
seg = input('Digite a segunda pessoa: ')
terc = input('Digite a terceira pessoa: ')
qua = input('Digite a quarta pessoa: ')
lista = [pri, seg, terc, qua]
'''escolhido = random.choice(lista)
# no caso de querer escolher um
# numa lista
print('A pessoa sorteada foi {}'.format(escolhido))'''
'''random.shuffle(lista)
# no caso de querer
# embaralhar uma lista
print('A ordem de apresentação séra',lista)'''
|
996,274 | 58ce94c10e0fbbb2822078b5e4e9db4d7c957a02 | #-*-coding:utf-8-*-
#@Time :2019/2/25 16:44
#@Author:xiaoqi
#@File :task_01.py
#1:写一个类 类的作用是完成Excel数据的读写 新建表单的操作
# 函数一:读取指定表单的数据,
# 有一个列表row_list,把每一行的每一个单元格的数据存到row_list里面去。
# 每一行都有 一个单独的row_list [[1,2,3],[4,5,6]] #每一行数据读取完毕后,把row_list存到大列表all_row_list
# 函数二:在指定的单元格写入指定的数据,并保存到当前Excel
# 函数三:新建一个Excel
from openpyxl import load_workbook
from openpyxl import workbook
class DoExcel:
    '''Helper class that reads/writes Excel workbook data and creates sheets.'''
    def __init__(self,file_name,sheet_name):
        # Workbook path and the sheet name this instance operates on.
        self.file_name=file_name
        self.sheet_name=sheet_name
    def read_excel(self):
        '''Read every data row into a nested list: one sub-list per row,
        one element per cell.  Row 1 is treated as a header and skipped.'''
        wb=load_workbook(self.file_name)
        sheet=wb[self.sheet_name]
        # Approach 1: nested lists
        all_row_list=[]# all test data collected in one outer list
        for i in range(2,sheet.max_row+1):# data starts at row 2; row 1 is the header
            row_list=[]# each row stored in its own sub-list
            for j in range(1,sheet.max_column+1):
                res=sheet.cell(i,j).value
                row_list.append(res)
            all_row_list.append(row_list)
        return all_row_list# return the outer list
        # Approach 2 (alternative, unreachable/kept for reference): nested dicts
        # test_data=[]
        # for i in range(2,sheet.max_row+1):
        #     row_dict={}# each row stored in a dict
        #     row_dict['case_id']=sheet.cell(i,1).value
        #     row_dict['test_project']=sheet.cell(i,2).value
        #     row_dict['test_title']=sheet.cell(i,3).value
        #     row_dict['test_data']=sheet.cell(i,4).value
        #     row_dict['steps']=sheet.cell(i,5).value
        #     row_dict['expected_result']=sheet.cell(i,6).value
        #     row_dict['actual_result']=sheet.cell(i,7).value
        #     row_dict['test_result']=sheet.cell(i,8).value
        #     test_data.append(row_dict)
        # return test_data
    def write_excel(self,row,column,value):
        '''Write *value* into the cell at (row, column) and save the workbook.'''
        wb=load_workbook(self.file_name)
        sheet=wb[self.sheet_name]
        sheet.cell(row,column).value=value
        wb.save(self.file_name)
        wb.close()# close after every write operation
    def create_excel(self):
        '''Create a new workbook containing the configured sheet.'''
        wb=workbook.Workbook()
        wb.create_sheet(self.sheet_name)
        wb.save(self.file_name)
if __name__=='__main__':
d=DoExcel('python_14.xlsx','Sheet')
res=d.read_excel()
print(res)
# d.write_excel(1,1,'TestId')
# d.create_excel('test')
|
996,275 | f87f513756961fea7320465e4dda865ff9f97c8f | from distutils.core import setup
from setuptools import find_packages
with open('requirements.txt') as f:
requirements = f.readlines()
# https://www.geeksforgeeks.org/command-line-scripts-python-packaging/
setup(name='mlpipe',
version='1.0',
packages=find_packages(),
entry_points = {
'console_scripts': [
'mlpipe = cli.mlpipe:main'
]
},
install_requires = requirements,
zip_safe=False)
|
996,276 | 518de3be7065742a194355b616627e523d8b07de | import json
def get_json_info(file):
    """Parse *file* as JSON and return the resulting object."""
    with open(file) as handle:
        return json.load(handle)
def set_json_info(file, data):
    """Serialize *data* as JSON into *file*, overwriting any existing content."""
    with open(file, 'w') as handle:
        json.dump(data, handle)
def write_aircraft_to_csv(craft):
    """Append one aircraft record as a CSV row to ../csv_files/aircraft.csv.

    Args:
        craft: mapping with keys 'model', 'fuel_percent', 'priority',
            'emergency' and 'airtime'.
    """
    # Leading '\n' starts a new row; assumes the file already ends without one.
    output_str = '\n{},{},{},{},{}'.format(
        craft['model'], craft['fuel_percent'], craft['priority'], craft['emergency'], craft['airtime'])
    # NOTE(review): field values containing commas are not escaped; the csv
    # module would be safer -- confirm downstream readers before changing.
    with open('../csv_files/aircraft.csv', 'a') as f:
        f.write(output_str)
if __name__ == "__main__":
print('db_module.py file') |
996,277 | 30a5ac6123afa72452ae46cce88b499ac061a0a7 | # -*- coding:utf-8 -*-
# Approach: walk every root-to-leaf path, checking whether the running total equals `sum`.
# Definition for a binary tree node.
class TreeNode(object):
    """Minimal binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        self.val = x
        # Children are attached by the caller after construction.
        self.left = None
        self.right = None
class Solution(object):
    """Root-to-leaf path-sum query over a binary tree."""

    def hasPathSum(self, root, sum):
        """Return True iff some root-to-leaf path sums to *sum*.

        :type root: TreeNode
        :type sum: int
        :rtype: bool
        """
        return self._check(root, 0, sum)

    def _check(self, root, nowSum, sum):
        """Depth-first helper carrying the running total *nowSum*."""
        if root is None:
            return False
        running = nowSum + root.val
        if root.left is None and root.right is None:
            # Leaf reached: the path is complete, compare totals.
            return running == sum
        return (self._check(root.left, running, sum)
                or self._check(root.right, running, sum))
# Smoke test: build the sample tree and run one query (Python 2 print syntax).
t3 = TreeNode(3)
t9 = TreeNode(9)
t20 = TreeNode(20)
t15= TreeNode(15)
t7 = TreeNode(7)
t3.left = t9
t3.right = t20
t20.left = t15
# NOTE(review): this attaches 7 under 15, not under 20 -- possibly a typo
# for `t20.right = t7`; confirm the intended tree shape.
t15.right = t7
s = Solution()
print s.hasPathSum(t3,13)
996,278 | 2f1f821dac7bb6355a5b9a261fdd13470052d90a | # -*- coding: utf-8 -*-
# @Time : 2019/1/14 11:23 AM
# @Author : scl
# @Email : 1163820757@qq.com
# @File : K-Means算法.py
# @Software: PyCharm
import matplotlib
matplotlib.use("TkAgg")
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import sklearn.datasets as ds
import matplotlib.colors
from sklearn.cluster import KMeans #引入kmeans
## 设置属性防止中文乱码
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
# 1 生成模拟数据
N = 1500
centers = 4
# 产生等方差的数据集(中心点随机)
data1,y1 = ds.make_blobs(N,n_features=2,centers=centers,random_state=12)
# 产生指定中心点和方差的数据集
data2,y2 = ds.make_blobs(N,n_features=2,centers= [(-10,-8), (-5,8), (5,2), (8,-7)],cluster_std=[1.5, 2.5, 1.9, 1],random_state=12)
# 产生方差相同 样本数量不同的数据集
data3 = np.vstack((data1[y1 == 0][:200],
data1[y1 == 1][:100],
data1[y1 == 2][:10],
data1[y1 == 3][:50]))
y3 = np.array([0] * 200 + [1] * 100 + [2] * 10 + [3] * 50)
# 2 模型构建
km = KMeans(n_clusters=centers,init='random',random_state=12)
km.fit(data1)
# 模型预测
y_hat = km.predict(data1)
print('所有样本距离簇中心点的总距离和:',km.inertia_)
print('距离聚簇中的平均距离',(km.inertia_/N))
cluster_centers = km.cluster_centers_
print('聚簇中心点\n',cluster_centers)
y_hat2 = km.fit_predict(data2)
y_hat3 = km.fit_predict(data3)
def expandBorder(a, b):
    """Widen the interval [a, b] by 10% of its span on each side,
    returning the new (lower, upper) bounds."""
    margin = (b - a) * 0.1
    return a - margin, b + margin
# 绘图
cm = mpl.colors.ListedColormap(list('rgbmyc'))
plt.figure(figsize=(15, 9), facecolor='w')
plt.subplot(241)
plt.scatter(data1[:, 0], data1[:, 1], c=y1, s=30, cmap=cm, edgecolors='none')
x1_min, x2_min = np.min(data1, axis=0)
x1_max, x2_max = np.max(data1, axis=0)
x1_min, x1_max = expandBorder(x1_min, x1_max)
x2_min, x2_max = expandBorder(x2_min, x2_max)
plt.xlim((x1_min, x1_max))
plt.ylim((x2_min, x2_max))
plt.title(u'原始数据')
plt.grid(True)
plt.subplot(242)
plt.scatter(data1[:, 0], data1[:, 1], c = y_hat, s=30, cmap=cm, edgecolors='none')
plt.xlim((x1_min, x1_max))
plt.ylim((x2_min, x2_max))
plt.title(u'K-Means算法聚类结果')
plt.grid(True)
# 对数据做一个旋转
m = np.array(((1, -5), (0.5, 5)))
data_r = data1.dot(m)
y_r_hat = km.fit_predict(data_r)
plt.subplot(243)
plt.scatter(data_r[:, 0], data_r[:, 1], c=y1, s=30, cmap=cm, edgecolors='none')
x1_min, x2_min = np.min(data_r, axis=0)
x1_max, x2_max = np.max(data_r, axis=0)
x1_min, x1_max = expandBorder(x1_min, x1_max)
x2_min, x2_max = expandBorder(x2_min, x2_max)
plt.xlim((x1_min, x1_max))
plt.ylim((x2_min, x2_max))
plt.title(u'数据旋转后原始数据图')
plt.grid(True)
plt.subplot(244)
plt.scatter(data_r[:, 0], data_r[:, 1], c=y_r_hat, s=30, cmap=cm, edgecolors='none')
plt.xlim((x1_min, x1_max))
plt.ylim((x2_min, x2_max))
plt.title(u'数据旋转后预测图')
plt.grid(True)
plt.subplot(245)
plt.scatter(data2[:, 0], data2[:, 1], c=y2, s=30, cmap=cm, edgecolors='none')
x1_min, x2_min = np.min(data2, axis=0)
x1_max, x2_max = np.max(data2, axis=0)
x1_min, x1_max = expandBorder(x1_min, x1_max)
x2_min, x2_max = expandBorder(x2_min, x2_max)
plt.xlim((x1_min, x1_max))
plt.ylim((x2_min, x2_max))
plt.title(u'不同方差的原始数据')
plt.grid(True)
plt.subplot(246)
plt.scatter(data2[:, 0], data2[:, 1], c=y_hat2, s=30, cmap=cm, edgecolors='none')
plt.xlim((x1_min, x1_max))
plt.ylim((x2_min, x2_max))
plt.title(u'不同方差簇数据的K-Means算法聚类结果')
plt.grid(True)
plt.subplot(247)
plt.scatter(data3[:, 0], data3[:, 1], c=y3, s=30, cmap=cm, edgecolors='none')
x1_min, x2_min = np.min(data3, axis=0)
x1_max, x2_max = np.max(data3, axis=0)
x1_min, x1_max = expandBorder(x1_min, x1_max)
x2_min, x2_max = expandBorder(x2_min, x2_max)
plt.xlim((x1_min, x1_max))
plt.ylim((x2_min, x2_max))
plt.title(u'不同簇样本数量原始数据图')
plt.grid(True)
plt.subplot(248)
plt.scatter(data3[:, 0], data3[:, 1], c=y_hat3, s=30, cmap=cm, edgecolors='none')
plt.xlim((x1_min, x1_max))
plt.ylim((x2_min, x2_max))
plt.title(u'不同簇样本数量的K-Means算法聚类结果')
plt.grid(True)
plt.tight_layout(2, rect=(0, 0, 1, 0.97))
plt.suptitle(u'数据分布对KMeans聚类的影响', fontsize=18)
plt.show()
# 使用轮廓系数作为衡量指标
from sklearn import metrics
km22 = KMeans(n_clusters=4, init='random',random_state=28)
km22.fit(data1, y1)
y_hat22 = km22.predict(data1)
km_score2 = metrics.silhouette_score(data1, y_hat22)
print("KMeans算法的轮廓系数指标(簇中心:%d):%.3f" % (len(km22.cluster_centers_), km_score2))
|
996,279 | de321dbed3348b2bdea5840cfb97e70854ca8884 | #
from __future__ import print_function
import os, sys
import time
import seaborn as sns
import pandas
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
import keras as ks
from keras import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard
from sklearn.metrics import confusion_matrix
## Fannie Mae failed loan prediction
## Model to predict if a loan will have foreclosure costs
# keras requires python 2 / get tensorflow import error
#classifier.summary()
#print(classifier.metrics()
# save the classifier to deploy
#classifier.save("kerasLoanClassifier.keras.save")
from keras.models import load_model
model = load_model("kerasLoanClassifier.keras.save")
def loanofficer(Z):
loaneval = (model.predict(Z)>.65)
return loaneval[0][0]
x = np.array([1.0, 2.0, 3.0])
#|label|channel|
#intrate|
#loanamt|
#loan2val|
#numborrowers|
#itscore|
#fcl_costs|loc_state|
inputvals=""
myvals=[ 0.600,0.00633,1.0824742,0.166666,0.62885,0.0318,3.0]
def loanplease(args):
    """Score one loan application with the loaded Keras model.

    Args:
        args: sequence of 7 feature values in the order the model was
            trained on (channel, intrate, loanamt, loan2val, numborrowers,
            itscore, fcl_costs).

    Returns:
        The thresholded model verdict from loanofficer() (True when the
        score exceeds 0.65).
    """
    # Bug fix: the original ignored its `args` parameter and always read the
    # module-level `myvals`; reshape the caller's features instead.
    x = np.reshape(np.array(args), (-1, 7))
    return loanofficer(x)
# loanplease(myvals)
# print(x.shape) |
996,280 | 3601e212f67c75b9d93a1a435e8da3a9bc331372 | import os
import SimpleITK as sitk
import matplotlib.pyplot as plt
import pydicom
from pydicom.data import get_testdata_files
import numpy as np
if __name__ == '__main__':
path = 'LUNGx-CT003/03-23-2006-6667-CT NON-INFUSED CHEST-15464/5-HIGH RES-37154'
filenames = os.listdir(path)
# for file in filenames:
# path_to_file = os.path.join(path, file)
# image = sitk.ReadImage(path_to_file)
# array = sitk.GetArrayFromImage(image)
# print()
ct_scan = np.zeros(shape=(512, 512, len(filenames)))
for file in filenames:
path_to_file = os.path.join(path, file)
dataset = pydicom.dcmread(path_to_file)
ct_scan[:, :, dataset.InstanceNumber-1] = dataset.pixel_array
for i in range(len(filenames)):
if i % 15 == 0:
plt.imshow(ct_scan[:, :, i], cmap=plt.cm.bone)
#plt.scatter([374], [374], c='red')
plt.show()
print()
|
996,281 | 15ccc8263f6ce2211973289b2105f88dda4ed424 | import os
import pycurl
import magic
import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from src.classes.Config import Config
mime = magic.Magic(mime=True)
def download(url):
    """Fetch *url* (following redirects) and return the raw response body."""
    response = requests.get(url, allow_redirects=True)
    return response.content
# Class which holds a file reference and the read callback
class FileReader:
def __init__(self, file, callback):
self.file = file
self.callback = callback
def read_callback(self, size):
return self.callback(self.file, size)
class Api:
apiUrl = "https://calomni.com/api"
bearer = Config.load_token()
user = {}
def __init__(self, app):
self.app = app
self.bearer = Config.load_token()
pass
def url(self, endpoint):
return self.apiUrl + endpoint
def login_bearer(self):
self.app.log.info("Login to system")
user = self.get("/user")
if user:
self.app.log.debug("User info: %s" % user)
self.user = user
return self.user
return False
'''Login to the api'''
def login(self, email, password):
self.app.log.info("Login to system")
try:
resp = requests.post(self.url("/auth/login"), data={'email': email, 'password': password},
headers={"Accept": "application/json"})
if resp.status_code == 200 or resp.status_code == 201:
result = resp.json()
self.bearer = result['data']['plainTextToken']
Config.save_token(self.bearer)
user = self.get("/user")
if user:
# self.app.log.debug("User info: %s" % user)
self.user = user
return True
elif resp.status_code == 422:
self.app.log.error("Login incorrect", None, True)
else:
self.app.log.error("Error: %s\n%s" % (resp.status_code, resp.reason), None, True,
lambda: self.app.timer.start(5000))
return False
except:
self.app.log.info("Connection error")
return False
'''Logout'''
def logout(self):
try:
self.post(self.url("/auth/logout"))
self.bearer = None
return True
except:
self.app.log.info("Connection error")
return False
def back_to_login(self):
self.app.sh_screen.hide()
self.app.rh_screen.hide()
self.app.login_screen.show()
'''Inspector'''
def response(self, resp):
# Success
if resp.status_code == 200 or resp.status_code == 201:
result = resp.json()
# print(result)
if 'status' in result:
if result['status']:
if "message" in result and result["message"]:
self.app.log.info(result['message'], None, True)
return result['data']
else:
self.show_error(result['message'])
else:
self.app.log.debug(result)
self.show_error("Invalid data received")
# Bad request
elif resp.status_code == 400:
result = resp.json()
self.show_error(result["message"])
# Unauthenticated
elif resp.status_code == 401:
self.show_error("Session expired! Please login again", self.back_to_login)
# Everything else
else:
self.app.log.debug(resp.json())
self.show_error("Error: %s\n%s" % (resp.status_code, resp.reason))
return False
def show_error(self, msg, callback=None):
self.app.log.error(msg, None, True, callback)
'''get request'''
def get(self, endpoint, params=None):
if self.bearer is None:
self.show_error("Session expired! Please login again", self.back_to_login)
return False
try:
resp = requests.get(self.url(endpoint), params=params,
headers={"Accept": "application/json", "Authorization": "Bearer " + self.bearer})
return self.response(resp)
except:
self.show_error("Connection error")
return False
'''put request'''
def put(self, endpoint, data=None, files=None):
if self.bearer is None:
return False
try:
resp = requests.put(self.url(endpoint), data=data,
headers={"Accept": "application/json", "Authorization": "Bearer " + self.bearer})
return self.response(resp)
except:
self.show_error("Connection error")
return False
'''post request'''
def post(self, endpoint, data=None, files=None):
if self.bearer is None:
return False
try:
resp = requests.post(self.url(endpoint), data=data, files=files,
headers={"Accept": "application/json", "Authorization": "Bearer " + self.bearer})
return self.response(resp)
except:
self.show_error("Connection error")
return False
'''upload'''
def upload(self, endpoint, data, callback=None):
if self.bearer is None:
return False
m = MultipartEncoder(fields=data)
if callback is not None:
m = MultipartEncoderMonitor(m, callback)
try:
resp = requests.post(self.url(endpoint), data=m,
headers={"Content-Type": m.content_type, "Accept": "application/json",
"Authorization": "Bearer " + self.bearer})
return self.response(resp)
except:
self.show_error("Connection error")
return False
@staticmethod
def silence_upload(endpoint, data, callback=None, return_value=None):
if Api.bearer is None:
return False
m = MultipartEncoder(fields=data)
if callback is not None:
m = MultipartEncoderMonitor(m, callback)
try:
bearer = Config.load_token()
resp = requests.post(Api.apiUrl + endpoint, data=m,
headers={"Content-Type": m.content_type, "Accept": "application/json",
"Authorization": "Bearer " + bearer})
if return_value:
return_value.value = resp
return resp
except:
print("Failed", endpoint, data)
return False
pass
@staticmethod
def static_get(endpoint, params=None):
if Api.bearer is None:
return False
try:
bearer = Config.load_token()
resp = requests.get(Api.apiUrl + endpoint, params=params,
headers={"Accept": "application/json", "Authorization": "Bearer " + bearer})
return resp.json()
except:
return False
'''delete request'''
def delete(self, endpoint):
if self.bearer is None:
return False
try:
resp = requests.delete(self.url(endpoint),
headers={"Accept": "application/json", "Authorization": "Bearer " + self.bearer})
return self.response(resp)
except:
self.show_error("Connection error")
return False
@staticmethod
def curl_upload(endpoint, file, progress=None, read_function=None):
url = Api.apiUrl + endpoint
# upload = list()
'''data['other_files'] = ""
for key in data:
if key == "run_file" or re.search("other_files", key):
content_type = mime.from_file(data[key])
upload.append((key, (pycurl.FORM_FILE, data[key], pycurl.FORM_FILENAME, os.path.basename(data[key]), pycurl.FORM_CONTENTTYPE, content_type )))
else:
if key == "run_file":
upload.append((key, os.path.basename(data[key])))
else:
upload.append((key, data[key]))
'''
# content_type = mime.from_file(file.filename)
# upload.append(('file', (pycurl.FORM_FILE, file.filename, pycurl.FORM_FILENAME, os.path.basename(file.filename), pycurl.FORM_CONTENTTYPE, content_type )))
# print(upload)
# initialize py curl
c = pycurl.Curl()
if read_function:
c.setopt(pycurl.READFUNCTION, FileReader(open(file.filename, "rb"), read_function).read_callback)
c.setopt(pycurl.INFILESIZE, os.path.getsize(file.filename))
c.setopt(pycurl.VERBOSE, 0)
c.setopt(pycurl.UPLOAD, 1)
c.setopt(pycurl.URL, url)
c.setopt(pycurl.NOPROGRESS, 0)
bearer = Config.load_token()
c.setopt(pycurl.HTTPHEADER, [
"Accept: application/json",
"Authorization: Bearer " + bearer
])
if progress:
c.setopt(pycurl.XFERINFOFUNCTION, progress)
c.setopt(pycurl.PROGRESSFUNCTION, progress)
c.perform() # this kicks off the pycurl module with all options set.
c.close()
return c
|
996,282 | bd6837f684b2689b1e3fc632d05060bea93a33d7 | # -*-coding:utf-8 -*-
import numpy as np
def cos_similarity(vec1, vec2):
    """Cosine of the angle between two equally-shaped numpy vectors."""
    dot = np.sum(vec1 * vec2)
    norm1 = np.sqrt(np.sum(vec1 ** 2))
    norm2 = np.sqrt(np.sum(vec2 ** 2))
    return dot / (norm1 * norm2)
def hist_similarity(X1, X2):
    '''
    Evaluate the similarity of X1 and X2 as the cosine similarity of their
    50-bin histogram counts.
    Note: X1 and X2 must span the same value range (i.e. share minimum and
    maximum), otherwise the bins are not comparable; normalize them first
    if necessary.
    Returns (cos_theta, hist_info1, hist_info2), where the hist_info values
    are the full np.histogram results (counts, bin_edges).
    '''
    hist_info1 = np.histogram(X1, bins = 50)
    hist_info2 = np.histogram(X2, bins = 50)
    # Compare only the bin counts (element [0] of each histogram result).
    cos_theta = cos_similarity(hist_info1[0], hist_info2[0])
    # Return the histograms too so callers can inspect or plot them.
    return cos_theta, hist_info1, hist_info2
if __name__ == "__main__":
#
pass
|
996,283 | 0738f5a7f0bdc5598aceddcf6adb526bd1c0eac0 | api = dict(
    url="",  # API base URL; presumably filled in at deploy time -- confirm
    token=""  # API auth token; empty by default
)
# Label attached to readings; presumably the sensor placement -- confirm
# against whatever consumes this config.
location = "Ambient"
|
996,284 | 1b643c9ac2da65e238e8dd0cdf564e403a699a4e | def longestvalidparanthesis(self,s):
    """Return the length of the longest well-formed '()' substring of s.

    Stack of indices of unmatched '(' plus dp[i] = length of the longest
    valid run ending at index i.
    """
    if not s:
        return 0
    stack = []
    dp = [0]*len(s)
    for i in range(len(s)):
        if s[i] == '(':
            stack.append(i)
            continue
        # ')' with no open '(' on the stack leaves dp[i] = 0.
        if stack:
            leftIndex = stack.pop()
            # Length of this match plus any valid run ending just before it.
            # NOTE(review): when leftIndex == 0 this reads dp[-1] (the last
            # element), which happens to still be 0 at that point, so the
            # result is correct -- but an explicit `if leftIndex > 0` guard
            # would be safer.
            dp[i] = i-leftIndex+1+dp[leftIndex-1]
    return max(dp)
996,285 | 0ae904d50310da31ccd7ba8b2c36f0aa5ffea412 | from pyfam.mcmc import MCMC
import numpy as np
import matplotlib.pyplot as plt
#Define a model function to use when fitting data.
# For this example, that's a gaussian function plus a linear offset.
def f(x, p):
a, b, c, d, e = p
return a*np.exp(-(x-b)**2/(2*c**2)) + d*x + e
#Make some fake data with added noise.
npoints = 30
a, b, c, d, e = 5., 1., 2., 0.2, 2.
p = [a, b, c, d, e]
x = np.linspace(-10, 10, npoints)
y = f(x, p)+np.random.normal(0, 0.5, npoints)  # Gaussian noise on y
x += np.random.normal(0, .05, npoints)  # small jitter on x as well
#Create MCMC instance.
mcmc = MCMC(x, y, model=f)
#Add one walker.
mcmc.add_walkers(1, p0=[1., 1., 1., 1., 1.]) #Starting guess is [1.0, 1.0, 1.0, 1.0, 1.0]
#Burn in for 200 steps.
print("Burn in")
mcmc.walk(200, run_id="burn")
#Add four additional walkers.
mcmc.add_walkers(4)
#Walk each walker for 1000 steps.
# NOTE(review): the comment above says 1000 but the call below walks 5000
# steps per walker -- confirm which was intended.
print("Walking")
mcmc.walk(5000, run_id="walk")
#Plot 50 randomly drawn accepted fits.
mcmc.plot_sample(run_id="walk", n=50)
#Plot a corner plot, showing the posterior distribution of parameters.
mcmc.corner(run_id="walk", bins=35, threshold=2, fill=True, p_crosshair="best")
plt.show()
# NOTE(review): debug leftover; prints only after the plot window is closed.
print("WAAAAAA") |
996,286 | 5cb54209bc5478b8232c3c7b2e102206349ece62 | def transposed(matrix):
    # Transpose a list-of-rows matrix: zip(*matrix) yields the columns as
    # tuples; map(list, ...) converts each back to a list.
    return list(map(list, zip(*matrix)))
def snail_path(matrix):
    """Return the elements of *matrix* in clockwise spiral ("snail") order.

    Repeatedly takes the current top row, then rotates the remainder
    counter-clockwise (reverse each row, then transpose) so the next edge
    becomes the new top row.  The input list of rows is consumed.

    Changes vs. the previous version: removed the leftover debug
    ``print(matrix)``, used the idiomatic ``while matrix`` truthiness test,
    and inlined the transpose so the function is self-contained.
    """
    result = []
    while matrix:
        # Harvest the top edge.
        result.extend(matrix.pop(0))
        # Rotate the remainder counter-clockwise.
        matrix = list(map(list, zip(*map(reversed, matrix))))
    return result
|
996,287 | 907f44ea6cf8c54b382a17119f408d087c840928 | def remove_emoji(data):
    """
    Strip emoji characters from a text string.
    Falsy or non-string inputs are returned unchanged.
    NOTE(review): Python 2 code (`basestring`); assumes `re` is imported at
    module level -- the import is not visible in this chunk.
    :param data: text to clean
    :return: the input with emoji ranges removed
    """
    if not data:
        return data
    if not isinstance(data, basestring):
        return data
    try:
        # UCS-4
        # Wide build: the emoji code points can be matched directly.
        patt = re.compile(u'([\U00002600-\U000027BF])|([\U0001f300-\U0001f64F])|([\U0001f680-\U0001f6FF])')
    except re.error:
        # UCS-2
        # Narrow build: wide ranges are invalid, so match the equivalent
        # UTF-16 surrogate pairs instead.
        patt = re.compile(u'([\u2600-\u27BF])|([\uD83C][\uDF00-\uDFFF])|([\uD83D][\uDC00-\uDE4F])|([\uD83D][\uDE80-\uDEFF])')
    return patt.sub('', data) |
996,288 | 46a040fb6f1e88fd557ffbab9075c825bad579a8 | from django.core.files.storage import FileSystemStorage
from django.shortcuts import render ,HttpResponse,redirect
from firstapp.forms import userDataForm
from django.contrib.auth.hashers import make_password,check_password
from firstapp.models import userData
# Create your views here.
def index(request):
    """Plain HTML landing endpoint for the app."""
    greeting = "<h1>Welcome to First Response</h1>"
    return HttpResponse(greeting)
def home (request):
    # Render the static home-page template.
    return render(request,"home.html")
def about(request):
    """Render the about page with a sample name and a set of names.

    Note the names are passed as a set, so their template order is
    unspecified.
    """
    single_name = "Vishal"
    name_set = {"Vishal", "kunal", "Amit"}
    context = {'n': single_name, 'l': name_set}
    return render(request, "about.html", context)
def index1(request):
    # Render the base template (template-inheritance demo page).
    return render(request,'base1.html')
def index2(request):
    """Registration view: on POST, save a new userData row (optionally with
    an uploaded image) and re-render the form.

    NOTE(review): both the POST and GET paths pass {'sucess': True};
    'sucess' is a typo, but templates depend on the key so it is kept.
    """
    if request.method == "POST":
        form = userDataForm(request.POST)
        f = form.save(commit=False)
        user_image = None
        try:
            if request.FILES["userimage"]:
                my_files = request.FILES["userimage"]
                fs = FileSystemStorage()
                file_name = fs.save(my_files.name, my_files)
                user_image = fs.url(file_name)
                # NOTE(review): this overwrites the stored URL with the bare
                # file name; looks unintentional -- confirm which value the
                # templates expect before changing it.
                user_image = my_files.name
        except Exception:
            # Image upload is best-effort: a missing "userimage" key (or a
            # storage failure) simply leaves user_image as None.  Narrowed
            # from a bare `except:` so SystemExit/KeyboardInterrupt are no
            # longer swallowed.
            pass
        f.userName = request.POST["username"]
        f.userPassword = make_password(request.POST["userpassword"])
        f.userEmail = request.POST["useremail"]
        f.userImage = user_image
        f.isActive = True
        f.save()
        return render(request, 'form.html', {'sucess': True})
    return render(request, 'form.html', {"sucess": True})
def updatefunction(request):
    """Update an existing userData row (email, password, name) by userid.

    The password is now hashed with make_password before saving, matching
    how index2 stores it at registration.  Previously the raw POSTed
    password was written to the database -- a plaintext-credential bug and
    inconsistent with the hashed values created elsewhere in this module.
    """
    if request.method == "POST":
        useremail = request.POST["useremail"]
        npassword = request.POST["userpassword"]
        username = request.POST["username"]
        userid = request.POST["userid"]
        updatedata = userData(userID=userid, userEmail=useremail,
                              userPassword=make_password(npassword),
                              userName=username)
        # Only the three listed columns are written; userID selects the row.
        updatedata.save(update_fields=["userEmail", "userPassword", "userName"])
        return render(request, 'udp.html', {"sucess": True})
    return render(request, 'udp.html', {"sucess": True})
def datafetch(request):
    # List every userData row; the commented-out variants (filter / get)
    # are alternative queries kept for reference.
    data=userData.objects.all()
    #data=userData.objects.filter(isActive=1)
    #data=userData.objects.get(userEmail="vishal20mandora@gmail.com")
    return render(request,'viewdata.html',{"d":data})
def deletedata(request):
    # Delete the row whose userID arrives as ?id=... then return to the list.
    # NOTE(review): side effect on a GET request, and a missing/unknown id
    # raises (KeyError / userData.DoesNotExist).
    id=request.GET["id"]
    data=userData.objects.get(userID=id)
    data.delete()
    return redirect('/user/fetch/')
def editdata(request):
    # NOTE(review): `edit()` is not a standard Django model method; unless
    # userData defines it, this view raises AttributeError -- verify.
    id=request.GET["id"]
    data=userData.objects.get(userID=id)
    data.edit()
    return redirect('/user/fetch/') |
996,289 | 617059eb099282dbf684f5cdfeaba07a5e740fa4 | from copy import deepcopy
import json
from openshift.dynamic.exceptions import NotFoundError
LAST_APPLIED_CONFIG_ANNOTATION = 'kubectl.kubernetes.io/last-applied-configuration'
def apply_object(resource, definition):
    # Compute the (existing, desired) pair for a kubectl-apply style update:
    # fetch the live object and build the body to send, embedding the
    # serialized definition in the last-applied annotation so later applies
    # can three-way merge against it.
    desired_annotation = dict(
        metadata=dict(
            annotations={
                LAST_APPLIED_CONFIG_ANNOTATION: json.dumps(definition, separators=(',', ':'), indent=None)
            }
        )
    )
    try:
        actual = resource.get(name=definition['metadata']['name'], namespace=definition['metadata'].get('namespace'))
    except NotFoundError:
        # Object does not exist yet: caller should create it as-is.
        return None, dict_merge(definition, desired_annotation)
    last_applied = actual.metadata.get('annotations',{}).get(LAST_APPLIED_CONFIG_ANNOTATION)
    if last_applied:
        # Three-way merge: last-applied vs desired vs the live object.
        last_applied = json.loads(last_applied)
        actual_dict = actual.to_dict()
        # Drop the annotation itself so it never appears in the diff.
        del actual_dict['metadata']['annotations'][LAST_APPLIED_CONFIG_ANNOTATION]
        patch = merge(last_applied, definition, actual_dict)
        if patch:
            return actual.to_dict(), dict_merge(patch, desired_annotation)
        else:
            # Nothing to change.
            return actual.to_dict(), actual.to_dict()
    else:
        # No history annotation: fall back to a plain merge of the definition.
        return actual.to_dict(), dict_merge(definition, desired_annotation)
def apply(resource, definition):
    # Create or merge-patch `definition` on the cluster, kubectl-apply style.
    existing, desired = apply_object(resource, definition)
    if not existing:
        return resource.create(body=desired, namespace=definition['metadata'].get('namespace'))
    if existing == desired:
        # Already up to date; just return the live object.
        return resource.get(name=definition['metadata']['name'], namespace=definition['metadata'].get('namespace'))
    return resource.patch(body=desired,
                          name=definition['metadata']['name'],
                          namespace=definition['metadata'].get('namespace'),
                          content_type='application/merge-patch+json')
# The patch is the difference from actual to desired without deletions, plus deletions
# from last_applied to desired. To find it, we compute deletions, which are the deletions from
# last_applied to desired, and delta, which is the difference from actual to desired without
# deletions, and then apply delta to deletions as a patch, which should be strictly additive.
def merge(last_applied, desired, actual):
    # Combine reverts of keys removed/changed since last apply with
    # additions missing from the live object (derivation in the comment
    # block above this function).
    deletions = get_deletions(last_applied, desired)
    delta = get_delta(actual, desired)
    return dict_merge(deletions, delta)
# dict_merge taken from Ansible's module_utils.common.dict_transformations
def dict_merge(a, b):
    '''Recursively merge mapping *b* over a deep copy of mapping *a*.

    Values from *b* win; when both sides hold a dict under the same key the
    two are merged recursively instead of replaced.  If *b* is not a dict it
    simply replaces *a* wholesale.  Neither input is mutated.'''
    if not isinstance(b, dict):
        return b
    merged = deepcopy(a)
    for key in b:
        value = b[key]
        if key in merged and isinstance(merged[key], dict):
            merged[key] = dict_merge(merged[key], value)
        else:
            merged[key] = deepcopy(value)
    return merged
def get_deletions(last_applied, desired):
    """Patch entries for keys present in *last_applied* whose value changed
    or disappeared in *desired* (removed keys map to None)."""
    patch = {}
    for key, previous in last_applied.items():
        wanted = desired.get(key)
        if isinstance(previous, dict) and isinstance(wanted, dict):
            nested = get_deletions(previous, wanted)
            if nested:
                patch[key] = nested
        elif previous != wanted:
            patch[key] = wanted
    return patch
def get_delta(actual, desired):
    """Additions needed to move *actual* toward *desired*.

    Keys absent (None) in the live object are taken wholesale from desired;
    dict-valued keys recurse.  Scalar keys that merely differ are
    deliberately NOT included -- reverting those is get_deletions' job.
    """
    patch = {}
    for key, wanted in desired.items():
        current = actual.get(key)
        if current is None:
            patch[key] = wanted
        elif isinstance(wanted, dict):
            nested = get_delta(current, wanted)
            if nested:
                patch[key] = nested
    return patch
|
996,290 | 84df7539f1ce6c27bd06d0b702f0a8b182e6836f | #https://app.codility.com/demo/results/trainingE4UNAA-Y9B/
def cyclic_rotation_1(arr, k):
    """Rotate *arr* to the right by *k* positions (Codility CyclicRotation).

    Raises ValueError when k is outside the task's 0..100 bound.  Returns a
    new list, except in the no-op cases (empty input, or k a multiple of the
    length) where the input list object itself is returned.
    """
    if k < 0 or k > 100:
        raise ValueError
    size = len(arr)
    if size == 0:
        return []
    shift = k % size
    if shift == 0:
        return arr
    # Right rotation: the last `shift` elements move to the front.
    return arr[-shift:] + arr[:-shift]
#print(cyclic_rotation_1([3, 8, 9, 7, 6],5)) |
996,291 | 01b3c1d7a4138ebea4dbed6ce6a0cd0e11425b05 | import time
class DateManager(object):
    """Helpers for the current date and for "YYYY-MM-DD hh:mm" strings."""
    def getDate(self):
        """Today's date formatted as "YYYY/MM/DD"."""
        return time.strftime("%Y/%m/%d")
    def getYear(self):
        """Current four-digit year."""
        return time.strftime("%Y")
    def getMonth(self):
        """Current zero-padded month."""
        return time.strftime("%m")
    def getDay(self):
        """Current zero-padded day of month."""
        return time.strftime("%d")
    def getCreatedYear(self, datetime):
        """Year part of a "YYYY-MM-DD ..." timestamp string."""
        return datetime.split("-", 1)[0]
    def getCreatedMonth(self, datetime):
        """Month part of a "YYYY-MM-DD ..." timestamp string."""
        return datetime.split("-", 2)[1]
    def getCreatedDay(self, datetime):
        """Day part (date only) of a "YYYY-MM-DD hh:mm" timestamp string."""
        day_and_time = datetime.split("-")[2]
        return day_and_time.split(" ", 1)[0]
|
996,292 | 7f445bc512445c06e32c5a4db94fa6ac9def13b0 | def make_car(Manufacturer, model, **user_info):
    # Build a car-description dict: fixed defaults first, then the required
    # manufacturer/model, then any extra keyword arguments (extras may
    # overwrite the defaults).
    car = {'type': 'SUV', 'sent': "20W"}
    car['Manufacturer_name'] = Manufacturer
    car['Model'] = model
    for key, value in user_info.items():
        car[key] = value
    return car
    # print(car)
# Demo: build one car with extra attributes and print it.
car = make_car('subaru', 'outback', color='blue', tow_package=True)
print(car)
|
996,293 | 1fea998e4fa8245cacab11c5b9a99091e6416a77 |
import streamlit as st
import pandas as pd
import json
import matplotlib.pyplot as plt
import matplotlib
from text_data_analysis import analysis
from documentation import docs
# Sidebar navigation: three top-level pages.
menuItems = [
    'Text Data Analysis',
    'Automatic Data Analysis',
    'Documentation']
st.sidebar.title('Easy Analysis')
itemSelected = st.sidebar.selectbox('', menuItems)
github = '''[ Fork/Star on Github](https://github.com/rishavrajjain/felix)'''
st.sidebar.info(github)
if itemSelected == 'Text Data Analysis':
    st.title('Text Data Analysis')
    # Delegated to text_data_analysis.analysis().
    analysis()
elif itemSelected == 'Automatic Data Analysis':
    st.title('Automatic Data Analysis')
    # Upload a CSV and show basic exploratory views of it.
    data = st.file_uploader("Upload a Dataset", type=["csv"])
    if data is not None:
        df = pd.read_csv(data)
        st.dataframe(df)
        st.write('Shape')
        st.write(df.shape)
        all_columns = df.columns.to_list()
        st.write('Columns')
        st.write(all_columns)
        st.write('Summary of the Data')
        st.write(df.describe())
        # Single-column value-count table and bar chart.
        if st.checkbox("Plot data for a column"):
            selected_column = st.selectbox("Select Columns", all_columns)
            new_df = df[selected_column]
            st.dataframe(new_df.value_counts())
            st.bar_chart(new_df.value_counts())
        # Multi-column plot of a user-chosen chart type.
        if st.checkbox("Advance Plots and Analysis"):
            all_columns_names = df.columns.tolist()
            type_of_plot = st.selectbox(
                "Select Type of Plot", [
                    "area", "bar", "line"])
            selected_columns_names = st.multiselect(
                "Select Columns To Plot", all_columns_names)
            if st.button("Generate Plot"):
                st.success(
                    "Generating Customizable Plot of {} for {}".format(
                        type_of_plot, selected_columns_names))
                # Plot By Streamlit
                if type_of_plot == 'area':
                    cust_data = df[selected_columns_names]
                    st.area_chart(cust_data)
                elif type_of_plot == 'bar':
                    cust_data = df[selected_columns_names]
                    st.bar_chart(cust_data)
                elif type_of_plot == 'line':
                    cust_data = df[selected_columns_names]
                    st.line_chart(cust_data)
# NOTE(review): plain `if` rather than `elif` -- harmless here but
# inconsistent with the chain above.
if itemSelected == 'Documentation':
    st.title('Documentation')
    st.markdown(docs())
|
996,294 | 06c5730648d03b6c536e8fa30d00ac35d8e39bf0 | import bpy
from bpy.types import Node
from mn_node_base import AnimationNode
from mn_execution import nodePropertyChanged
from mn_utils import *
class SubProgramNode(Node, AnimationNode):
    # Animation node wrapping a connected sub-program.  Beyond its two fixed
    # input sockets it mirrors the connected start node's outputs (skipping
    # that node's first two outputs) as dynamic input/output socket pairs.
    bl_idname = "SubProgramNode"
    bl_label = "Sub-Program"
    def init(self, context):
        # Two fixed sockets: the sub-program link and the repeat amount.
        self.inputs.new("SubProgramSocket", "Sub-Program")
        self.inputs.new("IntegerSocket", "Amount")
    def draw_buttons(self, context, layout):
        # Button that re-syncs the dynamic sockets with the linked start node.
        rebuild = layout.operator("mn.rebuild_sub_program_sockets", "Rebuild Sockets")
        rebuild.nodeTreeName = self.id_data.name
        rebuild.nodeName = self.name
    def rebuildSubProgramSockets(self):
        # Drop all dynamic sockets, then mirror the connected start node's
        # outputs from index 2 onward.
        self.removeDynamicSockets()
        if hasLinks(self.inputs["Sub-Program"]):
            startNode = self.inputs["Sub-Program"].links[0].from_node
            for i, output in enumerate(startNode.outputs):
                if i >= 2:
                    self.inputs.new(output.bl_idname, output.name)
                    self.outputs.new(output.bl_idname, output.name)
    def removeDynamicSockets(self):
        # Outputs are all dynamic; inputs keep their first two fixed sockets.
        # NOTE(review): removing from self.inputs while enumerating it may
        # skip entries -- confirm against Blender's collection semantics if
        # more than one dynamic input ever needs removal.
        self.outputs.clear()
        for i, socket in enumerate(self.inputs):
            if i >= 2: self.inputs.remove(socket)
# register
################################
def register():
    # Standard Blender add-on hook: register every class in this module.
    bpy.utils.register_module(__name__)
def unregister():
    # Counterpart hook: unregister this module's classes when disabled.
    bpy.utils.unregister_module(__name__) |
996,295 | 8aee4b214c262b69d1c957b91ee772702b5dd289 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 00:20:30 2020
@author: MADHURIMA
"""
"""def arrayManipulation(n,queries):
a=[0]*(n+1)
a=a[1:]
for i in queries:
for j in range(i[0],(i[1]+1)):
a[j-1] += i[2]
return max(a)
n,m=[int(i) for i in input().split(" ")]
queries=[]
for i in range(m):
sub=[int(i) for i in input().split(" ")][:3]
queries.append(sub)
print(arrayManipulation(n,queries))"""
"""n,m=[int(i) for i in input().split(" ")]
arr=[0]*(n)
for i in range(m):
a,b,k=[int(p) for p in input().split(" ")]
for j in range(a,b+1):
arr[j-1] += k
print(max(arr))"""
# final
# Difference (prefix-sum) array: each (x, y, incr) range update is applied in
# O(1) by adding incr at x-1 and subtracting it just past y; the answer is
# the maximum running prefix sum -- O(n + m) overall.
# Fixes vs. the previous version: no longer shadows the builtins `list` and
# `max`, and the boundary guard is strict (`<`) so an out-of-range y can
# never index past the end of the array.
n, inputs = [int(tok) for tok in input().split(" ")]
diff = [0] * (n + 1)
for _ in range(inputs):
    x, y, incr = [int(tok) for tok in input().split(" ")]
    diff[x - 1] += incr
    if y < len(diff):
        diff[y] -= incr
best = running = 0
for delta in diff:
    running += delta
    if best < running:
        best = running
print(best)
|
996,296 | 3de53f0effe4a8fba889294ba2d7c4a92f6660ee | # https://www.acmicpc.net/problem/2381
import sys
if __name__ == '__main__':
    # Max Manhattan distance over all point pairs equals the larger of the
    # spreads of (x+y) and (x-y), so only the extremes of those two
    # transformed values are needed.
    n_num = int(sys.stdin.readline())
    calc = [[[0, i] for i in range(n_num)], [[0, i] for i in range(n_num)]]  # [x+y, seq], [x-y, seq]
    for i in range(n_num):
        x, y = map(int, sys.stdin.readline().split())
        calc[0][i][0] = x + y
        calc[1][i][0] = x - y
    answer = []
    # -(x + y) + (x + y)
    answer.append(-min(calc[0], key=lambda x: x[0])[0] + max(calc[0], key=lambda x: x[0])[0])
    # -(x - y) + (x - y)
    answer.append(-min(calc[1], key=lambda x: x[0])[0] + max(calc[1], key=lambda x: x[0])[0])
    # (x - y) - (x - y)
    # NOTE(review): the four entries are pairwise identical (1st==4th,
    # 2nd==3rd); two appends would suffice.
    answer.append(max(calc[1], key=lambda x: x[0])[0] - min(calc[1], key=lambda x: x[0])[0])
    # (x + y) - (x + y)
    answer.append(max(calc[0], key=lambda x: x[0])[0] - min(calc[0], key=lambda x: x[0])[0])
    print(max(answer)) |
996,297 | ac43d23ed4128be7ab4df66cf42c84c3cf2f895b |
###########################
# PANTELIDIS NIKOS AM2787 #
###########################
from spatial_search import *
from text_search import *
# THESE FILE CONTAINS THE FUNCTIONS OF PART 3 AND
# SOME OF THE TEXTUAL AND SPATIAL SEARCH FUNCTIONS
# BUT APPROPRIATELY MODIFIED TO PIPELINE THE RESULTS
# OF THE FIRST SEARCH IN THE SECOND SEARCH TO FILTER
# THEM
def kwSpaSearchRaw(r_query, t_query, restaurants_list):
    """Brute-force combined search.

    Scan every restaurant and keep the indices of those that carry all tags
    in t_query AND lie inside the rectangle
    r_query = [x_min, x_max, y_min, y_max] (borders inclusive).
    """
    x_min, x_max, y_min, y_max = r_query[0], r_query[1], r_query[2], r_query[3]
    hits = []
    for idx, rest in enumerate(restaurants_list):
        has_all_tags = all(tag in rest['tags'] for tag in t_query)
        in_window = (x_min <= rest['x'] <= x_max) and (y_min <= rest['y'] <= y_max)
        if has_all_tags and in_window:
            hits.append(idx)
    return hits
def kwSpaSearchIF(r_query, t_query, tags_list, bags_of_restaurants, x_list, y_list, grid, restaurants_list):
    # Pipeline order: inverted-file tag search first, then the spatial grid
    # filter over those hits.
    tags_results = kwSearchIF(t_query, tags_list, bags_of_restaurants)
    final_results = spaSearchGridAfterIF(r_query, tags_results, x_list, y_list, grid, restaurants_list)
    return final_results
def kwSpaSearchGrid(r_query, t_query, tags_list, bags_of_restaurants, x_list, y_list, grid, restaurants_list):
    # Opposite pipeline order: spatial grid search first, then the tag
    # (inverted-file) filter over the spatial candidates.
    grid_results = spaSearchGrid(r_query,x_list,y_list,grid,restaurants_list)
    #print("GRID",grid_results[0])
    final_results = kwSearchIFAfterGrid(t_query, tags_list, bags_of_restaurants, grid_results)
    return final_results
def kwSearchIFAfterGrid(text_list, tags_list, bags_list, grid_results):
    # Tag filter (inverted file) applied only to candidates that already
    # passed the spatial grid search.
    s_text_list = sorted(text_list)
    # merge_join (from text_search) maps each query tag to its index in
    # tags_list; -1 presumably signals an unknown tag -- confirm against
    # text_search before relying on it.
    if_indexes = merge_join(s_text_list, tags_list)
    #print(if_indexes)
    if if_indexes == -1:
        return []
    # Intersect the posting lists ("bags") of all query tags, restricted to
    # the spatial candidates; result order is unspecified (set-based).
    r_intersection = set([r for r in bags_list[if_indexes[0]] if r in grid_results])
    if len(if_indexes) > 1:
        for i in range(1, len(if_indexes)):
            r_intersection = r_intersection.intersection([r for r in bags_list[if_indexes[i]] if r in grid_results])
    return list(r_intersection)
def spaSearchGridAfterIF(r_query, tags_results, x_list, y_list, grid, restaurants_list):
    # Spatial window search over the grid, keeping only restaurants already
    # matched by the preceding tag search (tags_results).
    # binary_search (from spatial_search) maps a coordinate onto a grid cell
    # index -- exact semantics defined in that module.
    x_min_index = binary_search(x_list, r_query[0])
    x_max_index = binary_search(x_list, r_query[1])
    y_min_index = binary_search(y_list, r_query[2])
    y_max_index = binary_search(y_list, r_query[3])
    results = []
    for i in range(y_min_index,y_max_index + 1):
        for j in range(x_min_index,x_max_index + 1):
            if len(grid[i][j]) > 0:
                for r in grid[i][j]:
                    if r in tags_results:
                        # Border cells may hold points just outside the
                        # rectangle, so re-check the exact bounds.
                        if restaurants_list[r]['x'] >= r_query[0] and \
                        restaurants_list[r]['x'] <= r_query[1] and \
                        restaurants_list[r]['y'] >= r_query[2] and \
                        restaurants_list[r]['y'] <= r_query[3]:
                            results.append(r)
    return results |
996,298 | d4871c484fe17df258ece744d1964e08a9c0b37b | """基于字典元素中特定值对应的键生成列表"""
def keys_of(dic: dict, val: 'value') -> list:
    """Return the keys of *dic* whose value equals *val*, in insertion order."""
    matches = []
    for key, value in dic.items():
        if value == val:
            matches.append(key)
    return matches
# Demo: count character frequencies in a user-supplied string, then list the
# characters whose count equals a requested number.  The literal prompts are
# Chinese: "string:", "distribution=", "character count:", "<n> characters=".
txt = input('字符串:')
count = {ch: txt.count(ch) for ch in txt}
print('分布=', count)
num = int(input('字符数量:'))
print('{}个字符={}'.format(num, keys_of(count, num))) |
996,299 | df92e059a519b9cb16bca08b470b54c7aefe1154 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import time
from calvin.requests import calvinresponse
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities.calvinuuid import uuid
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities import dynops
from calvin.utilities.requirement_matching import ReqMatch
from calvin.utilities.replication_defs import REPLICATION_STATUS, PRE_CHECK
from calvin.runtime.south.plugins.async import async
from calvin.actor.actorstate import ActorState
from calvin.runtime.north.plugins.requirements import req_operations
from calvin.actor.actorport import PortMeta
from calvin.runtime.north.plugins.port import DISCONNECT
from calvin.utilities.utils import enum
_log = get_logger(__name__)
class ReplicationData(object):
    """An actors replication data"""
    # One replication group: which actor is master, which replica actor ids
    # exist, the scaling requirements, and the group's lifecycle status.
    def __init__(self, actor_id=None, master=None, requirements=None, initialize=True):
        super(ReplicationData, self).__init__()
        # id stays None for plain (non-replicated) actors.
        self.id = uuid("REPLICATION") if initialize else None
        self.master = master
        self.instances = [] if actor_id is None else [actor_id]
        # TODO requirements should be plugin operation, now just do target number
        self.requirements = requirements
        # Count of replicas ever created (used e.g. for naming), NOT the
        # current number of live instances.
        self.counter = 0
        # {<actor_id>: {'known_peer_ports': [peer-ports id list], <org-port-id: <replicated-port-id>, ...}, ...}
        self.remaped_ports = {}
        self.status = REPLICATION_STATUS.UNUSED
        self._terminate_with_node = False
        self._one_per_runtime = False
    def state(self, remap=None):
        # Serialize for migration/replication; remap is not None when the
        # state is for a newly created replica (minimal fields only).
        state = {}
        if self.id is not None:
            # Replicas only need to keep track of id, master actor and their count number
            # Other data need to be synced from registry anyway when e.g. switching master
            state['id'] = self.id
            state['master'] = self.master
            state['counter'] = self.counter
            state['_terminate_with_node'] = self._terminate_with_node
            state['_one_per_runtime'] = self._one_per_runtime
            if remap is None:
                # For normal migration include these
                state['instances'] = self.instances
                state['requirements'] = self.requirements
                state['remaped_ports'] = self.remaped_ports
                # We might migrate at the same time as we (de)replicate
                # To not lock the replication manager just change the migration state
                state['status'] = REPLICATION_STATUS.READY if self.is_busy() else self.status
        # Best-effort: the requirement operation plugin may expose extra
        # state; any failure (no requirements, no get_state) is ignored.
        try:
            state['req_op'] = req_operations[self.requirements['op']].get_state(self)
        except:
            pass
        return state
    def set_state(self, state):
        # Restore from a dict produced by state(); missing keys get defaults.
        self.id = state.get('id', None)
        self.master = state.get('master', None)
        self.instances = state.get('instances', [])
        self.requirements = state.get('requirements', {})
        self.counter = state.get('counter', 0)
        self._terminate_with_node = state.get('_terminate_with_node', False)
        self._one_per_runtime = state.get('_one_per_runtime', False)
        self.remaped_ports = state.get('remaped_ports', {})
        self.status = state.get('status', REPLICATION_STATUS.UNUSED)
        # Best-effort counterpart of the req_op serialization in state().
        try:
            req_operations[self.requirements['op']].set_state(self, state['req_op'])
        except:
            pass
    def add_replica(self, actor_id):
        # Register a new replica id (idempotent) and bump the ever-created
        # counter.
        if actor_id in self.instances:
            return
        self.instances.append(actor_id)
        self.counter += 1
    def remove_replica(self):
        # Pop the most recently added replica id; never removes the last
        # remaining instance.  Returns None when nothing can be removed.
        if len(self.instances) < 2:
            return None
        actor_id = self.instances.pop()
        # Should counter reflect current? Probably not, better to introduce seperate current count
        # self.counter -= 1
        return actor_id
    def get_replicas(self, when_master=None):
        # All instance ids except the master; optionally only when
        # when_master matches the current master.
        if self.id and self.instances and (when_master is None or when_master == self.master):
            return [a for a in self.instances if a != self.master]
        else:
            return []
    def is_master(self, actor_id):
        # True when this group is active and actor_id is its master.
        return self.id is not None and self.master == actor_id
    def is_busy(self):
        # True while a (de)replication operation is in flight.
        return self.status in [REPLICATION_STATUS.REPLICATING, REPLICATION_STATUS.DEREPLICATING]
    def terminate_with_node(self, actor_id):
        # Replicas (but never the master) may be set to die with their node.
        return self._terminate_with_node and not self.is_master(actor_id)
    def inhibate(self, actor_id, inhibate):
        # Toggle replication: INHIBATED blocks the replication loop; on
        # release the master returns to READY, replicas to UNUSED.
        if inhibate:
            if self.requirements:
                self.status = REPLICATION_STATUS.INHIBATED
        elif self.is_master(actor_id):
            self.status = REPLICATION_STATUS.READY
        else:
            self.status = REPLICATION_STATUS.UNUSED
    def set_remaped_ports(self, actor_id, remap_ports, ports):
        # Record the original->replicated port-id mapping used when replica
        # actor_id was created, plus every peer port known at that time.
        self.remaped_ports[actor_id] = remap_ports
        # Remember the ports that we knew at replication time
        self.remaped_ports[actor_id]['known_peer_ports'] = (
            [pp[1] for p in (ports['inports'].values() + ports['outports'].values()) for pp in p])
    def connect_verification(self, actor_id, port_id, peer_port_id):
        # Master-side check on an incoming connect: any replica that did not
        # know about peer_port_id must also be told to connect to it.
        # Returns a list of (replica_id, mapped_port_id, peer_port_id).
        if not self.is_master(actor_id):
            return []
        connects = []
        for aid, ports in self.remaped_ports.items():
            if peer_port_id in ports['known_peer_ports']:
                continue
            # Got a port connect from an unknown peer port must be a new replica created simultaneously
            # as <aid> replica. Need to inform <aid> replica to do the connection
            connects.append((aid, ports[port_id], peer_port_id))
        return connects
    def init_requirements(self, requirements=None):
        # (Re)initialize the requirement-operation plugin, optionally
        # replacing the stored requirements first.  Failures are logged,
        # not raised.
        if requirements is not None:
            self.requirements = requirements
        try:
            if not self.requirements:
                return
            req_operations[self.requirements['op']].init(self)
        except:
            _log.exception("init_requirements")
class ReplicationManager(object):
def __init__(self, node):
super(ReplicationManager, self).__init__()
self.node = node
def supervise_actor(self, actor_id, requirements):
try:
actor = self.node.am.actors[actor_id]
except:
return calvinresponse.CalvinResponse(calvinresponse.NOT_FOUND)
if actor._replication_data.id is None:
actor._replication_data = ReplicationData(
actor_id=actor_id, master=actor_id, requirements=requirements)
actor._replication_data.init_requirements()
elif actor._replication_data.is_master(actor_id):
# If we already is master that is OK, update requirements
# FIXME should not update during a replication, fix when we get the
# requirements from the deployment requirements
actor._replication_data.init_requirements(requirements)
self.node.storage.add_replication(actor._replication_data, cb=None)
return calvinresponse.CalvinResponse(True, {'replication_id': actor._replication_data.id})
else:
return calvinresponse.CalvinResponse(calvinresponse.BAD_REQUEST)
actor._replication_data.status = REPLICATION_STATUS.READY
# TODO add a callback to make sure storing worked
self.node.storage.add_replication(actor._replication_data, cb=None)
self.node.storage.add_actor(actor, self.node.id, cb=None)
#TODO trigger replication loop
return calvinresponse.CalvinResponse(True, {'replication_id': actor._replication_data.id})
def list_master_actors(self):
return [a for a_id, a in self.node.am.actors.items() if a._replication_data.master == a_id]
def list_replication_actors(self, replication_id):
return [a_id for a_id, a in self.node.am.actors.items() if a._replication_data.id == replication_id]
#
# Replicate
#
def replicate(self, actor_id, dst_node_id, callback):
try:
actor = self.node.am.actors[actor_id]
except:
if callback:
callback(calvinresponse.CalvinResponse(calvinresponse.BAD_REQUEST))
return
if not actor._replication_data.is_master(actor.id):
# Only replicate master actor
if callback:
callback(calvinresponse.CalvinResponse(calvinresponse.BAD_REQUEST))
return
if actor._replication_data.status != REPLICATION_STATUS.READY:
if callback:
callback(calvinresponse.CalvinResponse(calvinresponse.SERVICE_UNAVAILABLE))
return
_log.analyze(self.node.id, "+", {'actor_id': actor_id, 'dst_node_id': dst_node_id})
actor._replication_data.status = REPLICATION_STATUS.REPLICATING
cb_status = CalvinCB(self._replication_status_cb, replication_data=actor._replication_data, cb=callback)
# TODO make name a property that combine name and counter in actor
new_id = uuid("ACTOR")
actor._replication_data.check_instances = time.time()
actor._replication_data.add_replica(new_id)
new_name = actor.name + "/{}".format(actor._replication_data.counter)
actor_type = actor._type
ports = actor.connections(self.node.id)
ports['actor_name'] = new_name
ports['actor_id'] = new_id
remap_ports = {pid: uuid("PORT") for pid in ports['inports'].keys() + ports['outports'].keys()}
actor._replication_data.set_remaped_ports(new_id, remap_ports, ports)
ports['inports'] = {remap_ports[pid]: v for pid, v in ports['inports'].items()}
ports['outports'] = {remap_ports[pid]: v for pid, v in ports['outports'].items()}
_log.analyze(self.node.id, "+ GET STATE", remap_ports)
state = actor.state(remap_ports)
state['_name'] = new_name
state['_id'] = new_id
# Make copy to make sure no objects are shared between actors or master actor state is changed
state = copy.deepcopy(state)
actor.will_replicate(ActorState(state, actor._replication_data))
if dst_node_id == self.node.id:
# Make copy to make sure no objects are shared between actors
ports = copy.deepcopy(ports)
self.node.am.new_from_migration(
actor_type, state=state, prev_connections=ports, callback=CalvinCB(
self._replicated,
replication_id=actor._replication_data.id,
actor_id=new_id, callback=cb_status, master_id=actor.id, dst_node_id=dst_node_id))
else:
self.node.proto.actor_new(
dst_node_id, CalvinCB(self._replicated, replication_id=actor._replication_data.id,
actor_id=new_id, callback=cb_status, master_id=actor.id,
dst_node_id=dst_node_id),
actor_type, state, ports)
def _replicated(self, status, replication_id=None, actor_id=None, callback=None, master_id=None, dst_node_id=None):
_log.analyze(self.node.id, "+", {'status': status, 'replication_id': replication_id, 'actor_id': actor_id})
if status:
# TODO add callback for storing
self.node.storage.add_replica(replication_id, actor_id, dst_node_id)
self.node.control.log_actor_replicate(
actor_id=master_id, replica_actor_id=actor_id,
replication_id=replication_id, dest_node_id=dst_node_id)
if callback:
status.data = {'actor_id': actor_id, 'replication_id': replication_id}
callback(status)
def connect_verification(self, actor_id, port_id, peer_port_id, peer_node_id):
actor = self.node.am.actors[actor_id]
connects = actor._replication_data.connect_verification(actor_id, port_id, peer_port_id)
for actor_id, port_id, peer_port_id in connects:
if actor_id in self.node.am.actors:
# This actors replica is local
self.node.pm.connect(actor_id=actor_id, port_id=port_id, peer_port_id=peer_port_id)
_log.debug("Our connected(actor_id=%s, port_id=%s, peer_port_id=%s)" % (actor_id, port_id, peer_port_id))
elif peer_node_id == self.node.id:
# The peer actor replica is local
self.node.pm.connect(port_id=peer_port_id, peer_port_id=port_id)
_log.debug("Peer connected(actor_id=%s, port_id=%s, peer_port_id=%s)" %
(actor_id, port_id, peer_port_id))
else:
# Tell peer actor replica to connect to our replica
_log.debug("Port remote connect request %s %s %s %s" % (actor_id, port_id, peer_port_id, peer_node_id))
self.node.proto.port_remote_connect(peer_port_id=port_id, port_id=peer_port_id, node_id=peer_node_id,
callback=CalvinCB(
self._port_connected_remote, actor_id=actor_id, port_id=port_id, peer_port_id=peer_port_id, peer_node_id=peer_node_id))
def _port_connected_remote(self, status, actor_id, port_id, peer_port_id, peer_node_id):
_log.debug("Port remote connected %s %s %s %s %s" % (actor_id, port_id, peer_port_id, peer_node_id, str(status)))
if not status:
# Failed request for connecting, likely the actor having the peer port has migrated.
# Find it and try again.
peer_port_meta = PortMeta(self, port_id=peer_port_id)
try:
peer_port_meta.retrieve(callback=CalvinCB(self._found_peer_node, actor_id=actor_id, port_id=port_id, peer_port_id=peer_port_id))
except calvinresponse.CalvinResponseException as e:
_log.exception("Failed retrieving peer port meta info %s" % str(e))
return
def _found_peer_node(self, status, actor_id, port_id, peer_port_id, port_meta):
if not status:
# FIXME retry here? Now just ignore.
_log.error("Failed finding peer node %s %s %s %s" % (actor_id, port_id, peer_port_id, str(status)))
return
_log.debug("Found peer node %s %s %s %s" % (actor_id, port_id, peer_port_id, str(status)))
self._port_connected_remote(
status=calvinresponse.CalvinResponse(True),
actor_id=actor_id, port_id=port_id, peer_port_id=peer_port_id, peer_node_id=port_meta.node_id)
#
# Dereplication
#
def dereplicate(self, actor_id, callback, exhaust=False):
_log.analyze(self.node.id, "+", {'actor_id': actor_id, 'exhaust': exhaust})
terminate = DISCONNECT.EXHAUST if exhaust else DISCONNECT.TERMINATE
try:
replication_data = self.node.am.actors[actor_id]._replication_data
except:
if callback:
callback(calvinresponse.CalvinResponse(calvinresponse.BAD_REQUEST))
return
if not replication_data.is_master(actor_id):
# Only dereplicate by master actor
if callback:
callback(calvinresponse.CalvinResponse(calvinresponse.BAD_REQUEST))
return
if replication_data.status != REPLICATION_STATUS.READY:
if callback:
callback(calvinresponse.CalvinResponse(calvinresponse.SERVICE_UNAVAILABLE))
return
replication_data.status = REPLICATION_STATUS.DEREPLICATING
last_replica_id = replication_data.remove_replica()
if last_replica_id is None:
replication_data.status = REPLICATION_STATUS.READY
if callback:
callback(calvinresponse.CalvinResponse(calvinresponse.BAD_REQUEST))
return
cb_status = CalvinCB(self._replication_status_cb, replication_data=replication_data, cb=callback)
replication_data.check_instances = time.time()
if last_replica_id in self.node.am.actors:
self.node.am.destroy_with_disconnect(last_replica_id, terminate=terminate,
callback=CalvinCB(self._dereplicated, replication_data=replication_data,
last_replica_id=last_replica_id,
node_id=self.node.id, cb=cb_status))
else:
self.node.storage.get_actor(last_replica_id,
CalvinCB(func=self._dereplicate_actor_cb,
replication_data=replication_data, terminate=terminate, cb=cb_status))
def _dereplicate_actor_cb(self, key, value, replication_data, terminate, cb):
""" Get actor callback """
_log.analyze(self.node.id, "+", {'actor_id': key, 'value': value})
if value and 'node_id' in value:
# Use app destroy since it can remotely destroy actors
self.node.proto.app_destroy(value['node_id'],
CalvinCB(self._dereplicated, replication_data=replication_data, last_replica_id=key,
node_id=value['node_id'], cb=cb),
None, [key], disconnect=terminate, replication_id=replication_data.id)
else:
# FIXME Should do retries
if cb:
cb(calvinresponse.CalvinResponse(False))
def _dereplicated(self, status, replication_data, last_replica_id, node_id, cb):
if status:
# TODO add callback for storing
self.node.storage.remove_replica(replication_data.id, last_replica_id)
if node_id == self.node.id:
self.node.storage.remove_replica_node(replication_data.id, last_replica_id)
self.node.control.log_actor_dereplicate(
actor_id=replication_data.master, replica_actor_id=last_replica_id,
replication_id=replication_data.id)
if cb:
status.data = {'actor_id': last_replica_id}
cb(status)
def _replication_status_cb(self, status, replication_data, cb):
replication_data.status = REPLICATION_STATUS.READY
if cb:
cb(status)
#
# Terminate specific replica
#
def terminate(self, actor_id, terminate=DISCONNECT.TERMINATE, callback=None):
try:
replication_data = self.node.am.actors[actor_id]._replication_data
except:
if callback:
callback(status=calvinresponse.CalvinResponse(calvinresponse.BAD_REQUEST))
return
replication_data._is_terminating = True
self.node.storage.remove_replica(replication_data.id, actor_id)
self.node.storage.remove_replica_node(replication_data.id, actor_id)
self.node.control.log_actor_dereplicate(
actor_id=replication_data.master, replica_actor_id=actor_id,
replication_id=replication_data.id)
self.node.am.destroy_with_disconnect(actor_id, terminate=terminate,
callback=callback)
#
# Requirement controlled replication
#
def replication_loop(self):
if self.node.quitting:
return
replicate = []
dereplicate = []
no_op = []
for actor in self.list_master_actors():
if actor._replication_data.status != REPLICATION_STATUS.READY:
continue
if actor._migrating_to is not None:
continue
if not actor.enabled():
continue
try:
req = actor._replication_data.requirements
if not req:
continue
pre_check = req_operations[req['op']].pre_check(self.node, actor_id=actor.id,
component=actor.component_members(), **req['kwargs'])
except:
_log.exception("Pre check exception")
pre_check = PRE_CHECK.NO_OPERATION
if pre_check == PRE_CHECK.SCALE_OUT:
replicate.append(actor)
elif pre_check == PRE_CHECK.SCALE_IN:
dereplicate.append(actor)
elif pre_check == PRE_CHECK.NO_OPERATION:
no_op.append(actor)
for actor in replicate:
_log.info("Auto-replicate")
self.replicate_by_requirements(actor, CalvinCB(self._replication_loop_log_cb, actor_id=actor.id))
for actor in dereplicate:
_log.info("Auto-dereplicate")
self.dereplicate(actor.id, CalvinCB(self._replication_loop_log_cb, actor_id=actor.id), exhaust=True)
for actor in no_op:
if not hasattr(actor._replication_data, "check_instances"):
actor._replication_data.check_instances = time.time()
t = time.time()
if t > (actor._replication_data.check_instances + 2.0):
actor._replication_data.check_instances = t
self.node.storage.get_replica(actor._replication_data.id, CalvinCB(self._current_actors_cb, actor=actor))
def _current_actors_cb(self, key, value, actor):
collect_actors = [] if value is None else value
missing = set(actor._replication_data.instances) - set(collect_actors + [actor.id])
for actor_id in missing:
actor._replication_data.instances.remove(actor_id)
def _replication_loop_log_cb(self, status, actor_id):
_log.info("Auto-(de)replicated %s: %s" % (actor_id, str(status)))
def replicate_by_requirements(self, actor, callback=None):
""" Update requirements and trigger a replication """
actor._replicate_callback = callback
req = actor._replication_data.requirements
# Initiate any scaling specific actions
req_operations[req['op']].initiate(self.node, actor, **req['kwargs'])
r = ReqMatch(self.node,
callback=CalvinCB(self._update_requirements_placements, actor=actor))
r.match_for_actor(actor.id)
_log.analyze(self.node.id, "+ END", {'actor_id': actor.id})
def _update_requirements_placements(self, actor, possible_placements, status=None):
_log.analyze(self.node.id, "+ BEGIN", {}, tb=True)
# All possible actor placements derived
if not possible_placements:
if actor._replicate_callback:
actor._replicate_callback(status=calvinresponse.CalvinResponse(False))
return
# Select, always a list of node_ids, could be more than one
req = actor._replication_data.requirements
selected = req_operations[req['op']].select(self.node, actor, possible_placements, **req['kwargs'])
_log.analyze(self.node.id, "+", {'possible_placements': possible_placements, 'selected': selected})
if selected is None:
# When None - selection will never succeed
if actor._replicate_callback:
actor._replicate_callback(status=calvinresponse.CalvinResponse(False))
return
if actor._migrating_to is not None:
# If actor started migration skip replication
if actor._replicate_callback:
actor._replicate_callback(status=calvinresponse.CalvinResponse(False))
return
if not selected:
if actor._replicate_callback:
actor._replicate_callback(status=calvinresponse.CalvinResponse(False))
return
# FIXME create as many replicas as nodes in list (would need to serialize)
self.replicate(actor.id, selected[0], callback=actor._replicate_callback)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.