index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
49,513 | nanhua97/python_code | refs/heads/master | /tornado/tornado基础/opt.py | #coding:utf8
import tornado.web as t_web
import tornado.ioloop as t_io
import tornado.httpserver as t_http
import tornado.options as t_opt
'''
from tornado.options import options,parse_command_line
options.logging = None
parse_command_line()
'''
t_opt.define("port",default=8000,type=int,help="this is the port")
t_opt.define("rick",default=[],type=str,multiple=True,help='a b c d')
class IndexHandler(t_web.RequestHandler):
def get(self):
self.write("Hello options")
if __name__ == "__main__":
#t_opt.parse_command_line()
t_opt.parse_config_file("./config")
print(t_opt.options.rick)
app = t_web.Application([
(r'/',IndexHandler),
])
httpServer = t_http.HTTPServer(app)
httpServer.listen(t_opt.options.port)
t_io.IOLoop.current().start()
#运行 python opt.py --port==9000 --rick=a,b,c,d
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,514 | nanhua97/python_code | refs/heads/master | /tornado/tornado模板/static1.py | #coding:utf8
import json
import tornado.web as t_web
import tornado.ioloop as t_io
import tornado.options as t_opt
import tornado.httpserver as t_http
from tornado.web import RequestHandler,url,StaticFileHandler
from tornado.options import options,define
import os
define("port",default="8000",type=int,help="this is port")
if __name__ == "__main__":
current_path = os.path.dirname(__file__)
app = t_web.Application(
[
#本质是目录拼接
(r'^/()$', StaticFileHandler, {"path":os.path.join(current_path, "statics/html"), "default_filename":"index.html"}),
(r'^/view/(.*)$', StaticFileHandler,{"path":os.path.join(current_path,"statics/html")}),
(r'^/template/(.*)$', StaticFileHandler,{"path":os.path.join(current_path,"templates")}),
],
debug=True,
#本目录下的statics目录,
static_path=os.path.join(current_path, "statics"),
#本目录下的templates目录
template_path=os.path.join(current_path, "templates"),
)
httpServer = t_http.HTTPServer(app)
httpServer.listen(options.port)
t_io.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,515 | nanhua97/python_code | refs/heads/master | /tornado/tornado异步/同步.py | #coding:utf8
def req_a():
print("start_A")
print("A_end")
def req_b():
print("start_B")
print("B_end")
def main():
#模拟tornado框架
req_a()
req_b()
if __name__ == "__main__":
main()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,516 | nanhua97/python_code | refs/heads/master | /django/test2/booktest/views.py | #-*-coding:utf8-*-
from django.shortcuts import render
from .models import *
from django.db.models import Max,F,Q
def index(request):
# list = BookInfo.books2.filter(heroinfo__hcontent__contains='六')
#list = BookInfo.books2.aggregate(Max('pk'))
#list = BookInfo.books2.filter(bread__gt = F('bcommet'))
list = BookInfo.books2.filter(Q(pk__gt=3)|Q(heroinfo__hname__contains = '段'))
contains={'list':list}
return render(request,'booktest/index.html',contains)
# Create your views here.
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,517 | nanhua97/python_code | refs/heads/master | /tornado/tornado基础/uri.py | #coding:utf8
import tornado.web as t_web
import tornado.ioloop as t_io
import tornado.httpserver as t_http
import tornado.options as t_opt
from tornado.web import RequestHandler,url
from tornado.options import options,define
define("port",default=8000,type=int,help="this is the port")
class IndexHandler(RequestHandler):
def get(self):
self.write("hello rick")
#City_url = self.reverse_url("City")
#self.write('<a href="%s">City</a>'%City_url)
self.write(self.request.uri)
class SubjectCityHandler(RequestHandler):
def get(self,subject,city):
self.write("subject:%s<br/>city:%s"%(subject,city))
self.write(self.request.uri)
class SubjectDateHandler(RequestHandler):
def get(self,date,subject):
self.write("Date:%s<br/>Subject:%s"%(date,subject))
self.write(self.request.uri)
if __name__ == "__main__":
t_opt.parse_command_line()
app=t_web.Application([
url(r"/",IndexHandler),
url(r"/sub-city/(.+)/([a-z]+)",SubjectCityHandler,name="City"),
url(r"/sub-date/(?P<subject>.+)/(?P<date>\d+)",SubjectDateHandler,name="Date"),
],debug=True)
httpServer = t_http.HTTPServer(app)
httpServer.listen(options.port)
t_io.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,518 | nanhua97/python_code | refs/heads/master | /django/test4/booktest/views.py | #-*-coding:utf8-*-
from django.shortcuts import render,redirect
from .models import *
from django.http import HttpResponse
def index(request):
hero = HeroInfo.objects.get(pk=3)
list = HeroInfo.objects.all()
context = {'hero':hero,'list':list}
return render(request,'booktest/index.html',context)
def show(request,id,id1):
context = {'id':id,'id1':id1}
return render(request,'booktest/show.html',context)
def base(request):
return render(request,'booktest/base2.html')
def user(request):
return render(request,'booktest/mall_user.html')
def user1(request):
head = 'welcome to the world'
context = {'head':head}
return render(request,'booktest/mall_user1.html',context)
def htmlTest(request):
html = '<h1>hello</h1>'
context = {'h1':html}
return render(request,'booktest/htmlTest.html',context)
def csrf1(request):
return render(request,'booktest/csrf1.html')
def csrf2(request):
uname = request.POST['uname']
return HttpResponse(uname)
from PIL import Image,ImageFilter,ImageFont,ImageDraw
import random
import io
def rndChar():
return chr(random.randint(65,90))
def rndcolor():
return (random.randint(64,255),random.randint(64,255),random.randint(64,255))
def rndcolor2():
return (random.randint(37,255),random.randint(37,255),random.randint(37,255))
def gene_line(draw,width,height):
begin = (random.randint(0,width),random.randint(0,height))
end = (random.randint(0,width),random.randint(0,height))
draw.line([begin,end],fill=rndcolor())
def gene_point(draw,width,height):
for x in range(width):
for y in range(height):
draw.point((x,y),fill=rndcolor())
def gene_code(size,char):
width,height=size
image = Image.new('RGB',(width,height),(255,255,255))
font = ImageFont.truetype('FreeMono.ttf',25)
draw = ImageDraw.Draw(image)
for i in char:
draw.text((30*char.index(i),2),i,font=font,fill=rndcolor2())
image = image.filter(ImageFilter.BLUR)
return image
def change(request):
width,height=(100,30)
char=''
for i in range(4):
char = char+rndChar()
request.session['code'] = char
image = Image.new('RGB',(width,height),(255,255,255))
font = ImageFont.truetype('FreeMono.ttf',25)
draw = ImageDraw.Draw(image)
for i in char:
draw.text((20*char.index(i),2),i,font=font,fill=(0,0,0))
# draw.text((30*char.index(i),2),i,font=font,fill=rndcolor2())
# image = image.filter(ImageFilter.BLUR)
image.save('./templates/booktest/code.png')
def changeCode(request):
return redirect('/code1')
def code1(request):
change(request)
return render(request,'booktest/code1.html')
def code2(request):
if request.session['code'].lower() == request.POST['uname'].lower():
return HttpResponse('ok')
else:
return redirect('/code1')
# return HttpResponse(request.session['code'])
# Create your views here.
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,519 | nanhua97/python_code | refs/heads/master | /tornado/tornado基础/defaultHeaderError.py | #coding:utf8
import tornado.web as t_web
import tornado.ioloop as t_io
import tornado.httpserver as t_http
import tornado.options as t_opt
from tornado.web import RequestHandler,url
from tornado.options import options,define
import json
define("port",default=8000,type=int,help="this is the port")
class IndexHandler(RequestHandler):
def set_default_headers(self):
print("set_default_headers()")
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.set_header("Rick","C-137")
def get(self):
print("this is the get()")
stu = {
"name":"Morty",
"age":24,
"gender":1,
}
stu_json = json.dumps(stu)
self.write(stu_json)
self.set_header("Rick","summer")
def post(self):
print("this is the post()")
stu = {
"name":"Morty",
"age":24,
"gender":1,
}
stu_json = json.dumps(stu)
self.write(stu_json)
"""
#标准状态码不需要写reason,非标准状态玛需要写reason,否则会报错
class Err404Handler(RequestHandler):
def get(self):
self.write("Hello Rick-404")
self.set_status(404)
class Err489Handler(RequestHandler):
def get(self):
self.write("Hello Rick-489")
self.set_status(489,"Morty Error")
class Err481Handler(RequestHandler):
def get(self):
self.write("Hello Rick-481")
self.set_status(481)
#重定向
class LoginHandler(RequestHandler):
def get(self):
self.write('<form method="post"><input type="submit" value="登陆"></form>')
def post(self):
self.redirect("/")
class IndexHandler(RequestHandler):
def get(self):
self.write("主页")
self.send_error(404,content="404错误")
#self.write("error")
#send_error()后不要往缓冲区写东西
class IndexHandler(RequestHandler):
def get(self):
err_code = self.get_argument("code",None)
err_title = self.get_argument("title","")
err_content = self.get_argument("content","")
if err_code:
self.send_error(err_code,sss=123,content=err_content)
print(err_title)
print(err_content)
else:
self.write("主页")
def write_error(self,status_code,**kwargs):
self.write("<h1>出错了,程序员GG正在赶过来!</h1>")
# self.write(kwargs["sss"])
# self.write(kwargs["content"])
class IndexHandler(RequestHandler):
def get(self):
err_code = self.get_argument(u"code", None) # 注意返回的是unicode字符串,下同
err_title = self.get_argument(u"title", "")
err_content = self.get_argument(u"content", "")
print(err_code)
print(err_title)
print(err_content)
if err_code:
self.send_error(int(err_code), title=err_title, content=err_content)
else:
self.write("主页")
def write_error(self, status_code, **kwargs):
self.write("<h1>出错了,程序员GG正在赶过来!</h1>")
self.write("<p>错误名:%s</p>" % kwargs["title"])
self.write("<p>错误详情:%s</p>" % kwargs["content"])
"""
if __name__ == "__main__":
t_opt.parse_command_line()
app = t_web.Application([
(r"/",IndexHandler),
#(r"/err404",Err404Handler),
#(r"/err489",Err489Handler),
#(r"/err481",Err481Handler),
#(r"/login",LoginHandler),
],debug=True)
httpServer = t_http.HTTPServer(app)
httpServer.listen(options.port)
t_io.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,520 | nanhua97/python_code | refs/heads/master | /django/test5/booktest/views.py | import os
from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
from django.conf import settings
from .models import *
from django.core.paginator import *
def index(request):
return render(request,'booktest/index.html')
# Create your views here.
def MyExp(request):
a=int('abc')
return HttpResponse('hello')
def uploadPic(request):
return render(request,'booktest/uploadPic.html')
def uploadHandle(request):
pic1 = request.FILES['pic1']
picName=os.path.join(settings.MEDIA_ROOT,pic1.name)
with open(picName,'wb+') as pic:
for c in pic1.chunks():
pic.write(c)
return HttpResponse('<img src="/static/media/%s">'%pic1.name)
#分页
def herolist(request,pindex):
if pindex == '':
pindex = '1'
list = HeroInfo.objects.all()
paginator = Paginator(list,5)
page = paginator.page(int(pindex))
context={'page':page}
return render(request,'booktest/herolist.html',context)
def getArea(request):
return render(request,'booktest/area1.html')
def getArea1(request):
list = Areas.objects.filter(parea__isnull=True)
list2=[]
for a in list:
list2.append([a.id,a.title])
return JsonResponse({'data':list2})
def getArea2(request,pid):
list = Areas.objects.filter(parea_id=pid)
list2 = []
for a in list:
list2.append({'id':a.id,'title':a.title})
return JsonResponse({'data':list2})
def html(request):
return render(request,'booktest/HTMLEdit.html')
def htmlHandler(request):
content = request.POST['content']
test = Test()
test.content = content
test.save()
return HttpResponse('ok')
def html2(request):
content = Test.objects.filter(pk=3)
context = {'content':content}
return render(request,'booktest/HTMLContent.html',context)
from django.views.decorators.cache import cache_page
from django.core.cache import cache
# @cache_page(60*10)
def cache1(request):
# return HttpResponse("longk")
# return HttpResponse("hello")
# cache.set('key1','val1',500)
# print(cache.get('key1'))
cache.clear()
# return render(request,'booktest/cache.html')
return HttpResponse('ok')
def mysearch(request):
return render(request,'booktest/mysearch.html')
from .task import *
#python manage.py celery worker --loglevel=info
def last(request):
# sayhello()
sayhello.delay()
return HttpResponse('OK')
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,521 | nanhua97/python_code | refs/heads/master | /django/image/image1.py | from PIL import Image,ImageFilter
im = Image.open('test.jpg')
im2 = im.filter(ImageFilter.BLUR)
im2.save('vlur.jpg','jpeg')
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,522 | nanhua97/python_code | refs/heads/master | /learPy/game/heros.py | #! usr/bin/env python3
# -*-coding:utf-8-*-
'heros-1.0'
__author__='nanhua'
import random
class Hero(object):
def __init__(self,usr,pwd):
self.name = usr
self.pwd = pwd
self.hp = 100
def change_Pwd(self):
while True:
old_Pwd = input('please input old password:')
if old_Pwd != self.pwd:
print('Your password is wrong and please try again')
else:
new_Pwd = input('Please input new password:')
self.pwd = new_Pwd
return False
def mes(self):
print([self.name,self.hp])
def apple(self):
self.hp += 10
self.mes()
account = input('Do you have a account?')
if account == 'no':
username = input('Please input your name:')
password = input('Please input your password:')
username = username if username else 'player01'
password = password if password else '123456'
mes = Hero(username,password)
f=open('player01','w')
f.write('%s\n%s'%(username,password))
f.close()
elif account == 'yes':
username = input('Please input your name:')
password = input('Please input your password:')
mes = Hero(username,password)
f=open('player01','w')
f = open('player01','r')
k = f.read().split('\n')
f.close()
if username == k[0] and password == k[1]:
pass
if username != k[0]:
print('user not found')
quit()
if password != k[1]:
print('password is wrong')
quit()
world = (
[(0,0),(0,1),(0,2)],
[(1,0),(1,1),(1,2)],
[(2,0),(2,1),(2,2)]
)
a = 0
b = 0
mes.mes()
print(world[a][b])
while True:
oper = input('Please input your operating:')
if oper == 'q':
break
elif oper == 'w':
a = a if a-1<0 else a-1
elif oper == 's':
a = a if a+1>2 else a+1
elif oper == 'a':
b = b if b-1<0 else b-1
elif oper == 'd':
b = b if b+1>2 else b+1
appA = random.randint(0,2)
appB = random.randint(0,2)
if a == appA and b == appB:
mes.apple()
print(world[a][b])
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,523 | nanhua97/python_code | refs/heads/master | /tornado/安全应用/XSRF.py | #coding:utf8
import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.options
from tornado.web import RequestHandler,url
from tornado.options import options,define
define("port",default=8000,type=int,help="this is port")
'''
#127.0.0.1:8000
class IndexHandler(RequestHandler):
def get(self):
cookie = self.get_secure_cookie("count")
count = int(cookie)+1 if cookie else 1
self.set_secure_cookie("count",str(count))
self.write(
'<html><head><title>Cookie计数器</title></head>'
'<body><h1>您已访问本页%d次。</h1>' % count +
'</body></html>'
)
'''
#127.0.0.1:9000 因把图片的连接指向了127.0.0.1:8000 所以自动启用了cookie
class IndexHandler(RequestHandler):
def get(self):
self.write(
'<html><head><title>被攻击的网站</title></head>'
'<body><h1>此网站的图片链接被修改了</h1>'
'<img alt="这应该是图片" src="http://127.0.0.1:8000/?f=9000/">'
'</body></html>'
)
if __name__ == "__main__":
app = tornado.web.Application([
(r'/',IndexHandler),
])
app.listen(9000)
tornado.ioloop.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,524 | nanhua97/python_code | refs/heads/master | /tornado/tornado基础/application.py | #coding:utf8
import tornado.web as t_web
import tornado.ioloop as t_io
import tornado.httpserver as t_http
from tornado.options import options,define
from tornado.web import url,RequestHandler
define("port",default=8000,type=int,help="run server on the given port")
class IndexHandler(RequestHandler):
def get(self):
python_url = self.reverse_url("python_url") #反向解析
self.write('<a href="%s">to_python</a>'%python_url)
class RickHandler(RequestHandler):
def initialize(self,morty):
self.morty = morty
def get(self):
self.write(self.morty)
if __name__ == "__main__":
options.parse_command_line()
app = t_web.Application([
(r'/',IndexHandler),
(r'/cpp',RickHandler,{"morty":"C-137"}),
url(r'/python',RickHandler,{"morty":"cool_morty"},name="python_url"),
],debug=True)
httpServer=t_http.HTTPServer(app)
httpServer.listen(options.port)
t_io.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,525 | nanhua97/python_code | refs/heads/master | /tornado/安全应用/XSRF2.py | #coding:utf8
import tornado.web
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.web import RequestHandler,StaticFileHandler,url
from tornado.options import options,define
import os
define("port",default="8000",type=int,help="this is the port")
class XSRFTokenHandler(RequestHandler):
def get(self):
self.xsrf_token
self.write("OK")
print(self.request.headers["Cookie"])
class StaticFileHandler(tornado.web.StaticFileHandler):
def __init__(self,*args,**kwargs):
super(StaticFileHandler,self).__init__(*args,**kwargs)
self.xsrf_token
if __name__=="__main__":
current_path = os.path.dirname(__file__)
app = tornado.web.Application([
(r"/",XSRFTokenHandler),
(r"^/view/()$",StaticFileHandler,{"path":os.path.join(current_path,"statics/html"),"default_filename":"index.html"})
],
debug=True,
static_path = os.path.join(current_path,"statics"),
template_path = os.path.join(current_path,"templates")
)
httpServer = tornado.httpserver.HTTPServer(app)
httpServer.listen(8000)
tornado.ioloop.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,526 | nanhua97/python_code | refs/heads/master | /django/test3/booktest/urls.py | from django.conf.urls import url
from . import views
urlpatterns=[
url(r'^$',views.index,name='index'),
url(r'^\(\d+\)$',views.test),
#url(r'^(\d+)$',views.detail),
url(r'^abc/(?P<num>\d+)/$',views.detail),
url(r'^(\d+)/(\d+)/(\d+)$',views.arg),
url(r'^(?P<p2>\d+)/(?P<p1>\d+)$',views.kwarg),
url(r'^getTest1/$',views.getTest1),
url(r'^getTest2/$',views.getTest2),
url(r'^getTest3/$',views.getTest3),
url(r'^postTest1$',views.postTest1),
url(r'^postTest2$',views.postTest2),
url(r'cookies$',views.cookie),
url(r'^red1$',views.red1),
url(r'^red2$',views.red2),
url(r'^session1$',views.session1),
url(r'^session2$',views.session2),
url(r'^session3$',views.session3),
url(r'^session_handler$',views.session_handler),
]
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,527 | nanhua97/python_code | refs/heads/master | /tornado/exercise/application.py | #coding:utf8
import tornado.web as t_web
import tornado.ioloop as t_io
import tornado.options as t_opt
import tornado.httpserver as t_http
from tornado.web import RequestHandler,url
from tornado.options import options,define
import json
define("port",default=8000,type=int,help="this is port")
class BaseHandler(RequestHandler):
def prepare(self):
if self.request.headers.get("Content-Type").startswith("application/json"):
self.dict_json = json.loads(self.request.body)
else:
self.dict_json = None
def post(self):
if self.dict_json:
for k,v in self.dict_json.items():
self.write("<h3>%s</h3><p>%s</p>" % (k,v))
def get(self):
err_code = self.get_argument("code",None)
err_title = self.get_argument("title","")
err_content = self.get_argument("content","")
if err_code:
self.write_error(int(err_code),title=err_title,content=err_content)
else:
self.write("ABC")
def write_error(self,status_code,**kwargs):
self.write(kwargs["title"])
self.write(kwargs["content"])
if __name__ == "__main__":
t_opt.parse_command_line()
app = t_web.Application([
(r"/",BaseHandler),
],debug=True)
httpServer = t_http.HTTPServer(app)
httpServer.listen(options.port)
t_io.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,528 | nanhua97/python_code | refs/heads/master | /tornado/安全应用/XSRF1.py | #coding:utf8
import tornado.web
import tornado.httpserver
import tornado.options
import tornado.ioloop
import os
from tornado.web import RequestHandler,url,StaticFileHandler
class IndexHandler(RequestHandler):
def get(self):
self.render("index2.html")
def post(self):
print(self.request.headers["Cookie"])
self.write("hello world")
if __name__ == "__main__":
current_path = os.path.dirname(__file__)
app = tornado.web.Application([
(r"/",IndexHandler),
],
debug=True,
static_path = os.path.join(current_path,"statics"),
template_path = os.path.join(current_path,"templates"),
)
httpServer = tornado.httpserver.HTTPServer(app)
httpServer.listen(8000)
tornado.ioloop.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,529 | nanhua97/python_code | refs/heads/master | /django/image/paint.py | from PIL import Image,ImageDraw,ImageFont,ImageFilter
import random
def rndChar():
return chr(random.randint(65,90))
def rndColor():
return (random.randint(64,255),random.randint(64,255),random.randint(64,255))
def rndColor2():
return (random.randint(37,127),random.randint(37,127),random.randint(37,127))
width = 60*4
height = 60
image = Image.new('RGB',(width,height),(255,255,255))
font = ImageFont.truetype('FreeMono.ttf',36)
draw = ImageDraw.Draw(image)
for x in range(width):
for y in range(height):
draw.point((x,y),fill=rndColor())
for t in range(4):
draw.text((60*t+10,10),rndChar(),font=font,fill=rndColor2())
image = image.filter(ImageFilter.BLUR)
image.save('code.jpg','jpeg')
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,530 | nanhua97/python_code | refs/heads/master | /tiantian/df_user/views.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from models import *
from django.http import JsonResponse,HttpResponse,HttpResponseRedirect
from django.shortcuts import render,redirect
from hashlib import sha1
# Create your views here.
def register(req):
return render(req,'df_user/register.html',{})
def register_handle(req):
post = req.POST
uname = post.get('user_name')
upwd = post.get('pwd')
upwd2 = post.get('cpwd')
uemail = post.get('email')
if upwd != upwd2:
return redirect('df_user/register.html')
s1 = sha1()
s1.update(upwd)
upwd3 = s1.hexdigest()
user = UserInfo()
user.uname = uname
user.upwd = upwd3
user.uemail = uemail
user.save()
return redirect('df_user/login.html')
def register_exist(req):
uname = req.GET.get('uname')
count = UserInfo.objects.filter(uname=uname).count()
return JsonResponse({'count':count})
def login(req):
uname = req.COOKIES.get('uname','')
context = {'title':'用户登录','error_name':0,'error_pwd':0,'uname':uname}
return render(req,'df_user/login.html',context)
def login2(req):
uname = req.COOKIES.get('uname','')
context = {'title':'用户登录','error_name':0,'error_pwd':0,'uname':uname}
return render(req,'df_user/login2.html',context)
def login2_handle(req):
post = req.POST
uname = post['uname']
upwd = post['upwd']
print(uname,upwd)
users = UserInfo.objects.filter(uname=uname)
if len(users)==1:
s1 = sha1()
s1.update(upwd)
if s1.hexdigest() == users[0].upwd:
return HttpResponseRedirect('/user/info/')
else:
return JsonResponse({'error_pwd':1})
else:
return JsonResponse({'error_name':1})
def login2_exist(req):
post = req.POST
print(post[uname],post[upwd]);
def login_handle(req):
post = req.POST
uname = post.get('username')
upwd = post.get('pwd')
memory = post.get('memory')
users = UserInfo.objects.filter(uname=uname)
print(uname)
if len(users)==1:
s1 = sha1()
s1.update(upwd)
if s1.hexdigest() == users[0].upwd:
red = HttpResponseRedirect('/user/info/')
if memory != 0 :
red.set_cookie('uname',uname)
else:
red.set_cookie('uname','',max_age=-1)
req.session['user_id']=users[0].id
req.session['user_name']=uname
return red
else:
context = {'title':'用户登录','error_name':0,'error_pwd':1,'uname':uname,'upwd':upwd}
return render(req,'df_user/login.html',context)
else:
context = {'title':'用户登录','error_name':1,'error_pwd':0,'uname':uname,'upwd':upwd}
return render(req,'df_user/login.html',context)
def info(req):
'''
user_eamil = UserInfo.objects.get(id=req.session['user_id']).uemail
context={'title':'用户中心',
'user_email':user_email,
'user_name':req.session(['user_name'])
}
'''
return render(req,'df_user/user_center_info.html',{'title':'用户中心'})
def order(req):
return render(req,'df_user/user_center_order.html',{'title':'用户中心'})
def site(req):
return render(req,'df_user/user_center_site.html',{'title':'用户中心'})
def login(req):
return render(req,'df_user/login.html',{'title':'天天生鲜-登陆'})
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,531 | nanhua97/python_code | refs/heads/master | /blog/app/urls.py | from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
# url(r'admin/',admin.site.urls),
url(r'base/',views.base),
url(r'toRegister/',views.toRegister),
url(r'toLogin/',views.toLogin),
url(r'login/',views.login),
]
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,532 | nanhua97/python_code | refs/heads/master | /learPy/zou/qd.py | #!/usr/bin/env/python3
# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
headers={
'UserAgent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
}
total=[]
for i in range(1,11):
url='https://www.qidian.com/rank/yuepiao?chn=0&page={}'+str(i)
res=requests.get(url,headers=headers)
soup=BeautifulSoup(res.text,'html.parser')
书名s=soup.select('#rank-view-list > div > ul > li > div.book-mid-info > h4 > a')
作者s=soup.select('#rank-view-list > div > ul > li > div.book-mid-info > p.author > a.name')
类型s=soup.select('#rank-view-list > div > ul > li > div.book-mid-info > p.author > a:nth-of-type(2)')
简介s=soup.select('#rank-view-list > div > ul > li > div.book-mid-info > p.intro')
最新章节s=soup.select('#rank-view-list > div > ul > li > div.book-mid-info > p.update > a')
链接s=soup.select('#rank-view-list > div > ul > li > div.book-mid-info > h4 > a')
for 书名,作者,类型,简介,最新章节,链接 in zip(书名s,作者s,类型s,简介s,最新章节s,链接s):
data={'书名':书名.get_text().strip(),\
'作者':作者.get_text().strip(),\
'类型':类型.get_text().strip(),\
'简介':简介.get_text().strip(),\
'最新章节':最新章节.get_text().strip(),\
'链接':链接['href'].strip()}
total.append(data)
print(total)
import pandas
deal1=pandas.DataFrame(total)
#print(deal1)
deal1.to_excel('qidian.xls')
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,533 | nanhua97/python_code | refs/heads/master | /django/test4/booktest/models.py | from django.db import models
class BookInfo(models.Model):
btitle = models.CharField(max_length = 20)
bpub_date = models.DateTimeField(db_column = 'pub_date')
bread = models.IntegerField(default=0)
bcommet = models.IntegerField(default=0)
isDelete = models.BooleanField(default = False)
class Meta:
db_table = 'bookinfo'
class HeroInfo(models.Model):
hname = models.CharField(max_length=10)
hgender = models.BooleanField(default = False)
hcontent = models.CharField(max_length=1000)
isDelete = models.BooleanField(default = False)
book = models.ForeignKey(BookInfo)
def showname(self):
return self.hname
# Create your models here.
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,534 | nanhua97/python_code | refs/heads/master | /django/test5/booktest/urls.py | from django.conf.urls import url
from . import views
urlpatterns=[
url(r'^$',views.index),
url(r'^myexp$',views.MyExp),
url(r'^uploadpic$',views.uploadPic),
url(r'^uploadHandle$',views.uploadHandle),
url(r'^herolist(\d*)/$',views.herolist),
url(r'^area$',views.getArea),
url(r'^area1/$',views.getArea1),
url(r'^(\d+)/$',views.getArea2),
url(r'^html/$',views.html),
url(r'^htmlHandler/$',views.htmlHandler,name='htmlHandler'),
url(r'^html2/$',views.html2),
url(r'^cache1/$',views.cache1),
url(r'^mysearch/$',views.mysearch),
url(r'^last/$',views.last),
]
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,535 | nanhua97/python_code | refs/heads/master | /tornado/tornado基础/argument.py | #coding:utf8
import tornado.web as t_web
import tornado.ioloop as t_io
import tornado.httpserver as t_http
import tornado.options as t_opt
from tornado.web import RequestHandler
from tornado.options import options,define
define("port",default=8000,type=int,help="this is the port")
class IndexHandler(RequestHandler):
    '''
    def post(self):
        query_arg = self.get_query_argument("a")
        query_args = self.get_query_arguments("a")
        body_arg = self.get_body_argument("a")
        body_args = self.get_body_arguments("a",strip=False)
        arg = self.get_arguments("a")
        args = self.get_arguments("a")
        default_arg = self.get_argument("b","Ricka")
        default_args = self.get_arguments("b")
        try:
            missing_arg = self.get_argument("c")
        except MissingArgumentError as e:
            missing_arg = "cached the MissingArguentError!"
            print(e)
        missing_args = self.get_arguments("c")
        rep = "query_arg:%s<br/>" % query_arg
        rep += "query_args:%s<br/>" % query_args
        rep += "body_arg:%s<br/>" % body_arg
        rep += "body_args:%s<br/>" % body_args
        rep += "arg:%s<br/>" % arg
        rep += "args:%s<br/>" % args
        rep += "default_arg:%s<br/>" % default_arg
        rep += "default_args:%s<br/>" % default_args
        rep += "missing_arg:%s<br/>" % missing_arg
        rep += "missing_args:%s<br/>" % missing_args
        self.write(rep)
    '''
    # Demonstrates Tornado's argument accessors; the docstring above keeps an
    # earlier, more exhaustive variant for reference.
    def post(self):
        # Arguments from the URL query string only:
        #query_arg = self.get_query_argument('r')
        #query_args = self.get_query_arguments('r')
        # Arguments from the request body only:
        #body_arg = self.get_body_arguments('r')
        #body_args = self.get_body_arguments('r')
        # Arguments from both query string and body:
        #arg = self.get_argument('r')
        args = self.get_arguments('r')
        #self.write(str(query_args))
        #self.write(str(body_args))
        self.write(str(args))
if __name__ == "__main__":
    # Parse --port (and other defined options) from the command line,
    # then serve the app on that port.
    t_opt.parse_command_line()
    app = t_web.Application([
        (r"/",IndexHandler),
    ],debug=True)
    httpServer = t_http.HTTPServer(app)
    httpServer.listen(options.port)
    t_io.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,536 | nanhua97/python_code | refs/heads/master | /tornado/tornado模板/static2.py | #coding:utf8
import json
import tornado.web as t_web
import tornado.ioloop as t_io
import tornado.options as t_opt
import tornado.httpserver as t_http
from tornado.web import RequestHandler,url,StaticFileHandler
from tornado.options import options,define
import os
# Default must be an int to agree with type=int (the sibling scripts use
# default=8000); a string default would bypass the declared option type.
define("port", default=8000, type=int, help="this is port")
'''
class IndexHandler(RequestHandler):
def initialize(self,path,default_filename):
self.path=path
self.filename=filename
print(self.path)
print(self.filename)
def get(self):
current_file = self.path+"/"+self.filename
with open(current_file,'r') as f:
self.write(f.read())
'''
if __name__ == "__main__":
    # Pure static-file server: '/' serves statics/html/index.html and
    # '/view/<file>' serves files from statics/html.
    # NOTE(review): options.port is read below without parse_command_line(),
    # so the option default is always used -- confirm this is intended.
    current_path = os.path.dirname(__file__)
    app = t_web.Application(
        [
            #(r'/',IndexHandler,{"path":os.path.join(current_path,"static/html"),"default_filename":"index.html"}),
            (r"^/()$",StaticFileHandler,{"path":os.path.join(current_path,"statics/html"),"default_filename":"index.html"}),
            (r"^/view/(.*)$",StaticFileHandler,{"path":os.path.join(current_path,"statics/html")}),
        ],
        debug=True,
        static_path = os.path.join(current_path,"statics"),
        template_path=os.path.join(current_path, "templates"),
    )
    httpServer = t_http.HTTPServer(app)
    httpServer.listen(options.port)
    t_io.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,537 | nanhua97/python_code | refs/heads/master | /tornado/tornado基础/prepare.py | #coding:utf8
import tornado.web as t_web
import tornado.ioloop as t_io
import tornado.httpserver as t_http
import tornado.options as t_opt
from tornado.web import RequestHandler,url
from tornado.options import options,define
import json
define("port",default=8000,type=int,help="this is the port")
class IndexHandler(RequestHandler):
    """Echo a JSON request body back as simple HTML key/value pairs."""

    def prepare(self):
        # Runs before every HTTP method.  Parse the body as JSON only when the
        # client declares a JSON content type.
        # Fix: headers.get("Content-Type") returns None when the header is
        # absent, which would crash .startswith() -- default to "".
        if self.request.headers.get("Content-Type", "").startswith("application/json"):
            self.json_dict = json.loads(self.request.body)
        else:
            self.json_dict = None

    def post(self):
        # Render each top-level key/value as a heading + paragraph.
        if self.json_dict:
            for key,value in self.json_dict.items():
                self.write("<h3>%s</h3><p>%s</p>" % (key,value))

    def put(self):
        # PUT behaves identically to POST.
        if self.json_dict:
            for key,value in self.json_dict.items():
                self.write("<h3>%s</h3><p>%s</p>" % (key,value))
if __name__ == "__main__":
    # Parse command-line options (e.g. --port) and start the server.
    t_opt.parse_command_line()
    app = t_web.Application([
        (r"/",IndexHandler),
    ],debug=True)
    httpServer = t_http.HTTPServer(app)
    httpServer.listen(options.port)
    t_io.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,538 | nanhua97/python_code | refs/heads/master | /tornado/tornado异步/asyc.py | #coding:utf8
import tornado
import tornado.web
import tornado.ioloop as t_io
import tornado.options as t_opt
import tornado.httpserver as t_http
from tornado.web import RequestHandler,url
from tornado.options import options,define
import json
define("port",default=8000,type=int,help="this is port")
class IndexHandler(tornado.web.RequestHandler):
    """Asynchronously look up geo info for a fixed IP via Sina's service."""
    @tornado.gen.coroutine
    def get(self):
        # NOTE(review): tornado.httpclient is never imported explicitly here;
        # it appears reachable only as a side effect of the other tornado
        # imports -- verify on the tornado version in use.
        http = tornado.httpclient.AsyncHTTPClient()
        # yield suspends the coroutine until the fetch completes.
        response = yield http.fetch("http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json&ip=14.130.112.24")
        if response.error:
            self.send_error(500)
        else:
            data = json.loads(response.body)
            # ret == 1 signals a successful lookup in this API's payload.
            if 1 == data["ret"]:
                self.write(u"国家:%s 省份: %s 城市: %s" % (data["country"], data["province"], data["city"]))
            else:
                self.write("查询IP信息错误")
"""
class IndexHandler(RequestHandler):
@tornado.web.asynchronous
def get(self):
http = tornado.httpclient.AsyncHTTPClient()
#httpClient = tornado.httpclient.AsyncHTTPClient()
http.fetch("http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json&ip=14.130.112.24",callback=self.on_response)
def on_response(self,response):
if response.error:
self.send_error(500)
else:
data = json.load(response.body)
if 1 == data["ret"]:
self.write(u"国家:%s 省份: %s 城市: %s" % (data["country"], data["province"], data["city"]))
else:
self.write("查询IP信息错误")
self.finish()
"""
if __name__ == "__main__":
    # Parse command-line options (e.g. --port) and start the async demo server.
    t_opt.parse_command_line()
    app = tornado.web.Application([
        (r"/",IndexHandler),
    ],debug=True)
    httpServer = t_http.HTTPServer(app)
    httpServer.listen(options.port)
    t_io.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,539 | nanhua97/python_code | refs/heads/master | /tornado/安全应用/current_user.py | #coding:utf8
import tornado.web
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.web import RequestHandler,StaticFileHandler,url
from tornado.options import options,define
import os
# Default must be an int to agree with type=int (sibling scripts use
# default=8000); a string default would bypass the declared option type.
define("port", default=8000, type=int, help="this is the port")
class ProfileHandler(RequestHandler):
    def get_current_user(self):
        # Identify the "user" purely from the ?name= query parameter; returning
        # None makes @tornado.web.authenticated redirect to login_url.
        user_name = self.get_argument("name",None)
        return user_name
    @tornado.web.authenticated
    def get(self):
        self.write("这是我的个人主页")
class LoginHandler(RequestHandler):
    def get(self):
        self.write("登陆页面")
        # 'next' holds the page the user came from before being redirected to
        # /login; bounce back with a dummy "logged in" marker in the query.
        next = self.get_argument("next","/")
        self.redirect(next+"?name=logined")
if __name__ == "__main__":
    current_path = os.path.dirname(__file__)
    app = tornado.web.Application([
        (r"/",ProfileHandler),
        (r"/login",LoginHandler),
    ],
        debug=True,
        login_url="/login",  # @authenticated redirects unauthenticated users here
        static_path = os.path.join(current_path,"statics"),
        template_path = os.path.join(current_path,"templates")
    )
    httpServer = tornado.httpserver.HTTPServer(app)
    # NOTE(review): the 'port' option defined above is never used -- the port
    # is hard-coded to 8000 here; confirm whether options.port was intended.
    httpServer.listen(8000)
    tornado.ioloop.IOLoop.current().start()
| {"/django/test5/booktest/search_indexes.py": ["/django/test5/booktest/models.py"], "/django/test4/booktest/views.py": ["/django/test4/booktest/models.py"], "/django/test5/booktest/views.py": ["/django/test5/booktest/models.py"]} |
49,548 | mericadil/TextureGeneration | refs/heads/master | /dataset/deprecated/chictopia_plus.py | import os
import cv2
import numpy as np
from torch.utils.data import Dataset
from dataset.data_utils import ToTensor, Resize
class ChictopiaPlusDataset(Dataset):
def bbox(self, mask):
rows = np.any(mask, axis=0)
cols = np.any(mask, axis=1)
cmin, cmax = np.where(rows)[0][[0, -1]]
rmin, rmax = np.where(cols)[0][[0, -1]]
h = rmax - rmin
w = int(h / 2)
r_center = float(rmax + rmin) / 2
c_center = float(cmax + cmin) / 2
rmin = int(r_center - h / 2)
rmax = int(r_center + h / 2)
cmin = int(c_center - w / 2)
cmax = int(c_center + w / 2)
return (cmin, rmin), (cmax, rmax)
    def __getitem__(self, index):
        """Return (image_tensor, mask_tensor), cropped to the person bbox."""
        img_path = self.data[index]
        img = cv2.imread(img_path)
        # The body-segment label map sits next to the RGB image with a
        # different filename component.
        segment_img_path = img_path.replace('image:png', 'bodysegments')
        segment_img = cv2.imread(segment_img_path)
        # Any non-zero segment label counts as foreground.
        mask = (segment_img >= 1).astype(np.float)
        tl, br = self.bbox(mask)
        img = img[tl[1]: br[1], tl[0]:br[0], :]
        mask = mask[tl[1]: br[1], tl[0]: br[0], :]
        # Degenerate crop (or unreadable image): retry with a random sample.
        if img is None or img.shape[0] <= 0 or img.shape[1] <= 0:
            return self.__getitem__(np.random.randint(0, self.__len__()))
        img = self.resize(img)
        mask = self.resize(mask)
        img = self.to_tensor(img)
        mask = self.mask_to_tensor(mask)
        return img, mask
    def __len__(self):
        # Number of indexed texture images.
        return len(self.data)
    def __init__(self, data_path, img_size=(128, 64), normalize=True):
        """Index all ChictopiaPlus images under data_path.

        img_size is (H, W); normalize maps images into the [-1, 1] range.
        """
        self.data_path = data_path
        self.img_size = img_size
        self.normalize = normalize
        self.resize = Resize(self.img_size)
        self.to_tensor = ToTensor(normalize=self.normalize)
        # Masks keep their raw [0, 1] values, so no normalization here.
        self.mask_to_tensor = ToTensor(normalize=False)
        self.data = []
        self.generate_index()
def generate_index(self):
print('generating ChictopiaPlus index')
for root, dirs, files in os.walk(self.data_path):
for name in files:
if name.endswith('.png') and 'image' in name:
self.data.append(os.path.join(root, name))
print('finish generating index, found texture image: {}'.format(len(self.data)))
if __name__ == '__main__':
    # Smoke test: display one cropped image and its mask.
    dataset = ChictopiaPlusDataset('/unsullied/sharefs/wangjian02/isilon-home/datasets/ChictopiaPlus/train')
    img, mask = dataset.__getitem__(1)
    img = img.permute(1, 2, 0).numpy()
    mask = mask.permute(1, 2, 0).numpy()
    # Undo the [-1, 1] normalization for display.
    img = img / 2.0 + 0.5
    cv2.imshow('img', img)
    cv2.waitKey()
    cv2.imshow('mask', mask)
    cv2.waitKey()
49,549 | mericadil/TextureGeneration | refs/heads/master | /deprecated/texture_reid.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dataset.depreciated.background import BackgroundDataset
from dataset.real_texture import RealTextureDataset
from torch.optim import Adam
from config import get_config
from tensorboard_logger import configure, log_value
import datetime
import os
from utils.body_part_mask import TextureMask
from smpl.diff_renderer import TextureToImage
import numpy as np
from torch.utils.data import ConcatDataset
from network_models.unet import UNet
from utils.samplers import RandomIdentitySampler
from utils.data_loader import ImageData
import torch.nn.functional as F
from dataset.market1501_pose_split_train import Market1501Dataset
# 主要脚本
# 贴了背景图,有face、手 loss,
class TextureReID:
    def __init__(self, config):
        """Build dataloaders, the UNet generator, the selected ReID loss and
        the differentiable renderer from the run configuration."""
        print('Batch size: ',config.batch_size)
        print('read background_dataset!'+'\n')
        # Real background images the rendered person is composited onto.
        background_dataset = BackgroundDataset([config.PRW_img_path,
            config.CUHK_SYSU_path])
        self.background_dataloader = DataLoader(dataset=background_dataset, batch_size=config.batch_size,
            shuffle=True, num_workers=config.worker_num, drop_last=True)
        print('read surreal_dataset dataset!'+'\n')
        # Real UV maps; these supervise the face/hand regions.
        surreal_dataset = RealTextureDataset(pkl_path = config.texture_pkl_path)
        self.surreal_dataloader = DataLoader(dataset=surreal_dataset, batch_size=config.batch_size,
            shuffle=True, num_workers=config.worker_num, drop_last=True)
        print('read reid_dataset dataset!'+'\n')
        print('read market_dataset dataset!'+'\n')
        dataset = Market1501Dataset()
        if config.triplet:
            # Identity-balanced sampling: num_instance images per identity.
            print('4*4!')
            trainloader = DataLoader(
                ImageData(dataset.train),
                sampler=RandomIdentitySampler(dataset.train, config.num_instance),
                batch_size=config.batch_size, num_workers=config.worker_num, drop_last=True
            )
            queryloader = DataLoader(
                ImageData(dataset.query),
                sampler=RandomIdentitySampler(dataset.query, config.num_instance),
                batch_size=config.batch_size, num_workers=config.worker_num, drop_last=True
            )
            galleryloader = DataLoader(
                ImageData(dataset.gallery),
                sampler=RandomIdentitySampler(dataset.gallery, config.num_instance),
                batch_size=config.batch_size, num_workers=config.worker_num, drop_last=True
            )
            self.reid_dataloader = [trainloader,queryloader,galleryloader]
            '''
            prw_dataset = PRWDataset(pkl_path = config.frames_mat_pkl_path,num_instance=4)
            market_dataset = Market1501Dataset(pkl_path = config.Market_all_pkl,num_instance=4)
            reid_dataset = ConcatDataset([market_dataset, prw_dataset])
            #market_dataset = Market1501Dataset(pkl_path = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market_1501_train.pkl',num_instance=4)
            market_dataset = Market1501Dataset(pkl_path = config.Market_all_pkl,num_instance=4)
            reid_dataset = market_dataset
            self.reid_dataloader = DataLoader(dataset=reid_dataset, batch_size=int(config.batch_size/config.num_instance),
                shuffle=True, num_workers=config.worker_num, drop_last=True)
            '''
        else:
            print('16*1!')
            # NOTE(review): PRWDataset is not imported in this module -- this
            # branch would raise NameError if config.triplet is False; verify.
            prw_dataset = PRWDataset(pkl_path = config.frames_mat_pkl_path,num_instance=1)
            market_dataset = Market1501Dataset(pkl_path = config.Market_all_pkl,num_instance=1)
            reid_dataset = ConcatDataset([market_dataset, prw_dataset])
            self.reid_dataloader = DataLoader(dataset=reid_dataset, batch_size=config.batch_size,
                shuffle=True, num_workers=config.worker_num, drop_last=True)
        # Read the face and hand masks in UV space (64x64, matching the UV map).
        texture_mask = TextureMask(size=64)
        self.face_mask = texture_mask.get_mask('face')
        self.hand_mask = texture_mask.get_mask('hand')
        self.mask = self.face_mask + self.hand_mask
        self.gpu_available = torch.cuda.is_available()
        if self.gpu_available:
            print('Use GPU! GPU num: ',config.gpu_nums)
            gpu_ids = [i for i in range(config.gpu_nums)]
        # NOTE(review): gpu_ids is only bound on the GPU path but is used
        # unconditionally below -- CPU-only runs would raise NameError; verify.
        # Load a pretrained generator when resuming, otherwise start fresh.
        if config.pretrained_model_path is None:
            print('No resume train model!')
            self.generator = UNet(input_channels=3, output_channels=3, gpu_ids=gpu_ids)
        else:
            print('resume train model!')
            print(config.epoch_now)
            self.generator = torch.load(config.pretrained_model_path)
        # Select the ReID-based loss module by configured name.
        if config.reid_model == 'reid_loss_market1501':
            print('origin model!')
            from loss.reid_loss_market1501 import ReIDLoss
            config.num_classes = 1501
            self.reid_loss = ReIDLoss(model_path=config.reid_weight_path, num_classes=config.num_classes, gpu_ids=gpu_ids,
                margin=config.margin)
        elif config.reid_model == 'PCB_intern_loss':
            print('PCB_intern_loss!')
            from loss.PCB_intern_loss import ReIDLoss
            self.reid_loss = ReIDLoss(model_path=config.reid_weight_path, num_classes=config.num_classes, gpu_ids=gpu_ids,
                margin=config.margin)
        elif config.reid_model == 'ImageNet_Resnet':
            print('ImageNet_Resnet!')
            print('layer: ',config.layer)
            from loss.ImageNet_Resnet import ReIDLoss
            self.reid_loss = ReIDLoss(gpu_ids=gpu_ids)
        elif config.reid_model == 'PCB_MiddleFeature':
            print('PCB_MiddleFeature!')
            print('layer: ',config.layer)
            from loss.PCB_MiddleFeature import ReIDLoss
            self.reid_loss = ReIDLoss(model_path=config.reid_weight_path, num_classes=config.num_classes, gpu_ids=gpu_ids,margin=config.margin, layer = config.layer)
        elif config.reid_model == 'NoPCB_Resnet':
            print('NoPCB_Resnet!')
            print('layer: ',config.layer)
            from loss.NoPCB_Resnet import ReIDLoss
            self.reid_loss = ReIDLoss(gpu_ids=gpu_ids)
        elif config.reid_model == 'NoPCB_Resnet_deepfashion':
            print('NoPCB_Resnet_deepfashion!')
            print('layer: ',config.layer)
            from loss.NoPCB_Resnet_deepfashion import ReIDLoss
            self.reid_loss = ReIDLoss(gpu_ids=gpu_ids)
        elif config.reid_model == 'PCB_softmax':
            print('PCB_softmax!')
            from loss.PCB_softmax_loss import ReIDLoss
            config.num_classes = 1501
            self.reid_loss = ReIDLoss(model_path=config.reid_weight_path, num_classes=config.num_classes, gpu_ids=gpu_ids,
                margin=config.margin)
        elif config.reid_model == 'PCB_PerLoss':
            print('PCB_PerLoss!')
            from loss.PCB_PerLoss import ReIDLoss
            self.reid_loss = ReIDLoss(model_path=config.reid_weight_path, num_classes=config.num_classes, gpu_ids=gpu_ids)
        elif config.reid_model == 'PCB_AllCat':
            print('PCB_AllCat!')
            from loss.PCB_AllCat import ReIDLoss
            self.reid_loss = ReIDLoss(model_path=config.reid_weight_path, num_classes=config.num_classes, gpu_ids=gpu_ids,margin=config.margin)
        else:
            raise KeyError('{} not in keys!'.format(config.reid_model))
        if self.gpu_available:
            self.generator=nn.DataParallel(self.generator) # multi-GPU
            self.generator = self.generator.cuda()
            self.reid_loss = self.reid_loss.cuda()
            self.mask = self.mask.cuda()
        # Differentiable renderer: UV texture -> posed person image.
        self.texture2img = TextureToImage(action_npz=config.action_npz, batch_size=config.batch_size,
            use_gpu=self.gpu_available)
        # Joint face-and-hand reconstruction loss (mean squared error).
        self.face_loss = nn.MSELoss()
        # Unet optimizer
        self.generator_optimizer = Adam(params=self.generator.parameters(), lr=config.learning_rate,
            weight_decay=config.weight_decay)
        # Tensorboard run directory and model checkpoint directory, both
        # timestamped to keep runs separate.
        configure(os.path.join(config.runs_log_path,
            config.log_name + str(datetime.datetime.now()).replace(' ', '_')))
        self.model_save_dir = os.path.join(config.model_log_path,
            config.log_name + str(datetime.datetime.now()).replace(' ', '_'))
        if not os.path.exists(self.model_save_dir):
            os.mkdir(self.model_save_dir)
def train(self):
print('Start train!')
count = 0
# backgroud shuffle后是随机的
background_image_data = iter(self.background_dataloader)
# real texture 是不是和训练图一一对应的?
real_texture_data = iter(self.surreal_dataloader)
for epoch in range(config.epoch_now,config.epoch):
# 表明是训练阶段
self.generator.train()
running_face_loss = 0.0
running_triL1_loss = 0.0
running_softmax_loss = 0.0
running_tri_hard_loss = 0.0
running_tri_loss = 0.0
running_perLoss_loss = 0.0
running_uvmap_l2_loss = 0.0
running_generator_total_loss = 0.0
for dataloader in self.reid_dataloader:
for i, data in enumerate(dataloader):
real_image_batch, pose_paths, targets, _, img_paths = data
# load real texture batch,随机找出一个真实uvmap,为了减缓手脸不相似的问题
try:
real_texture_batch = real_texture_data.next()
except StopIteration:
real_texture_data = iter(self.surreal_dataloader)
real_texture_batch = real_texture_data.next()
# load background image batch,随机找出一个真实的背景,为了把生成的人物贴上去
try:
background_image_batch = background_image_data.next()
except StopIteration:
background_image_data = iter(self.background_dataloader)
background_image_batch = background_image_data.next()
# 放置GPU
if self.gpu_available:
real_image_batch = real_image_batch.cuda()
real_texture_batch = real_texture_batch.cuda()
background_image_batch = background_image_batch.cuda()
label_image_batch = real_image_batch
# train generator
self.generator_optimizer.zero_grad()
# generator is Unet, generated_texture_batch is outpurt
generated_texture_batch = self.generator(real_image_batch)
# bilinear 双线性插值插出来
generated_texture_batch = F.interpolate(generated_texture_batch, size=(64, 64), mode='bilinear')
# 生成的uvmap的face and hand
generated_face_hand_batch = generated_texture_batch * self.mask
# 真实的uvmap的face and hand
real_face_hand_batch = real_texture_batch * self.mask
# face and hand的loss
face_loss = self.face_loss(generated_face_hand_batch, real_face_hand_batch.detach())
# 累计face and hand 的共同loss
running_face_loss += face_loss.item()
# 贴图
img_batch, mask_batch, bbox = self.texture2img(generated_texture_batch)
tl, br = bbox
if config.use_real_background:
generated_img_batch = img_batch * mask_batch + background_image_batch * (1 - mask_batch)
else:
generated_img_batch = img_batch * mask_batch
generated_img_batch = generated_img_batch[:, :, tl[1]:br[1], tl[0]:br[0]]
# train generator
loses = self.reid_loss(generated_img_batch, label_image_batch,targets)
triple_feature_loss = loses[0]
softmax_feature_loss = loses[1]
triple_hard_loss = loses[2]
triple_loss = loses[3]
perceptual_loss = loses[4]
uvmap_l2_loss = loses[5]
running_triL1_loss += triple_feature_loss.item()
running_softmax_loss += softmax_feature_loss.item()
running_tri_hard_loss += triple_hard_loss.item()
running_tri_loss += triple_loss.item()
running_perLoss_loss += perceptual_loss.item()
running_uvmap_l2_loss += uvmap_l2_loss.item()
generator_total_loss = config.reid_triplet_loss_weight * triple_feature_loss + \
config.reid_softmax_loss_weight * softmax_feature_loss + \
config.face_loss_weight * face_loss + \
config.reid_triplet_hard_loss_weight * triple_hard_loss + \
config.reid_triplet_loss_not_feature_weight * triple_loss + \
config.uvmap_intern_loss_weight * uvmap_l2_loss + \
config.perceptual_loss_weight * perceptual_loss
running_generator_total_loss += generator_total_loss.item()
generator_total_loss.backward()
self.generator_optimizer.step()
# logs
count += 1
if count % config.log_step == 0:
torch.save(self.generator,
os.path.join(self.model_save_dir, str(datetime.datetime.now()).replace(' ', '_')))
if count % config.eval_step == 0:
eval_loss = self.eval()
log_value('eval_loss', eval_loss, step=count)
if count % config.runs_log_step == 0:
if running_softmax_loss == 0 and running_triL1_loss == 0 and running_face_loss == 0 and running_tri_hard_loss == 0 and running_tri_loss == 0 and running_uvmap_l2_loss == 0:
continue
log_value('face loss', config.face_loss_weight * running_face_loss, step=count)
log_value('triplet feature loss', config.reid_triplet_loss_weight * running_triL1_loss, step=count)
log_value('softmax feature loss', config.reid_softmax_loss_weight * running_softmax_loss, step=count)
log_value('triplet hard loss', config.reid_triplet_hard_loss_weight * running_tri_hard_loss, step=count)
log_value('triplet loss loss', config.reid_triplet_loss_not_feature_weight * running_tri_loss, step=count)
log_value('perceptual loss', config.perceptual_loss_weight * running_perLoss_loss, step=count)
log_value('uvmap l2 loss', config.uvmap_intern_loss_weight * uvmap_l2_loss, step=count)
log_value('generator total loss', running_generator_total_loss, step=count)
running_face_loss = 0.0
running_triL1_loss = 0.0
running_softmax_loss = 0.0
running_tri_hard_loss = 0.0
running_tri_loss = 0.0
running_perLoss_loss = 0.0
running_uvmap_l2_loss = 0.0
running_generator_total_loss = 0.0
print('Epoch {}, iter {}, face loss: {}, triplet feature loss: {}, softmax loss: {}, triplet hard loss {}, triplet loss {}, perceptual loss {}, uvmap l2 loss {}'.format(
str(epoch),
str(i),
config.face_loss_weight * face_loss.item(),
config.reid_triplet_loss_weight * triple_feature_loss.item(),
config.reid_softmax_loss_weight * softmax_feature_loss.item(),
config.reid_triplet_hard_loss_weight * triple_hard_loss.item(),
config.reid_triplet_loss_not_feature_weight * triple_loss.item(),
config.perceptual_loss_weight * perceptual_loss.item(),
config.uvmap_intern_loss_weight * uvmap_l2_loss.item()
))
# one epoch save once!
torch.save(self.generator,
os.path.join(self.model_save_dir, str(datetime.datetime.now()).replace(' ', '_')+'_epoch_'+str(epoch)))
def eval(self):
print('fake eval start')
return 0
if __name__ == '__main__':
    # Fix RNG seeds for reproducibility, then run training.
    torch.manual_seed(0)
    np.random.seed(0)
    config = get_config()
    body = TextureReID(config)
    body.train()
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,550 | mericadil/TextureGeneration | refs/heads/master | /dataset/market1501_pose_split_test.py | from __future__ import print_function, absolute_import
import glob
import re
from os import path as osp
import numpy as np
import pdb
import cv2
from torch.utils.data import Dataset
import pickle
class Market1501Dataset(object):
    """
    Market1501
    Reference:
    Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.
    URL: http://www.liangzheng.org/Project/project_reid.html
    Dataset statistics:
    # identities: 1501 (+1 for background)
    # images: 12936 (train) + 3368 (query) + 15913 (gallery)
    """
    # dataset_dir = '/unsullied/sharefs/wangjian02/isilon-home/datasets/Market1501/data'
    # pose_dataset_dir = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-pose/'
    # Pickle listing the person ids reserved for the test split.
    pkl_path = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/saveForTest.pkl'

    def __init__(self, dataset_dir, render_tensors_dir):
        """Index the train split, keeping only ids listed in pkl_path."""
        self.dataset_dir = dataset_dir
        self.render_tensors_dir = render_tensors_dir
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.pose_train_dir = osp.join(self.render_tensors_dir, 'bounding_box_train')
        self._check_before_run()
        train, num_train_pids, num_train_imgs = self._process_dir(self.train_dir, self.pose_train_dir, relabel=True,
            pkl_path=self.pkl_path)
        print("=> Market1501 loaded")
        print("Dataset statistics:")
        print("  ------------------------------")
        print("  subset   | # ids | # images")
        print("  ------------------------------")
        print("  train    | {:5d} | {:8d}".format(num_train_pids, num_train_imgs))
        print("  ------------------------------")
        print("  total    | {:5d} | {:8d}".format(num_train_pids, num_train_imgs))
        print("  ------------------------------")
        self.train = train
        self.num_train_pids = num_train_pids

    def _check_before_run(self):
        """Check if all files are available before going deeper"""
        if not osp.exists(self.dataset_dir):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if not osp.exists(self.train_dir):
            raise RuntimeError("'{}' is not available".format(self.train_dir))

    def _process_dir(self, dir_path, pose_dir_path, relabel=False, pkl_path=None):
        """Scan dir_path and return (samples, num_pids, num_imgs).

        Each sample is (img_path, pose_path, pid, camid).  When pkl_path is
        given, only pids listed in that pickle are kept.
        """
        if pkl_path is not None:
            with open(pkl_path, 'rb') as f:
                saveForTest = pickle.load(f)
        else:
            saveForTest = []
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        # Market-1501 filenames encode "<pid>_c<camid>...".
        pattern = re.compile(r'([-\d]+)_c(\d)')
        pid_container = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            if pid == -1 or pid not in saveForTest:
                continue  # junk images are just ignored
            pid_container.add(pid)
        # Map raw pids to contiguous 0-based labels.
        pid2label = {pid: label for label, pid in enumerate(pid_container)}
        dataset = []
        for img_path in img_paths:
            # NOTE(review): the fixed 67-char slice assumes a specific absolute
            # dataset root length -- brittle; verify against dataset_dir.
            img_name = img_path[67:]
            img_name = img_name[img_name.find('/') + 1:]
            pose_path = osp.join(pose_dir_path, img_name + '.npy')
            pid, camid = map(int, pattern.search(img_path).groups())
            if pid == -1 or pid not in saveForTest:
                continue  # junk images are just ignored
            assert 0 <= pid <= 1501  # pid == 0 means background
            assert 1 <= camid <= 6
            camid -= 1  # index starts from 0
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, pose_path, pid, camid))
        num_pids = len(pid_container)
        num_imgs = len(dataset)
        return dataset, num_pids, num_imgs
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,551 | mericadil/TextureGeneration | refs/heads/master | /deprecated/create_uvmap_textured.py | import torch
import cv2
import argparse
import numpy as np
import os
import sys
import tqdm
import os
import torch.nn as nn
import time
import random
from torch.autograd import Function
import os
from dataset.market1501_pose_split_test import Market1501Dataset
class DifferentialTextureRenderer(Function):
    """Differentiable sparse render: pixels = render_matrix @ texture_flat.

    Implemented as an autograd Function so gradients flow back into the
    texture while the (constant) render matrix receives none.
    """
    @staticmethod
    def forward(ctx, texture_img_flat, render_sparse_matrix):
        result = torch.mm(render_sparse_matrix, texture_img_flat)
        # Keep the matrix for the backward pass.
        ctx.save_for_backward(render_sparse_matrix)
        return result

    @staticmethod
    def backward(ctx, grad_outputs):
        render_sparse_matrix = ctx.saved_tensors[0]
        # d(loss)/d(texture) = M^T @ grad; None for the matrix argument.
        result = torch.mm(render_sparse_matrix.transpose(0, 1), grad_outputs)
        return result, None
class TextureToImage(nn.Module):
def sparse_mx_to_torch_sparse_tensor(self, sparse_mx):
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)))
indices = indices.long()
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
    def forward(self, x):
        """Render a UV-map batch through a sparse projection matrix.

        Returns (rendered NCHW image batch, person mask batch, bbox corners).
        """
        # the input x is uv map batch of (N, C, H, W)
        # transfer it into (N, H, W, C)
        x = x.permute(0, 2, 3, 1)
        # flat it and transpose it(H * W * C, N)
        x_flat = x.reshape(self.batch_size, -1).transpose(0, 1)
        # Random pose in training mode, the first pose otherwise.
        if self.isRandom:
            action_tensor = random.choice(self.action_sparse_tensor_data)
        else:
            action_tensor = self.action_sparse_tensor_data[0]
        mat = action_tensor['mat']
        mask = action_tensor['mask']
        bbox = action_tensor['bbox']
        # Wrapped as a non-trainable Parameter so it never receives gradients.
        mat = nn.Parameter(mat, requires_grad=False)
        result_flat = DifferentialTextureRenderer.apply(x_flat, mat)
        result_flat = result_flat.transpose(0, 1)
        # get the result of (NHWC)
        result = result_flat.reshape(self.batch_size, self.img_size, self.img_size, -1)
        # to NCHW
        result = result.permute(0, 3, 1, 2)
        return result, mask, bbox
    # During training isRandom is True (random pose per forward pass);
    # at test time it is False (always the first pose).
    def __init__(self, action_npz, batch_size, img_size=224, use_gpu=False, bbox_size=(128, 64),
        center_random_margin=2, isRandom=True):
        """Load the per-pose sparse render matrices and masks from action_npz."""
        super(TextureToImage, self).__init__()
        # print('start init the texture to image module')
        action_npz_data = np.load(action_npz, encoding="latin1")
        self.center_random_margin = center_random_margin
        self.action_sparse_tensor_data = []
        self.batch_size = batch_size
        self.img_size = img_size
        self.bbox_size = bbox_size
        self.isRandom = isRandom
        for data in action_npz_data:
            # Convert each pose's scipy matrix to a torch sparse tensor, derive
            # the crop bbox from the mask, and tile the mask over the batch.
            data['mat'] = self.sparse_mx_to_torch_sparse_tensor(data['mat'])
            data['bbox'] = self.bbox(data['mask'][:, :, 0])
            data['mask'] = torch.from_numpy(data['mask']).float() \
                .unsqueeze(0).permute(0, 3, 1, 2).repeat(self.batch_size, 1, 1, 1)
            if use_gpu:
                data['mat'] = data['mat'].cuda()
                data['mask'] = data['mask'].cuda()
            self.action_sparse_tensor_data.append(data)
        # print('finish init the texture to image module')
    def bbox(self, img):
        """Return a fixed-size crop box (bbox_size) centred on the mask.

        The centre is jittered by up to center_random_margin pixels (up for
        rows, right for columns), so repeated calls may differ slightly.
        Returns ((cmin, rmin), (cmax, rmax)) in (col, row) order.
        """
        h = self.bbox_size[0]
        w = self.bbox_size[1]
        rows = np.any(img, axis=0)  # per-column foreground presence
        cols = np.any(img, axis=1)  # per-row foreground presence
        cmin, cmax = np.where(rows)[0][[0, -1]]
        rmin, rmax = np.where(cols)[0][[0, -1]]
        r_center = float(rmax + rmin) / 2 + random.randint(-self.center_random_margin, 0)
        c_center = float(cmax + cmin) / 2 + random.randint(0, self.center_random_margin)
        rmin = int(r_center - h / 2)
        rmax = int(r_center + h / 2)
        cmin = int(c_center - w / 2)
        cmax = int(c_center + w / 2)
        return (cmin, rmin), (cmax, rmax)
    def test(self):
        """Manual check: render the default texture with a random pose and
        display it (requires a display; not part of the training path)."""
        texture_img = cv2.imread('models/default_texture2.jpg')
        texture_img = torch.from_numpy(texture_img).unsqueeze(0).float()
        texture_img = texture_img.reshape(1, -1).transpose(0, 1)
        start_time = time.time()
        action_tensor = random.choice(self.action_sparse_tensor_data)['mat']
        # Sparse x dense multiply, then densify for display.
        result_flat = torch.smm(action_tensor, texture_img).to_dense()
        result_flat = result_flat.transpose(0, 1)
        result_flat = result_flat.reshape(1, 224, 224, 3)
        stop_time = time.time()
        print('time use: {}'.format(stop_time - start_time))
        result_flat = result_flat.numpy()[0, :]
        cv2.imshow('result', result_flat.astype(np.uint8))
        cv2.waitKey()
class Demo:
    """Thin inference wrapper: loads a trained texture generator and turns a
    single person crop into a 64x64 UV texture map."""

    def __init__(self, model_path, z_size=1024):
        print(model_path)
        # the checkpoint stores the whole module object, not a state_dict
        self.model = torch.load(model_path)
        self.model.eval()
        self.z_size = z_size

    def generate_texture(self, img_path):
        """Generate a UV texture from one image file.

        :param img_path: path to a person crop readable by cv2.
        :return: (texture, 1) on success — texture is a 64x64 uint8 BGR
            array — or (0, 0) if the image cannot be read.
        """
        img = cv2.imread(img_path)
        if img is None or img.shape[0] <= 0 or img.shape[1] <= 0:
            return 0, 0
        img = cv2.resize(img, (64, 128))
        # BUGFIX: normalize by 255 (was 225) so pixels land in [-1, 1],
        # matching the (out / 2.0 + 0.5) * 255 de-normalization below
        img = (img / 255. - 0.5) * 2.0
        img = torch.from_numpy(img).permute(2, 0, 1).float().unsqueeze(0)
        out = self.model(img)
        out = out.cpu().detach().numpy()[0]
        out = out.transpose((1, 2, 0))  # CHW -> HWC for OpenCV
        out = (out / 2.0 + 0.5) * 255.  # [-1, 1] -> [0, 255]
        out = out.astype(np.uint8)
        out = cv2.resize(out, dsize=(64, 64))
        return out, 1
def create_dir(uvmap_dir, textured_dir):
    """Ensure both output directories exist, creating parents as needed.

    os.makedirs(exist_ok=True) replaces the exists()+mkdir pair: it also
    creates missing parent directories and avoids the check-then-create race.
    """
    os.makedirs(uvmap_dir, exist_ok=True)
    os.makedirs(textured_dir, exist_ok=True)
def read_background():
    """Collect the paths of every background .jpg frame found under the
    PRW and CUHK-SYSU dataset roots; returns a flat list of file paths."""
    data_path = '/unsullied/sharefs/wangjian02/isilon-home/datasets/SURREAL/smpl_data/textures'
    PRW_img_path = '/unsullied/sharefs/wangjian02/isilon-home/datasets/PRW/frames'
    CUHK_SYSU_path = '/unsullied/sharefs/wangjian02/isilon-home/datasets/CUHK-SYSU'
    search_roots = [PRW_img_path, CUHK_SYSU_path]
    found = []
    for search_root in search_roots:
        for folder, _subdirs, filenames in os.walk(search_root):
            found.extend(
                os.path.join(folder, fname)
                for fname in filenames
                if fname.endswith('.jpg')
            )
    return found
def create_uvmap(model_path, uvmap_dir):
    """Run the texture generator over every Market-1501 training image and
    write the resulting UV maps into uvmap_dir."""
    demo = Demo(model_path)
    dataset = Market1501Dataset()
    input_imgs = dataset.train
    out_path = uvmap_dir
    print('len of input images', len(input_imgs))
    for full_path in tqdm.tqdm(input_imgs):
        p = full_path[0]  # first field of each entry is the image path
        out, flag = demo.generate_texture(img_path=p)
        if flag == 0:
            continue  # unreadable image — skip
        # strip the dataset prefix: keep everything after the first '/' found
        # past character 68 (assumes the fixed cluster mount path — fragile)
        name = p[p.find('/', 68) + 1:]
        cv2.imwrite(os.path.join(out_path, name), out)
def create_textured(uvmap_dir, textured_dir, backgrounds):
    """Render every UV map in uvmap_dir onto a random background frame and
    save the cropped composite person image into textured_dir."""
    uv_map_path = uvmap_dir
    out_path = textured_dir
    # fixed walking pose set; batch_size=1 and no random pose for reproducibility
    tex_2_img = TextureToImage(
        action_npz='/unsullied/sharefs/wangjian02/isilon-home/datasets/texture/tex_gan/walk_64.npy',
        batch_size=1,
        center_random_margin=2,
        isRandom=False)
    count = 0
    for root, dir, names in os.walk(uv_map_path):
        for name in tqdm.tqdm(names):
            # pick one random background frame and resize to the render size
            background = cv2.imread(backgrounds[np.random.randint(len(backgrounds), size=1)[0]])
            background = cv2.resize(background, (224, 224))
            '''
            background[:,:,0] = 255
            background[:,:,1] = 255
            background[:,:,2] = 255
            '''
            count += 1
            full_path = os.path.join(root, name)
            texture_img = cv2.imread(full_path)
            texture_img = cv2.resize(texture_img, (64, 64))
            texture_img = torch.from_numpy(texture_img).unsqueeze(0).float()
            texture_img = texture_img.permute(0, 3, 1, 2)  # HWC -> CHW
            texture_img.requires_grad = True
            # render: returns the rendered image, its foreground mask and crop box
            img, mask, bbox = tex_2_img(texture_img)
            img = img.squeeze(0).permute(1, 2, 0).detach().numpy().astype(np.uint8)
            mask = mask.squeeze(0).permute(1, 2, 0).detach().numpy()
            # centre coordinates are computed but unused below
            c_center = (bbox[0][0] + bbox[1][0]) / 2
            r_center = (bbox[0][1] + bbox[1][1]) / 2
            img = img.astype(np.uint8)
            # composite the rendered person over the background via the mask
            img = img * mask + background * (1 - mask)
            tl, br = bbox
            # crop to the person bounding box: rows = y-range, cols = x-range
            img = img[tl[1]:br[1], tl[0]:br[0], :]
            cv2.imwrite(os.path.join(out_path, name), img)
def run():
    """End-to-end pipeline: for each listed checkpoint, generate UV maps for
    Market-1501 and composite them onto random real backgrounds."""
    '''
    model_names = ['ImageNet_PerLoss2018-10-23_18:14:53.982469/2018-10-24_10:30:39.835040_epoch_120',
                   'NoPCB_PerLoss2018-10-23_18:16:04.651977/2018-10-24_06:20:33.259434_epoch_120',
                   'PCB_2048_256_L12018-10-23_18:13:29.746996/2018-10-24_05:17:39.706192_epoch_120',
                   'PCB_ALLCat_PerLoss2018-10-23_18:17:51.451793/2018-10-24_09:42:22.511739_epoch_120',
                   'PCB_PerLoss2018-10-23_18:16:59.216650/2018-10-24_13:27:16.867817_epoch_120',
                   'PCB_PerLoss_NoPosed2018-10-24_11:01:36.682130/2018-10-24_12:27:34.799378_epoch_120',
                   'PCB_RGB_L12018-10-23_18:12:42.827038/2018-10-23_23:51:33.516745_epoch_120',
                   'PCB_softmax2018-10-23_18:18:39.775789/2018-10-24_05:05:52.977378_epoch_120',
                   'PCB_TripletHard2018-10-23_18:20:48.070572/2018-10-24_04:35:05.054042_epoch_120']
    '''
    model_names = ['PCB_256_L12018-11-16_17:53:20.894085/2018-11-17_05:16:20.990883_epoch_120']
    model_root = '/unsullied/sharefs/zhongyunshan/isilon-home/model-parameters/Texture'
    for model_name in model_names:
        model_path = os.path.join(model_root, model_name)
        # derive a short run tag from fixed character offsets in the mount
        # path (fragile: depends on the exact model_root length)
        model = model_path[model_path.find('/', 61) + 1:model_path.find('/', 69)] + '_' + model_path[
            model_path.find('epoch'):]
        # model = model+'_all'
        uvmap_root = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-uvmap'
        textured_root = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-textured'
        uvmap_dir = os.path.join(uvmap_root, model)
        textured_dir = os.path.join(textured_root, model)
        print('model', model_name)
        print('uvmap_root', uvmap_root)
        print('textured_root', textured_root)
        print('uvmap_dir', uvmap_dir)
        print('textured_dir', textured_dir)
        print('create dir')
        create_dir(uvmap_dir, textured_dir)
        print('create uvmap')
        create_uvmap(model_path, uvmap_dir)
        print('read backgrounds')
        backgrounds = read_background()
        print('create textued img')
        create_textured(uvmap_dir, textured_dir, backgrounds)
run()  # executed at import time — there is no __main__ guard
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,552 | mericadil/TextureGeneration | refs/heads/master | /dataset/deprecated/background.py |
import os
import cv2
import numpy as np
from torch.utils.data import Dataset
from dataset.data_utils import ToTensor, RandomCrop, RandomFlip, Resize
import tqdm
class BackgroundDataset(Dataset):
    """Random square crops of background frames (e.g. PRW / CUHK-SYSU).

    Each item is resized to 2*img_size, randomly cropped to img_size,
    randomly flipped, and converted to a (normalized) tensor.
    """

    def __getitem__(self, index):
        texture_img_path = self.data[index]
        texture_img = cv2.imread(texture_img_path)
        if texture_img is None or texture_img.shape[0] <= 0 or texture_img.shape[1] <= 0:
            # unreadable file: retry with another random index
            return self.__getitem__(np.random.randint(0, self.__len__()))
        texture_img = self.resize(texture_img)
        texture_img = self.random_crop(texture_img)
        texture_img = self.random_flip(texture_img)
        texture_img = self.to_tensor(texture_img)
        return texture_img

    def __len__(self):
        return len(self.data)

    def __init__(self, data_path_list, img_size=224, normalize=True):
        # NOTE(review): data_path_list is iterated in generate_index(); passing
        # a single path string (as the __main__ block below does) iterates its
        # characters — callers should wrap a single path in a list.
        self.data_path_list = data_path_list
        self.img_size = img_size
        self.normalize = normalize
        self.to_tensor = ToTensor(normalize=self.normalize)
        self.data = []
        self.generate_index()
        self.random_crop = RandomCrop(output_size=self.img_size)
        self.random_flip = RandomFlip(flip_prob=0.5)
        self.resize = Resize(output_size=int(self.img_size * 2))

    def generate_index(self):
        """Walk every configured root and collect all .jpg file paths."""
        print('generating background index')
        for data_path in self.data_path_list:
            for root, dirs, files in os.walk(data_path):
                for name in tqdm.tqdm(files):
                    if name.endswith('.jpg'):
                        self.data.append(os.path.join(root, name))
        print('finish generating background index, found texture image: {}'.format(len(self.data)))
if __name__ == '__main__':
    # smoke test: load one background frame and display it
    dataset = BackgroundDataset('/unsullied/sharefs/wangjian02/isilon-home/datasets/PRW/frames')
    image = dataset.__getitem__(1)
    image = image.permute(1, 2, 0).numpy()
    # NOTE(review): '* 2.0 + 0.5' looks wrong for un-normalizing a [-1, 1]
    # tensor for display (expected '/ 2.0 + 0.5') — confirm against
    # data_utils.ToTensor before relying on this preview.
    image = image * 2.0 + 0.5
    cv2.imshow('image', image)
    cv2.waitKey()
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,553 | mericadil/TextureGeneration | refs/heads/master | /save_UVmaps.py | from NMR.neural_render_test import NrTextureRenderer
import torch
import cv2
import argparse
import numpy as np
import os.path as osp
import pickle
from tqdm import tqdm
class Saver:
    """Batch-generates UV texture maps for every image listed in the
    dataset's eval_list.pkl and writes them into output_path."""

    def __init__(self, model_path, data_dir, output_path):
        print(model_path)
        # the checkpoint stores the whole module; force CPU so no GPU is needed
        self.model = torch.load(model_path, map_location='cpu')
        self.model.eval()
        self.output_path = output_path
        self.data_dir = data_dir
        paths_pkl_path = osp.join(self.data_dir, 'eval_list.pkl')
        with open(paths_pkl_path, 'rb') as f:
            self.img_paths = pickle.load(f)

    def generate_texture(self, img_path):
        """Return a 64x64 uint8 UV texture generated from one person crop
        (img_path is relative to self.data_dir)."""
        img = cv2.imread(osp.join(self.data_dir, img_path))
        img = cv2.resize(img, (64, 128))
        # BUGFIX: normalize by 255 (was 225) so pixels land in [-1, 1],
        # matching the (out / 2.0 + 0.5) * 255 de-normalization below
        img = (img / 255. - 0.5) * 2.0
        img = torch.from_numpy(img).permute(2, 0, 1).float().unsqueeze(0)
        out = self.model(img)
        out = out.cpu().detach().numpy()[0]
        out = out.transpose((1, 2, 0))  # CHW -> HWC for OpenCV
        out = (out / 2.0 + 0.5) * 255.  # [-1, 1] -> [0, 255]
        out = out.astype(np.uint8)
        out = cv2.resize(out, dsize=(64, 64))
        return out

    def save_all_UV_maps(self):
        """Generate and save a UV map for every listed image."""
        print(len(self.img_paths))
        for img_path in tqdm(self.img_paths):
            image = self.generate_texture(img_path)
            img_name = img_path.split('/')[-1]
            cv2.imwrite(osp.join(self.output_path, img_name), image)
if __name__ == '__main__':
    #add smpl_dir to read pickle file for verts and cam params
    model_path = 'pretrained_model/pretrained_weight.pkl'
    smpl_data_dir = '/auto/k2/adundar/3DSynthesis/data/texformer/datasets/SMPLMarket'
    output_path = '/auto/k2/adundar/3DSynthesis/data/texformer/datasets/TextureGenerationResults'
    # allow loading checkpoints whose module source has since changed
    torch.nn.Module.dump_patches = True
    demo = Saver(model_path, smpl_data_dir, output_path)
demo.save_all_UV_maps() | {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,554 | mericadil/TextureGeneration | refs/heads/master | /loss/color_var_loss.py | import torch.nn as nn
import torch
# NOTE: unused module (kept for reference)
class ClothesColorVarLoss(nn.Module):
    """Penalizes per-channel color variance inside the shirt ('short_up') and
    trouser ('short_trouser') regions of a texture batch, encouraging flat
    clothing colors."""

    def forward(self, image):
        """:param image: (N, C, H, W) texture batch.
        :return: scalar — mean of the per-region, per-channel variances."""
        total_var = 0
        for i, item in enumerate(image):
            for j, channel in enumerate(item):
                up_channel = channel[self.short_up_mask[i, j]]
                trouser_channel = channel[self.short_trouser_mask[i, j]]
                total_var += torch.var(up_channel) + torch.var(trouser_channel)
        # average over the two regions, the batch and the channels
        return total_var / (2 * image.shape[0] * image.shape[1])

    def __init__(self, texture_mask, use_gpu):
        super(ClothesColorVarLoss, self).__init__()
        self.short_up_mask = texture_mask.get_mask('short_up')
        self.short_trouser_mask = texture_mask.get_mask('short_trouser')
        # FIX: index with boolean masks — indexing with ByteTensor (uint8)
        # is deprecated since torch 1.2 and removed in newer releases
        self.short_up_mask = self.short_up_mask.to(torch.bool)
        self.short_trouser_mask = self.short_trouser_mask.to(torch.bool)
        self.use_gpu = use_gpu
        if self.use_gpu:
            self.short_up_mask = self.short_up_mask.cuda()
            self.short_trouser_mask = self.short_trouser_mask.cuda()
if __name__ == '__main__':
    from utils.body_part_mask import TextureMask
    # quick sanity check on random input — the loss should be a finite scalar
    texture_mask = TextureMask(size=64, batch_size=4)
    color_loss = ClothesColorVarLoss(texture_mask, False)
    img = torch.randn(4, 3, 64, 64).float()
    result = color_loss(img)
    print(result)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,555 | mericadil/TextureGeneration | refs/heads/master | /network_models/depreciated/vanilla_gan.py | import numpy as np
import torch
from torch import nn
from torchvision.models import resnet, vgg
def conv3x3(in_planes, out_planes, stride=1, padding=1, has_bias=False):
    """Build a 3x3 convolution with padding."""
    return nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        bias=has_bias,
    )
def conv5x5(in_features, out_features, stride=1, padding=2, has_bias=False):
    """Build a 5x5 convolution with padding."""
    return nn.Conv2d(
        in_channels=in_features,
        out_channels=out_features,
        kernel_size=5,
        stride=stride,
        padding=padding,
        bias=has_bias,
    )
def conv7x7(in_channels, out_channels, stride=1, padding=3, has_bias=False):
    """Build a 7x7 convolution with padding."""
    return nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        stride=stride,
        padding=padding,
        bias=has_bias,
    )
def conv_bn_relu(in_planes, out_planes, kernel_size=3, stride=1):
    """Conv -> BatchNorm -> LeakyReLU stack.

    Supports kernel sizes 3, 5 and 7; returns None for any other size
    (matching the original contract)."""
    builders = {3: conv3x3, 5: conv5x5, 7: conv7x7}
    if kernel_size not in builders:
        return None
    # padding of kernel_size // 2 keeps the spatial size for stride 1
    conv = builders[kernel_size](in_planes, out_planes, stride, kernel_size // 2)
    nn.init.xavier_uniform_(conv.weight)
    bn = nn.BatchNorm2d(out_planes)
    nn.init.constant_(bn.weight, 1)
    nn.init.constant_(bn.bias, 0)
    relu = nn.LeakyReLU(inplace=True, negative_slope=0.02)
    return nn.Sequential(conv, bn, relu)
class EncoderBlock(nn.Module):
    """Two conv-bn-relu layers with max-pooling; halves (scale_factor=2)
    or quarters (scale_factor=4) the spatial resolution."""

    def __init__(self, in_channels, middle_channels, out_channels, kernel_size=3, scale_factor=2):
        super(EncoderBlock, self).__init__()
        if scale_factor == 2:
            self.block = nn.Sequential(*[
                conv_bn_relu(in_channels, middle_channels, kernel_size=kernel_size),
                conv_bn_relu(middle_channels, out_channels, kernel_size=kernel_size),
                nn.MaxPool2d(kernel_size=2),
            ])
        elif scale_factor == 4:
            self.block = nn.Sequential(*[
                conv_bn_relu(in_channels, middle_channels, kernel_size=kernel_size),
                nn.MaxPool2d(kernel_size=2),
                conv_bn_relu(middle_channels, out_channels, kernel_size=kernel_size),
                nn.MaxPool2d(kernel_size=2),
            ])

    def forward(self, x):
        return self.block(x)
class DecoderBlock(nn.Module):
    """Upsampling block: doubles (scale_factor=2) or quadruples
    (scale_factor=4) the spatial size, using bilinear upsampling + conv by
    default, or transposed convolutions when upsample='deconv'."""

    def __init__(self,
                 in_channels,
                 middle_channels,
                 out_channels,
                 kernel_size=3,
                 scale_factor=2,
                 upsample='upsample'):
        super(DecoderBlock, self).__init__()
        self.in_channels = in_channels
        if upsample == 'deconv':
            if scale_factor == 2:
                self.block = nn.Sequential(
                    nn.ConvTranspose2d(in_channels, middle_channels, bias=False, kernel_size=2, stride=2),
                    nn.BatchNorm2d(middle_channels),
                    nn.LeakyReLU(inplace=True, negative_slope=0.02),
                    conv_bn_relu(middle_channels, out_channels, kernel_size=kernel_size),
                )
            elif scale_factor == 4:
                # BUGFIX: the second deconv used to take in_channels as its
                # input although the first one emits middle_channels (shape
                # error whenever in != middle), and neither used stride=2, so
                # the branch never upsampled. Mirror the scale_factor=2
                # deconv layout twice for a clean 4x upsample.
                self.block = nn.Sequential(
                    nn.ConvTranspose2d(in_channels, middle_channels, bias=False, kernel_size=2, stride=2),
                    nn.BatchNorm2d(middle_channels),
                    nn.LeakyReLU(inplace=True, negative_slope=0.02),
                    nn.ConvTranspose2d(middle_channels, out_channels, bias=False, kernel_size=2, stride=2),
                    nn.BatchNorm2d(out_channels),
                    nn.LeakyReLU(inplace=True, negative_slope=0.02),
                )
        else:
            if scale_factor == 2:
                self.block = nn.Sequential(
                    nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
                    conv_bn_relu(in_channels, middle_channels, kernel_size=kernel_size),
                    conv_bn_relu(middle_channels, out_channels, kernel_size=kernel_size),
                )
            elif scale_factor == 4:
                self.block = nn.Sequential(
                    nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
                    conv_bn_relu(in_channels, middle_channels, kernel_size=kernel_size),
                    nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
                    conv_bn_relu(middle_channels, out_channels, kernel_size=kernel_size),
                )

    def forward(self, x):
        return self.block(x)
class Generator_(torch.nn.Module):
    """DCGAN-style generator: a z vector is projected to 1024 features and
    decoded through six 2x upsampling stages into a 3-channel image."""

    def __init__(self, input_dimensions, output_channels):
        super(Generator_, self).__init__()
        self.in_dimension = input_dimensions
        self.out_channels = output_channels
        self.linear = nn.Linear(input_dimensions, out_features=1024)
        self.relu = nn.LeakyReLU(negative_slope=0.02, inplace=True)
        self.bn = nn.BatchNorm1d(num_features=1024)
        stages = [
            DecoderBlock(in_channels=1024, middle_channels=512, out_channels=512, kernel_size=5),
            DecoderBlock(in_channels=512, middle_channels=256, out_channels=256, kernel_size=5),
            DecoderBlock(in_channels=256, middle_channels=128, out_channels=128, kernel_size=5),
            DecoderBlock(in_channels=128, middle_channels=64, out_channels=64, kernel_size=5),
            DecoderBlock(in_channels=64, middle_channels=32, out_channels=32, kernel_size=5),
            DecoderBlock(in_channels=32, middle_channels=16, out_channels=8, kernel_size=5),
            nn.Conv2d(in_channels=8, out_channels=3, kernel_size=3, padding=1, bias=False),
            nn.Tanh(),
        ]
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        out = self.bn(self.relu(self.linear(x)))
        out = out.view(-1, 1024, 1, 1)
        return self.model(out)
class Discriminator_(torch.nn.Module):
    """Six-stage convolutional encoder followed by dropout and a linear head.

    Expects (N, 3, 128, 64) inputs so the encoder output flattens to 512.
    NOTE(review): the first stage hard-codes in_channels=3 — the
    input_channels argument is stored but not actually used; confirm intent.
    """

    def __init__(self, input_channels, out_dimension):
        super(Discriminator_, self).__init__()
        self.input_channels = input_channels
        self.output_dimension = out_dimension
        stages = [
            EncoderBlock(in_channels=3, middle_channels=8, out_channels=8, kernel_size=5),
            EncoderBlock(in_channels=8, middle_channels=16, out_channels=16, kernel_size=5),
            EncoderBlock(in_channels=16, middle_channels=32, out_channels=32, kernel_size=5),
            EncoderBlock(in_channels=32, middle_channels=64, out_channels=64, kernel_size=5),
            EncoderBlock(in_channels=64, middle_channels=128, out_channels=128, kernel_size=5),
            EncoderBlock(in_channels=128, middle_channels=256, out_channels=256, kernel_size=5),
        ]
        self.feature = nn.Sequential(*stages)
        self.dropout = nn.Dropout(p=0.4)
        self.linear = nn.Linear(in_features=512, out_features=out_dimension)

    def forward(self, x):
        out = self.feature(x)
        out = out.view(-1, 512)
        out = self.dropout(out)
        return self.linear(out)
class Discriminator(nn.Module):
    """Wrapper that runs Discriminator_ via data_parallel when gpu_ids are
    given and the input is already a CUDA float tensor."""

    def __init__(self, input_channels, output_dimension, gpu_ids=None):
        super(Discriminator, self).__init__()
        self.model = Discriminator_(input_channels, output_dimension)
        self.gpu_ids = gpu_ids

    def forward(self, input):
        on_gpu = self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor)
        if on_gpu:
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class Generator(nn.Module):
    """Wrapper that runs Generator_ via data_parallel when gpu_ids are given
    and the input is already a CUDA float tensor."""

    def __init__(self, input_dimension, output_channels, gpu_ids=None):
        super(Generator, self).__init__()
        self.model = Generator_(input_dimension, output_channels)
        self.gpu_ids = gpu_ids

    def forward(self, input):
        on_gpu = self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor)
        if on_gpu:
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
if __name__ == '__main__':
    # shape sanity check for both networks
    generator = Generator(input_dimension=1024, output_channels=3)
    discriminator = Discriminator(input_channels=3, output_dimension=1)
    fixed_z_ = torch.randn(4, 1024)  # fixed noise
    result = generator(fixed_z_)
    print(result.shape)
    pic = torch.ones(4, 3, 128, 64)
    pic = discriminator(pic)
    print(pic.shape)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,556 | mericadil/TextureGeneration | refs/heads/master | /loss/PCB_softmax_loss.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
import os
from .resnet_market1501 import resnet50
import sys
# ReID Loss
class ReIDLoss(nn.Module):
    """Perceptual ReID loss built on a pretrained PCB resnet50.

    This ("softmax") variant compares the softmaxed part-classifier outputs
    of the generated image with those of the original image using BCE; the
    other five slots of the returned tuple are zero placeholders.
    """

    def __init__(self, model_path, num_classes=1501, size=(384, 128), gpu_ids=None, margin=0.3,is_trainable=False):
        """Load the PCB checkpoint and prepare normalization buffers.

        :param model_path: path to the pretrained PCB resnet50 checkpoint.
        :param num_classes: identity count of the ReID training set.
        :param size: (H, W) the inputs are resized to before the backbone.
        :param gpu_ids: when not None, run the model and buffers on CUDA.
        :param margin: margin for the ranking loss (built but unused here).
        :param is_trainable: whether the backbone weights receive gradients.
        """
        super(ReIDLoss, self).__init__()
        self.size = size
        self.gpu_ids = gpu_ids
        model_structure = resnet50(num_features=256, dropout=0.5, num_classes=num_classes, cut_at_pooling=False,
                                   FCN=True)
        # load checkpoint
        if self.gpu_ids is None:
            # map GPU-saved tensors onto the CPU
            checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
        else:
            checkpoint = torch.load(model_path)
        self.margin = margin
        if self.margin is not None:
            self.ranking_loss = nn.MarginRankingLoss(margin=margin)
        else:
            raise ValueError('self.margin is None!')
        # keep only the checkpoint entries whose keys match the model
        model_dict = model_structure.state_dict()
        checkpoint_load = {k: v for k, v in (checkpoint['state_dict']).items() if k in model_dict}
        model_dict.update(checkpoint_load)
        model_structure.load_state_dict(model_dict)
        self.model = model_structure
        #self.model.eval()
        if gpu_ids is not None:
            self.model.cuda()
        self.is_trainable = is_trainable
        for param in self.model.parameters():
            param.requires_grad = self.is_trainable
        self.triple_feature_loss = nn.L1Loss()
        self.softmax_feature_loss = nn.BCELoss()
        # ImageNet mean/std expanded to the full image shape
        self.normalize_mean = torch.Tensor([0.485, 0.456, 0.406])
        self.normalize_mean = self.normalize_mean.expand(384, 128, 3).permute(2, 0, 1)  # channels-first
        self.normalize_std = torch.Tensor([0.229, 0.224, 0.225])
        self.normalize_std = self.normalize_std.expand(384, 128, 3).permute(2, 0, 1)  # channels-first
        if gpu_ids is not None:
            self.normalize_std = self.normalize_std.cuda()
            self.normalize_mean = self.normalize_mean.cuda()

    def extract_feature(self, inputs):
        """Return the six part classifiers' softmax outputs, concatenated.

        NOTE(review): F.softmax is called without dim= (deprecated implicit
        dim selection) and torch.cat defaults to dim 0, stacking the six
        parts along the batch axis — confirm both are intended.
        """
        outputs = self.model(inputs)
        #feature_tri = outputs[0].view(outputs[0].size(0), -1)
        #feature_tri = feature_tri / feature_tri.norm(2, 1, keepdim=True).expand_as(feature_tri)
        (c0, c1, c2, c3, c4, c5) = outputs[1]
        #c0 = c0 / c0.norm(2, 1, keepdim=True).expand_as(c0)
        c0 = F.softmax(c0)
        #c1 = c1 / c1.norm(2, 1, keepdim=True).expand_as(c1)
        c1 = F.softmax(c1)
        #c2 = c2 / c2.norm(2, 1, keepdim=True).expand_as(c2)
        c2 = F.softmax(c2)
        #c3 = c3 / c3.norm(2, 1, keepdim=True).expand_as(c3)
        c3 = F.softmax(c3)
        #c4 = c4 / c4.norm(2, 1, keepdim=True).expand_as(c4)
        c4 = F.softmax(c4)
        #c5 = c5 / c5.norm(2, 1, keepdim=True).expand_as(c5)
        c5 = F.softmax(c5)
        feature_softmax = torch.cat((c0,c1,c2,c3,c4,c5))
        #feature_softmax = F.softmax(feature_softmax)
        return feature_softmax

    def preprocess(self, data):
        """
        The input image is normalized in [-1, 1] and is BGR; convert it to
        the resized, RGB, ImageNet-normalized format accepted by the model.
        :param data: (N, 3, H, W) tensor in [-1, 1], BGR channel order
        :return: resized, RGB, ImageNet-normalized tensor
        """
        data_unnorm = data / 2.0 + 0.5
        permute = [2, 1, 0]  # BGR -> RGB
        data_rgb_unnorm = data_unnorm[:, permute]
        # NOTE(review): F.upsample is deprecated in favor of F.interpolate
        data_rgb_unnorm = F.upsample(data_rgb_unnorm, size=self.size, mode='bilinear')
        data_rgb = (data_rgb_unnorm - self.normalize_mean) / self.normalize_std
        return data_rgb

    # label is the original image
    # data is the generated image
    # targets are the person ids (pids)
    def forward(self, data, label, targets):
        """Return a 6-tuple of losses; only slot 1 (softmax BCE / 6) is
        active, the rest are zero tensors (targets is accepted but unused)."""
        assert label.requires_grad is False
        data = self.preprocess(data)
        label = self.preprocess(label)
        feature_softmax_data = self.extract_feature(data)
        feature_softmax_label = self.extract_feature(label)
        # avoid bugs
        feature_softmax_label.detach_()
        feature_softmax_label.requires_grad = False
        '''
        for n, k in self.model.base.named_children():
            print(n)
            if n == 'avgpool':
                break
        print(self.model.state_dict()['base']['conv1'])
        sys.exit(0)
        '''
        # print('Reid para',self.model.state_dict()['base.conv1.weight'][10][1][1])
        return torch.Tensor([0]).cuda(),\
               self.softmax_feature_loss(feature_softmax_data, feature_softmax_label)/6,\
               torch.Tensor([0]).cuda(),\
               torch.Tensor([0]).cuda(),\
               torch.Tensor([0]).cuda(),\
               torch.Tensor([0]).cuda()
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,557 | mericadil/TextureGeneration | refs/heads/master | /loss/PCB_intern_loss.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
import os
from .resnet_market1501 import resnet50
import sys
# ReID Loss
class ReIDLoss(nn.Module):
    """Perceptual ReID loss built on a pretrained PCB resnet50.

    This ("intern") variant compares L2-normalized concatenated PCB features
    (2048*6 + 256*6 dims) of generated vs. original images with an L1 loss,
    plus an intra-identity L2 feature-distance term (uvmap_l2_loss).
    """

    def __init__(self, model_path, num_classes=1501, size=(384, 128), gpu_ids=None, margin=0.3,is_trainable=False):
        """Load the PCB checkpoint and prepare normalization buffers.

        :param model_path: path to the pretrained PCB resnet50 checkpoint.
        :param num_classes: identity count of the ReID training set.
        :param size: (H, W) the inputs are resized to before the backbone.
        :param gpu_ids: when not None, run the model and buffers on CUDA.
        :param margin: margin for the triplet ranking loss helpers.
        :param is_trainable: whether the backbone weights receive gradients.
        """
        super(ReIDLoss, self).__init__()
        self.size = size
        self.gpu_ids = gpu_ids
        model_structure = resnet50(num_features=256, dropout=0.5, num_classes=num_classes, cut_at_pooling=False,
                                   FCN=True)
        # load checkpoint
        if self.gpu_ids is None:
            # map GPU-saved tensors onto the CPU
            checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
        else:
            checkpoint = torch.load(model_path)
        self.margin = margin
        if self.margin is not None:
            self.ranking_loss = nn.MarginRankingLoss(margin=margin)
        else:
            raise ValueError('self.margin is None!')
        # keep only the checkpoint entries whose keys match the model
        model_dict = model_structure.state_dict()
        checkpoint_load = {k: v for k, v in (checkpoint['state_dict']).items() if k in model_dict}
        model_dict.update(checkpoint_load)
        model_structure.load_state_dict(model_dict)
        self.model = model_structure
        #self.model.eval()
        if gpu_ids is not None:
            self.model.cuda()
        self.is_trainable = is_trainable
        for param in self.model.parameters():
            param.requires_grad = self.is_trainable
        self.triple_feature_loss = nn.L1Loss()
        self.softmax_feature_loss = nn.BCELoss()
        # ImageNet mean/std expanded to the full image shape
        self.normalize_mean = torch.Tensor([0.485, 0.456, 0.406])
        self.normalize_mean = self.normalize_mean.expand(384, 128, 3).permute(2, 0, 1)  # channels-first
        self.normalize_std = torch.Tensor([0.229, 0.224, 0.225])
        self.normalize_std = self.normalize_std.expand(384, 128, 3).permute(2, 0, 1)  # channels-first
        if gpu_ids is not None:
            self.normalize_std = self.normalize_std.cuda()
            self.normalize_mean = self.normalize_mean.cuda()

    def extract_feature(self, inputs):
        # 2048*6+256*6 — concatenation of both PCB feature outputs,
        # each L2-normalized per sample before concatenation
        out = self.model(inputs)
        o1 = out[0].view(out[0].size(0), -1)
        o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
        o2 = out[2].view(out[2].size(0), -1)
        o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
        #feature_tri = torch.cat((o1,o2),dim=1)
        #feature_tri = feature_tri / feature_tri.norm(2, 1, keepdim=True).expand_as(feature_tri)
        feature_tri = torch.cat((o1,o2),dim=1)
        return feature_tri

    def preprocess(self, data):
        """
        The input image is normalized in [-1, 1] and is BGR; convert it to
        the resized, RGB, ImageNet-normalized format accepted by the model.
        :param data: (N, 3, H, W) tensor in [-1, 1], BGR channel order
        :return: resized, RGB, ImageNet-normalized tensor
        """
        data_unnorm = data / 2.0 + 0.5
        permute = [2, 1, 0]  # BGR -> RGB
        data_rgb_unnorm = data_unnorm[:, permute]
        # NOTE(review): F.upsample is deprecated in favor of F.interpolate
        data_rgb_unnorm = F.upsample(data_rgb_unnorm, size=self.size, mode='bilinear')
        data_rgb = (data_rgb_unnorm - self.normalize_mean) / self.normalize_std
        return data_rgb

    # label is the original image
    # data is the generated image
    # targets are the person ids (pids)
    def forward(self, data, label, targets):
        """Return a 6-tuple: (L1 feature loss, 0, 0, 0, 0, uvmap L2 loss)."""
        assert label.requires_grad is False
        data = self.preprocess(data)
        label = self.preprocess(label)
        feature_tri_data = self.extract_feature(data)
        feature_tri_label = self.extract_feature(label)
        # avoid bugs
        feature_tri_label.detach_()
        feature_tri_label.requires_grad = False
        return self.triple_feature_loss(feature_tri_data, feature_tri_label),\
               torch.Tensor([0]).cuda(),\
               torch.Tensor([0]).cuda(),\
               torch.Tensor([0]).cuda(),\
               torch.Tensor([0]).cuda(),\
               self.uvmap_l2_loss(feature_tri_data,targets)

    def uvmap_l2_loss(self,feature_tri_data,targets):
        """Sum of pairwise feature distances between samples that share the
        same pid (pulls same-identity features together)."""
        dist_mat = self.euclidean_dist(feature_tri_data, feature_tri_data)
        N = dist_mat.size(0)
        is_pos = targets.expand(N, N).eq(targets.expand(N, N).t())
        is_pos = is_pos.type(torch.FloatTensor)
        is_pos = is_pos.cuda()
        dist_mat = dist_mat.cuda()
        return torch.sum(dist_mat * is_pos)

    def euclidean_dist(self,x, y):
        # compute the pairwise Euclidean distance matrix directly with
        # matrix operations: ||x||^2 + ||y||^2 - 2 x.y
        """
        Args:
          x: pytorch Variable, with shape [m, d]
          y: pytorch Variable, with shape [n, d]
        Returns:
          dist: pytorch Variable, with shape [m, n]
        """
        m, n = x.size(0), y.size(0)
        xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
        yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
        dist = xx + yy
        # NOTE(review): positional addmm_(beta, alpha, m1, m2) is the
        # deprecated signature — newer torch expects keyword beta=/alpha=
        dist.addmm_(1, -2, x, y.t())
        dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
        return dist

    def hard_example_mining(self,dist_mat, labels, return_inds=False):
        """For each anchor, find the hardest positive and negative sample.
        Args:
          dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
          labels: pytorch LongTensor, with shape [N]
          return_inds: whether to return the indices. Save time if `False`(?)
        Returns:
          dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
          dist_an: pytorch Variable, distance(anchor, negative); shape [N]
          p_inds: pytorch LongTensor, with shape [N];
            indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
          n_inds: pytorch LongTensor, with shape [N];
            indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
        NOTE: Only consider the case in which all labels have same num of samples,
          thus we can cope with all anchors in parallel.
        """
        assert len(dist_mat.size()) == 2
        assert dist_mat.size(0) == dist_mat.size(1)
        N = dist_mat.size(0)
        # shape [N, N]
        is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
        is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
        # `dist_ap` means distance(anchor, positive)
        # both `dist_ap` and `relative_p_inds` with shape [N, 1]
        dist_ap, relative_p_inds = torch.max(
            dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
        # `dist_an` means distance(anchor, negative)
        # both `dist_an` and `relative_n_inds` with shape [N, 1]
        dist_an, relative_n_inds = torch.min(
            dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
        # shape [N]
        dist_ap = dist_ap.squeeze(1)
        dist_an = dist_an.squeeze(1)
        if return_inds:
            # shape [N, N]
            ind = (labels.new().resize_as_(labels)
                   .copy_(torch.arange(0, N).long())
                   .unsqueeze(0).expand(N, N))
            # shape [N, 1]
            p_inds = torch.gather(
                ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)
            n_inds = torch.gather(
                ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)
            # shape [N]
            p_inds = p_inds.squeeze(1)
            n_inds = n_inds.squeeze(1)
            return dist_ap, dist_an, p_inds, n_inds
        return dist_ap, dist_an

    def triplet_hard_Loss(self,global_feat,feature_tri_label,labels):
        """Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
        Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
        Loss for Person Re-Identification'."""
        # no normalize
        dist_mat = self.euclidean_dist(global_feat, feature_tri_label)
        dist_ap, dist_an = self.hard_example_mining(
            dist_mat, labels)
        y = dist_an.new().resize_as_(dist_an).fill_(1)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        return loss

    def triplet_Loss(self,global_feat,feature_tri_label,labels):
        """Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
        Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
        Loss for Person Re-Identification'."""
        # no normalize
        dist_mat = self.euclidean_dist(global_feat, feature_tri_label)
        # positive distances pair each generated image with its own original image
        dist_ap = torch.diagonal(dist_mat)
        _, dist_an = self.hard_example_mining(
            dist_mat, labels)
        y = dist_an.new().resize_as_(dist_an).fill_(1)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        return loss
if __name__ == '__main__':
    import cv2
    from torchvision import transforms as T
    # manual smoke test: compare one real crop with its rendered counterpart
    trans = T.Compose([
        # T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0),
        T.Resize((384, 128)),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    img1 = cv2.imread('/home/wangjian02/Projects/TextureGAN/tmp/test_img/in/0112_c1s1_019001_00.jpg')
    img1 = (img1 / 255. - 0.5) * 2.0
    img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
    img1 = img1.unsqueeze(0)
    img1.requires_grad = True
    img2 = cv2.imread('/home/wangjian02/Projects/TextureGAN/tmp/test_img/out_render_prw/0112_c1s1_019001_00.jpg')
    img2 = (img2 / 255. - 0.5) * 2.0
    img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
    img2 = img2.unsqueeze(0)
    loss = ReIDLoss(model_path='/home/wangjian02/Projects/pcb_market1501_best/checkpoint_120.pth.tar')
    # NOTE(review): forward takes (data, label, targets); this two-argument
    # call will raise a TypeError — update before running this block.
    l = loss(img1, img2)
    l.backward()
    print(l)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,558 | mericadil/TextureGeneration | refs/heads/master | /dataset/real_texture.py | import os
import cv2
import numpy as np
from torch.utils.data import Dataset
from .data_utils import ToTensor
import tqdm
class RealTextureDataset(Dataset):
    """Dataset of real texture images found by walking a directory tree.

    Only files ending in '.jpg' whose name contains 'nongrey' are indexed.
    Each item is a texture image resized to (img_size, img_size) and
    converted with the project's ToTensor transform.
    """

    def __init__(self, data_path, img_size=64, normalize=True):
        self.data_path = data_path
        self.img_size = img_size
        self.normalize = normalize
        self.to_tensor = ToTensor(normalize=self.normalize)
        self.data = []
        # Build the file index eagerly at construction time.
        self.generate_index()

    def generate_index(self):
        """Walk data_path and collect all matching texture image paths."""
        print('generating index')
        for root, dirs, files in os.walk(self.data_path):
            for fname in tqdm.tqdm(files):
                if fname.endswith('.jpg') and 'nongrey' in fname:
                    self.data.append(os.path.join(root, fname))
        print('finish generating index, found texture image: {}'.format(len(self.data)))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Load, resize, and convert one texture image to a tensor.
        img_path = self.data[index]
        img = cv2.imread(img_path)
        img = cv2.resize(img, dsize=(self.img_size, self.img_size))
        return self.to_tensor(img)
# -*- coding:utf-8 -*-
#
#
# import os
#
# import cv2
# import numpy as np
# from torch.utils.data import Dataset
# import pickle
# import nori2 as nori
# from utils.imdecode import imdecode
# from .data_utils import ToTensor
#
#
# # 真实的uvmap
#
# class RealTextureDataset(Dataset):
#
# def __init__(self, data_path=None, img_size=64, pkl_path=None, normalize=True):
# # self.data_path = data_path
# self.img_size = img_size
# self.normalize = normalize
#
# self.to_tensor = ToTensor(normalize=self.normalize)
#
# # 检查是否有该文件
# if not os.path.exists(pkl_path):
# raise ValueError('{} not exists!!'.format(pkl_path))
# # 打开pkl pid:[_,image_id,camera_id]
# with open(pkl_path, 'rb') as fs:
# self.pkl = pickle.load(fs)
# self.len = len(self.pkl)
#
# # nori
# self.nf = nori.Fetcher()
#
# def __getitem__(self, index):
# texture_img = self.nf.get(self.pkl[index][0])
#
# # decode
# texture_img = imdecode(texture_img)
# texture_img = cv2.resize(texture_img, dsize=(self.img_size, self.img_size))
#
# texture_img = self.to_tensor(texture_img)
#
# return texture_img
#
# def __len__(self):
# return self.len
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,559 | mericadil/TextureGeneration | refs/heads/master | /utils/body_part_mask.py | import os
import torch
import cv2
import numpy as np
class TextureMask:
    """Loader for per-body-part binary masks applied to UV texture maps.

    The mask images live under ``models/`` (relative paths); each is resized
    to ``self.size`` and scaled to [0, 1].
    """

    def __init__(self, size):
        """
        :param size: either a single int (square mask) or an explicit
                     (width, height) tuple passed straight to cv2.resize.
        """
        if isinstance(size, int):
            self.size = (size, size)
        else:
            self.size = size
        # Relative paths of the available mask images, keyed by part name.
        self.part = {
            'face': 'models/face_mask.png',
            'hand': 'models/hand_mask.png',
            'body': 'models/body_mask.png',
            'short_up': 'models/short_up_mask.jpg',
            'short_trouser': 'models/short_trouser_mask.jpg'
        }

    def get_numpy_mask(self, part):
        """Return the mask as an HxWx3 float ndarray in [0, 1].

        :param part: one of the keys of ``self.part``.
        :raises KeyError: if ``part`` is unknown.
        """
        mask_path = self.part[part]
        mask = cv2.imread(mask_path)
        mask = cv2.resize(mask, self.size)
        mask = mask / 255.
        return mask

    def get_mask(self, part):
        """Return the mask as a 1x3xHxW float torch tensor in [0, 1].

        :param part: one of the keys of ``self.part``.
        """
        # Reuse the numpy loader (previously duplicated here), then convert
        # to NCHW torch layout.
        mask = self.get_numpy_mask(part)
        mask = mask.transpose((2, 0, 1))
        mask = np.expand_dims(mask, 0)
        mask = torch.from_numpy(mask).float()
        return mask
if __name__ == '__main__':
    # Quick visual sanity check of one part mask.
    masker = TextureMask(size=64)
    # BUGFIX: get_mask() returns a 1x3xHxW torch tensor, which cv2.imshow
    # cannot display; use the HxWx3 numpy variant for visualisation.
    mask = masker.get_numpy_mask("face")
    cv2.imshow('mask', mask)
    cv2.waitKey()
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,560 | mericadil/TextureGeneration | refs/heads/master | /deprecated/get_render_matrix.py | import numpy as np
import pickle
import os
import cv2
import torch
from smpl.render_texture import Renderer
# CMU mocap sequences (pickled SMPL parameter tracks) to render.
action_files = [
    # '104/104_09.pkl', # run
    '104/104_19.pkl', # walk
    '39/39_14.pkl', # walk
    # '36/36_32.pkl' # up stairs
]
# Accumulates {'mat': render-derivative matrix, 'mask': silhouette} dicts.
result = []
# Number of evenly spaced yaw rotations rendered per frame.
rotate_total_div = 8
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix into a torch sparse FloatTensor.

    :param sparse_mx: any scipy sparse matrix (converted to COO internally).
    :return: torch.sparse.FloatTensor with the same shape and float32 values.
    """
    coo = sparse_mx.tocoo().astype(np.float32)
    # Stack row/col indices into the 2xN int64 layout torch expects.
    idx = torch.from_numpy(np.vstack((coo.row, coo.col))).long()
    vals = torch.from_numpy(coo.data)
    return torch.sparse.FloatTensor(idx, vals, torch.Size(coo.shape))
# For every mocap sequence: render every 20th frame at each of the
# rotate_total_div yaw angles, collecting the render-derivative matrix and
# silhouette for each rendering.
for file_name in action_files:
    path = os.path.join('neutrSMPL_CMU', file_name)
    with open(path, 'rb') as f:
        # Pickle holds SMPL tracks: 'trans', 'poses' (per frame), 'betas'.
        data = pickle.load(f)
    renderer = Renderer('smpl/models/body.obj', 'smpl/models/neutral.pkl', w=224, h=224)
    texture_bgr = cv2.imread('smpl/models/default_texture2.jpg')
    texture_bgr = cv2.resize(texture_bgr, dsize=(224, 224))
    for rotate_div in range(0, rotate_total_div):
        # Sample every 20th frame of the sequence.
        for i in range(0, len(data['poses']), 20):
            # Parameter vector layout consumed by Renderer.render:
            # 3 camera + 72 pose + 10 shape values.
            thetas = np.concatenate((data['trans'][i], data['poses'][i], data['betas']))
            # Override the global root rotation so the body is upright.
            thetas[3:6] = [np.pi, 0, 0]
            rn, deviation, silhouette = renderer.render(thetas, texture_bgr,
                                                        rotate=np.array([0,
                                                                         2 * np.pi * rotate_div / rotate_total_div
                                                                         , 0]))
            result.append({
                'mat': deviation,
                'mask': silhouette
            })
        print('process: {} / {}'.format(rotate_div, rotate_total_div))
np.save('walk_224', result)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,561 | mericadil/TextureGeneration | refs/heads/master | /loss/PCB_MiddleFeature.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
import os
from .resnet_market1501 import resnet50
import sys
# ReID Loss
class ReIDLoss(nn.Module):
def __init__(self, model_path, num_classes=1501, size=(384, 128), gpu_ids=None, margin=0.3,is_trainable=False, layer = None):
super(ReIDLoss, self).__init__()
self.size = size
self.gpu_ids = gpu_ids
model_structure = resnet50(num_features=256, dropout=0.5, num_classes=num_classes, cut_at_pooling=False,
FCN=True)
# if gpu_ids is not None:
# model_structure = nn.DataParallel(model_structure, device_ids=gpu_ids)
# load checkpoint
if self.gpu_ids is None:
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
else:
checkpoint = torch.load(model_path)
self.margin = margin
if self.margin is not None:
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
else:
raise ValueError('self.margin is None!')
model_dict = model_structure.state_dict()
checkpoint_load = {k: v for k, v in (checkpoint['state_dict']).items() if k in model_dict}
model_dict.update(checkpoint_load)
model_structure.load_state_dict(model_dict)
self.model = model_structure
self.model.eval()
self.layer = layer
print('Stop in layer:',layer)
if self.margin is not None:
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
else:
raise ValueError('self.margin is None!')
if self.layer is not None:
print('Feature layer:', 'layer'+str(self.layer))
else:
raise ValueError('self.layer is None!')
if gpu_ids is not None:
self.model.cuda()
self.is_trainable = is_trainable
for param in self.model.parameters():
param.requires_grad = self.is_trainable
self.triple_feature_loss = nn.L1Loss()
self.softmax_feature_loss = nn.BCELoss()
self.normalize_mean = torch.Tensor([0.485, 0.456, 0.406])
self.normalize_mean = self.normalize_mean.expand(384, 128, 3).permute(2, 0, 1) # 调整为通道在前
self.normalize_std = torch.Tensor([0.229, 0.224, 0.225])
self.normalize_std = self.normalize_std.expand(384, 128, 3).permute(2, 0, 1) # 调整为通道在前
if gpu_ids is not None:
self.normalize_std = self.normalize_std.cuda()
self.normalize_mean = self.normalize_mean.cuda()
def extract_feature(self, inputs):
if self.layer not in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]:
raise KeyError('{} not in keys!'.format(self.layer))
if self.layer == 5:
# 256特征
inputs = self.model(inputs)
outputs = inputs[2].view(inputs[2].size(0), -1)
#print(outputs.shape)
feature_tri = outputs
feature_tri = feature_tri / feature_tri.norm(2, 1, keepdim=True).expand_as(feature_tri)
return feature_tri
elif self.layer == 6:
# 2048*6+256*6
out = self.model(inputs)
o1 = out[0].view(out[0].size(0), -1)
o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
o2 = out[2].view(out[2].size(0), -1)
o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
feature_tri = torch.cat((o1,o2),dim=1)
return feature_tri
elif self.layer == 7:
# 2048*6+layer4
out = self.model(inputs)
o1 = out[0].view(out[0].size(0), -1)
o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
o2 = inputs
for n, m in self.model.base.named_children():
o2 = m.forward(o2)
if n == 'layer4':
break
o2 = o2.view(o2.size(0),-1)
o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
feature_tri = torch.cat((o1,o2),dim=1)
return feature_tri
elif self.layer == 8:
# 256*6+layer4
out = self.model(inputs)
o1 = out[2].view(out[2].size(0), -1)
o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
o2 = inputs
for n, m in self.model.base.named_children():
o2 = m.forward(o2)
if n == 'layer4':
break
o2 = o2.view(o2.size(0),-1)
o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
feature_tri = torch.cat((o1,o2),dim=1)
return feature_tri
elif self.layer == 9:
# layer3+layer4
for n, m in self.model.base.named_children():
inputs = m.forward(inputs)
if n == 'layer3':
o1 = inputs
if n == 'layer4':
o2 = inputs
break
o1 = o1.view(o1.size(0),-1)
o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
o2 = o2.view(o2.size(0),-1)
o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
feature_tri = torch.cat((o1,o2),dim=1)
return feature_tri
elif self.layer == 10:
# layer2+layer3
for n, m in self.model.base.named_children():
inputs = m.forward(inputs)
if n == 'layer2':
o1 = inputs
if n == 'layer3':
o2 = inputs
break
o1 = o1.view(o1.size(0),-1)
o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
o2 = o2.view(o2.size(0),-1)
o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
feature_tri = torch.cat((o1,o2),dim=1)
return feature_tri
elif self.layer == 11:
# layer2+layer4
for n, m in self.model.base.named_children():
inputs = m.forward(inputs)
if n == 'layer2':
o1 = inputs
if n == 'layer4':
o2 = inputs
break
o1 = o1.view(o1.size(0),-1)
o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
o2 = o2.view(o2.size(0),-1)
o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
feature_tri = torch.cat((o1,o2),dim=1)
return feature_tri
elif self.layer == 12:
# layer2+layer3+layer4
for n, m in self.model.base.named_children():
inputs = m.forward(inputs)
if n == 'layer2':
o1 = inputs
if n == 'layer3':
o2 = inputs
if n == 'layer4':
o3 = inputs
break
o1 = o1.view(o1.size(0),-1)
o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
o2 = o2.view(o2.size(0),-1)
o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
o3 = o3.view(o3.size(0),-1)
o3 = o3 / o3.norm(2, 1, keepdim=True).expand_as(o3)
feature_tri = torch.cat((o1,o2,o3),dim=1)
return feature_tri
elif self.layer == 13:
# 2048*6+256*6
out = self.model(inputs)
o1 = out[0].view(out[0].size(0), -1)
o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
o2 = out[2].view(out[2].size(0), -1)
o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
o3 = inputs.view(inputs.size(0), -1)
o3 = o3 / o3.norm(2, 1, keepdim=True).expand_as(o3)
feature_tri = torch.cat((o1,o2,o3),dim=1)
return feature_tri
elif self.layer == 14:
# layer4
for n, m in self.model.base.named_children():
inputs = m.forward(inputs)
if n == 'layer2':
o1 = inputs
if n == 'layer3':
o2 = inputs
if n == 'layer4':
o3 = inputs
break
o1 = o1.view(o1.size(0),-1)
o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
o2 = o2.view(o2.size(0),-1)
o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
o3 = o3.view(o3.size(0),-1)
o3 = o3 / o3.norm(2, 1, keepdim=True).expand_as(o3)
feature_tri = o3
return feature_tri
elif self.layer == 15:
feature_tri = inputs.view(inputs.size(0),-1)
feature_tri = feature_tri / feature_tri.norm(2, 1, keepdim=True).expand_as(feature_tri)
return feature_tri
else:
for n, m in self.model.base.named_children():
inputs = m.forward(inputs)
if n == 'layer'+str(self.layer):
break
outputs = inputs
feature_tri = outputs.view(outputs.size(0), -1)
feature_tri = feature_tri / feature_tri.norm(2, 1, keepdim=True).expand_as(feature_tri)
return feature_tri
def preprocess(self, data):
"""
the input image is normalized in [-1, 1] and in bgr format, should be changed to the format accecpted by model
:param data:
:return:
"""
data_unnorm = data / 2.0 + 0.5
permute = [2, 1, 0]
data_rgb_unnorm = data_unnorm[:, permute]
data_rgb_unnorm = F.upsample(data_rgb_unnorm, size=self.size, mode='bilinear')
data_rgb = (data_rgb_unnorm - self.normalize_mean) / self.normalize_std
return data_rgb
# label 就是原始图
# data 是生成图
# targets 是pids
def forward(self, data, label, targets):
assert label.requires_grad is False
data = self.preprocess(data)
label = self.preprocess(label)
feature_tri_data = self.extract_feature(data)
feature_tri_label = self.extract_feature(label)
# avoid bugs
feature_tri_label.detach_()
feature_tri_label.requires_grad = False
'''
for n, k in self.model.base.named_children():
print(n)
if n == 'avgpool':
break
print(self.model.state_dict()['base']['conv1'])
sys.exit(0)
'''
# print('Reid para',self.model.state_dict()['base.conv1.weight'][10][1][1])
return self.triple_feature_loss(feature_tri_data, feature_tri_label),\
torch.Tensor([0]).cuda(),\
torch.Tensor([0]).cuda(),\
torch.Tensor([0]).cuda(),\
torch.Tensor([0]).cuda(),\
torch.Tensor([0]).cuda()
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,562 | mericadil/TextureGeneration | refs/heads/master | /network_models/depreciated/lsgan.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Generator_(nn.Module):
    """DCGAN-style generator mapping a latent vector z to an image.

    Spatial progression (ngf = base feature width):
        z (nz x 1 x 1) -> ngf*8 x 4x4 -> ngf*4 x 8x8 -> ngf*2 x 16x16
        -> ngf x 32x32 -> nChannels x 64x64
    """

    def __init__(self, nz, nChannels, ngf=64):
        super(Generator_, self).__init__()
        self.nz = nz

        def up_block(c_in, c_out):
            # Doubles the spatial resolution: deconv(k4, s2, p1) + BN + ReLU.
            return nn.Sequential(
                nn.ConvTranspose2d(
                    in_channels=c_in,
                    out_channels=c_out,
                    kernel_size=4,
                    stride=2,
                    padding=1,
                    bias=False
                ),
                nn.BatchNorm2d(c_out),
                nn.ReLU(True)
            )

        # First stage projects the 1x1 latent vector to a 4x4 feature map.
        self.layer1 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=nz,
                out_channels=ngf * 8,
                kernel_size=4,
                stride=1,
                padding=0,
                bias=False
            ),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True)
        )
        self.layer2 = up_block(ngf * 8, ngf * 4)
        self.layer3 = up_block(ngf * 4, ngf * 2)
        self.layer4 = up_block(ngf * 2, ngf)
        # Final stage emits the image; Tanh bounds the pixels to [-1, 1].
        self.layer5 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=ngf,
                out_channels=nChannels,
                kernel_size=4,
                stride=2,
                padding=1,
                bias=False
            ),
            nn.Tanh()
        )

    def forward(self, x):
        # Reshape the flat latent batch to NCHW before the deconv stack.
        out = x.view(-1, self.nz, 1, 1)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            out = stage(out)
        return out
class Discriminator_(nn.Module):
    """DCGAN-style discriminator: five strided conv stages plus a linear
    head producing one raw score per sample (no sigmoid, as suited to a
    least-squares GAN objective).

    NOTE: the linear head is sized for ndf=64; the flatten view assumes the
    final feature map holds 256*4*2 values per sample (e.g. a 128x64 input).
    """

    def __init__(self, nChannels, ndf=64):
        super(Discriminator_, self).__init__()

        def down_block(c_in, c_out):
            # Halves the spatial resolution: conv(k5, s2, p2) + BN + LeakyReLU.
            return nn.Sequential(
                nn.Conv2d(
                    in_channels=c_in,
                    out_channels=c_out,
                    kernel_size=5,
                    stride=2,
                    padding=2,
                    bias=False
                ),
                nn.BatchNorm2d(c_out),
                nn.LeakyReLU(0.2, inplace=True)
            )

        self.layer1 = down_block(nChannels, ndf)
        self.layer2 = down_block(ndf, ndf * 2)
        self.layer3 = down_block(ndf * 2, ndf * 4)
        self.layer4 = down_block(ndf * 4, ndf * 4)
        self.layer5 = down_block(ndf * 4, ndf * 4)
        self.linear = nn.Linear(in_features=256 * 8, out_features=1)

    def forward(self, x):
        out = x
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            out = stage(out)
        # Flatten to (N, 2048) before the scoring head.
        out = out.view(-1, 256 * 4 * 2)
        return self.linear(out)
class Discriminator(nn.Module):
    """Thin wrapper around Discriminator_ adding optional multi-GPU
    data parallelism. (`output_dimension` is accepted for interface
    compatibility but unused by the underlying model.)"""

    def __init__(self, input_channels, output_dimension=1, gpu_ids=None):
        super(Discriminator, self).__init__()
        self.model = Discriminator_(input_channels)
        self.gpu_ids = gpu_ids

    def forward(self, input):
        # Scatter across GPUs only when configured AND the batch already
        # lives on a CUDA device; otherwise run the model directly.
        use_parallel = bool(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor)
        if use_parallel:
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class Generator(nn.Module):
    """Thin wrapper around Generator_ adding optional multi-GPU
    data parallelism."""

    def __init__(self, input_dimension, output_channels, gpu_ids=None):
        super(Generator, self).__init__()
        self.model = Generator_(input_dimension, output_channels)
        self.gpu_ids = gpu_ids

    def forward(self, input):
        # Scatter across GPUs only when configured AND the batch already
        # lives on a CUDA device; otherwise run the model directly.
        use_parallel = bool(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor)
        if use_parallel:
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
if __name__ == '__main__':
    # Smoke test: push a batch of random latent vectors through the generator
    # and report the input/output shapes.
    net = Generator(
        input_dimension=100, output_channels=3
    )
    # BUGFIX: the original used Python 2 print statements (`print "...",`),
    # which are syntax errors under Python 3; converted to print().
    print("Input(=z) : ", torch.randn(128, 100).size())
    y = net(Variable(torch.randn(128, 100)))  # Input should be a 4D tensor
    print("Output(batchsize, channels, width, height) : ", y.size())
49,563 | mericadil/TextureGeneration | refs/heads/master | /metrics/inception_score.py | import glob
import os
import pickle
import re
from os import path as osp
import numpy as np
import torch
import tqdm
from scipy.stats import entropy
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision.models.inception import inception_v3
from dataset.market1501_pose_split_train import Market1501Dataset
from utils.data_loader import ImageData
class Market1501Dataset(object):
    """
    Market1501

    Reference:
    Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.
    URL: http://www.liangzheng.org/Project/project_reid.html

    Dataset statistics:
    # identities: 1501 (+1 for background)
    # images: 12936 (train) + 3368 (query) + 15913 (gallery)
    """
    # NOTE(review): this local class shadows the Market1501Dataset imported
    # above from dataset.market1501_pose_split_train -- confirm intentional.
    # Fixed locations of the pose data and the held-out pid list.
    pose_dataset_dir = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-pose/'
    pkl_path = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/saveForTest.pkl'

    def __init__(self, dataset_dir):
        """Index all images of ``dataset_dir`` whose pid appears in the
        pickled test-split list, relabelling pids to contiguous labels."""
        self.dataset_dir = dataset_dir
        print(self.pkl_path)
        self._check_before_run()
        train, num_train_pids, num_train_imgs = self._process_dir(self.dataset_dir, relabel=True,
                                                                  pkl_path=self.pkl_path)
        print("=> Market1501 loaded")
        print("Dataset statistics:")
        print("  ------------------------------")
        print("  subset   | # ids | # images")
        print("  ------------------------------")
        print("  train    | {:5d} | {:8d}".format(num_train_pids, num_train_imgs))
        print("  ------------------------------")
        print("  total    | {:5d} | {:8d}".format(num_train_pids, num_train_imgs))
        print("  ------------------------------")
        self.train = train
        self.num_train_pids = num_train_pids

    def _check_before_run(self):
        """Check if all files are available before going deeper"""
        if not osp.exists(self.dataset_dir):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))

    def _process_dir(self, dir_path, relabel=False, pkl_path=None):
        """Scan ``dir_path`` for Market1501-named jpgs and build the index.

        :return: (list of (img_path, '', pid, camid), num_pids, num_imgs)
        """
        # The pickle holds the set/list of pids retained for evaluation;
        # without it, no image passes the filter below.
        if pkl_path is not None:
            with open(pkl_path, 'rb') as f:
                saveForTest = pickle.load(f)
        else:
            saveForTest = []
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        # Market1501 filename convention: "<pid>_c<camid>..."
        pattern = re.compile(r'([-\d]+)_c(\d)')
        pid_container = set()
        for img_path in img_paths:
            # Apply int() to each captured regex group (pid, camid).
            pid, _ = map(int, pattern.search(img_path).groups())
            if pid == -1 or pid not in saveForTest:
                continue  # junk images are just ignored
            pid_container.add(pid)
        pid2label = {pid: label for label, pid in enumerate(pid_container)}
        dataset = []
        for img_path in img_paths:
            # NOTE(review): the magic offset 67 assumes a fixed absolute path
            # prefix for img_path -- verify against the data root. The
            # resulting img_name is computed but never used.
            img_name = img_path[67:]
            img_name = img_name[img_name.find('/') + 1:]
            pid, camid = map(int, pattern.search(img_path).groups())
            if pid == -1 or pid not in saveForTest:
                continue  # junk images are just ignored
            assert 0 <= pid <= 1501  # pid == 0 means background
            assert 1 <= camid <= 6
            camid -= 1  # index starts from 0
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, '', pid, camid))
        num_pids = len(pid_container)
        num_imgs = len(dataset)
        return dataset, num_pids, num_imgs
def inception_score(cuda=True, batch_size=128, resize=True, splits=5):
    """Computes the inception score of the generated images imgs

    imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]
    cuda -- whether or not to run on GPU
    batch_size -- batch size for feeding into Inception v3
    splits -- number of splits

    Returns a list of (model_dir_name, mean_score, std_score) tuples, one
    per evaluated model directory under the hard-coded ``root``.
    NOTE(review): ``batch_size`` is asserted but the DataLoader below uses a
    fixed batch_size=32, and images are moved with .cuda() regardless of the
    ``cuda`` flag -- confirm on a CPU-only host.
    """
    assert batch_size > 0
    if cuda:
        dtype = torch.cuda.FloatTensor
    else:
        if torch.cuda.is_available():
            print("WARNING: You have a CUDA device, so you should probably set cuda=True")
        dtype = torch.FloatTensor
    temp = []
    # Previously evaluated roots, kept for reference:
    # root = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-uvmap/'
    # root = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-textured-ssim'
    root = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-textured-ssim'
    for d in os.listdir(root):
        print('model', d)
        # Only this specific checkpoint directory is evaluated.
        if d != 'PCB_256_L12018-11-16_17:53:20.894085_epoch_120':
            continue
        p = os.path.join(root, d)
        dataset = Market1501Dataset(p)  # test
        dataloader = DataLoader(
            ImageData(dataset.train),
            batch_size=32, num_workers=2,
            pin_memory=True
        )
        # Load inception model
        inception_model = inception_v3(pretrained=True, transform_input=False).type(dtype)
        inception_model.eval()
        # Inception v3 expects 299x299 inputs; upsample when resize is set.
        up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)

        def get_pred(x):
            # Class-probability vector per image (softmax over logits).
            if resize:
                x = up(x)
            x = inception_model(x)
            return F.softmax(x).data.cpu().numpy()

        preds = []
        for i, batch in tqdm.tqdm(enumerate(dataloader, 0)):
            imgs, pids, _, _, _ = batch
            imgs = imgs.cuda()
            preds.append(get_pred(imgs))
        preds = np.concatenate(preds)
        # Now compute the mean kl-div: IS = exp(E[KL(p(y|x) || p(y))]),
        # averaged over ``splits`` disjoint chunks of the predictions.
        split_scores = []
        N = len(preds)
        print('len of preds', len(preds))
        for k in range(splits):
            part = preds[k * (N // splits): (k + 1) * (N // splits), :]
            py = np.mean(part, axis=0)
            scores = []
            for i in range(part.shape[0]):
                pyx = part[i, :]
                scores.append(entropy(pyx, py))
            split_scores.append(np.exp(np.mean(scores)))
        temp.append((d, np.mean(split_scores), np.std(split_scores)))
    return temp
# Run the score computation over 10 splits and print one
# (model_name, mean, std) tuple per evaluated model directory.
temp = inception_score(cuda=True, batch_size=128, resize=True, splits=10)
for i in temp:
    print(i)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,564 | mericadil/TextureGeneration | refs/heads/master | /misc/background_index_generator.py | import os
import numpy
# Roots of the background-image collections to index.
dir_list = [
    '/unsullied/sharefs/wangjian02/isilon-home/datasets/PRW/frames',
    '/unsullied/sharefs/wangjian02/isilon-home/datasets/CUHK-SYSU'
]
# Collect the absolute path of every .jpg under the listed roots,
# preserving os.walk traversal order.
result = [
    os.path.join(root, name)
    for dir_path in dir_list
    for root, dirs, files in os.walk(dir_path)
    for name in files
    if name.endswith('.jpg')
]
print('Found {} images'.format(len(result)))
# Persist the index for later random background sampling.
numpy.save('background_index', result)
49,565 | mericadil/TextureGeneration | refs/heads/master | /smpl/render_texture.py | # Create renderer
import chumpy as ch
import numpy as np
from opendr.renderer import TexturedRenderer, ColoredRenderer
# Assign attributes to renderer
from get_body_mesh import get_body_mesh
from opendr.camera import ProjectPoints
from smpl_webuser.serialization import load_model
import cv2
from scipy.sparse import csc_matrix
import scipy.sparse as sp
class Renderer:
    """Textured SMPL body renderer built on opendr/chumpy.

    Holds a body mesh (UV-mapped) and an SMPL parametric model; ``render``
    produces the textured image, a sparse derivative of the rendering
    w.r.t. the texture map, and a silhouette mask.
    """

    def __init__(self, obj_path, model_path, w=224, h=224):
        """
        :param obj_path: path to the UV-mapped body .obj mesh.
        :param model_path: path to the pickled SMPL model.
        :param w, h: output image size in pixels.
        """
        self.m = get_body_mesh(obj_path, trans=ch.array([0, 0, 4]), rotation=ch.array([np.pi / 2, 0, 0]))
        # Load SMPL model (here we load the female model)
        self.body = load_model(model_path)
        self.w = w
        self.h = h
        self.img_size = min(self.w, self.h)
        # Parameter vector layout: 3 camera + 72 pose + 10 shape values.
        self.num_cam = 3
        self.num_theta = 72
        self.num_beta = 10

    def set_texture(self, img_bgr):
        """
        set the texture image for the human body
        :param img_bgr: image should be bgr format
        :return: the mesh with its texture_image updated (values in [0, 1])
        """
        # sz = np.sqrt(np.prod(img_bgr.shape[:2]))
        # sz = int(np.round(2 ** np.ceil(np.log(sz) / np.log(2))))
        self.m.texture_image = img_bgr.astype(np.float64) / 255.
        return self.m

    def render(self, thetas, texture_bgr, rotate=np.array([0, 0, 0]), background_img=None):
        """
        get the rendered image and rendered silhouette
        :param thetas: model parameters, 3 * camera parameter + 72 * body pose + 10 * body shape
        :param texture_bgr: texture image in bgr format
        :param rotate: extrinsic camera rotation (Rodrigues vector).
        :param background_img: optional image composited behind the body.
        :return: the rendered image and deviation of rendered image to texture image
        (rendered image, deviation of rendered image, silhouette)
        """
        self.set_texture(texture_bgr)
        # Split the flat parameter vector into camera / pose / shape parts.
        thetas = thetas.reshape(-1)
        cams = thetas[:self.num_cam]
        theta = thetas[self.num_cam: (self.num_cam + self.num_theta)]
        beta = thetas[(self.num_cam + self.num_theta):]
        self.body.pose[:] = theta
        self.body.betas[:] = beta
        #
        # size = cams[0] * min(self.w, self.h)
        # position = cams[1:3] * min(self.w, self.h) / 2 + min(self.w, self.h) / 2
        """
        ####################################################################
        ATTENTION!
        I do not know why the flength is 500.
        But it worked
        ####################################################################
        """
        # Textured pass: renders the body with the current texture map.
        texture_rn = TexturedRenderer()
        texture_rn.camera = ProjectPoints(v=self.body, rt=rotate, t=ch.array([0, 0, 2]),
                                          f=np.ones(2) * self.img_size * 0.62,
                                          c=np.array([self.w / 2, self.h / 2]),
                                          k=ch.zeros(5))
        texture_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
        texture_rn.set(v=self.body, f=self.m.f, vc=self.m.vc, texture_image=self.m.texture_image, ft=self.m.ft,
                       vt=self.m.vt)
        if background_img is not None:
            # Accept either [0, 255] or [0, 1] backgrounds.
            texture_rn.background_image = background_img / 255. if background_img.max() > 1 else background_img
        # Silhouette pass: same camera, flat white body on black background.
        silhouette_rn = ColoredRenderer()
        silhouette_rn.camera = ProjectPoints(v=self.body, rt=rotate, t=ch.array([0, 0, 2]),
                                             f=np.ones(2) * self.img_size * 0.62,
                                             c=np.array([self.w / 2, self.h / 2]),
                                             k=ch.zeros(5))
        silhouette_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
        silhouette_rn.set(v=self.body, f=self.m.f, vc=np.ones_like(self.body), bgcolor=np.zeros(3))
        return texture_rn.r, texture_dr_wrt(texture_rn, silhouette_rn.r), silhouette_rn.r
def texture_dr_wrt(texture_rn, clr_im):
    """
    Change original texture dr_wrt.

    Builds a sparse linear map from flattened texture-image pixels to
    flattened rendered-image pixels, restricted to the rendered silhouette
    (avoids holes), with the per-pixel channel order flipped from rgb to bgr.

    :param texture_rn: textured renderer exposing texcoord_image_quantized,
                       r, and texture_image.r.
    :param clr_im: HxWx3 silhouette rendering (non-zero inside the body).
    :return: scipy csc_matrix of shape (render size, texture size).
    """
    # Rendered pixels covered by the body (non-zero red channel).
    pixel_idx = np.nonzero(clr_im[:, :, 0].ravel() != 0)[0]
    # Quantized texel index each covered pixel samples from.
    texel_idx = texture_rn.texcoord_image_quantized.ravel()[pixel_idx]

    def channel(c):
        return clr_im[:, :, c].ravel()[pixel_idx]

    # Weights laid out b, g, r to pair with the row offsets below.
    data = np.concatenate((channel(2), channel(1), channel(0)))
    rows = np.concatenate((pixel_idx * 3, pixel_idx * 3 + 1, pixel_idx * 3 + 2))
    cols = np.concatenate((texel_idx * 3, texel_idx * 3 + 1, texel_idx * 3 + 2))
    return sp.csc_matrix((data, (rows, cols)),
                         shape=(texture_rn.r.size, texture_rn.texture_image.r.size))
def bbox(img):
    """Bounding box of the nonzero region of a 2-D array.

    NOTE: the return order follows the original convention — the first pair
    comes from collapsing axis 0 (column extents), the second from axis 1
    (row extents); callers pass them to cv2 as (x, y) coordinates.
    """
    occupied_cols = np.where(np.any(img, axis=0))[0]
    occupied_rows = np.where(np.any(img, axis=1))[0]
    return occupied_cols[0], occupied_cols[-1], occupied_rows[0], occupied_rows[-1]
if __name__ == '__main__':
    # Smoke test: render the SMPL body with two different uv textures and
    # compare them side by side in cv2 windows.
    renderer = Renderer('models/body.obj', 'models/neutral.pkl', w=224, h=224)
    # 85-dim parameter vector; first entries look like camera/pose params
    # (thetas[3] = pi flips the body upright) — TODO confirm layout against
    # Renderer.render, which is defined above this chunk.
    thetas = np.zeros(85)
    thetas[0:3] = 112
    thetas[3] = np.pi
    texture_bgr = cv2.imread('/home/wangjian02/Projects/TextureGAN/tmp/test_img/out_uv_prw/pede.png')
    texture_bgr = cv2.resize(texture_bgr, dsize=(64, 64), interpolation=cv2.INTER_LINEAR)
    rn, deviation, silhouette = renderer.render(thetas, texture_bgr, rotate=np.array([0, 0, 0]))
    # # Show it
    # renderer output is float rgb in [0, 1]; convert to uint8 bgr for cv2
    rn = (rn * 255.).astype(np.uint8)
    rn = cv2.cvtColor(rn, code=cv2.COLOR_RGB2BGR)
    texture_bgr = cv2.imread('/home/wangjian02/Projects/TextureGAN/tmp/video_avatar/tex-female-1-casual.jpg')
    texture_bgr = cv2.resize(texture_bgr, dsize=(64, 64), interpolation=cv2.INTER_LINEAR)
    compare, deviation, silhouette = renderer.render(thetas, texture_bgr, rotate=np.array([0, 0, 0]))
    # # Show it
    compare = (compare * 255.).astype(np.uint8)
    compare = cv2.cvtColor(compare, code=cv2.COLOR_RGB2BGR)
    cv2.imshow('rn1', compare)
    cv2.waitKey(0)
    # cv2.destroyWindow('rn1')
    cv2.imshow('rn2', rn)
    cv2.waitKey(0)
    # optional extra checks: silhouette display and reconstruction of the
    # rendering from the sparse derivative matrix alone
    render_other = False
    if render_other:
        # show silhouette
        silhouette = (silhouette * 255.).astype(np.uint8)
        silhouette = cv2.cvtColor(silhouette, code=cv2.COLOR_RGB2BGR)
        cv2.imshow('silhouette', silhouette)
        cv2.waitKey()
        rmin, rmax, cmin, cmax = bbox(silhouette[:, :, 0])
        texture_bgr = texture_bgr.reshape(-1)
        # deviation is linear in the texture, so a matrix-vector product
        # reproduces the rendered pixels
        new_rendered = deviation.dot(texture_bgr.T)
        # new_rendered = new_rendered.toarray()
        new_rendered = np.reshape(new_rendered, [224, 224, 3]).astype(np.uint8)
        new_rendered = cv2.rectangle(new_rendered, (rmin, cmin), (rmax, cmax), color=(0, 0, 255), thickness=2)
        # new_rendered = cv2.inpaint(new_rendered, )
        # new_rendered = cv2.resize(new_rendered, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
        cv2.imshow('new', new_rendered)
        cv2.waitKey()
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,566 | mericadil/TextureGeneration | refs/heads/master | /models/baseline_model.py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: xyliao1993@qq.com
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import torch.nn.functional as F
from torch import nn
from .resnet import ResNet
def weights_init_kaiming(m):
    """Kaiming-style initialization, dispatched on the layer's class name.

    Linear: fan-out kaiming normal weight, zero bias.
    Conv:   fan-in kaiming normal weight, zero bias when present.
    BatchNorm (affine only): weight ~ N(1.0, 0.02), zero bias.
    """
    layer_name = type(m).__name__
    if 'Linear' in layer_name:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        nn.init.constant_(m.bias, 0.0)
    elif 'Conv' in layer_name:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif 'BatchNorm' in layer_name:
        if m.affine:
            nn.init.normal_(m.weight, 1.0, 0.02)
            nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
    """Classifier-head init: small-std normal weight, zero bias (Linear only)."""
    if 'Linear' in type(m).__name__:
        nn.init.normal_(m.weight, std=0.001)
        nn.init.constant_(m.bias, 0.0)
class ResNetBuilder(nn.Module):
    """ResNet-50 backbone with an optional bottleneck + softmax classifier head.

    With ``num_classes`` set and in training mode, ``forward`` returns
    ``(cls_score, global_feat)``; otherwise it returns the (optionally
    L2-normalized) global feature alone.
    """

    in_planes = 2048  # channel count of the backbone's final feature map

    def __init__(self, num_classes=None, last_stride=1, eval_norm=1, model_path=None):
        super().__init__()
        self.base = ResNet(last_stride)
        self.eval_norm = eval_norm
        if self.eval_norm == 1:
            print('Eval normalize before feature!!')
        else:
            print('Without eval normalize before feature!!')
        # load pretrained weights when given, otherwise kaiming-init
        if model_path is None:
            print('Use kaiming initialize!!')
            self.base.apply(weights_init_kaiming)
            # raise ValueError('ResNet Builder must input a pretrained model path')
        else:
            print('Use pretrained model initialize!!')
            self.base.load_param(model_path)
        self.num_classes = num_classes
        if num_classes is not None:
            # 2048 -> 512 embedding feeding the id classifier
            self.bottleneck = nn.Sequential(
                nn.Linear(self.in_planes, 512),
                nn.BatchNorm1d(512),
                nn.LeakyReLU(0.1),
                nn.Dropout(p=0.5)
            )
            self.bottleneck.apply(weights_init_kaiming)
            self.classifier = nn.Linear(512, self.num_classes)
            self.classifier.apply(weights_init_classifier)

    def forward(self, x):
        feat_map = self.base(x)
        pooled = F.avg_pool2d(feat_map, feat_map.shape[2:])  # (b, 2048, 1, 1)
        global_feat = pooled.view(pooled.shape[0], -1)
        if not (self.training and self.num_classes is not None):
            # inference path: feature only, normalized when requested
            if self.eval_norm == 1:
                global_feat = F.normalize(global_feat)  # unit-length feature
            return global_feat
        cls_score = self.classifier(self.bottleneck(global_feat))
        return cls_score, global_feat

    def get_optim_policy(self):
        """Parameter groups: backbone first, then (if present) the head."""
        groups = [{'params': self.base.parameters()}]
        if self.num_classes is not None:
            head_params = itertools.chain(self.bottleneck.parameters(),
                                          self.classifier.parameters())
            groups.append({'params': head_params})
        return groups
if __name__ == '__main__':
    # Manual smoke test: forward a dummy batch on GPU, then drop into IPython
    # to inspect the output (requires CUDA and IPython).
    net = ResNetBuilder(None)
    net.cuda()
    import torch as th
    x = th.ones(2, 3, 256, 128).cuda()
    y = net(x)
    from IPython import embed
    embed()
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,567 | mericadil/TextureGeneration | refs/heads/master | /demo.py | from NMR.neural_render_test import NrTextureRenderer
import torch
import cv2
import argparse
import numpy as np
import os
import pickle
class Demo:
    """Wraps a trained texture-generation model loaded from disk."""

    def __init__(self, model_path):
        print(model_path)
        # the checkpoint stores the whole serialized module, not a state_dict
        self.model = torch.load(model_path, map_location='cpu')
        self.model.eval()

    def generate_texture(self, img_path):
        """Run the model on one person image and return a CHW uint8 uv map.

        :param img_path: path to an input image readable by cv2
        :return: (3, 64, 64) uint8 texture in bgr channel order
        """
        img = cv2.imread(img_path)
        img = cv2.resize(img, (64, 128))
        # normalize uint8 [0, 255] to [-1, 1]
        # BUGFIX: the original divided by 225, skewing the input range the
        # network was trained on; 255 is the correct scale.
        img = (img / 255. - 0.5) * 2.0
        img = torch.from_numpy(img).permute(2, 0, 1).float().unsqueeze(0)
        out = self.model(img)
        out = out.cpu().detach().numpy()[0]
        out = out.transpose((1, 2, 0))
        # map model output from [-1, 1] back to [0, 255]
        out = (out / 2.0 + 0.5) * 255.
        out = out.astype(np.uint8)
        out = cv2.resize(out, dsize=(64, 64))
        # changed again (HWC -> CHW) to feed the renderer
        out = out.transpose((2, 0, 1))
        return out
if __name__ == '__main__':
    # CLI: generate a uv texture for every image in --img, render it onto the
    # per-image SMPL body (verts + camera loaded from pickles), write to --out.
    parser = argparse.ArgumentParser(description='Show generated image')
    parser.add_argument('--gpu', '-g')
    parser.add_argument('--img', '-i')
    parser.add_argument('--model', '-m', default='model_path')
    parser.add_argument('--out', '-o', default=None)
    # add smpl_dir to read pickle file for verts and cam params
    parser.add_argument('--dir', '-d', default='/auto/k2/adundar/3DSynthesis/data/texformer/datasets/SMPLMarket')
    args = parser.parse_args()
    img_path = args.img
    out_path = args.out
    model_path = args.model
    smpl_data_dir = args.dir
    renderer = NrTextureRenderer(render_res=128, device='cuda:0')
    torch.nn.Module.dump_patches = True
    demo = Demo(model_path)
    smpl_dir = os.path.join(smpl_data_dir, 'SMPL_RSC', 'pkl')
    print(img_path)
    # NOTE(review): `dir` shadows the builtin; also the join below uses
    # img_path rather than root, so this assumes a flat directory with no
    # nested subfolders — confirm before pointing it at a tree.
    for root, dir, names in os.walk(img_path):
        for name in names:
            full_path = os.path.join(img_path, name)
            print('executing: ', full_path)
            uvmap = torch.from_numpy(demo.generate_texture(img_path=full_path)).to('cuda:0').float()
            # Add batch size
            uvmap = torch.unsqueeze(uvmap, 0)
            print("*********************************************")
            print(uvmap.shape)
            print('finish: ', os.path.join(out_path, name))
            # SMPL pickle shares the image's basename: [verts, cam_t]
            pkl_path = os.path.join(smpl_dir, name[:-4]+'.pkl')
            print(pkl_path)
            with open(pkl_path, 'rb') as f:
                smpl_list = pickle.load(f)
            verts = torch.from_numpy(smpl_list[0])
            verts = verts.view(1, -1, 3)
            # Verts dimension debugging
            print(verts)
            print(verts.shape)
            verts = verts.to('cuda:0')
            print(verts.ndimension())
            cam_t = torch.from_numpy(smpl_list[1])
            cam_t = torch.unsqueeze(cam_t, 0)
            cam_t = cam_t.to('cuda:0')
            rendered_img, depth, mask = renderer.render(verts, cam_t, uvmap)
            rendered_img = rendered_img.squeeze(0).cpu().numpy()
            rendered_img = rendered_img.transpose((1, 2, 0))
            print(rendered_img.shape)
            cv2.imwrite(os.path.join(out_path, name), rendered_img)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,568 | mericadil/TextureGeneration | refs/heads/master | /utils/data_loader.py | from __future__ import print_function, absolute_import
from dataset.data_utils import ToTensor, Resize
import cv2
from torch.utils.data import Dataset
import os
import numpy as np
def read_image(img_path):
    """Keep reading image until succeed.

    This can avoid IOError incurred by heavy IO process.
    BUGFIX: cv2.imread does not raise on failure — it returns None — so the
    original ``except IOError`` never fired and a bad read crashed inside
    cv2.resize instead. A None result now triggers a retry as well.
    """
    got_img = False
    img = None
    while not got_img:
        try:
            # do not change rgb for now!
            img = cv2.imread(img_path)
            if img is None:
                print("Failed to decode '{}'. Will redo. Don't worry. Just chill.".format(img_path))
                continue
            img = cv2.resize(img, (64, 128))
            got_img = True
        except IOError:
            print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
            pass
    return img
def read_deepfashion_image(img_path):
    """Keep reading image until succeed.

    This can avoid IOError incurred by heavy IO process.
    BUGFIX: cv2.imread returns None on failure instead of raising IOError,
    so the retry loop never fired; retry on None as well.
    """
    got_img = False
    img = None
    while not got_img:
        try:
            img = cv2.imread(img_path)
            if img is None:
                print("Failed to decode '{}'. Will redo. Don't worry. Just chill.".format(img_path))
                continue
            # for deepfashion dataset: crop 40px from left/right, then resize
            img = np.array(img)
            img = img[:, 40:-40, :]
            img = cv2.resize(img, (64, 128))
            got_img = True
        except IOError:
            print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
            pass
    return img
def read_mask(mask_path):
    """Load a .npy body mask, resize to 64x128 and triplicate the channel.

    Fixes two defects:
    * ``np.float`` was removed in NumPy 1.24 — use the builtin ``float``.
    * ``cv2.resize(mask, (64, 128), cv2.INTER_NEAREST)`` passed the
      interpolation flag into the positional ``dst`` slot; it must be passed
      as the ``interpolation`` keyword to take effect.
    """
    got_mask = False
    mask = None
    while not got_mask:
        try:
            mask = np.load(mask_path)
            # for deepfashion dataset
            mask = mask.astype(float)
            mask = cv2.resize(mask, (64, 128), interpolation=cv2.INTER_NEAREST)
            mask = np.expand_dims(mask, axis=2)
            mask = np.c_[mask, mask, mask]
            got_mask = True
        except IOError:
            print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(mask_path))
            pass
    return mask
class ImageData(Dataset):
    """Dataset over (img_path, pose_path, pid, camid) tuples.

    Yields (tensor image, pose_path, pid, camid, img_path).
    """

    def __init__(self, dataset):
        self.dataset = dataset
        self.normalize = True
        self.to_tensor = ToTensor(normalize=self.normalize)

    def __getitem__(self, item):
        img_path, pose_path, pid, camid = self.dataset[item]
        tensor_img = self.to_tensor(read_image(img_path))
        return tensor_img, pose_path, pid, camid, img_path

    def __len__(self):
        return len(self.dataset)
class ImageData_deepfashoin_addmask(Dataset):
    """DeepFashion dataset yielding (img, mask, pose_path, pid, camid, paths).

    Each entry of ``dataset`` is (img_path, mask_path, pose_path, pid, camid).
    """

    def __init__(self, dataset, transform=None):
        self.normalize = True
        self.to_tensor = ToTensor(normalize=self.normalize)
        self.dataset = dataset
        self.transform = transform

    def __getitem__(self, item):
        # BUGFIX: sys was never imported at module level, so the guard exits
        # below raised NameError instead of terminating; import locally.
        import sys
        img_path, mask_path, pose_path, pid, camid = self.dataset[item]
        # guard against path-list corruption (a directory where a file is
        # expected); abort loudly rather than feeding garbage downstream
        if os.path.isdir(img_path):
            print('img_path', img_path)
            sys.exit(0)
        if os.path.isdir(mask_path):
            print('mask_path', mask_path)
            sys.exit(0)
        img = read_deepfashion_image(img_path)
        mask = read_mask(mask_path)
        if self.transform is not None:
            img, mask = self.transform(img, mask)
        img = self.to_tensor(img)
        # HWC -> CHW to match the image tensor layout
        mask = mask.transpose((2, 0, 1))
        return img, mask, pose_path, pid, camid, img_path, mask_path

    def __len__(self):
        return len(self.dataset)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,569 | mericadil/TextureGeneration | refs/heads/master | /loss/PCB_AllCat.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
import os
from .resnet_market1501 import resnet50
import sys
# ReID Loss
# ReID perceptual/feature loss built on a pretrained PCB ResNet-50.
class ReIDLoss(nn.Module):
    """Frozen (by default) re-id network used as a loss: compares PCB features
    and intermediate layer features of generated vs. real images.

    NOTE(review): returns CUDA tensors unconditionally in forward(), so a GPU
    is required at loss time even when gpu_ids is None — confirm intended.
    """

    def __init__(self, model_path, num_classes=1501, size=(384, 128), gpu_ids=None, margin=0.3,is_trainable=False, w = [1,1,1,1]):
        super(ReIDLoss, self).__init__()
        self.size = size
        self.gpu_ids = gpu_ids
        model_structure = resnet50(num_features=256, dropout=0.5, num_classes=num_classes, cut_at_pooling=False,
                                   FCN=True)
        # if gpu_ids is not None:
        #     model_structure = nn.DataParallel(model_structure, device_ids=gpu_ids)
        # load checkpoint (to CPU when no gpu ids were given)
        if self.gpu_ids is None:
            checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
        else:
            checkpoint = torch.load(model_path)
        # keep only checkpoint entries whose keys exist in this architecture
        model_dict = model_structure.state_dict()
        checkpoint_load = {k: v for k, v in (checkpoint['state_dict']).items() if k in model_dict}
        model_dict.update(checkpoint_load)
        model_structure.load_state_dict(model_dict)
        self.model = model_structure
        self.model.eval()
        # per-layer weights for the perceptual loss terms
        self.w = w
        print('weight',w)
        if gpu_ids is not None:
            self.model.cuda()
        # freeze (or unfreeze) the whole re-id network
        self.is_trainable = is_trainable
        for param in self.model.parameters():
            param.requires_grad = self.is_trainable
        self.triple_feature_loss = nn.L1Loss()
        self.MSELoss = nn.MSELoss()
        # ImageNet normalization constants, broadcast to image shape
        self.normalize_mean = torch.Tensor([0.485, 0.456, 0.406])
        self.normalize_mean = self.normalize_mean.expand(384, 128, 3).permute(2, 0, 1)  # rearrange to channels-first
        self.normalize_std = torch.Tensor([0.229, 0.224, 0.225])
        self.normalize_std = self.normalize_std.expand(384, 128, 3).permute(2, 0, 1)  # rearrange to channels-first
        if gpu_ids is not None:
            self.normalize_std = self.normalize_std.cuda()
            self.normalize_mean = self.normalize_mean.cuda()

    def extract_feature(self, inputs):
        """Return ((layer1..layer4 features), concatenated PCB feature).

        NOTE(review): first return value is the tuple of intermediate layer
        activations, second is the 2048*6+256*6 PCB feature — callers in
        forward() bind these to names that suggest the opposite.
        """
        # 2048*6+256*6
        out = self.model(inputs)
        # L2-normalize each PCB output head per sample
        o2048 = out[0].view(out[0].size(0), -1)
        o2048 = o2048 / o2048.norm(2, 1, keepdim=True).expand_as(o2048)
        o256 = out[2].view(out[2].size(0), -1)
        o256 = o256 / o256.norm(2, 1, keepdim=True).expand_as(o256)
        # re-run the backbone children to capture intermediate activations;
        # relies on named_children() iteration order up to layer4
        for n, m in self.model.base.named_children():
            inputs = m.forward(inputs)
            if n == 'layer1':
                o1 = inputs
            elif n == 'layer2':
                o2 = inputs
            elif n == 'layer3':
                o3 = inputs
            elif n == 'layer4':
                o4 = inputs
                break
        o1 = o1.view(o1.size(0),-1)
        o1 = o1 / o1.norm(2, 1, keepdim=True).expand_as(o1)
        o2 = o2.view(o2.size(0),-1)
        o2 = o2 / o2.norm(2, 1, keepdim=True).expand_as(o2)
        o3 = o3.view(o3.size(0),-1)
        o3 = o3 / o3.norm(2, 1, keepdim=True).expand_as(o3)
        o4 = o4.view(o4.size(0),-1)
        o4 = o4 / o4.norm(2, 1, keepdim=True).expand_as(o4)
        feature_tri = torch.cat((o2048,o256),dim=1)
        '''
        z = torch.cat((o1,o2,o3,o4,o2048,o256),dim=1)
        o1 = o1 / z.norm(2, 1, keepdim=True).expand_as(o1)
        o2 = o2 / z.norm(2, 1, keepdim=True).expand_as(o2)
        o3 = o3 / z.norm(2, 1, keepdim=True).expand_as(o3)
        o4 = o4 / z.norm(2, 1, keepdim=True).expand_as(o4)
        o2048 = o2048 / z.norm(2, 1, keepdim=True).expand_as(o2048)
        o256 = o256 / z.norm(2, 1, keepdim=True).expand_as(o256)
        '''
        return (o1,o2,o3,o4),feature_tri

    def preprocess(self, data):
        """
        The input image is normalized in [-1, 1] and in bgr format; convert it
        to the rgb, ImageNet-normalized format accepted by the re-id model.
        :param data: NCHW tensor in [-1, 1], bgr channel order
        :return: NCHW tensor resized to self.size, ImageNet-normalized rgb
        """
        data_unnorm = data / 2.0 + 0.5
        # bgr -> rgb channel flip
        permute = [2, 1, 0]
        data_rgb_unnorm = data_unnorm[:, permute]
        data_rgb_unnorm = F.upsample(data_rgb_unnorm, size=self.size, mode='bilinear')
        data_rgb = (data_rgb_unnorm - self.normalize_mean) / self.normalize_std
        return data_rgb

    # label is the original (real) image
    # data is the generated image
    # targets are the pids
    def forward(self, data, label, targets):
        """Return a 6-tuple of losses; only positions 0 (PCB L1) and 4
        (weighted perceptual MSE) are non-zero, the rest are placeholders."""
        assert label.requires_grad is False
        data = self.preprocess(data)
        label = self.preprocess(label)
        # NOTE(review): names are swapped relative to extract_feature's
        # return order — feature_tri_* holds the layer tuple, PCB_feat_* the
        # concatenated PCB feature. Behavior is consistent below.
        feature_tri_data, PCB_feat_data = self.extract_feature(data)
        feature_tri_label, PCB_feat_label = self.extract_feature(label)
        # avoid bugs
        '''
        for n, k in self.model.base.named_children():
            print(n)
            if n == 'avgpool':
                break
        print(self.model.state_dict()['base']['conv1'])
        sys.exit(0)
        '''
        # weighted MSE over the four intermediate layer features
        perceptual_loss = self.w[0] * self.MSELoss(feature_tri_data[0],feature_tri_label[0]) + \
                          self.w[1] * self.MSELoss(feature_tri_data[1],feature_tri_label[1]) + \
                          self.w[2] * self.MSELoss(feature_tri_data[2],feature_tri_label[2]) + \
                          self.w[3] * self.MSELoss(feature_tri_data[3],feature_tri_label[3])
        return self.triple_feature_loss(PCB_feat_data, PCB_feat_label),\
               torch.Tensor([0]).cuda(),\
               torch.Tensor([0]).cuda(),\
               torch.Tensor([0]).cuda(),\
               perceptual_loss,\
               torch.Tensor([0]).cuda()
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,570 | mericadil/TextureGeneration | refs/heads/master | /config.py | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl import flags
# ------------------------------modify this to your own data path--------------------------------------------
# path of pretrained re-id weight network
flags.DEFINE_string('reid_weight_path',
                    '/home/wangj/Models/TextureGeneration/reid_models/checkpoint_120.pth.tar',
                    'weight path for reid')
flags.DEFINE_string('market1501_dir',
                    '/home/wangj/Datasets/market1501',
                    'directory of market1501 dataset')
flags.DEFINE_string('surreal_texture_path',
                    '/home/wangj/Datasets/SURREAL/smpl_data/textures',
                    'surreal texture dataset')
flags.DEFINE_string('CUHK_SYSU_path',
                    '/home/wangj/Datasets/CUHK-SYSU',
                    'CUHK SYSU dataset')
flags.DEFINE_string('PRW_img_path',
                    '/home/wangj/Datasets/PRW/frames',
                    'prw dataset raw frame path')
flags.DEFINE_string('market1501_render_tensor_dir',
                    '/home/wangj/Datasets/Texture/market1501_rendering_matrix_new',
                    'directory of rendering tensor of market1501')
# -----------------------finish setting dataset path---------------------------------------------------------
# -----------------------Start Setting Model Logging Path------------------------------------------------
flags.DEFINE_string('model_log_path', '/home/wangj/Models/TextureGeneration/model_log',
                    'model save path')
flags.DEFINE_string('runs_log_path', '/home/wangj/Models/TextureGeneration/runs_log',
                    'run log save path')
# -----------------------Finish Setting Model Logging Path-----------------------------------
# ---------------------------training parameters-------------------------------------------------------------
flags.DEFINE_integer('num_instance', 4, 'num_instance')
flags.DEFINE_integer('epoch', 120, 'train epoch num')
flags.DEFINE_integer('batch_size', 16, 'Input batch size after pre-processing')
flags.DEFINE_float('learning_rate', 1e-4, 'generator learning rate')
flags.DEFINE_float('weight_decay', 1e-5, 'weight decay')
flags.DEFINE_integer('log_step', 2000, 'log step')
flags.DEFINE_integer('runs_log_step', 10, 'runs log step')
flags.DEFINE_integer('eval_step', 10000, 'eval step')
flags.DEFINE_integer('worker_num', 4, 'number of data loader workers')
flags.DEFINE_integer('gpu_nums', 1, 'gpu ids')
flags.DEFINE_string('pretrained_model_path', None, "use the pre_trained model on the generated data to do fine tune")
flags.DEFINE_string('log_name', '', 'define the log name, convenient for recognizing the model and run log')
flags.DEFINE_string('model', 'unet', 'use which model')
flags.DEFINE_integer('num_classes', 86642, 'num of classes of reid model')
flags.DEFINE_string('reid_model', 'market1501', 'use which reid model')
# loss weights (a weight of 0 disables the corresponding loss term)
flags.DEFINE_float('reid_triplet_loss_weight', 0, 'weight of triplet feature reid loss')
flags.DEFINE_float('reid_softmax_loss_weight', 0, 'weight of softmax feature reid loss')
flags.DEFINE_float('face_loss_weight', 1.0, 'weight of face loss')
flags.DEFINE_float('perceptual_loss_weight', 5000, 'weight of perceptual loss')
flags.DEFINE_float('reid_triplet_hard_loss_weight', 0.0, 'weight of triplet hard reid loss')
flags.DEFINE_float('reid_triplet_loss_not_feature_weight', 0, 'weight of triplet reid loss')
flags.DEFINE_float('uvmap_intern_loss_weight', 0, 'weight of uvmap intern loss')
flags.DEFINE_float('fake_and_true_loss_weight', 0, 'weight of fake and true loss')
flags.DEFINE_float('margin', 0.3, 'margin for triplet hard loss')
flags.DEFINE_integer('texture_size', 64, 'size of generated texture')
flags.DEFINE_integer('epoch_now', 0, 'epoch start num')
flags.DEFINE_integer('layer', 5, 'which layer\'s feature')
flags.DEFINE_integer('triplet', 1, 'use triplet or not')
flags.DEFINE_bool('use_real_background', True, 'whether use real background or no background')
# Newly added for texformer comparison
ra_body_path = '/auto/k2/adundar/3DSynthesis/data/texformer/meta/ra_body.pkl'
VERTEX_TEXTURE_FILE = '/auto/k2/adundar/3DSynthesis/data/texformer/meta/vertex_texture.npy'
cube_parts_path = '/auto/k2/adundar/3DSynthesis/data/texformer/meta/cube_parts_12.npy'
# -------------------------------------finish training parameters----------------------------------------
# SMPL model assets and rendering constants
SMPL_OBJ = 'smpl/models/body.obj'
SMPL_MODEL = 'smpl/models/neutral.pkl'
IMG_SIZE = 224
TRANS_MAX = 20  # value of jitter translation
SCALE_MAX = 1.23  # Max value of scale jitter
SCALE_MIN = 0.8  # Min value of scale jitter
INPUT_DIM = 3  # input dim, always 3
OUTPUT_DIM = 3  # output dim, always 3
# define train super parameters
flags.DEFINE_integer('h', 128, 'image height')
flags.DEFINE_integer('w', 64, 'image width')
flags.DEFINE_integer('z_size', 256, 'size of random z')
def get_config():
    """Parse ``sys.argv`` through absl flags and return the FLAGS object."""
    parsed = flags.FLAGS
    parsed(sys.argv)
    return parsed
if __name__ == '__main__':
    # smoke test: parse flags and print one value
    config = get_config()
    print(config.worker_num)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,571 | mericadil/TextureGeneration | refs/heads/master | /dataset/deprecated/market1501_filename.py | import os
import cv2
import numpy as np
from torch.utils.data import Dataset
from dataset.data_utils import ToTensor, RandomCrop, RandomFlip, Resize
# 读图和读文件名(包含id)
# Reads images and file names (paths encode the person id).
class Market1501Dataset(Dataset):
    """Flat image dataset: walks every root in ``data_path_list`` for .jpg
    files and yields (path, tensor image) pairs."""

    def __init__(self, data_path_list, normalize=True):
        self.data_path_list = data_path_list
        self.normalize = normalize
        self.to_tensor = ToTensor(normalize=self.normalize)
        self.data = []
        self.generate_index()
        self.random_flip = RandomFlip(flip_prob=0.5)

    def __getitem__(self, index):
        texture_img_path = self.data[index]
        texture_img = cv2.imread(texture_img_path)
        # unreadable/degenerate file: retry with a random other sample
        if texture_img is None or texture_img.shape[0] <= 0 or texture_img.shape[1] <= 0:
            return self.__getitem__(np.random.randint(0, self.__len__()))
        texture_img = self.random_flip(texture_img)
        texture_img = self.to_tensor(texture_img)
        return texture_img_path, texture_img

    def __len__(self):
        return len(self.data)

    def generate_index(self):
        """Collect the path of every .jpg under each configured root."""
        print('generating market 1501 index')
        for data_path in self.data_path_list:
            for root, dirs, files in os.walk(data_path):
                self.data.extend(os.path.join(root, name)
                                 for name in files if name.endswith('.jpg'))
        print('finish generating market 1501 index, found texture image: {}'.format(len(self.data)))
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,572 | mericadil/TextureGeneration | refs/heads/master | /misc/noface_after_process.py | import os
from utils.body_part_mask import TextureMask
import cv2
import numpy as np
texture_mask = TextureMask(size=64)
face_mask = texture_mask.get_numpy_mask('face')
hand_mask = texture_mask.get_numpy_mask('hand')
mask = face_mask + hand_mask
uv_map_path = '/home/wangjian02/Projects/TextureGAN/tmp/test_img/uv_no_face'
out_path = '/home/wangjian02/Projects/TextureGAN/tmp/test_img/uv_no_face_process'
gt_path = '/home/wangjian02/Projects/TextureGAN/models/nongrey_male_0002.jpg'
gt_img = cv2.imread(gt_path)
gt_img = cv2.resize(gt_img, dsize=(64, 64))
if not os.path.exists(out_path):
os.mkdir(out_path)
for root, dir, names in os.walk(uv_map_path):
for name in names:
full_path = os.path.join(root, name)
print(full_path)
texture_img = cv2.imread(full_path)
texture_img = cv2.resize(texture_img, (64, 64))
new_img = texture_img * (1 - mask) + gt_img * mask
new_img = new_img.astype(np.uint8)
cv2.imwrite(os.path.join(out_path, name), new_img)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,573 | mericadil/TextureGeneration | refs/heads/master | /metrics/ssim_score.py | from PIL import Image
from torch.utils.data import Dataset
import glob
import re
from os import path as osp
import numpy as np
import pdb
import os
import cv2
from pytorch_ssim_master import pytorch_ssim
import torch
from torch.autograd import Variable
import tqdm
from multiprocessing import Pool
def process_dir(dir_path, relabel=False):
    """Index a Market1501-style directory of ``<pid>_c<cam>...jpg`` images.

    :param dir_path: directory containing the .jpg files
    :param relabel: when True, map raw pids to contiguous labels from 0
    :return: (dataset, num_pids, num_imgs) where dataset holds
        (img_path, pid, camid) tuples with camid shifted to start at 0
    """
    img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
    pattern = re.compile(r'([-\d]+)_c(\d)')

    # first pass: collect the distinct person ids
    pid_container = set()
    for img_path in img_paths:
        pid, _ = map(int, pattern.search(img_path).groups())
        if pid == -1:
            continue  # junk images are just ignored
        pid_container.add(pid)
    pid2label = {pid: label for label, pid in enumerate(pid_container)}

    # second pass: build the (path, pid, camid) tuples
    dataset = []
    for img_path in img_paths:
        pid, camid = map(int, pattern.search(img_path).groups())
        if pid == -1:
            continue  # junk images are just ignored
        assert 0 <= pid <= 1501  # pid == 0 means background
        assert 1 <= camid <= 6
        camid -= 1  # index starts from 0
        if relabel:
            pid = pid2label[pid]
        dataset.append((img_path, pid, camid))

    return dataset, len(pid_container), len(dataset)
def get_data(dataset_dir):
    """Index ``dataset_dir`` via process_dir(relabel=True), print a summary
    table and return the train list."""
    train, num_pids, num_imgs = process_dir(dataset_dir, relabel=True)
    # only a train split exists here, so the totals equal the train counts
    print("=> Market1501 loaded")
    print("Dataset statistics:")
    print(" ------------------------------")
    print(" subset | # ids | # images")
    print(" ------------------------------")
    print(" train | {:5d} | {:8d}".format(num_pids, num_imgs))
    print(" ------------------------------")
    print(" total | {:5d} | {:8d}".format(num_pids, num_imgs))
    print(" ------------------------------")
    return train
def fun(root, model, ori_train):
    """Mean SSIM between each original image and its rendered counterpart.

    :param root: directory holding one rendered-image tree per model
    :param model: sub-directory name of the model to score
    :param ori_train: list of (img_path, pid, camid) originals
    :return: (model, mean SSIM over all pairs)
    """
    print(model)
    model_dir = os.path.join(root, model)
    scores = []
    for item in tqdm.tqdm(ori_train):
        ori_path = item[0]
        img_ori = cv2.cvtColor(cv2.imread(ori_path), cv2.COLOR_BGR2RGB)
        img_ori = torch.from_numpy(np.rollaxis(img_ori, 2)).float().unsqueeze(0) / 255.0
        # map the original path onto the rendered tree: keep everything after
        # 'market-origin-ssim/' and re-root it under model_dir
        rel = ori_path[ori_path.find('market-origin-ssim'):]
        rel = rel[rel.find('/') + 1:]
        img_oth = cv2.cvtColor(cv2.imread(os.path.join(model_dir, rel)), cv2.COLOR_BGR2RGB)
        img_oth = torch.from_numpy(np.rollaxis(img_oth, 2)).float().unsqueeze(0) / 255.0
        ssim_loss = pytorch_ssim.SSIM(window_size=11)
        scores.append(ssim_loss(img_ori, img_oth))
    return model, np.mean(scores)
# Score each model's rendered images against the originals with SSIM.
root = '/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-textured-ssim'
ori_train = get_data('/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-origin-ssim')
results = []
# model = 'no_face2018-11-09_10:57:53.148362_epoch_120'
# result = fun(root,model,ori_train)
# results.append(result)
for model in os.listdir(root):
    # NOTE(review): only this single model directory is scored; drop the
    # guard to evaluate every model under root.
    if model != 'PCB_256_L12018-11-16_17:53:20.894085_epoch_120':
        continue
    result = fun(root, model, ori_train)
    results.append(result)
for i in results:
    print(i)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,574 | mericadil/TextureGeneration | refs/heads/master | /smpl/diff_renderer_setted.py | # import torch
import cv2
import numpy as np
import torch
import torch.nn as nn
import time
import random
from torch.autograd import Function
import os
import tqdm
class DifferentialTextureRenderer(Function):
    """Custom autograd op: render = (sparse rendering matrix) @ (flat texture).

    The rendering matrix is constant, so backward only propagates the
    gradient to the texture (matrix transpose times upstream gradient).
    """

    @staticmethod
    def forward(ctx, texture_img_flat, render_sparse_matrix):
        ctx.save_for_backward(render_sparse_matrix)
        return torch.mm(render_sparse_matrix, texture_img_flat)

    @staticmethod
    def backward(ctx, grad_outputs):
        (render_sparse_matrix,) = ctx.saved_tensors
        grad_texture = torch.mm(render_sparse_matrix.transpose(0, 1), grad_outputs)
        # no gradient w.r.t. the rendering matrix itself
        return grad_texture, None
class TextureToImage(nn.Module):
    """Render a batch of uv texture maps into 128x64 person images using
    precomputed per-pose sparse rendering matrices loaded from .npy files.
    """

    def sparse_mx_to_torch_sparse_tensor(self, sparse_mx):
        """Convert a scipy sparse matrix into a torch.sparse.FloatTensor."""
        sparse_mx = sparse_mx.tocoo().astype(np.float32)
        indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)))
        indices = indices.long()
        values = torch.from_numpy(sparse_mx.data)
        shape = torch.Size(sparse_mx.shape)
        return torch.sparse.FloatTensor(indices, values, shape)

    def forward(self, x, npy_paths,img_paths):
        """Render each texture in the batch with its matching .npy matrix.

        :param x: uv map batch (N, C, H, W)
        :param npy_paths: per-sample paths to pickled dicts holding the
            sparse 'mat' and the per-pixel 'mask'
        :param img_paths: unused here except for the commented debug prints
        :return: (rendered images NCHW of size 128x64, masks NCHW)
        """
        # the input x is uv map batch of (N, C, H, W)
        # transfer it into (N, H, W, C)
        x = x.permute(0, 2, 3, 1)
        # flat it and transpose it(H * W * C, N)
        x_flat = x.reshape(self.batch_size, -1).transpose(0, 1)
        result_flats = []
        masks = []
        for i in range(x_flat.shape[1]):
            #print(npy_paths[i])
            #print(img_paths[i])
            data = {}
            x_sing_flat = x_flat[:,i]
            x_sing_flat = x_sing_flat.unsqueeze(1)
            npy_path = npy_paths[i]
            # the .npy holds a pickled object array; latin1 for py2 pickles
            action_npz_data = np.load(npy_path,encoding="latin1")
            action_npz_data.resize(1,)
            action_npz_data = action_npz_data[0]
            data['mat'] = self.sparse_mx_to_torch_sparse_tensor(action_npz_data['mat'])
            #data['bbox'] = self.bbox(action_npz_data['mask'][:, :, 0])
            data['mask'] = torch.from_numpy(action_npz_data['mask']).float().unsqueeze(0).permute(0, 3, 1, 2)
            if self.use_gpu:
                data['mat'] = data['mat'].cuda()
                data['mask'] = data['mask'].cuda()
            action_tensor = data
            mat = action_tensor['mat']
            mask = action_tensor['mask']
            #bbox = action_tensor['bbox']
            # wrap as a frozen Parameter so the matrix gets no gradient
            mat = nn.Parameter(mat, requires_grad=False)
            result_flat = DifferentialTextureRenderer.apply(x_sing_flat, mat)
            result_flat = result_flat.transpose(0, 1)
            masks.append(mask)
            result_flats.append(result_flat)
        masks = torch.cat(masks,dim=0)
        result_flats = torch.cat(result_flats,dim=0)
        # get the result of (NHWC)
        result = result_flats.reshape(self.batch_size, 128, 64, -1)
        # to NCHW
        result = result.permute(0, 3, 1, 2)
        return result, masks

    # train,isRandom is True , test , isRandom is False
    def __init__(self, batch_size, use_gpu=False, bbox_size=(128, 64), center_random_margin=2):
        super(TextureToImage, self).__init__()
        print('start init the texture to image module')
        self.center_random_margin = center_random_margin
        self.use_gpu = use_gpu
        self.batch_size = batch_size
        self.bbox_size = bbox_size

    def bbox(self, img):
        """Fixed-size bbox of self.bbox_size centered (with random jitter) on
        the nonzero region of img; returns ((cmin, rmin), (cmax, rmax))."""
        h = self.bbox_size[0]
        w = self.bbox_size[1]
        rows = np.any(img, axis=0)
        cols = np.any(img, axis=1)
        cmin, cmax = np.where(rows)[0][[0, -1]]
        rmin, rmax = np.where(cols)[0][[0, -1]]
        # jitter the center so crops are not perfectly aligned during training
        r_center = float(rmax + rmin) / 2 + random.randint(-self.center_random_margin, 0)
        c_center = float(cmax + cmin) / 2 + random.randint(0, self.center_random_margin)
        rmin = int(r_center - h / 2)
        rmax = int(r_center + h / 2)
        cmin = int(c_center - w / 2)
        cmax = int(c_center + w / 2)
        return (cmin, rmin), (cmax, rmax)

    def test(self):
        """Manual visual check of a single render.

        NOTE(review): references self.action_sparse_tensor_data, which is
        never assigned anywhere in this class — this method looks stale and
        will raise AttributeError if called.
        """
        texture_img = cv2.imread('models/default_texture2.jpg')
        texture_img = torch.from_numpy(texture_img).unsqueeze(0).float()
        texture_img = texture_img.reshape(1, -1).transpose(0, 1)
        start_time = time.time()
        action_tensor = random.choice(self.action_sparse_tensor_data)['mat']
        result_flat = torch.smm(action_tensor, texture_img).to_dense()
        result_flat = result_flat.transpose(0, 1)
        result_flat = result_flat.reshape(1, 224, 224, 3)
        stop_time = time.time()
        print('time use: {}'.format(stop_time - start_time))
        result_flat = result_flat.numpy()[0, :]
        cv2.imshow('result', result_flat.astype(np.uint8))
        cv2.waitKey()
if __name__ == '__main__':
    # Demo: render every UV map in uv_map_path onto one fixed pose and
    # composite the result over a background image.
    uv_map_path = '/home/zhongyunshan/TextureGAN/TextureGAN/example_result'
    out_path = '/home/zhongyunshan/TextureGAN/TextureGAN/example_result_after'
    background = cv2.imread('/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/example_data/background.png')
    background = cv2.resize(background, (64, 128))
    tex_2_img = TextureToImage(batch_size=1,use_gpu=False)
    if not os.path.exists(out_path):
        os.mkdir(out_path)
    for root, dir, names in os.walk(uv_map_path):
        for name in names:
            full_path = os.path.join(root, name)
            texture_img = cv2.imread(full_path)
            texture_img = cv2.resize(texture_img, (64, 64))
            texture_img = torch.from_numpy(texture_img).unsqueeze(0).float()
            texture_img = texture_img.permute(0, 3, 1, 2)
            texture_img.requires_grad = True
            # NOTE(review): TextureToImage.forward takes (x, npy_paths,
            # img_paths); only two arguments are passed here, so this call
            # raises TypeError as written — confirm against forward()'s
            # current signature.
            img, mask = tex_2_img(texture_img,['/unsullied/sharefs/zhongyunshan/isilon-home/datasets/Texture/market-pose/query/1448_c3s3_057278_00.jpg.npy'])
            img = img.squeeze(0).permute(1, 2, 0).detach().numpy().astype(np.uint8)
            mask = mask.squeeze(0).permute(1, 2, 0).detach().numpy()
            img = img.astype(np.uint8)
            # cv2.imshow('img', img)
            # cv2.waitKey()
            # Composite: rendered pixels where mask==1, background elsewhere.
            img = img * mask + background * (1 - mask)
            print(os.path.join(out_path, name))
            cv2.imwrite(os.path.join(out_path, name), img)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,575 | mericadil/TextureGeneration | refs/heads/master | /dataset/deprecated/market1501.py | # -*- coding:utf-8 -*-
import os
import cv2
import numpy as np
from torch.utils.data import Dataset
import pickle
import nori2 as nori
from dataset.data_utils import ToTensor, RandomCrop, RandomFlip, Resize
from utils.imdecode import imdecode
from numpy.random import RandomState
# Reads images for one person id per __getitem__ via the nori fetcher.
class Market1501Dataset(Dataset):
    """Market-1501 re-id dataset: yields num_instance images per person.

    The index pickle maps person_id -> {'nori_id': [...]}; images are fetched
    from nori storage and decoded, with undecodable entries re-drawn at random.
    """
    def __init__(self, pkl_path = None, normalize=True,num_instance=4):
        self.normalize = normalize
        self.to_tensor = ToTensor(normalize=self.normalize)
        self.random_flip = RandomFlip(flip_prob=0.5)
        # Fail fast if the index pickle is missing.
        if not os.path.exists(pkl_path):
            raise ValueError('{} not exists!!'.format(pkl_path))
        # Load the pickle: person_id -> record containing its nori ids.
        with open(pkl_path, 'rb') as fs:
            self.pkl = pickle.load(fs)
        # Deterministic index -> person_id mapping.
        self.sort_keys = list(sorted(self.pkl.keys()))
        self.len = len(self.pkl)
        # nori fetcher used to pull encoded images from storage
        self.nf = nori.Fetcher()
        # how many images to draw for one person per __getitem__
        self.num_instance = num_instance
    def __getitem__(self, index):
        person_id = self.sort_keys[index]  # str person id for this index
        nori_ids_list = self.pkl[person_id]['nori_id']
        rng = RandomState()
        # Sample with replacement only when the person has too few images.
        nori_ids = rng.choice(nori_ids_list, self.num_instance, replace=(len(nori_ids_list) < self.num_instance))
        img_list = []
        nori_list = []
        for nori_id in nori_ids:
            market_img = self.nf.get(nori_id)
            texture_img = imdecode(market_img)
            # Re-draw from the person's FULL image list until one decodes.
            while texture_img is None or texture_img.shape[0] <= 0 or texture_img.shape[1] <= 0:
                new_nori_id = np.random.randint(0, len(nori_ids_list))
                # BUG FIX: the original indexed ``nori_ids`` (length
                # num_instance) with an index drawn from the full list's
                # range, which can raise IndexError; PRWDataset's retry loop
                # shows the intended full-list lookup.
                market_img = self.nf.get(nori_ids_list[new_nori_id])
                texture_img = imdecode(market_img)
            texture_img = self.random_flip(texture_img)
            texture_img = self.to_tensor(texture_img)
            img_list.append(texture_img)
            nori_list.append(nori_id)
        # One label entry (the dataset index) per returned image.
        idx_list = [index] * self.num_instance
        return img_list,idx_list
    def __len__(self):
        return self.len
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,576 | mericadil/TextureGeneration | refs/heads/master | /dataset/deprecated/prw.py | # -*- coding:utf-8 -*-
import os
import cv2
import numpy as np
from torch.utils.data import Dataset
from scipy.io import loadmat
from dataset.data_utils import ToTensor, RandomCrop, RandomFlip, Resize
import pickle
import nori2 as nori
from utils.imdecode import imdecode
from numpy.random import RandomState
# Reads frames, crops each person's bounding box out of the image, returns it.
class PRWDataset(Dataset):
    """PRW person-search dataset: yields num_instance cropped person images
    per identity.  Each stored record is a pickled (frame, bbox) pair fetched
    from nori storage; unusable records are re-drawn at random."""
    def __init__(self,img_size=(128, 64), bbox_threshold=200, pkl_path = None,normalize=True,num_instance=4):
        self.img_size = img_size
        self.normalize = normalize
        self.to_tensor = ToTensor(normalize=self.normalize)
        self.bbox_threshold = bbox_threshold
        self.random_flip = RandomFlip(flip_prob=0.5)
        self.resize = Resize(output_size=self.img_size)
        # Fail fast if the index pickle is missing.
        if not os.path.exists(pkl_path):
            raise ValueError('{} not exists!!'.format(pkl_path))
        # Load the pickle: pid -> [(nori_id, ...), ...] records.
        with open(pkl_path, 'rb') as fs:
            self.pkl = pickle.load(fs)
        self.len = len(self.pkl)
        # nori fetcher used to pull pickled (frame, bbox) pairs
        self.nf = nori.Fetcher()
        # how many crops to draw for one person per __getitem__
        self.num_instance = num_instance
    def isReChoice(self,img,bbox):
        # True when the frame or its bbox crop is empty/invalid and a new
        # record must be drawn.  (NOTE(review): the ``while ...: return``
        # constructs behave exactly like ``if``.)
        while img is None or img.shape[0] <= 0 or img.shape[1] <= 0:
            return True
        x = int(bbox[1])
        y = int(bbox[2])
        w = int(bbox[3])
        h = int(bbox[4])
        img = img[y:y + h, x:x + w]
        while img is None or img.shape[0] <= 0 or img.shape[1] <= 0:
            return True
        return False
    def __getitem__(self, index):
        items_list = self.pkl[index]
        rng = RandomState()
        # Sample record indices; with replacement only if too few exist.
        items_ids = rng.choice(len(items_list), self.num_instance, replace=(len(items_list) < self.num_instance))
        img_list = []
        nori_list = []
        for items_id in items_ids:
            raw = self.nf.get(items_list[items_id][0])
            img,bbox = pickle.loads(raw)
            #img = imdecode(img)
            # Keep re-drawing until the record yields a valid crop.
            while self.isReChoice(img,bbox):
                # re select
                new_items_id = np.random.randint(0, len(items_list))
                raw = self.nf.get(items_list[new_items_id][0])
                img,bbox = pickle.loads(raw)
                #img = imdecode(img)
            #img = img[:, :, ::-1] # BGR to RGBs
            # Crop the person's bounding box out of the full frame.
            x = int(bbox[1])
            y = int(bbox[2])
            w = int(bbox[3])
            h = int(bbox[4])
            img = img[y:y + h, x:x + w]
            img = self.resize(img)
            # img = self.random_flip(img)  (flip was never applied here originally)
            img = self.to_tensor(img)
            img_list.append(img)
            nori_list.append(items_list[items_id][0])
        # One label entry (the dataset index) per returned crop.
        idx_list = [index] * self.num_instance
        return img_list,idx_list
    def __len__(self):
        return self.len
'''
def generate_index(self):
print('generating prw index')
for root, dirs, files in os.walk(self.frames_path):
for name in files:
if name.endswith('.jpg'):
img_path = os.path.join(root, name)
anno_name = name + '.mat'
anno_path = os.path.join(self.annotation_path, anno_name)
anno_mat = loadmat(anno_path)
if 'box_new' in anno_mat:
bboxs = anno_mat['box_new']
elif 'anno_file' in anno_mat:
bboxs = anno_mat['anno_file']
else:
continue
for bbox in bboxs:
self.data.append({'img_path': img_path,
'bbox': bbox
})
print('finish generating PRW index, found texture image: {}'.format(len(self.data)))
'''
if __name__ == '__main__':
    # NOTE(review): PRWDataset's first positional parameter is img_size, so
    # this call passes the dataset path as img_size and leaves pkl_path=None,
    # making os.path.exists(pkl_path) fail — the intended call is presumably
    # PRWDataset(pkl_path='...'); confirm.
    dataset = PRWDataset('/unsullied/sharefs/wangjian02/isilon-home/datasets/PRW')
    for i in range(10):
        # NOTE(review): __getitem__ now returns (img_list, idx_list); calling
        # .permute on that tuple fails, so this demo predates the tuple return.
        img = dataset.__getitem__(i * 300)
        img = img.permute(1, 2, 0).detach().numpy()
        # Undo the [-1, 1] normalization for display.
        img = img / 2.0 + 0.5
        cv2.imshow('img', img)
        cv2.waitKey(0)
| {"/deprecated/texture_reid.py": ["/dataset/real_texture.py", "/config.py", "/utils/body_part_mask.py", "/utils/data_loader.py", "/loss/PCB_intern_loss.py", "/loss/PCB_MiddleFeature.py", "/loss/PCB_softmax_loss.py", "/loss/PCB_AllCat.py"], "/deprecated/create_uvmap_textured.py": ["/dataset/market1501_pose_split_test.py"], "/loss/color_var_loss.py": ["/utils/body_part_mask.py"], "/deprecated/get_render_matrix.py": ["/smpl/render_texture.py"], "/metrics/inception_score.py": ["/utils/data_loader.py"], "/misc/noface_after_process.py": ["/utils/body_part_mask.py"]} |
49,578 | yoongyo/bizchoolup | refs/heads/master | /ch1/blog/forms.py | from django import forms
from .models import Post
from froala_editor.widgets import FroalaEditor
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing blog posts.

    Uses the Froala rich-text editor for the body and Bootstrap-styled
    widgets for the category selector and title input.
    """
    class Meta:
        model = Post
        fields = ['category', 'title', 'content']
        # Per-field widget overrides; inline styles size the inputs.
        widgets = {
            'content': FroalaEditor(),
            'category': forms.Select(
                attrs={
                    'style': 'height: 30px; margin-bottom:15px; width:150px;',
                    'class': 'form-control'
                }
            ),
            'title': forms.TextInput(
                attrs={
                    'style': 'height: 30px; margin-bottom:15px; width:300px;',
                    'class': 'form-control',
                    'autocomplete': 'off'
                }
            )
        }
| {"/ch1/blog/forms.py": ["/ch1/blog/models.py"], "/ch1/blog/views.py": ["/ch1/blog/models.py", "/ch1/blog/forms.py"], "/ch1/mysite/config/settings/debug.py": ["/ch1/mysite/config/settings/base.py"]} |
49,579 | yoongyo/bizchoolup | refs/heads/master | /ch1/blog/views.py | import os
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import TemplateView
from . models import Post,Category
from .forms import PostForm
def category_list(request):
    """Render the list of all blog categories."""
    categories = Category.objects.all()
    context = {'category_list': categories}
    return render(request, 'blog/category_list.html', context)
def post_list(request, category):
    """Render every post belonging to the category with the given name."""
    posts = Post.objects.all().filter(category__name=category)
    context = {
        'post_list': posts,
        'category_list': Category.objects.all(),
    }
    return render(request, 'blog/post_list.html', context)
def post_detail(request, category, title):
    """Render one post looked up by its title (404 when absent)."""
    post = get_object_or_404(Post, title=title)
    context = {
        'post_detail': post,
        'category_list': Category.objects.all(),
    }
    return render(request, 'blog/post_detail.html', context)
def post_new(request):
    """Create a post: empty form on GET, save + redirect on a valid POST."""
    if request.method != 'POST':
        form = PostForm()
        return render(request, 'blog/post_new.html', {'form': form})
    form = PostForm(request.POST, request.FILES)
    if form.is_valid():
        # redirect() uses the saved post's get_absolute_url().
        return redirect(form.save())
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, 'blog/post_new.html', {'form': form})
def post_edit(request, category, title):
    """Edit an existing post (looked up by title); redirect to it on success."""
    existing = get_object_or_404(Post, title=title)
    if request.method == 'POST':
        form = PostForm(request.POST, request.FILES, instance=existing)
        if form.is_valid():
            # redirect() uses the saved post's get_absolute_url().
            return redirect(form.save())
    else:
        form = PostForm(instance=existing)
    return render(request, 'blog/post_edit.html', {'form': form})
| {"/ch1/blog/forms.py": ["/ch1/blog/models.py"], "/ch1/blog/views.py": ["/ch1/blog/models.py", "/ch1/blog/forms.py"], "/ch1/mysite/config/settings/debug.py": ["/ch1/mysite/config/settings/base.py"]} |
49,580 | yoongyo/bizchoolup | refs/heads/master | /ch1/mysite/config/settings/base.py | import os
import uuid
from datetime import datetime
import json
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# Project directory layout: this file lives three levels below the repo root.
BASE1_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
ROOT_DIR = os.path.dirname(BASE_DIR)
# .config_secret directory and the secret-settings files inside it
CONFIG_SECRET_DIR = os.path.join(ROOT_DIR, '.config_secret')
CONFIG_SECRET_COMMON_FILE = os.path.join(CONFIG_SECRET_DIR, 'settings_common.json')
CONFIG_SECRET_DEBUG_FILE = os.path.join(CONFIG_SECRET_DIR, 'settings_debug.json')
CONFIG_SECRET_DEPLOY_FILE = os.path.join(CONFIG_SECRET_DIR, 'settings_deploy.json')
# SECRET_KEY is kept out of the repo in the common secrets file.
config_secret_common = json.loads(open(CONFIG_SECRET_COMMON_FILE).read())
SECRET_KEY = config_secret_common['django']['secret_key']
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
    'froala_editor',
    'disqus',
    'django.contrib.sites'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(ROOT_DIR, 'mysite', 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# WSGI_APPLICATION = 'mysite.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Korean locale / Seoul timezone.
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Froala rich-text editor configuration.
FROALA_INCLUDE_JQUERY = False
FROALA_EDITOR_PLUGINS = ('align', 'char_counter', 'code_beautifier' ,'code_view', 'colors', 'draggable', 'emoticons',
                         'entities', 'file', 'font_family', 'font_size', 'fullscreen', 'image_manager', 'image', 'inline_style',
                         'line_breaker', 'link', 'lists', 'paragraph_format', 'paragraph_style', 'quick_insert', 'quote', 'save', 'table',
                         'url', 'video')
# NOTE(review): this API key is committed in plain text while SECRET_KEY is
# kept in .config_secret — consider moving it to the secrets file as well.
DISQUS_API_KEY = 'Anhx7ZcER9hRNIcGwdrhzlEFyFG2u2eXAqsM4CJFB2AbQJuWo0qhW9aiSsoFlqSe'
DISQUS_WEBSITE_SHORTNAME = 'bizblog-1'
SITE_ID = 1 | {"/ch1/blog/forms.py": ["/ch1/blog/models.py"], "/ch1/blog/views.py": ["/ch1/blog/models.py", "/ch1/blog/forms.py"], "/ch1/mysite/config/settings/debug.py": ["/ch1/mysite/config/settings/base.py"]} |
49,581 | yoongyo/bizchoolup | refs/heads/master | /ch1/mysite/urls.py | from django.conf.urls import url,include
from django.contrib import admin
# Routing: admin site, Froala editor endpoints, and the blog app at the root.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^froala_editor/', include('froala_editor.urls')),
    url(r'^', include('blog.urls', namespace='blog')),
]
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and uploaded media files through Django itself.
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {"/ch1/blog/forms.py": ["/ch1/blog/models.py"], "/ch1/blog/views.py": ["/ch1/blog/models.py", "/ch1/blog/forms.py"], "/ch1/mysite/config/settings/debug.py": ["/ch1/mysite/config/settings/base.py"]} |
49,582 | yoongyo/bizchoolup | refs/heads/master | /ch1/blog/models.py | from django.db import models
from django.shortcuts import reverse
from froala_editor.fields import FroalaField
class Category(models.Model):
    """A named grouping for blog posts."""
    name = models.CharField(max_length=20)
    def __str__(self):
        return self.name
class Post(models.Model):
    """A blog post with Froala rich-text content, linked to one Category."""
    category = models.ForeignKey(Category)
    title = models.CharField(max_length=30)
    content = FroalaField(theme='dark')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Posts are addressed by (category, title); the views look posts up
        # by title alone, so titles are presumably unique — confirm.
        return reverse('blog:post_detail', args=[self.category,self.title])
| {"/ch1/blog/forms.py": ["/ch1/blog/models.py"], "/ch1/blog/views.py": ["/ch1/blog/models.py", "/ch1/blog/forms.py"], "/ch1/mysite/config/settings/debug.py": ["/ch1/mysite/config/settings/base.py"]} |
49,583 | yoongyo/bizchoolup | refs/heads/master | /ch1/mysite/config/settings/debug.py | from .base import *
# Development settings layered on top of base.py (imported via star-import).
config_secret_debug = json.loads(open(CONFIG_SECRET_DEBUG_FILE).read())
DEBUG = True
ALLOWED_HOSTS = config_secret_debug['django']['allowed_hosts']
# WSGI application
WSGI_APPLICATION = 'mysite.config.wsgi.debug.application'
# Local SQLite database for development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(ROOT_DIR, 'db.sqlite3'),
    }
}
MEDIA_URL = '/image_upload/'
MEDIA_ROOT = os.path.join(ROOT_DIR, 'mysite', 'media')
# NOTE(review): SecurityMiddleware is already first in base.py's MIDDLEWARE,
# so this append duplicates it — confirm whether it can be removed.
MIDDLEWARE += ['django.middleware.security.SecurityMiddleware']
INTERNAL_IPS = ['127.0.0.1']
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(ROOT_DIR, 'mysite', 'froala_editor'),
]
STATIC_ROOT = os.path.join(ROOT_DIR, 'mysite', 'staticfiles')
FROALA_UPLOAD_PATH = '' | {"/ch1/blog/forms.py": ["/ch1/blog/models.py"], "/ch1/blog/views.py": ["/ch1/blog/models.py", "/ch1/blog/forms.py"], "/ch1/mysite/config/settings/debug.py": ["/ch1/mysite/config/settings/base.py"]} |
49,584 | saurabhpatil/coursemate | refs/heads/master | /tests/test.py | import model
import unittest
import requests
class AppTestCases(unittest.TestCase):
    """Integration tests: raw-SQL checks against the shared database plus one
    HTTP check against a locally running Flask server (port 5000)."""
    def setUp(self):
        # Each test gets its own fresh DB connection.
        self.con = model.connect_database()
    def tearDown(self):
        self.con.close()
    ###------------------ TEST CASES FOR MODEL -----------------------###
    def test_get_courses(self):
        cursor = self.con.cursor()
        insert_query = "INSERT INTO courses(name) VALUES('TEST COURSE - STAT 651')"
        cursor.execute(insert_query)
        sql_query = "SELECT id, name FROM courses"
        cursor.execute(sql_query)
        # NOTE(review): fetchall() returns [] (not None) on empty results, so
        # assertIsNotNone is a weak check; the variable name looks copy-pasted
        # from another project.
        patient_profile_id = cursor.fetchall()
        self.assertIsNotNone(patient_profile_id)
        # Clean up the fixture row before committing.
        delete_query = "DELETE FROM courses WHERE name = 'TEST COURSE - STAT 651'"
        cursor.execute(delete_query)
        self.con.commit()
    def test_get_course_info(self):
        cursor = self.con.cursor()
        # NOTE(review): ``desc`` is a reserved word in MySQL; if this runs
        # against MySQL (as model.connect_database suggests) these statements
        # need backticks around the column name — confirm the schema.
        insert_query = "INSERT INTO courses(name, cost, desc) VALUES('TEST COURSE - STAT 651', 2600, 'DEMO Class')"
        cursor.execute(insert_query)
        sql_query = "SELECT id FROM courses WHERE name='TEST COURSE - STAT 651'"
        cursor.execute(sql_query)
        course_id = int(cursor.fetchone()[0])
        self.assertIsNotNone(course_id)
        sql_query = "SELECT name, cost, desc FROM courses WHERE id='{}'".format(course_id)
        cursor.execute(sql_query)
        course_info = cursor.fetchone()
        self.assertIsNotNone(course_info)
        self.assertTupleEqual(course_info,('TEST COURSE - STAT 651', 2600, 'DEMO Class'))
        # NOTE(review): the inserts write column ``id`` but the select filters
        # on ``course_id`` — the availability column names are inconsistent.
        insert_query = "INSERT INTO availability(id, professor, schedule) VALUES({}, 'Dr. Paul', 'MW 10:00-11:10')".format(course_id)
        cursor.execute(insert_query)
        insert_query = "INSERT INTO availability(id, professor, schedule) VALUES({}, 'Dr. Matt', 'THF 11:20-12:30')".format(course_id)
        cursor.execute(insert_query)
        sql_query = "SELECT id, professor, schedule FROM availability WHERE course_id={}".format(course_id)
        cursor.execute(sql_query)
        classes = cursor.fetchall()
        self.assertIsNotNone(classes)
        self.con.commit()
    ###------------------ TEST CASES FOR VIEWS -----------------------###
    def test_course_search(self):
        # Requires the Flask app to already be running on localhost:5000.
        resp = requests.get('http://localhost:5000/')
        result = resp.json()
        self.assertTrue(result['success'])
        self.assertTrue(len(result['courses']) != 0)
if __name__ == '__main__':
unittest.main() | {"/tests/test.py": ["/model.py"], "/routes.py": ["/model.py"]} |
49,585 | saurabhpatil/coursemate | refs/heads/master | /routes.py | from flask import Flask, request, json, render_template
from model import Model
# Create the Flask app and load its settings from the local config module.
app = Flask(__name__)
app.config.from_object('config')
@app.route('/', methods=['GET'])
def course_search():
    """Show the course catalogue, or one course's details when selected.

    Without a ``select_course`` query parameter the full course list is
    rendered; with one, that course's classes, cost and description are.
    """
    selected = request.args.get('select_course')
    model = Model()
    if selected is None:
        # No course picked yet: render the catalogue.
        return render_template("index.html", courses=model.get_available_courses(), course_info=None)
    # A course was picked: render its availability/cost/description.
    return render_template('index.html', course_info=model.get_course_info(selected))
@app.route('/', methods=['POST'])
def enrollment():
    """Enroll a student (by UIN) into the posted course/schedule pair."""
    uin = request.form.get('UIN')
    course_id = request.form.get('course_id')
    schedule_id = request.form.get('schedule_id')
    return Model().enroll_student(uin, course_id, schedule_id)
if __name__ == '__main__':
app.run(debug=True) | {"/tests/test.py": ["/model.py"], "/routes.py": ["/model.py"]} |
49,586 | saurabhpatil/coursemate | refs/heads/master | /model.py | from config import *
import MySQLdb as mdb
import json
def connect_database():
    '''Returns a connection to database'''
    # Credentials/schema come from config (via ``from config import *``);
    # the host/URI comes from the SQL_DATABASE_URI environment variable.
    try:
        con = mdb.connect(os.environ.get('SQL_DATABASE_URI'), SQL_DATABASE_USER, \
            SQL_DATABASE_PASS, SQL_DATABASE_SCHEMA, \
            use_unicode=True, charset='utf8')
        return con
    except Exception as e:
        # NOTE(review): any connection failure is silently turned into None;
        # callers (Model.__init__, the tests) never check for None and will
        # crash later with a confusing AttributeError — consider logging or
        # re-raising instead.
        return None
class Model:
    '''This class handles all data related operations.
    It performs information retrieval and insertion and returns JSON objects as required'''
    def __init__(self):
        '''Initialize connection to database'''
        self.con = connect_database()
        # BUG(review): ``self.con.cursor`` stores the *method* object, not an
        # open cursor — every later ``self.cursor.execute(...)`` raises
        # AttributeError.  Almost certainly should be ``self.con.cursor()``.
        # Also note connect_database() returns None on failure, unchecked here.
        self.cursor = self.con.cursor
    def __del__(self):
        '''Close the active database connection'''
        # Commits pending work, then closes the cursor; the connection itself
        # is never closed here.
        self.con.commit()
        self.cursor.close()
    def get_available_courses(self):
        '''Get the entire list of available courses'''
        # Returns a JSON string: {'success': bool, 'courses': [...]}.
        result = dict()
        result['courses'] = list()
        try:
            # Get search results based on doctor type and city
            sql_query = '''SELECT id, name FROM courses'''
            self.cursor.execute(sql_query)
            course_iterator = self.cursor.fetchall()
            # construct a json for all of the search result-set
            for course in course_iterator:
                course_dict = dict()
                course_dict['course_id'] = int(course[0])
                course_dict['course_name'] = str(course[1])
                result['courses'].append(course_dict)
            result['success'] = True
            return json.dumps(result)
        except Exception as e:
            # Return the error information in JSON result
            # BUG(review): an Exception instance is not JSON-serializable, so
            # json.dumps here raises TypeError — should be str(e).
            result['error'] = e
            result['success'] = False
            return json.dumps(result)
    def get_course_info(self, course_id):
        '''Get the information(cost, description, classes) related to a particular course'''
        # course_id comes straight from the request query string (routes.py);
        # interpolating it with .format() is an SQL-injection risk — these
        # queries should use parameterized placeholders.
        result = dict()
        try:
            # Get course cost and description
            # NOTE(review): ``desc`` is a reserved word in MySQL and likely
            # needs backticks here — confirm against the actual schema.
            sql_query = '''SELECT name, cost, desc FROM courses WHERE id = {}'''.format(course_id)
            self.cursor.execute(sql_query)
            course_info = self.cursor.fetchone()
            result['course'] = course_info[0]
            result['cost'] = course_info[1]
            result['description'] = course_info[2]
            # Get the list of all available classes
            sql_query = '''SELECT id, professor, schedule FROM availability WHERE course_id = {}'''.format(course_id)
            self.cursor.execute(sql_query)
            schedule_iterator = self.cursor.fetchall()
            # Append the list to JSON object
            result['availability'] = list()
            for schedule in schedule_iterator:
                schedule_dict = dict()
                schedule_dict['id'] = int(schedule[0])
                # NOTE(review): the tests insert professor as a name string
                # ('Dr. Paul'); int() on that would raise — presumably this
                # was meant to be str(schedule[1]).
                schedule_dict['professor'] = int(schedule[1])
                schedule_dict['weekly_schedule'] = str(schedule[2])
                result['availability'].append(schedule_dict)
            result['success'] = True
            return json.dumps(result)
        except Exception as e:
            # BUG(review): same non-serializable exception issue as above.
            result['error'] = e
            result['success'] = False
            return json.dumps(result)
    def enroll_student(self, student_UIN, course_id, schedule_id):
        '''Process student enrollment for classes'''
        # Inserts one enrollment row; relies on __del__ for the commit, and
        # shares the .format()-built-SQL injection concern noted above.
        result = dict()
        try:
            # Add the student and class information to enrollment table
            sql_query = '''INSERT IGNORE INTO enrollment(UIN, course, schedule) VALUES({}, {}, {})''' \
                .format(student_UIN, course_id, schedule_id)
            self.cursor.execute(sql_query)
            result['success'] = True
            return json.dumps(result)
        except Exception as e:
            # BUG(review): same non-serializable exception issue as above.
            result['error'] = e
            result['success'] = False
return json.dumps(result) | {"/tests/test.py": ["/model.py"], "/routes.py": ["/model.py"]} |
49,608 | lukedeboer/FoodViolation-Python | refs/heads/master | /DbConnect.py | import sqlite3
class DbConnect:
    """SQLite access layer for the food-inspection database (assignment2.db).

    Opens the database (creating the schema if needed) and exposes insert
    helpers plus canned report queries.  Query methods return sqlite3
    cursors whose rows support name-based access (sqlite3.Row).
    """

    # Idempotent schema definitions, executed on every construction.
    _SCHEMA = (
        "create table if not exists "
        "violations(ID integer primary key autoincrement,"
        "points int,"
        "serial_number text, "
        "violation_code text, "
        "violation_description text, "
        "violation_status text) ",
        "create table if not exists "
        "inspections(ID integer primary key autoincrement,"
        "activity_date text,employee_id text,facility_address text,facility_city text, facility_id text,"
        "facility_name text,facility_state text,facility_zip text,grade text,owner_id text,owner_name text,"
        "pe_description text,program_element_pe int,program_name text,program_status text,record_id text,"
        "score int,serial_number text,service_code int,service_description text) ",
        "create table if not exists "
        "previous_violations(ID integer primary key autoincrement,serial_number text,name text,address text,"
        "zipcode text,city text,violations int)",
    )

    def __init__(self):
        """Open the database, enable name-based rows and ensure the schema."""
        self._db = sqlite3.connect("assignment2.db")
        self._db.row_factory = sqlite3.Row
        for ddl in self._SCHEMA:
            self._db.execute(ddl)
        self._db.commit()

    def add_violations(self, points, serial_number, violation_code, violation_description, violation_status):
        """Insert one row into ``violations`` and commit."""
        self._db.row_factory = sqlite3.Row
        row = (points, serial_number, violation_code, violation_description, violation_status)
        self._db.execute("insert into violations(points,serial_number,violation_code, violation_description,"
                         "violation_status) values(?,?,?,?,?)", row)
        self._db.commit()

    def add_inspections(self, activity_date, employee_id, facility_address, facility_city, facility_id, facility_name,
                        facility_state, facility_zip,
                        grade, owner_id, owner_name, pe_description, program_element_pe, program_name, program_status,
                        record_id, score,
                        serial_number, service_code, service_description):
        """Insert one row into ``inspections`` and commit."""
        self._db.row_factory = sqlite3.Row
        row = (activity_date, employee_id, facility_address, facility_city, facility_id,
               facility_name, facility_state, facility_zip, grade, owner_id, owner_name,
               pe_description, program_element_pe, program_name, program_status,
               record_id, score, serial_number, service_code, service_description)
        self._db.execute("insert into inspections(activity_date,employee_id,facility_address,facility_city, "
                         "facility_id, "
                         "facility_name,facility_state,facility_zip,grade,owner_id,owner_name,pe_description,"
                         "program_element_pe,program_name,program_status,record_id,score,serial_number,service_code,"
                         "service_description) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
                         row)
        self._db.commit()

    def add_previous_violations(self, serial_number, name, address, zipcode, city, violations):
        """Insert one row into ``previous_violations`` and commit."""
        self._db.row_factory = sqlite3.Row
        row = (serial_number, name, address, zipcode, city, violations)
        self._db.execute("insert into previous_violations(serial_number,name, address,zipcode,city,violations) "
                         "values(?,?,?,?,?,?)", row)
        self._db.commit()

    def group_by_violations(self):
        """Cursor over (violation_code, violation_description, count) per code."""
        self._db.row_factory = sqlite3.Row
        return self._db.execute("select violation_code,violation_description, count(violation_code) from violations "
                                "GROUP by violation_code")

    def distinct_violations(self):
        """Cursor over per-facility violation counts, joined with inspections."""
        self._db.row_factory = sqlite3.Row
        return self._db.execute("select DISTINCT(violations.serial_number),activity_date, inspections.facility_name "
                                "as name, "
                                "inspections.facility_address as address, inspections.facility_zip as zipcode, "
                                "inspections.facility_city as city, count(violations.serial_number) as violations "
                                "from violations JOIN inspections on violations.serial_number = "
                                "inspections.serial_number GROUP BY violations.serial_number ORDER by "
                                "violations")

    def violations_per_month(self):
        """Cursor over per-month max/min/avg violation counts across zip codes."""
        self._db.row_factory = sqlite3.Row
        return self._db.execute("select month, max(noofviolations) as maxofviolations ,min(noofviolations) as minofviolations , avg(noofviolations) as avgofviolations from ( select facility_zip, count(violations.serial_number) as noofviolations, strftime('%Y-%m', activity_date) as month from violations inner join inspections on violations.serial_number = inspections.serial_number group by month, facility_zip ) group by month")

    def average_mcdonalds_violations_per_month(self):
        """Cursor over the monthly average violation count for McDonald's sites."""
        self._db.row_factory = sqlite3.Row
        return self._db.execute("select month, (sum(noofviolations) / count(*) ) as average from ( select strftime( '%Y-%m', activity_date) as month, count(inspections.facility_name ) as noofviolations, inspections.facility_name from inspections inner join violations on violations.serial_number = inspections.serial_number where facility_name like '%MCDONALD%' group by month, facility_name order by month ) group by month")

    def average_burger_king_violations_per_month(self):
        """Cursor over the monthly average violation count for Burger King sites."""
        self._db.row_factory = sqlite3.Row
        return self._db.execute("select month, (sum(noofviolations) / count(*) ) as average from ( select strftime( '%Y-%m', activity_date) as month, count(inspections.facility_name ) as noofviolations, inspections.facility_name from inspections inner join violations on violations.serial_number = inspections.serial_number where facility_name like '%BURGER KING%' group by month, facility_name order by month ) group by month")
| {"/excel_food.py": ["/DbConnect.py"], "/sql_food.py": ["/DbConnect.py"], "/numpy_food.py": ["/DbConnect.py"], "/createdb_food.py": ["/DbConnect.py"]} |
49,609 | lukedeboer/FoodViolation-Python | refs/heads/master | /excel_food.py | from DbConnect import DbConnect
import xlsxwriter
class ExcelFood(DbConnect):
    """Export the per-code violation summary to ViolationTypes.xlsx."""

    def __init__(self):
        self.import_violations()

    @staticmethod
    def import_violations():
        """Write one spreadsheet row per violation code plus a grand total."""
        connection = DbConnect()
        violation_rows = connection.group_by_violations()
        workbook = xlsxwriter.Workbook('ViolationTypes.xlsx')
        worksheet = workbook.add_worksheet("Violations Types")
        # Header row (rows and columns are zero indexed).
        worksheet.write(0, 0, "Code")
        worksheet.write(0, 1, "Description")
        worksheet.write(0, 2, "Count")
        grand_total = 0
        row = 0
        for record in violation_rows:
            row += 1
            worksheet.write(row, 0, record["violation_code"])
            worksheet.write(row, 1, record["violation_description"])
            worksheet.write(row, 2, record["count(violation_code)"])
            grand_total += int(record["count(violation_code)"])
        # Summary row directly below the data.
        worksheet.write(row + 1, 1, "Total Violations")
        worksheet.write(row + 1, 2, grand_total)
        workbook.close()
def main():
    """Entry point: run the Excel export."""
    ExcelFood()


if __name__ == '__main__':
    main()
| {"/excel_food.py": ["/DbConnect.py"], "/sql_food.py": ["/DbConnect.py"], "/numpy_food.py": ["/DbConnect.py"], "/createdb_food.py": ["/DbConnect.py"]} |
49,610 | lukedeboer/FoodViolation-Python | refs/heads/master | /sql_food.py | from DbConnect import DbConnect
class SqlFood(DbConnect):
    """Echo distinct violators and copy them into the previous-violations table."""

    def __init__(self):
        self.get_violations()

    @staticmethod
    def get_violations():
        """Fetch distinct violations, print each record, then persist it."""
        connection = DbConnect()
        for record in connection.distinct_violations():
            print("Serial Number: {}, Name: {}, Address: {}, ZipCode: {}, City: {}, Violations Count: {}"
                  .format(record["serial_number"], record["name"], record["address"], record["zipcode"],
                          record["city"], record["violations"]))
            connection.add_previous_violations(record["serial_number"], record["name"], record["address"],
                                               record["zipcode"], record["city"], int(record["violations"]))
def main():
    """Entry point: run the distinct-violations report."""
    SqlFood()


if __name__ == '__main__':
    main()
| {"/excel_food.py": ["/DbConnect.py"], "/sql_food.py": ["/DbConnect.py"], "/numpy_food.py": ["/DbConnect.py"], "/createdb_food.py": ["/DbConnect.py"]} |
49,611 | lukedeboer/FoodViolation-Python | refs/heads/master | /numpy_food.py | import matplotlib.pyplot as plt
from DbConnect import DbConnect
class NumpyFood(DbConnect):
    """Plot monthly violation statistics with matplotlib."""

    def __init__(self):
        self.generate_pivot()

    @staticmethod
    def generate_pivot():
        """Draw two figures and show them.

        Figure 1: highest / lowest / average violations per month.
        Figure 2: McDonald's vs Burger King average violations per month.

        The large block of dead, commented-out bar-chart experiments and the
        unused ``legend`` locals were removed; the plotting sequence is
        otherwise unchanged.
        """
        connect = DbConnect()
        all_violations = connect.violations_per_month()
        # Figure 1: each row is (month, highest, lowest, average).
        tick_labels = []
        height_highest = []
        height_lowest = []
        height_average = []
        for col in all_violations:
            tick_labels.append(col[0])
            height_highest.append(col[1])
            height_lowest.append(col[2])
            height_average.append(col[3])
            print(col[0], col[1], col[2], col[3])
        ax = plt.figure().add_subplot(111)
        ax.plot()
        ax.set_ylabel('No of Violations')
        ax.set_xlabel('Month')
        ax.set_title('Violations per month')
        ax.set_xticklabels(tick_labels)  # month labels on the x axis
        plt.plot(tick_labels, height_highest, 'go', label='Highest Violations')
        plt.plot(tick_labels, height_lowest, 'rs', label='Lowest Violations')
        plt.plot(tick_labels, height_average, 'b^', label='Average Violations')
        plt.xticks(tick_labels, tick_labels, rotation='vertical')
        ax.legend(loc='best', shadow=False, fontsize='small')
        # Figure 2: McDonald's vs Burger King average violations per month.
        avg_violations_of_mcd = connect.average_mcdonalds_violations_per_month()
        avg_violations_of_burger_king = connect.average_burger_king_violations_per_month()
        tick_label = []
        height = []
        for idx, col in avg_violations_of_mcd:  # rows unpack as (month, average)
            height.append(col)
            tick_label.append(idx)
        ax = plt.figure().add_subplot(111)
        ax.plot()
        ax.set_ylabel('No of Violations')
        ax.set_xlabel('Month')
        ax.set_title('Violations per month')
        ax.set_xticklabels(tick_label)
        plt.plot(tick_label, height, 'go', label='McDonald\'s')
        plt.xticks(tick_label, tick_label, rotation='vertical')
        tick_label_burger_king = []
        height_burger_king = []
        for idx, col in avg_violations_of_burger_king:
            height_burger_king.append(col)
            tick_label_burger_king.append(idx)
        plt.plot(tick_label_burger_king, height_burger_king, 'ro', label='Burger King')
        plt.xlabel('Month')
        plt.ylabel('No of Violations')
        plt.title('Total Violations per month')
        ax.legend(loc='best', shadow=False, fontsize='small')
        plt.show()
def main():
    """Entry point: draw the violation plots."""
    NumpyFood()


if __name__ == '__main__':
    main()
| {"/excel_food.py": ["/DbConnect.py"], "/sql_food.py": ["/DbConnect.py"], "/numpy_food.py": ["/DbConnect.py"], "/createdb_food.py": ["/DbConnect.py"]} |
49,612 | lukedeboer/FoodViolation-Python | refs/heads/master | /createdb_food.py | import xlrd
from xlrd import xldate_as_tuple
from datetime import datetime
from DbConnect import DbConnect
class CreateDbFood(DbConnect):
    """Load the violations and inspections spreadsheets into the database."""

    def __init__(self):
        self.import_violations()
        self.import_inspections()

    @staticmethod
    def import_violations():
        """Read violations.xlsx row by row and insert each record."""
        print("\n===========Reading violations Excel started===========\n")
        book = xlrd.open_workbook('violations.xlsx')
        sheet = book.sheet_by_name("violations")
        connect = DbConnect()
        print("\n===========Data Import started===========\n")
        counter = 0
        for r in range(1, sheet.nrows):  # row 0 is the header
            points, serial_number, violation_code, violation_description, violation_status = (
                sheet.cell(r, c).value for c in range(5))
            counter += 1
            connect.add_violations(points, serial_number, violation_code, violation_description, violation_status)
        print(f'\n==========={counter} Row Imported===========\n')

    @staticmethod
    def import_inspections():
        """Read inspections.xlsx, convert the Excel serial date, insert each record."""
        print("\n===========Reading inspections Excel started===========\n")
        book = xlrd.open_workbook('inspections.xlsx')
        sheet = book.sheet_by_name("inspections")
        print("\n===========Data Import started===========\n")
        connect = DbConnect()
        counter = 0
        for r in range(1, sheet.nrows):  # row 0 is the header
            # Column 0 holds an Excel serial date; build a date directly
            # instead of the old format-then-strptime round trip.
            year, month, day, _h, _m, _s = xldate_as_tuple(sheet.cell(r, 0).value, book.datemode)
            activity_date = datetime(year, month, day).date()
            # Columns 1..19 map positionally onto add_inspections' parameters
            # (employee_id, facility_address, ..., service_description).
            values = [sheet.cell(r, c).value for c in range(1, 20)]
            counter += 1
            connect.add_inspections(activity_date, *values)
        print(f'\n==========={counter} Row Imported===========\n')
def main():
    """Entry point: import both spreadsheets."""
    CreateDbFood()


if __name__ == '__main__':
    main()
| {"/excel_food.py": ["/DbConnect.py"], "/sql_food.py": ["/DbConnect.py"], "/numpy_food.py": ["/DbConnect.py"], "/createdb_food.py": ["/DbConnect.py"]} |
49,614 | kosciej16/jfh | refs/heads/master | /jfh.py | #!/usr/bin/python3
import argparse
from jf_parser import JenkinsFileParser
from helper import JenkinsFileHelper
def configure_argparse():
    """Build the CLI and parse sys.argv.

    Commands: ``ls`` (list stages) and ``cs <stage_name>`` (toggle a stage).
    ``-f/--filename`` selects the Jenkinsfile (default: ``Jenkinsfile``).
    """
    arg_parser = argparse.ArgumentParser(description='Script to help manage JenkinsfIles')
    arg_parser.add_argument('-f', '--filename', dest='filename', default='Jenkinsfile')
    subcommands = arg_parser.add_subparsers(dest='command', help='command to run')
    comment_stage = subcommands.add_parser('cs')
    comment_stage.add_argument('stage_name')
    subcommands.add_parser('ls')
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = configure_argparse()
    helper = JenkinsFileHelper(JenkinsFileParser(args.filename))
    if args.command == 'ls':
        helper.print_stages()
    elif args.command == 'cs':
        helper.process_stage_by_id(args.stage_name)
        helper.print_stages()
| {"/jfh.py": ["/jf_parser.py", "/helper.py"]} |
49,615 | kosciej16/jfh | refs/heads/master | /jf_parser.py | import fileinput
import json
from pyparsing import (
Forward,
Group,
Suppress,
Word,
alphanums,
delimitedList,
quotedString,
originalTextFor,
nestedExpr,
SkipTo,
Literal,
removeQuotes,
LineStart,
Optional,
)
class JenkinsFileParser:
    """Pyparsing-based parser that extracts the stage tree from a Jenkinsfile,
    including stages commented out with /* ... */."""
    # Result keys used when collecting parsed stages.
    STAGE_KEY = 'stage'
    COMMENTED_STAGE_KEY = 'commented_stage'
    def __init__(self, filename='Jenkinsfile'):
        self.filename = filename
        self.create_grammar()
    def create_grammar(self):
        """Build the pyparsing grammar once; the pieces are kept on self."""
        # Skip the file preamble up to the first 'stage' (optionally preceded
        # by '/*'), without stopping at the plural keyword 'stages'.
        self.beg = SkipTo(LineStart() + Literal('/*')*(0, 1) + Literal('stage'), ignore=Literal('stages'))
        self.block = Forward()
        # 'parallel { ... }' — nested stages; the parse action unwraps the
        # single nested group.
        self.parallel = Suppress('parallel') + self.nested(self.block)
        self.parallel.setParseAction(lambda t: t[0])
        # 'environment { ... }' blocks are recognised but discarded.
        self.environment = Suppress('environment') + self.nested()
        # A stage body either contains parallel/environment sections (kept
        # under the name 'parallel') or is an opaque brace block (dropped).
        self.stage_content = (
            self.nested((self.parallel | self.environment.suppress()), 'parallel') |
            self.nested().suppress()
        )
        # stage("name") { ... } — the quoted name is captured unquoted.
        self.stage = Group(
            Suppress('stage' + '(') +
            quotedString('stage_name').setParseAction(removeQuotes) +
            Suppress(')') +
            self.stage_content)(
            self.STAGE_KEY + '*'
        )
        # The same stage wrapped in /* ... */ markers.
        self.commented_stage = Group(Suppress('/*') + self.stage + Suppress('*/'))(self.COMMENTED_STAGE_KEY + '*')
        self.any_stage = self.stage | self.commented_stage
        self.block << Group(self.parallel | self.any_stage)('block*')
    @staticmethod
    def nested(elem=None, name=None):
        """Return a '{ ... }' expression, optionally named, whose scan
        ignores stray '*/' markers inside the braces."""
        expr = nestedExpr('{', '}', content=elem, ignoreExpr=Literal('*/'))
        if name:
            return expr.setResultsName(name)
        return expr
    def evaluate_stages(self):
        """Parse self.filename and return the stage tree as a nested dict."""
        a = self.beg.suppress() + self.block[...]
        test = a.parseFile(self.filename)
        # print(test.asDict())
        # print(json.dumps(test.asDict(), indent=4))
        return test.asDict()
    def find_stage_by_name(self, name, content):
        """Return the first (tokens, start, end) scan match of the named stage
        in *content*, whether or not it is commented out.
        Raises StopIteration when the stage is not found."""
        quoted_name = (Literal('"') | Literal("'")).suppress() + name + (Literal('"') | Literal("'")).suppress()
        # named_stage = Literal('/*')*(0, 1) + 'stage' + '(' + quoted_name + ')' + self.nested() + Literal('*/')*(0, 1)
        named_stage = 'stage' + '(' + quoted_name + ')' + self.nested()
        commented_named_stage = Literal('/*') + 'stage' + '(' + quoted_name + ')' + self.nested() + Literal('*/')
        return next((named_stage | commented_named_stage).scanString(content))
def definitions():
    """Experimental grammar for Groovy 'def x = <string|array>' statements,
    parsed from a local file named 'tmp'. Not used by the CLI."""
    expression = Forward()
    array = Suppress('[') + delimitedList(expression) + Suppress(']')
    expression << (quotedString | array)('val')
    ident = Word(alphanums + '_')('var')
    definition = Group(Suppress('def') + ident + Suppress('=') + expression)("def*")
    program = definition[...]
    test = program.parseFile('tmp')
    # print(originalTextFor(program))
    print(test.asDict())
if __name__ == "__main__":
    # Quick manual check: parse the local Jenkinsfile into a stage dict.
    JenkinsFileParser().evaluate_stages()
| {"/jfh.py": ["/jf_parser.py", "/helper.py"]} |
49,616 | kosciej16/jfh | refs/heads/master | /stage_tracker.py | import attr
from jenkinsfile.jf_parser import JenkinsFileParser
@attr.s(hash=True)
class Stage:
    """A (possibly commented-out) Jenkinsfile stage in a tree of stages."""

    name = attr.ib()
    is_commented = attr.ib()
    children = attr.ib(factory=list)
    id = attr.ib(default='-1')
    parent = attr.ib(default=None)

    def add_child(self, stage):
        """Attach *stage* below this one, assigning it a dotted id."""
        stage.parent = self
        stage.id = f'{self.id}.{len(self.children) + 1}'
        # A child of a commented stage is implicitly commented too.
        stage.is_commented = stage.is_commented or self.is_commented
        self.children.append(stage)

    def update_status(self):
        """Recompute is_commented from the children; return True if it changed."""
        previous = self.is_commented
        self.is_commented = all(child.is_commented for child in self.children)
        return previous != self.is_commented

    def change_state(self):
        """Toggle this stage (and its whole subtree); refresh the parent.

        Returns True when the parent's own commented state changed as a result.
        """
        self.is_commented = not self.is_commented
        for child in self.children:
            child.is_commented = self.is_commented
        if self.parent is None:
            return False
        return self.parent.update_status()

    @property
    def siblings(self):
        """All children of the parent (including self); [] at the root."""
        return self.parent.children if self.parent else []

    def pretty_print(self, prefix=''):
        """Print '<id>: <name>' (wrapped in '/* */' when commented), recursing
        into children with an extra '-- ' indent."""
        if self.is_commented:
            print(f'{prefix}{self.id}: /* {self.name} */')
        else:
            print(f'{prefix}{self.id}: {self.name}')
        for child in self.children:
            child.pretty_print(prefix + '-- ')
class StageTracker:
    """Builds and indexes the tree of Stage objects parsed from a Jenkinsfile."""
    # Counter handing out ids to root-level stages ('1', '2', ...). The
    # `self.STAGE_IDENTIFIER += 1` below shadows it as an instance attribute.
    STAGE_IDENTIFIER = 1
    def __init__(self, parser: JenkinsFileParser):
        self.parser = parser
        self.stages = {}   # root stage name -> Stage
        self.mapping = {}  # dotted stage id (e.g. '1.2') -> Stage
        self.get_stages()
    def get_stages(self):
        """Parse the Jenkinsfile and populate self.stages / self.mapping."""
        raw_stages = self.parser.evaluate_stages()
        self.get_stages_recursively(raw_stages)
    def map_stages(self, parent_identifier):
        # NOTE(review): dead code with a bug — `self.stage_mapping` is never
        # initialised (the attribute is `self.mapping`), so calling this
        # raises AttributeError. Appears unused; confirm before fixing/removing.
        for identifier, name in enumerate(self.stages.keys(), 1):
            self.stage_mapping[f'{parent_identifier}.{identifier}'] = name
    def get_stages_recursively(self, stages_subdict, parent_stage=None):
        """Walk the parser's nested dict, creating Stage nodes depth-first."""
        for raw_stage in stages_subdict.get('block', []):
            is_commented = False
            # A stage wrapped in /* ... */ arrives under 'commented_stage'.
            if 'commented_stage' in raw_stage:
                raw_stage = raw_stage.get('commented_stage')[0]
                is_commented = True
            raw_stage = raw_stage.get('stage')[0]
            stage = self.parse_raw_stage(raw_stage, is_commented, is_root_stage=parent_stage is None)
            if parent_stage:
                parent_stage.add_child(stage)
            else:
                self.stages[stage.name] = stage
            self.mapping[stage.id] = stage
    def parse_raw_stage(self, stage_as_dict, is_commented, is_root_stage=False):
        """Turn one parsed stage dict into a Stage, recursing into 'parallel'."""
        name = stage_as_dict.get('stage_name')
        nested = stage_as_dict.get('parallel')
        result = Stage(name, is_commented)
        if is_root_stage:
            # Root stages get sequential top-level ids; children get dotted
            # ids from Stage.add_child.
            result.id = str(self.STAGE_IDENTIFIER)
            self.STAGE_IDENTIFIER += 1
        if nested:
            self.get_stages_recursively(nested[0], result)
        return result
    def is_commented(self, stage_name):
        """Return True if the named root stage is currently commented out."""
        if stage_name not in self.stages:
            return False
        return self.stages.get(stage_name).is_commented
    def get_parent(self, stage_name):
        """Return the parent of the named root stage (None for roots)."""
        return self.stages.get(stage_name).parent
    def get_stage(self, stage_id):
        """Look a stage up by its dotted id (as shown by print_stages)."""
        return self.mapping.get(stage_id)
    def print_stages(self):
        """Pretty-print every root stage tree, blank line between trees."""
        for stage in self.stages.values():
            stage.pretty_print()
            print()
| {"/jfh.py": ["/jf_parser.py", "/helper.py"]} |
49,617 | kosciej16/jfh | refs/heads/master | /helper.py | from jenkinsfile.jf_parser import JenkinsFileParser
from jenkinsfile.stage_tracker import StageTracker
class JenkinsFileHelper:
    """Applies comment/uncomment operations to the Jenkinsfile on disk,
    keeping the StageTracker tree and the file text in sync."""
    def __init__(self, parser):
        self.parser = parser
        self.filename = parser.filename
        self.stage_tracker = StageTracker(parser)
    def process_stage_by_id(self, stage_id):
        """Toggle the stage with the given dotted id (e.g. '1.2')."""
        self.process_stage(self.stage_tracker.get_stage(stage_id))
    def process_stage(self, stage, switch_state=True):
        """Toggle *stage* in the file and rewrite it in place.

        switch_state=False only re-syncs the file with an already-updated
        in-memory state (used when rewriting parents and siblings).
        """
        with open(self.filename, 'r+') as f:
            content = f.read()
            scan_result = self.parser.find_stage_by_name(stage.name, content)
            if not scan_result:
                return
            res = ''
            parent_state_changed = switch_state and stage.change_state()
            if stage.is_commented:
                # changing state of stage updated parent
                if parent_state_changed:
                    # The whole parent flipped: rewrite via the parent instead.
                    self.process_stage(stage.parent, switch_state=False)
                    return
                res = self.comment(content, scan_result)
                # Strip nested '/* */' pairs inside the newly commented span.
                # NOTE(review): res[0:2] / res[-2:] slice fixed offsets of the
                # whole text, presumably to keep the outermost markers intact —
                # verify against a Jenkinsfile with a stage at the very start.
                res = res[0:2] + self.uncomment(res[2:-2], scan_result) + res[-2:]
            else:
                if parent_state_changed:
                    # Parent no longer fully commented: uncomment the parent,
                    # then re-comment each sibling that should stay commented.
                    self.process_stage(stage.parent, switch_state=False)
                    for child in stage.siblings:
                        if child.name != stage.name:
                            self.process_stage(child, switch_state=False)
                    return
                res = self.uncomment(content, scan_result)
            f.seek(0)
            f.write(res)
            f.truncate()
    def comment(self, content, scan_result):
        """Wrap the matched span (scan_result holds start/end offsets) in '/* ... */'."""
        tmp = self.put_inside_string(content, scan_result[2], ' */')
        return self.put_inside_string(tmp, scan_result[1], '/* ')
    def uncomment(self, content, scan_result):
        """Remove '/* ' and ' */' markers within the matched span."""
        subcontent = content[scan_result[1] : scan_result[2]]
        return content.replace(subcontent, subcontent.replace('/* ', '').replace(' */', ''))
    @staticmethod
    def put_inside_string(string, position, string_to_put):
        """Insert string_to_put into string at the given character offset."""
        return string[:position] + string_to_put + string[position:]
    def print_stages(self):
        """Delegate to the tracker's pretty printer."""
        self.stage_tracker.print_stages()
# p = JenkinsFileParser()
# h = JenkinsFileHelper(p)
# s = h.stage_tracker.get_stage('Deploy to dev-apps')
# print(s)
# ss = s.children[0]
# print(ss)
# h.process_stage(ss)
| {"/jfh.py": ["/jf_parser.py", "/helper.py"]} |
49,619 | LaOriana/knit-along | refs/heads/main | /crud.py | """CRUD operations."""
from model import db, User, Event, EventOwner, EventAttended, Post, connect_to_db
# add images to static file
def create_user(username, email, password, image):
    """Create, persist, and return a new User."""
    new_user = User(username=username, email=email, password=password, image=image)
    db.session.add(new_user)
    db.session.commit()
    return new_user
def create_event(event_name, start_date, end_date, pattern):
    """Create, persist, and return a new Event."""
    new_event = Event(event_name=event_name, start_date=start_date,
                      end_date=end_date, pattern=pattern)
    db.session.add(new_event)
    db.session.commit()
    return new_event
def create_post(post_date, content, user_id=None, event_id=None):
    """Create, persist, and return a new Post.

    user_id and event_id are optional for backward compatibility with the
    original signature, but the posts table declares both columns NOT NULL,
    so the commit will fail unless they are supplied.
    """
    post = Post(post_date=post_date, content=content,
                user_id=user_id, event_id=event_id)
    db.session.add(post)
    db.session.commit()
    return post
def get_users():
    """Return every User in the database."""
    return User.query.all()
def get_user_by_id(user_id):
    """Return the User with the given primary key, or None."""
    return User.query.get(user_id)
def get_user_by_email(email):
    """Return the first User with the given email, or None."""
    return User.query.filter(User.email == email).first()
if __name__ == '__main__':
    # Running crud.py directly binds the models so the helpers can be
    # exercised interactively.
    from server import app
    connect_to_db(app)
49,620 | LaOriana/knit-along | refs/heads/main | /server.py | """Server for knit-along app."""
from flask import (Flask, render_template, request, flash, session, redirect)
from model import connect_to_db
from crud import (get_user_by_email, create_user)
import model
import os
import crud
app = Flask(__name__)
# Loaded from the environment — run `source secrets.sh` first or sessions
# will not work (secret_key will be None).
app.secret_key = os.environ.get('SECRET_KEY')
@app.route('/')
def homepage():
    """Render the landing page, or jump to the bookshelf when logged in."""
    if 'user' in session:
        flash('Logged in.')
        return redirect('/bookshelf')
    return render_template('homepage.html')
@app.route('/signup', methods=['POST'])
def signup():
    """Create an account from the signup form and log the new user in."""
    username = request.form.get('username')
    email = request.form.get('email')
    password = request.form.get('password')
    # Default avatar for every new account.
    image = 'https://tinyurl.com/2ujz8nxb'
    if get_user_by_email(email):
        flash('That email is already in use. Please login or use another email.')
        return redirect('/')
    user = create_user(username, email, password, image)
    session['user'] = user.user_id
    flash('Your account has been created and you\'re logged in.')
    return redirect('/bookshelf')
@app.route('/login', methods=["POST"])
def user_login():
    """Log a user in from the login form.

    Removed the debug print of the submitted email (information leak).
    NOTE(review): passwords are compared in plain text — they should be
    hashed (e.g. werkzeug.security) before this ships.
    """
    input_email = request.form.get('email')
    input_password = request.form.get('password')
    user = get_user_by_email(input_email)
    if user and user.password == input_password:
        session['user'] = user.user_id
        flash('Logged in.')
        return redirect('/bookshelf')
    flash('Incorrect login')
    return redirect('/')
@app.route('/bookshelf')
def bookshelf():
    """Render the logged-in user's bookshelf page."""
    return render_template('bookshelf.html')
@app.route('/logout')
def logout():
    """Log the current user out and return to the homepage."""
    # Pop with a default so visiting /logout while not logged in
    # doesn't raise KeyError (500).
    session.pop('user', None)
    return redirect('/')
@app.route('/createeventpage')
def create_event_page():
    """Render the event-creation form."""
    return render_template('createeventpage.html')
@app.route('/createeventaction', methods=['POST'])
def create_event_action():
    """Creating event and adding it to the database."""
    # TODO: call crud.create_event() here, take the event id from the
    # returned object, and pass it through to the event template.
    input_title = request.form.get('title')
    print(input_title)
    return redirect('/event')
# NOTE(review): POST-only today — confirm whether this should be GET with an
# event id once the page is wired up.
@app.route('/event', methods=['POST'])
def event():
    """Event information.

    NOTE(review): this view currently returns None, which Flask treats as an
    error — it must render a template or redirect before being routed to.
    """
    # return will use event ID to get event information
    # and then pass this using jinja
# app routes go here
# Create account - complete
# Login - complete
# Logout - complete
# Homepage - complete
# Account
# Bookshelf
# Create Event
# similar to login
# create fields for API/user - use database fields
# timeframe - look up on google if there is a type='date'?
# use crud function create_event
# redirect to event page and give eventID to event info
# Event Info
# Forum
# Option to edit event
if __name__ == '__main__':
    # Bind the models to the app, then serve on all interfaces in debug mode.
    connect_to_db(app)
    app.run(host='0.0.0.0', debug=True)
49,621 | LaOriana/knit-along | refs/heads/main | /model.py | from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy handle; bound to the Flask app in connect_to_db().
db = SQLAlchemy()
class User(db.Model):
    """A registered user."""

    __tablename__ = 'users'

    user_id = db.Column(db.Integer,
                        primary_key=True,
                        autoincrement=True
                        )
    username = db.Column(db.String(30), nullable=False)
    # 320 = 64-char local part + '@' + 255-char domain (RFC maximum).
    email = db.Column(db.String(320), unique=True, nullable=False)
    # Fixed: was unique=True — a uniqueness constraint on passwords is wrong
    # (two users may legitimately pick the same one) and leaks information.
    # NOTE(review): still stored in plain text; hash before shipping.
    password = db.Column(db.String, nullable=False)
    # URL of the user's avatar image.
    image = db.Column(db.String, nullable=True)

    owned_events = db.relationship('Event', secondary='event_owner')
    attended_events = db.relationship('Event', secondary='event_attended')

    def __repr__(self):
        return f'<User user_id={self.user_id} username={self.username} email={self.email}>'
class Event(db.Model):
    """A knit-along event."""

    __tablename__ = 'events'

    event_id = db.Column(db.Integer,
                         primary_key=True,
                         autoincrement=True
                         )
    event_name = db.Column(db.String(128), nullable=False)
    start_date = db.Column(db.Date(), nullable=False)
    end_date = db.Column(db.Date(), nullable=False)
    # Pattern link (possibly a Ravelry id) — TODO confirm the exact format.
    pattern = db.Column(db.String, nullable=False)

    # Pending the chat forum feature:
    # chat = db.Column(db.String, nullable=False)

    owners = db.relationship('User', secondary='event_owner')
    attendees = db.relationship('User', secondary='event_attended')

    def __repr__(self):
        return f'<Event = event_id{self.event_id} event_name{self.event_name} start_date={self.start_date} end_date={self.end_date} pattern={self.pattern}>'
class EventOwner(db.Model):
    """Association row: a user who owns (hosts) an event."""

    __tablename__ = 'event_owner'

    owner_id = db.Column(db.Integer,
                         primary_key=True,
                         autoincrement=True
                         )
    user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'), nullable=False)
    event_id = db.Column(db.Integer, db.ForeignKey('events.event_id'), nullable=False)

    def __repr__(self):
        return f'<EventOwner = user_id{self.user_id} event_id{self.event_id}>'
class EventAttended(db.Model):
    """Association row: a user attending an event."""

    __tablename__ = 'event_attended'

    attendee_id = db.Column(db.Integer,
                            primary_key=True,
                            autoincrement=True
                            )
    user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'), nullable=False)
    event_id = db.Column(db.Integer, db.ForeignKey('events.event_id'), nullable=False)

    def __repr__(self):
        # Fixed copy-paste: this previously labelled itself 'EventOwner'.
        return f'<EventAttended = user_id{self.user_id} event_id{self.event_id}>'
class Post(db.Model):
    """A forum post made by a user within an event."""

    __tablename__ = 'posts'

    post_id = db.Column(db.Integer,
                        primary_key=True,
                        autoincrement=True,
                        nullable=False
                        )
    post_date = db.Column(db.Date, nullable=False)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'), nullable=False)
    event_id = db.Column(db.Integer, db.ForeignKey('events.event_id'), nullable=False)

    # The backref names are independent of the table names.
    user = db.relationship('User', backref='posts')
    event = db.relationship('Event', backref='posts')
# Pass echo=False to silence SQLAlchemy's SQL logging in the console.
def connect_to_db(flask_app, database='knitalong', echo=True):
    """Connect to database."""
    flask_app.config["SQLALCHEMY_DATABASE_URI"] = f"postgresql:///{database}"
    flask_app.config["SQLALCHEMY_ECHO"] = echo
    flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    db.app = flask_app
    db.init_app(flask_app)
if __name__ == "__main__":
    # Allow `python model.py` for interactive work with the models.
    from server import app
    connect_to_db(app)
49,622 | LaOriana/knit-along | refs/heads/main | /seed_database.py | """Script to seed database."""
import os
# don't need this now
# import json
import crud
import model
import server
# Recreate the database from scratch — this DESTROYS any existing data.
os.system('dropdb knitalong')
os.system('createdb knitalong')
# Bind the models to the Flask app, then create all tables.
model.connect_to_db(server.app)
model.db.create_all()
| {"/crud.py": ["/model.py", "/server.py"], "/server.py": ["/model.py", "/crud.py"], "/model.py": ["/server.py"], "/seed_database.py": ["/crud.py", "/model.py", "/server.py"]} |
49,641 | apollinemeyss/Poulpe-Defender | refs/heads/master | /tirs_invaders.py | import pygame
from pygame.locals import*
#creation classe tirs des invaders
class Tir_Inv:
    """A projectile fired downward by an invader."""

    def __init__(self, pygame, x, y):
        # The pygame module is injected rather than imported here.
        self.pygame = pygame
        self.tir = self.pygame.image.load("tir_inv.png")
        self.position = self.tir.get_rect()
        self.position.center = x, y

    def descendre(self):
        """Move the shot 15 pixels down the screen."""
        self.position = self.position.move(0, 15)

    def getTir(self):
        """Return the shot's sprite surface."""
        return self.tir

    def getPosition(self):
        """Return the shot's current rect."""
        return self.position
| {"/main.py": ["/poulpe.py", "/invaders.py", "/tir.py", "/tirs_invaders.py"]} |
49,642 | apollinemeyss/Poulpe-Defender | refs/heads/master | /tir.py | import pygame
from pygame.locals import*
#creation classe tir
class Tir:
    """A projectile fired upward by the player's octopus."""

    def __init__(self, pygame, x, y):
        # The pygame module is injected rather than imported here.
        self.pygame = pygame
        self.tir = self.pygame.image.load("tir.png")
        self.position = self.tir.get_rect()
        self.position.center = x, y

    def monter(self):
        """Move the shot 15 pixels up the screen."""
        self.position = self.position.move(0, -15)

    def getTir(self):
        """Return the shot's sprite surface."""
        return self.tir

    def getPosition(self):
        """Return the shot's current rect."""
        return self.position
| {"/main.py": ["/poulpe.py", "/invaders.py", "/tir.py", "/tirs_invaders.py"]} |
49,643 | apollinemeyss/Poulpe-Defender | refs/heads/master | /main.py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from poulpe import Poulpe
from invaders import Invaders
from tir import Tir
from tirs_invaders import Tir_Inv
#on a importé tous les objets/classes et leurs fonctions associées
import pygame
import random
from pygame.locals import *
# Initialisation de pygame et des variables du jeu
#==================================================
#la bibliothèque pygame est importée et initialisée
# --- Pygame and game-state initialisation --------------------------------
pygame.init()
clock = pygame.time.Clock()
# Font for on-screen text.
font = pygame.font.SysFont('Arial', 25)
# 800x600 game window and image assets.
fenetre = pygame.display.set_mode((800, 600))
fond = pygame.image.load("background_espace.png")  # space background
game_over = pygame.image.load("game_over.png")
fond_gagne = pygame.image.load("bravo.png")
tir = pygame.image.load("tir.png")
intro = pygame.image.load("scenario.png")
controles = pygame.image.load("controles.png")
# Main game music.
pygame.mixer.music.load("musique.wav")
# Loop-control flags.
jouer = True
gagner = False
# Invader shots currently on screen.
list_tirs_invaders = []
# Player (octopus) shots currently on screen.
list_tirs_poulpe = []
# Score persists between rounds as long as the player doesn't lose.
score = 0
# ================= End of initialisation =====================
# Reset helper so the game can be replayed endlessly: rebuilds the invader
# grid and puts the octopus back at its starting position.
def reinitialisation():
    # Rebind the module-level game state.
    global jouer
    global gagner
    global list_tirs_poulpe
    global list_invaders
    global list_tirs_invaders
    global poulpe
    # NOTE(review): score is declared global but never reset here — per the
    # comment at initialisation, the score intentionally persists across rounds.
    global score
    # Build the invader grid: four rows of 11, one colour per row,
    # offset 50 px apart horizontally (hence i * 50).
    list_invaders = []
    for i in range(0, 11):
        # green row at y=300
        list_invaders.append(Invaders(pygame, 100 + i * 50, 300,
                                      "verts"))
    for i in range(0, 11):
        # red row at y=250
        list_invaders.append(Invaders(pygame, 100 + i * 50, 250,
                                      "rouges"))
    for i in range(0, 11):
        # brown row at y=200
        list_invaders.append(Invaders(pygame, 100 + i * 50, 200,
                                      "marrons"))
    for i in range(0, 11):
        # blue row at y=150
        list_invaders.append(Invaders(pygame, 100 + i * 50, 150,
                                      "bleus"))
    # Create the player octopus at its starting position.
    poulpe_position_initial_x = 320
    poulpe_position_initial_y = 420
    poulpe = Poulpe(pygame, poulpe_position_initial_x, poulpe_position_initial_y)
    # Reset the loop-control flags.
    jouer = True
    gagner = False
# Appends a new octopus shot to the shot list.
def ajouter_tir(x, y):
    """Spawn a player shot just above the octopus at (x, y)."""
    global list_tirs_poulpe
    # +20 centres the shot sprite on the octopus; -20 spawns it just above.
    list_tirs_poulpe.append(Tir(pygame, x + 20, y - 20))
def ajouter_tir_invaders(x_i, y_i):
    """Spawn an invader shot just below the invader at (x_i, y_i)."""
    global list_tirs_invaders
    list_tirs_invaders.append(Tir_Inv(pygame, x_i + 20, y_i + 20))
# Shows the two intro panels (story, then controls).
def introduction():
    """Display the story panel, then the controls panel.

    Space advances to the next panel; Escape or closing the window quits.
    Fixes the original quit test, which compared event.key against QUIT
    (an *event type*, not a key) inside KEYDOWN handling, so the window
    close button never quit the game.
    """
    global jouer
    global fond
    global intro
    # Paste the background then the story panel onto the window.
    fenetre.blit(fond, (0, 0))
    fenetre.blit(intro, (0, 0))
    pygame.display.flip()
    explication = True
    while explication and jouer:
        for event in pygame.event.get():
            if event.type == QUIT:
                jouer = False
            elif event.type == KEYDOWN:
                if event.key == K_SPACE:
                    # Space closes this panel and moves on to the next one.
                    explication = False
                elif event.key == K_ESCAPE:
                    jouer = False
        # Poll events 10x per second to avoid a busy loop.
        clock.tick(10)
    # Second panel: the controls.
    fenetre.blit(controles, (0, 0))
    pygame.display.flip()
    controle = True
    while controle and jouer:
        for event in pygame.event.get():
            if event.type == QUIT:
                jouer = False
            elif event.type == KEYDOWN:
                if event.key == K_SPACE:
                    controle = False
                elif event.key == K_ESCAPE:
                    jouer = False
        clock.tick(10)
# Show the Game Over panel; SPACE restarts, ESC/window close quits
def gameOver():
    """Display the game-over screen, reset the score, and wait for the player."""
    # Module-level state
    global jouer
    global score
    # Losing resets the score
    score = 0
    afficher_gameover = True
    fenetre.blit(game_over, (0, 0))  # repaint with the game-over background
    pygame.display.flip()
    while afficher_gameover and jouer:
        for event in pygame.event.get():
            # Bug fix: the window close button emits a QUIT *event type*;
            # it was wrongly compared against event.key before.
            if event.type == QUIT:
                pygame.mixer.music.stop()
                jouer = False
            if event.type == KEYDOWN:
                if event.key == K_SPACE:
                    # Replay: restart the music and leave the panel
                    jouer = True
                    pygame.mixer.music.play()
                    afficher_gameover = False
                if event.key == K_ESCAPE:
                    pygame.mixer.music.stop()
                    jouer = False
        # Poll events 15 times per second to avoid a busy loop
        clock.tick(15)
# Show the victory panel; SPACE continues (play again), ESC/window close quits
def gagne():
    """Display the 'you won' screen until SPACE or a quit request."""
    # Module-level state
    global jouer
    afficher_gagne = True
    fenetre.blit(fond_gagne, (0, 0))  # repaint with the victory background
    pygame.display.flip()
    while afficher_gagne and jouer:
        for event in pygame.event.get():
            # Bug fix: window close emits a QUIT *event type*; it was
            # wrongly compared against event.key before.
            if event.type == QUIT:
                pygame.mixer.music.stop()
                jouer = False
            if event.type == KEYDOWN:
                if event.key == K_SPACE:
                    afficher_gagne = False
                if event.key == K_ESCAPE:
                    pygame.mixer.music.stop()
                    jouer = False
        # Poll events 15 times per second to avoid a busy loop
        clock.tick(15)
def collision_tir_poulpe():
    """Return True when an invader shot hits the octopus (consuming that shot).

    Also garbage-collects invader shots that fell far below the window.
    """
    global list_tirs_invaders
    global poulpe
    position_poulpe = poulpe.getPosition()
    # Iterate over a snapshot: the list is mutated inside the loop.
    # Bug fix: removing while iterating the live list used to skip the
    # element right after each removal (and a shot that was both
    # off-screen and colliding was removed twice -> ValueError).
    for tir in list(list_tirs_invaders):
        position_tir_inv = tir.getPosition()
        # Shot far below the window: drop it and keep scanning
        if position_tir_inv.top > 2000:
            list_tirs_invaders.remove(tir)
            continue
        # Axis-aligned rectangle overlap test against the octopus
        if (position_poulpe.top < position_tir_inv.bottom) and (position_poulpe.bottom > position_tir_inv.top):
            if (position_poulpe.right > position_tir_inv.left) and (position_poulpe.left < position_tir_inv.right):
                # The octopus is hit: the shot disappears
                list_tirs_invaders.remove(tir)
                return True
    # Explicit miss (the old implicit None was relied on as falsy)
    return False
def collision_tir_invaders():
    """Remove invaders hit by the octopus' shots, awarding 100 points per kill.

    Also drops player shots that flew above the top of the window.
    """
    global list_tirs_poulpe
    global list_invaders
    global score
    # Iterate over snapshots: both lists are mutated inside the loops.
    # Bug fixes: (1) removing while iterating the live lists skipped
    # elements; (2) the shot removed after a hit was the *last* scanned
    # shot, not necessarily the one that actually collided.
    for invader in list(list_invaders):
        position_invaders = invader.getPosition()
        for tir in list(list_tirs_poulpe):
            position_tir = tir.getPosition()
            # Shot above the window: drop it and keep scanning
            if position_tir.bottom < 0:
                list_tirs_poulpe.remove(tir)
                continue
            # Same row: the invader's bottom is below the shot's top and
            # the shot has not yet passed above the invader
            if position_invaders.bottom > position_tir.top and position_invaders.top < position_tir.bottom:
                # Horizontal overlap -> hit
                if (position_invaders.left < position_tir.right) and (position_invaders.right > position_tir.left):
                    list_invaders.remove(invader)
                    list_tirs_poulpe.remove(tir)
                    # each invader killed is worth 100 points
                    score += 100
                    break  # this invader is gone; move to the next one
def collision():
    """Return True when any invader touches the octopus or reaches the ground.

    Bug fix: the old ``else: return False`` was attached to the row-overlap
    test, so the function returned after examining only the first invader
    whose row did not overlap the octopus -- the rest were never checked.
    """
    global poulpe
    global list_invaders
    position_poulpe = poulpe.getPosition()
    # Compare every invader's rect with the octopus' rect
    for invader in list_invaders:
        position_invaders = invader.getPosition()
        # Invader descended past the octopus' row: game over
        if position_invaders.bottom > 420:
            return True
        # Axis-aligned rectangle overlap with the octopus
        if (position_poulpe.top < position_invaders.bottom) and (position_poulpe.bottom > position_invaders.top):
            if (position_poulpe.right > position_invaders.left) and (position_poulpe.left < position_invaders.right):
                return True
    # No invader collides
    return False
# Main game function: runs one full round
def jeu():
    """Play one round until the player dies, wins, or quits.

    Relies on module-level state: fenetre, fond, font, clock, poulpe,
    the three sprite lists, and the jouer/gagner flags.
    """
    global jouer
    global gagner
    global list_tirs_poulpe
    global list_invaders
    global list_tirs_invaders
    # Per-round state: 3 lives, not yet won
    vie = 3
    gagner = False
    # Marching flags: invaders sweep right, drop one step, sweep left, ...
    stop_invaders_a_droite = False
    stop_invaders_a_gauche = True
    while jouer:
        # Resolve hits of the octopus' shots on the invaders
        collision_tir_invaders()
        # The octopus loses a life when hit by an invader shot
        if collision_tir_poulpe():
            vie -= 1
        # Out of lives, or the invaders reached the octopus / descended too far
        if vie == 0 or collision():
            break # Stop the round loop
        # No invaders left: the player has won
        if len(list_invaders) == 0:
            gagner = True
            break # Stop the round loop
        # Invader fire: easy mode, only one invader shot alive at a time
        if len(list_tirs_invaders) == 0: # only spawn a shot when none is alive
            # Pick a random invader to fire
            al = random.randint(0, len(list_invaders)-1)
            invader = list_invaders[al]
            # Spawn the shot at that invader's coordinates
            x_invaders = invader.getX()
            y_invaders = invader.getY()
            ajouter_tir_invaders(x_invaders,y_invaders)
        # Redraw the background and the octopus
        fenetre.blit(fond, (0, 0)) # repaint the background
        fenetre.blit(poulpe.getPoulpe(), poulpe.getPosition()) # repaint the octopus at its new position
        # Show the remaining lives
        fenetre.blit(font.render('Vie: ' + str(vie), True, (15,183,132)), (10, 5)) #render(text, antialias, color, background=None) -> Surface
        # font.render creates a new surface holding the rendered text
        # Show the score
        fenetre.blit(font.render('Score: ' + str(score), True, (255,0,0)), (10, 35)) #render(text, antialias, color, background=None) -> Surface
        # font.render creates a new surface holding the rendered text
        # Draw and advance the octopus' shots (they move up)
        for i in range(len(list_tirs_poulpe)):
            fenetre.blit(list_tirs_poulpe[i].getTir(),
                         list_tirs_poulpe[i].getPosition()) # blit each shot sprite at its position
            list_tirs_poulpe[i].monter()
        # Draw and advance the invaders' shots (they move down)
        for i in range(len(list_tirs_invaders)):
            fenetre.blit(list_tirs_invaders[i].getTir(),
                         list_tirs_invaders[i].getPosition()) # blit each invader shot at its position
            list_tirs_invaders[i].descendre()
        # Draw the invaders
        for i in range(len(list_invaders)):
            fenetre.blit(list_invaders[i].getInvaders(),
                         list_invaders[i].getPosition()) # blit each invader sprite at its position
        # March the invaders: sweep one way until an edge is reached, then
        # every invader drops one step and the sweep direction flips
        if not (stop_invaders_a_droite):
            for i in range(len(list_invaders)):
                if not (list_invaders[i].allerAdroite()):
                    stop_invaders_a_droite = True
                    stop_invaders_a_gauche = False
                    for a in range(len(list_invaders)):
                        list_invaders[a].descendre()
        elif not (stop_invaders_a_gauche):
            for i in range(len(list_invaders)):
                if not (list_invaders[i].allerAgauche()):
                    stop_invaders_a_gauche = True
                    stop_invaders_a_droite = False
                    for a in range(len(list_invaders)):
                        list_invaders[a].descendre()
        pygame.display.flip()
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                # NOTE(review): QUIT is an event *type*, not a key code, so
                # comparing it against event.key means the window close
                # button is ignored here -- should be a separate
                # `event.type == QUIT` test on the event loop.
                if event.key == QUIT or event.key == K_ESCAPE:
                    jouer = False
                if event.key == K_LEFT: # left arrow pressed
                    poulpe.allerAgauche() # move the octopus one step left
                if event.key == K_RIGHT: # right arrow pressed
                    poulpe.allerAdroite() # move the octopus one step right
                if event.key == K_SPACE:
                    x = poulpe.getX()
                    y = poulpe.getY()
                    if len(list_tirs_poulpe) == 0: # the octopus may only have one shot alive at a time
                        ajouter_tir(x, y)
        # Keep moving while a left/right arrow key is held down
        keys = pygame.key.get_pressed()
        if keys[K_LEFT]:
            poulpe.allerAgauche()
        if keys[K_RIGHT]:
            poulpe.allerAdroite()
        # Refresh 30 times per second
        clock.tick(30)
#=========================================================================
# Intro and music are started once, outside the replay loop, so the music
# keeps playing across restarted games.
introduction()
pygame.mixer.music.play()
# Main loop: keep replaying full rounds until the player quits
while jouer:
    reinitialisation()
    jeu()
    if gagner:
        gagne()
    else:
        gameOver()
    for event in pygame.event.get():
        # Bug fix: the close button emits a QUIT *event type*; it was
        # wrongly compared against event.key under KEYDOWN before.
        if event.type == QUIT:
            jouer = False
        if event.type == KEYDOWN and event.key == K_ESCAPE:
            jouer = False
    # Poll events 10 times per second to avoid a busy loop
    clock.tick(10)
pygame.quit()
| {"/main.py": ["/poulpe.py", "/invaders.py", "/tir.py", "/tirs_invaders.py"]} |
49,644 | apollinemeyss/Poulpe-Defender | refs/heads/master | /poulpe.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
# Creation classe poulpe
class Poulpe:
    """Player sprite (the octopus): owns its image and horizontal movement."""

    def __init__(self, pygame, x, y):
        self.pygame = pygame
        # per-pixel alpha so the background shows through the sprite
        self.poulpe = self.pygame.image.load("poulpe.png").convert_alpha()
        self.position = self.poulpe.get_rect()
        # starting position of the octopus
        self.position.center = x, y

    def allerAdroite(self):
        """Step 7px to the right, staying inside the 750px-wide play area."""
        if 0 < self.position.x + 15 < 750:
            self.position = self.position.move(7, 0)

    def allerAgauche(self):
        """Step 7px to the left, with the symmetric bound check."""
        if 0 < self.position.x - 15 < 750:
            self.position = self.position.move(-7, 0)

    def getPoulpe(self):
        """Surface to blit in the main loop."""
        return self.poulpe

    def getPosition(self):
        """Current rect of the sprite (used for blitting and collisions)."""
        return self.position

    def getX(self):
        return self.position.x

    def getY(self):
        return self.position.y
| {"/main.py": ["/poulpe.py", "/invaders.py", "/tir.py", "/tirs_invaders.py"]} |
49,645 | apollinemeyss/Poulpe-Defender | refs/heads/master | /invaders.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
# Creation classe invaders
class Invaders:
    """One invader sprite: image chosen by colour, plus marching helpers."""

    def __init__(self, pygame, x, y, couleur):
        self.pygame = pygame
        # colour keyword -> sprite file (per-pixel alpha strips the white
        # box around each image)
        sprites = {
            "verts": "invaders.png",
            "rouges": "invaders_rouges.png",
            "marrons": "invadermarron.png",
            "bleus": "invaderbleu.png",
        }
        if couleur in sprites:
            self.invaders = self.pygame.image.load(sprites[couleur]).convert_alpha()
        self.position = self.invaders.get_rect()
        # initial position of the sprite's rect
        self.position.center = x, y

    def allerAdroite(self):
        """Move 5px right; return False once the right edge is reached."""
        inside = 0 < self.position.x + 15 < 750
        self.position = self.position.move(5, 0)
        return inside

    def allerAgauche(self):
        """Move 5px left; return False once the left edge is reached."""
        inside = 0 < self.position.x - 15 < 750
        self.position = self.position.move(-5, 0)
        return inside

    def descendre(self):
        """Drop one 5px step toward the bottom of the screen."""
        self.position = self.position.move(0, 5)

    def getInvaders(self):
        """Surface to blit in the main loop."""
        return self.invaders

    def getPosition(self):
        """Current rect of the sprite."""
        return self.position

    def getX(self):
        return self.position.x

    def getY(self):
        return self.position.y
| {"/main.py": ["/poulpe.py", "/invaders.py", "/tir.py", "/tirs_invaders.py"]} |
49,677 | elipugh/aa222_project | refs/heads/master | /optimizers/differential_evolution.py | from __future__ import division
import numpy as np
from copy import copy
import scipy.optimize as sp
class Differential_Evolution_Optimizer():
    """Global airfoil optimizer wrapping scipy's differential evolution.

    ``f`` is expected to return the 11 per-yaw drag values; ``repf``
    yaw-weights them and adds a size/smoothness regularization term.
    """

    def __init__(self, f, bounds, n, reps=1, args=(), popsize=5):
        self.args = args
        self.reps = reps
        self.f = f
        self.bounds = bounds
        self.nit = n
        self.popsize = popsize
        self.optimize()

    # Repeats the evaluation at slightly perturbed design points to get a
    # less noisy drag estimate, then applies the yaw weighting.
    def repf(self, pts):
        """Return the yaw-weighted, regularized objective at design ``pts``."""
        yaw_weights = np.array([6.641, 6.55, 6.283, 5.863,
                                5.321, 4.697, 4.033, 3.368,
                                2.736, 2.162, 1.661])
        if self.reps == 1:
            obj = np.dot(self.f(pts, *(self.args)), yaw_weights)
        else:
            objs = [[] for _ in range(self.reps)]
            weights = np.ones(self.reps)
            for i in range(self.reps):
                # Bug fix: was `pt` (undefined -> NameError whenever
                # reps > 1); jitter the design point by ~5% noise.
                npt = pts + np.random.normal(0, np.mean(pts)/20, pts.shape)
                objs[i] = self.f(npt, *(self.args))
                # down-weight replicates containing duplicated (suspicious)
                # drag values
                weights[i] -= (len(objs[i]) - len(set(objs[i])))/len(objs[i])
            obj = np.dot( (np.dot(weights,objs) / np.sum(weights)), yaw_weights )
        # Regularization: penalize large and non-smooth design vectors
        reg = np.linalg.norm(pts)*2
        reg += np.linalg.norm([pts[i-1]-pts[i] for i in range(1,len(pts))])*10
        print("\t Regularized : {}\n".format(obj + reg))
        return obj + reg

    # Differential Evolution optimization (comment previously said
    # "Nelder Mead" -- copy-paste leftover)
    def optimize(self):
        """Run scipy's differential evolution and copy out the result fields."""
        self.opt = sp.differential_evolution(
            self.repf,
            self.bounds,
            strategy="best1bin",
            maxiter=self.nit,
            popsize=self.popsize
        )
        self.message = self.opt.message
        self.nit = self.opt.nit
        self.fun = self.opt.fun
        self.x = self.opt.x
# Example / smoke test
if __name__ == "__main__":
    def rosenbrock(X):
        """
        Good R^2 -> R^1 function for optimization
        http://en.wikipedia.org/wiki/Rosenbrock_function
        """
        x = X[0]
        y = X[1]
        a = 1. - x
        b = y - x*x
        obj = a*a + b*b*100.
        print(obj)
        return obj
    try:
        # NOTE(review): repf expects f to return 11 drag values (one per
        # yaw angle); a scalar objective makes np.dot produce a vector and
        # scipy raises, which is why the except branch fires.
        opt = Differential_Evolution_Optimizer(rosenbrock, [(-1,2),(-1,2)], n=5, reps=1, popsize=5)
        print(opt.message)
        print("Iters: {}".format(opt.nit))
        print("Design:\n{}".format(list(opt.x)))
        print("Objective: {}".format(opt.fun))
    except:
        print("sorry ... change line 37 to self.f instead of self.repf")
49,678 | elipugh/aa222_project | refs/heads/master | /parameterizations/naca_parsec_mix.py | from __future__ import division
import numpy as np
import math
from parameterizations.naca import Airfoil as NacaAirfoil
from parameterizations.parsec import Airfoil as ParsecAirfoil
class Airfoil(object):
    """Blend of a NACA foil and a PARSEC foil.

    ``params`` layout: [0] NACA truncation, [1:7] the six PARSEC
    coefficients, [7] the blend weight (1 -> pure NACA, 0 -> pure PARSEC).
    """

    def __init__(self, params):
        self.naca_params = params[0:1]
        self.parsec_params = params[1:7]
        self.mix = params[7]
        self.naca = NacaAirfoil(self.naca_params)
        self.parsec = ParsecAirfoil(self.parsec_params)

    def Z_up(self, X):
        """Upper surface: normalize each foil to a 1/6 peak, then blend."""
        z_naca = self.naca.Z_up(X)
        z_parsec = self.parsec.Z_up(X)
        # scale each surface so its maximum sits at 1/6 of the chord
        z_naca = z_naca * 1/(6*np.max(z_naca))
        z_parsec = z_parsec * 1/(6*np.max(z_parsec))
        # convex combination of the two normalized surfaces
        return self.mix * z_naca + (1-self.mix) * z_parsec

    def Z_lo(self, X):
        """Lower surface: mirror of the upper surface (symmetric blend)."""
        return -self.Z_up(X)
49,679 | elipugh/aa222_project | refs/heads/master | /parameterizations/helpers.py | from __future__ import division
import numpy as np
def fn_2_dat(filename, upper, lower):
    """Write an airfoil outline to *filename* in XFOIL .dat ordering.

    Samples the ``upper``/``lower`` surface callables on a 100-point grid,
    scales both surfaces by the same factor so the upper-surface peak is
    1/6 of the chord (the maximum 3:1 ratio allowed by UCI regulation),
    and writes the curve from the trailing edge (x=1) counterclockwise
    over the top to the nose (x=0), then back along the bottom.
    """
    grid = np.linspace(0.0, 1.0, 100)
    top = upper(grid)
    bottom = lower(grid)
    topmax = np.max(top)
    # one shared scale factor keeps the two surfaces consistent
    top = top * 1/(6*topmax)
    bottom = bottom * 1/(6*topmax)
    with open(filename, "w") as out:
        out.write("Custom_Airfoil\n")
        # trailing edge -> nose along the upper surface
        for xv, zv in zip(reversed(grid), reversed(top)):
            out.write("{:.5f} {:.5f}\n".format(xv, zv))
        # nose -> trailing edge along the lower surface
        for xv, zv in zip(grid, bottom):
            out.write("{:.5f} {:.5f}\n".format(xv, zv))
| {"/parameterizations/naca_parsec_mix.py": ["/parameterizations/naca.py", "/parameterizations/parsec.py"], "/run.py": ["/parameterizations/helpers.py", "/parameterizations/naca.py", "/parameterizations/parsec.py", "/parameterizations/naca_parsec_mix.py", "/parameterizations/inter.py", "/optimizers/fib.py", "/optimizers/nelder_mead.py", "/optimizers/differential_evolution.py"]} |
49,680 | elipugh/aa222_project | refs/heads/master | /xfoil/xfoil.py | import subprocess as subp
import psutil
import numpy as np
import os
import sys
import re
import time
import random
import sys
# Evaluate an airfoil's aerodynamic characteristics by scripting XFOIL.
# Fixed analysis setup chosen for this project (may be tuned later):
# - Reynolds number 38k, Mach 0.03
# - optional viscous flow, generous iteration cap
# - one polar point per requested angle of attack
def evaluate(filename, angles, viscous, iters=3000):
    """Run XFOIL on the foil in *filename* at the given attack angles.

    Returns (alpha, CL, CD, CDp, CM) as parallel lists, with unconverged
    angles filled in heuristically, or None when nothing converged.
    """
    # NOTE(review): curdir is computed but never used
    curdir = os.path.dirname(os.path.realpath(__file__))
    xf = Xfoil()
    # Normalize foil
    xf.cmd("NORM\n")
    # Load foil
    xf.cmd('LOAD {}\n'.format(filename))
    # Disable graphing
    xf.cmd("PLOP\nG F\n\n")
    # Set options for panels
    xf.cmd("PPAR\n")
    xf.cmd("N 240\n")
    xf.cmd("T 1\n\n\n")
    xf.cmd("PANE\n")
    # Operation mode
    xf.cmd("OPER\n")
    # Set Reynolds #
    xf.cmd("Re 38000\n")
    # Set Mach
    xf.cmd("Mach 0.03\n")
    if viscous:
        # Viscous mode
        xf.cmd("v\n")
    # Allow more iterations
    xf.cmd("ITER " + str(iters) + "\n")
    # Get started with an eval
    xf.cmd("ALFA 0\n")
    # Record the polar to a randomly-named file so parallel runs don't clash
    savefile = "sf{}.txt".format(random.randrange(10**20)%(10**15))
    xf.cmd("PACC\n{}\n\n".format(savefile))
    # Run one evaluation per requested angle of attack
    for a in angles:
        xf.cmd("ALFA {}\n".format(a))
    # End recording
    xf.cmd("PACC\n\n\nQUIT\n")
    # Don't try to read results before
    # Xfoil finished simulations
    xf.wait_to_finish()
    alpha = []
    CL = []
    CD = []
    CDp = []
    CM = []
    try:
        # Open the polar savefile and read the results into parallel lists;
        # the first 12 lines are the XFOIL polar header.
        with open(savefile, "r") as f:
            for _ in range(12):
                f.readline()
            for line in f:
                # NOTE(review): `line is not None` is always true when
                # iterating a file; it never filters anything.
                if line is not None:
                    # insert a space before '-' so fused negative columns split
                    r = line.replace("-", " -").split()
                    alpha += [float(r[0])]
                    CL += [float(r[1])]
                    CD += [float(r[2])]
                    CDp += [float(r[3])]
                    CM += [float(r[4])]
    # NOTE(review): bare except -- masks the specific failure; kept as-is
    except:
        print(sys.exc_info())
        # probably worst case,
        # nothing converged,
        # hence no savefile?
        print("Uh oh. Delete savefile then retry")
        return None
    # Indices of requested angles missing from the polar (did-not-converge)
    dnc = []
    for i,a in enumerate(angles):
        if a not in alpha:
            dnc += [i]
    # Worst case scenario, nothing converges
    if len(dnc) == len(angles):
        return None
    if len(dnc)>0:
        print("Angles did not converge:\t{}".format(list(np.array(angles)[dnc])))
    # Heuristic fill-in for unconverged angles; experimental, but seems to
    # improve the accuracy of the guessed performance
    for i in dnc:
        seen = 0
        # copy the values from the next converged angle above this one,
        # with a 5% drag penalty
        for j in range(i, len(angles)):
            if (angles[j] in alpha) and (seen == 0):
                seen = 1
                ind = alpha.index(angles[j])
                CL.insert(i, CL[ind])
                CD.insert(i, CD[ind]*1.05)
                CDp.insert(i, CDp[ind])
                CM.insert(i, CM[ind])
        # When the unconverged angles sit at the end of the range, fall back
        # to the worst observed values with a 15% drag penalty
        if seen == 0:
            worsts = [0, 0, 0, 0]
            for j in range(len(alpha)):
                if CL[j] > worsts[0]:
                    worsts[0] = CL[j]
                if CD[j] > worsts[1]:
                    worsts[1] = CD[j]
                if CDp[j] > worsts[2]:
                    worsts[2] = CDp[j]
                if CM[j] > worsts[3]:
                    worsts[3] = CM[j]
            CL.insert(i, worsts[0])
            CD.insert(i, worsts[1]*1.15)
            CDp.insert(i, worsts[2])
            CM.insert(i, worsts[3])
        alpha.insert(i, angles[i])
    try:
        # Remove savefile
        os.remove(savefile)
    except:
        # probably worst case,
        # nothing converged,
        # hence no savefile?
        print("fail rm {}".format(savefile))
        pass
    # Return results
    return alpha, CL, CD, CDp, CM
class Xfoil():
    """Thin wrapper around an interactive ``xfoil`` subprocess."""

    def __init__(self):
        # the xfoil binary sits next to this module; discard its stdout
        here = os.path.dirname(os.path.realpath(__file__))
        self.xfsubprocess = subp.Popen(
            os.path.join(here, 'xfoil'),
            stdin=subp.PIPE,
            stdout=open(os.devnull, 'w'),
        )

    def cmd(self, cmd):
        """Send one command string to xfoil's stdin."""
        self.xfsubprocess.stdin.write(cmd.encode('utf-8'))

    def wait_to_finish(self):
        """Close stdin and wait for xfoil to exit, killing it after 60s."""
        proc = psutil.Process(self.xfsubprocess.pid)
        self.xfsubprocess.stdin.close()
        try:
            proc.wait(timeout=60)
        except psutil.TimeoutExpired:
            proc.kill()
        self.xfsubprocess.wait()
# Example: single inviscid evaluation at 0 degrees
if __name__ == "__main__":
    # hint ... use fn_2_dat() in parameterizations/helpers.py
    # to create an airfoil file, or download one at
    # https://m-selig.ae.illinois.edu/ads/coord_database.html
    # be cautious ... points need to be in XFOIL ordering
    alpha, CL, CD, CDp, CM = evaluate("custom_airfoil.dat", [0], False)
    print("alpha :{}".format(alpha))
    print("CL    :{}".format(CL))
    print("CD    :{}".format(CD))
    print("CDp   :{}".format(CDp))
    print("CM    :{}".format(CM))
| {"/parameterizations/naca_parsec_mix.py": ["/parameterizations/naca.py", "/parameterizations/parsec.py"], "/run.py": ["/parameterizations/helpers.py", "/parameterizations/naca.py", "/parameterizations/parsec.py", "/parameterizations/naca_parsec_mix.py", "/parameterizations/inter.py", "/optimizers/fib.py", "/optimizers/nelder_mead.py", "/optimizers/differential_evolution.py"]} |
49,681 | elipugh/aa222_project | refs/heads/master | /parameterizations/naca.py | from __future__ import division
import numpy as np
import math
class Airfoil(object):
    """Symmetric NACA-00xx style airfoil whose chord can be truncated."""

    def __init__(self, params):
        # fraction of the chord actually used (x gets scaled by this)
        self.truncation = params[0]
        # unit thickness parameter of the 4-digit formula
        self.thickness = 1

    def _half_thickness(self, X):
        # NACA 4-digit symmetric thickness polynomial on the truncated chord
        X = X * self.truncation
        t = self.thickness
        return 5*t * (.2969*np.sqrt(X) - .1260*X - .3516*X**2 + .2843*X**3 - .102*X**4)

    def Z_up(self, X):
        """Upper-surface z(x)."""
        return self._half_thickness(X)

    def Z_lo(self, X):
        """Lower-surface z(x): mirror image of the upper surface."""
        return -self._half_thickness(X)
49,682 | elipugh/aa222_project | refs/heads/master | /results/filter.py | import os
# Scrape "Design" and "Objective" lines out of an optimizer log and write the
# first 200 of each to their own files.
design_points = []
objectives = []
with open("results3.txt", "r") as f:
    for line in f:
        # strip list/label punctuation so split() yields bare tokens
        for ch in ",[]:":
            line = line.replace(ch, " ")
        tokens = line.split()
        # Bug fix: blank lines produced an empty token list and crashed
        # on tokens[0]; skip them instead.
        if not tokens:
            continue
        if tokens[0] == "Design":
            design_points.append([float(t) for t in tokens[1:]])
        elif tokens[0] == "Objective":
            objectives.append(float(tokens[1]))
# keep only the first 200 entries of each series
with open("objectives3.txt", "w") as f:
    for o in objectives[:200]:
        f.write("{},\n".format(o))
with open("design_points3.txt", "w") as f:
    for p in design_points[:200]:
        f.write("{},\n".format(p))
| {"/parameterizations/naca_parsec_mix.py": ["/parameterizations/naca.py", "/parameterizations/parsec.py"], "/run.py": ["/parameterizations/helpers.py", "/parameterizations/naca.py", "/parameterizations/parsec.py", "/parameterizations/naca_parsec_mix.py", "/parameterizations/inter.py", "/optimizers/fib.py", "/optimizers/nelder_mead.py", "/optimizers/differential_evolution.py"]} |
49,683 | elipugh/aa222_project | refs/heads/master | /run.py | import numpy as np
from xfoil import xfoil
import os
import random
from datetime import datetime
from parameterizations.helpers import fn_2_dat
from parameterizations.naca import Airfoil as NacaAirfoil
from parameterizations.parsec import Airfoil as ParsecAirfoil
from parameterizations.naca_parsec_mix import Airfoil as MixAirfoil
from parameterizations.inter import Airfoil as InterAirfoil
from optimizers.fib import Fib_Optimizer
from optimizers.nelder_mead import Nelder_Mead_Optimizer
from optimizers.differential_evolution import Differential_Evolution_Optimizer
#========================================#
# Change this to change type of airfoil
# (one of: "NACA", "PARSEC", "Mixed", "Interpolate")
parameterization="Interpolate"
# The rest are set to good values
# later, if left unset
# Note that the auto initializations
# are roughly optimal, so you're
# unlikely to see much improvement
# Initial point
x0 = None
# Evaluation Args
args = None
# Optimizer Evaluations (not quite iters)
n = None
# If you want to repeat evaluations
# at multiple points very close to each
# design point to denoise objective
reps = None
# SLOW but does global optimization.
# Mostly just for kicks currently.
# Currently only for interpolate,
# ez to extend to the others though
# Also note, this is doing regularization
# to make airfoils smoother. Check out
# optimizers/differential_evolution.py
global_opt = True
popsize=None
#========================================#
# Perform an objective function
# evaluation at x
def evaluation(x, parameterization, avg=True, ticks=None, iters=3000):
    """Build the airfoil described by design vector ``x``, run XFOIL on it,
    and return either the yaw-weighted drag (avg=True) or the raw per-angle
    drag list (avg=False).

    NOTE(review): an unknown ``parameterization`` string leaves ``airfoil``
    unbound and raises NameError below.
    """
    # Angles of the wind on the airfoil (0..10 degrees of yaw)
    angles = [i for i in range(11)]
    # Weights how much we care abt each yaw angle
    # These are given by:
    # from scipy.stats import norm
    # rv = norm(loc=0, scale=6)
    # weights = np.array([rv.cdf(i+.5)-rv.cdf(i-.5) for i in range(11)])
    # weights *= 100
    # This is because experienced yaw is roughly gaussian
    # with mean 0 and variance 6-7ish probably
    weights = np.array([6.641, 6.55, 6.283, 5.863, 5.321, 4.697,
                        4.033, 3.368, 2.736, 2.162, 1.661])
    # Viscous flow toggle (CDp is only used when visc is False)
    visc = True
    # random filename so parallel evaluations don't collide
    filename = "evaluation{}.dat".format(random.randrange(10**20)%(10**15))
    if parameterization == "Mixed":
        airfoil = MixAirfoil(x)
    if parameterization == "PARSEC":
        airfoil = ParsecAirfoil(x)
    if parameterization == "NACA":
        airfoil = NacaAirfoil(x)
    if parameterization == "Interpolate":
        # Probably should not be none!
        if ticks is None:
            ticks = np.linspace(0,1,x.size+1)
        # row 0: knot x-positions; row 1: surface heights (trailing knot 0)
        params = np.zeros((2,len(ticks)))
        params[0] = ticks
        params[1,:-1] = x
        airfoil = InterAirfoil(params)
    # Write points into .dat file
    # for Xfoil to load
    fn_2_dat(filename,
             airfoil.Z_up,
             airfoil.Z_lo)
    # Do an evaluation of the point
    # using Xfoil CFD
    metrics = xfoil.evaluate(filename, angles, visc, iters=iters)
    if metrics is None:
        # uh oh, nothing converged, probably a very bad design: penalize
        # every angle with a flat 0.2 drag
        alpha, CL, CD, CDp, CM = [[.2]*len(angles) for _ in range(5)]
    else:
        alpha, CL, CD, CDp, CM = metrics
    # Remove the temporary .dat file
    os.remove(filename)
    obj = CD
    if not visc:
        obj = np.abs(CDp)
    print("\n{} Eval:".format(datetime.now().time().isoformat(timespec='seconds')))
    print("\t Design    : {}".format(list(np.around(x, decimals=3))))
    print("\t Drags     : {}".format(obj))
    print("\t Objective : {}".format(np.around(np.array(obj).dot(weights), decimals=4)))
    if avg:
        return np.array(obj).dot(weights)
    else:
        return obj
# Note that these initializations are roughly optimal,
# so you're unlikely to see much improvement
# The exception is NACA, where Fib Search Opt is used
# Nelder Mead works for NACA too, but Fib Search is nice
if x0 is None:
    if parameterization == "PARSEC":
        x0 = np.array([0.3997, 0.2453, 0.3009, 2.3359, 0.618, 0.7968])
    if parameterization == "NACA":
        # NOTE(review): a and b are only bound here -- a user-supplied x0
        # with parameterization "NACA" leaves them undefined and the
        # `a is None` check below raises NameError.
        x0 = [0.34,0.50]
        a, b = x0[0], x0[1]
    if parameterization == "Mixed":
        x0 = np.array([0.4388, 0.4285, 0.2319, 0.2924, 2.2184, 0.6653, 0.4959, 0.3065])
    if parameterization == "Interpolate":
        x0 = [0.0372, 0.0374, 0.0204, 0.0301, 0.0367, 0.0206, 0.0114, 0.0038,
              0.003, 0.0001, -0.001, -0.0021, -0.0031, -0.003, -0.003]
# These are suggested args ... you can change
# them if you know what you're doing >;)
# If you want help, reach out, esp on Interpolated :)
if args is None:
    if parameterization == "PARSEC":
        args = (parameterization,False,None)
    if parameterization == "NACA":
        args = (parameterization,False)
    if parameterization == "Mixed":
        args = (parameterization,False,None)
    if parameterization == "Interpolate":
        # cosine-spaced knots up to mid-chord, then linear to the trailing edge
        ticks = np.linspace(0,np.pi/2,10)
        ticks = np.array([(0.5*(1.0-np.cos(x))) for x in ticks])
        ticks = np.hstack([ticks, np.linspace(0.5,1,7)[1:]])
        args = (parameterization,False,ticks)
# Box bounds (+-0.02 around x0) used only by differential evolution
if global_opt:
    bounds = [(e-.02,e+.02) for e in x0]
print("\n\n PARAMETERIZATION\n==================\n\n{}\n".format(parameterization))
print(" INITIALIZATION\n================\n\n{}\n".format(x0))
# Optimizer dispatch: NACA is 1-D so it uses Fibonacci search; otherwise
# differential evolution (global) or Nelder-Mead (local).
if parameterization == "NACA":
    if a is None or b is None:
        a, b = 0.34, 0.50
    if n is None:
        n = 30
    if reps is None:
        reps = 3
    opt = Fib_Optimizer(
        evaluation,
        x0,
        n,
        a,
        b,
        reps=reps,
        args=args
    )
elif global_opt:
    if n is None:
        n = 25
    if reps is None:
        reps = 1
    if popsize is None:
        popsize = 15
    opt = Differential_Evolution_Optimizer(
        evaluation,
        bounds,
        n,
        reps=reps,
        args=args,
        popsize=popsize
    )
else:
    if n is None:
        n = 200
    if reps is None:
        reps = 1
    opt = Nelder_Mead_Optimizer(
        evaluation,
        x0,
        n,
        reps=reps,
        args=args
    )
print("\n\n")
if parameterization != "NACA":
    print(opt.message)
print("Iters: {}".format(opt.nit))
print("Design:\n{}".format(list(np.around(opt.x,decimals=4))))
print("Objective: {}".format(np.around(opt.fun,decimals=6)))
| {"/parameterizations/naca_parsec_mix.py": ["/parameterizations/naca.py", "/parameterizations/parsec.py"], "/run.py": ["/parameterizations/helpers.py", "/parameterizations/naca.py", "/parameterizations/parsec.py", "/parameterizations/naca_parsec_mix.py", "/parameterizations/inter.py", "/optimizers/fib.py", "/optimizers/nelder_mead.py", "/optimizers/differential_evolution.py"]} |
49,684 | elipugh/aa222_project | refs/heads/master | /optimizers/fib.py | from __future__ import division
import numpy as np
from copy import copy
class Fib_Optimizer():
    """1-D Fibonacci-search optimizer over the interval [a, b].

    Implements Fibonacci search (Kochenderfer & Wheeler, *Algorithms for
    Optimization*, Algorithm 3.2): n evaluations shrink the bracket by a
    factor of roughly F(n+1).
    """

    def __init__(self, f, x0, n, a, b, reps=4, args=()):
        self.args = args
        self.reps = reps
        self.f = f
        self.x0 = x0
        self.n = n
        self.a = np.array([a])
        self.b = np.array([b])
        self.bounds = (a,b)
        self.optimize()
        self.message = "Eli wrote this so there's no fancy optimizer status, lol"

    # Repeats the evaluation at slightly scaled design points to get a less
    # noisy drag number. With reps == 1 the raw f value is returned; with
    # reps > 1, f is expected to return the 11 per-yaw drag values.
    def repf(self, pt):
        """Return the (optionally replicated and yaw-weighted) objective at pt."""
        yaw_weights = np.array([6.641, 6.55, 6.283, 5.863, 5.321, 4.697,
                                4.033, 3.368, 2.736, 2.162, 1.661])
        if self.reps == 1:
            return self.f(pt, *(self.args))
        objs = [[] for _ in range(self.reps)]
        weights = np.ones(self.reps)
        for i, npt in enumerate(np.linspace(0.99*pt, 1.01*pt, self.reps)):
            objs[i] = self.f(npt, *(self.args))
            # down-weight replicates with duplicated (suspicious) drag values
            weights[i] -= (len(objs[i]) - len(set(objs[i])))/len(objs[i])
        return np.dot( (np.dot(weights,objs) / np.sum(weights)), yaw_weights )

    # Fibonacci Search Optimization
    # see slides 10-19:
    # https://www.cs.ccu.edu.tw/~wtchu/courses/2014s_OPT/Lectures/
    # Chapter%207%20One-Dimensional%20Search%20Methods.pdf
    def optimize(self):
        """Shrink [a, b] around the minimizer; store midpoint in self.x."""
        x0, n, a, b = self.x0, self.n, self.a, self.b
        eps = 0.01
        phi = (1+np.sqrt(5))/2
        s = (1-np.sqrt(5))/(1+np.sqrt(5))
        # initial section ratio rho_1 = 1/(phi*(1-s^(n+1))/(1-s^n))
        p = 1 / (phi*(1-s**(n+1))/(1-s**n))
        d = p*b + (1-p)*a
        yd = self.repf(d)
        for i in range(n-1):
            if i == n-2:
                # last step: probe just next to d
                c = eps*a + (1-eps)*d
            else:
                c = p*a + (1-p)*b
            yc = self.repf(c)
            if yc < yd:
                b, d, yd = d, c, yc
            else:
                a, b = b, c
            a_r, b_r = np.around([a,b],decimals=3)
            print("\nInterval: [{},{}]\n".format(a_r[0], b_r[0]))
            # Bug fix: was `phi**(1-s**(n-i))` -- an exponent typo. The
            # Fibonacci ratio update is rho = 1/(phi*(1-s^(n-i))/(1-s^(n-i-1))),
            # matching the initial formula above (Alg. 3.2, Kochenderfer & Wheeler).
            p = 1 / (phi*(1-s**(n-i))/(1-s**(n-i-1)))
        self.bounds = ((a, b) if a < b else (b,a))
        self.x = np.array([np.mean(self.bounds)])
        self.fun = self.repf(self.x)
        self.nit = n
# Example / smoke test
if __name__ == "__main__":
    # unimodal parabola with minimum at x* = 0.4, f(0.4) = 0.4
    f = lambda x : x**2 - 0.8*x + 0.56
    # Bug fix: pass reps=1 to take repf's scalar path -- the default reps=4
    # path expects f to return 11 per-yaw drag values and raises a shape
    # error on a plain scalar objective.
    opt = Fib_Optimizer(f, np.array([0.6]), 8, 0.2, 1, reps=1)
    print(opt.bounds)  # final bracketing interval around x* = 0.4
    print(opt.fun)     # objective at the bracket midpoint, ~0.4
49,685 | elipugh/aa222_project | refs/heads/master | /optimizers/nelder_mead.py | from __future__ import division
import numpy as np
from copy import copy
import scipy.optimize as sp
class Nelder_Mead_Optimizer():
    """Minimize ``f`` with scipy's Nelder-Mead simplex method.

    Parameters:
        f: objective, called as ``f(x, *args)``; assumed to return a
           sequence of 11 per-yaw-angle values (see ``repf``).
        x0: initial design point (np.ndarray).
        n: maximum number of Nelder-Mead iterations.
        reps: number of perturbed re-evaluations to average per call.
        args: extra positional arguments forwarded to ``f``.

    After construction the result is exposed as ``self.x`` (best design),
    ``self.fun`` (objective value), ``self.nit`` and ``self.message``.
    """
    def __init__(self, f, x0, n, reps=1, args=()):
        self.args = args
        self.reps = reps
        self.f = f
        self.x0 = x0
        self.nit = n
        self.optimize()
    # This repeats the evaluation with very slightly
    # different values to get more accurate drag number
    def repf(self, pts):
        """Collapse ``f(pts)``'s 11 yaw values to one scalar, optionally
        averaging ``reps`` noisy re-evaluations, each down-weighted by the
        fraction of duplicated entries it produced."""
        yaw_weights = np.array([6.641, 6.55, 6.283, 5.863,
                                5.321, 4.697, 4.033, 3.368,
                                2.736, 2.162, 1.661])
        if self.reps == 1:
            obj = np.dot(self.f(pts, *(self.args)), yaw_weights)
        else:
            objs = [[] for _ in range(self.reps)]
            weights = np.ones(self.reps)
            for i in range(self.reps):
                # BUG FIX: this line used ``pt`` (undefined) -- any call with
                # reps > 1 raised NameError. The parameter is ``pts``.
                npt = pts + np.random.normal(0, np.mean(pts)/20, pts.shape)
                objs[i] = self.f(npt, *(self.args))
                # Penalize evaluations that yield duplicated values.
                weights[i] -= (len(objs[i]) - len(set(objs[i])))/len(objs[i])
            obj = np.dot( (np.dot(weights,objs) / np.sum(weights)), yaw_weights )
        return obj
    # Nelder Mead Optimization
    def optimize(self):
        """Run scipy Nelder-Mead on ``repf`` and record the result fields."""
        self.opt = sp.minimize(
            self.repf,
            self.x0,
            method="Nelder-Mead",
            options={'maxiter': self.nit}
        )
        self.message = self.opt.message
        self.nit = self.opt.nit
        self.fun = self.opt.fun
        self.x = self.opt.x
# Example: the class expects an 11-vector objective, so this scalar-valued
# Rosenbrock demo fails inside scipy and lands in the except branch.
if __name__ == "__main__":
    def rosenbrock(X):
        """
        Good R^2 -> R^1 function for optimization
        http://en.wikipedia.org/wiki/Rosenbrock_function
        """
        x = X[0]
        y = X[1]
        a = 1. - x
        b = y - x*x
        obj = a*a + b*b*100.
        print(obj)
        return obj
    try:
        opt = Nelder_Mead_Optimizer(rosenbrock, np.array([0.,0.]), 100, 3)
        print(opt.message)
        print("Iters: {}".format(opt.nit))
        print("Design:\n{}".format(list(opt.x)))
        print("Objective: {}".format(np.around(opt.fun,decimals=6)))
    # BUG FIX: was a bare ``except:``, which also swallows SystemExit and
    # KeyboardInterrupt; catch only ordinary exceptions.
    except Exception:
        print("sorry ... change line 36 to self.f instead of self.repf")
| {"/parameterizations/naca_parsec_mix.py": ["/parameterizations/naca.py", "/parameterizations/parsec.py"], "/run.py": ["/parameterizations/helpers.py", "/parameterizations/naca.py", "/parameterizations/parsec.py", "/parameterizations/naca_parsec_mix.py", "/parameterizations/inter.py", "/optimizers/fib.py", "/optimizers/nelder_mead.py", "/optimizers/differential_evolution.py"]} |
49,686 | elipugh/aa222_project | refs/heads/master | /parameterizations/inter.py | from __future__ import division
import numpy as np
import math
from scipy.interpolate import interp1d
class Airfoil(object):
    """Symmetric airfoil built from x-stations and incremental y-steps.

    ``params`` is a 2-row array-like: row 0 holds the x coordinates, row 1
    holds successive y increments.  The upper surface is the running sum of
    those increments (starting at 0), linearly interpolated; the lower
    surface is its mirror image.
    """
    def __init__(self, params):
        grid = np.array(params)
        self.x = grid[0]
        # y[0] = 0, y[i] = y[i-1] + increment[i-1]: running sum of row 1.
        self.y = np.zeros(grid.shape[1])
        for idx in range(1, self.y.size):
            self.y[idx] = self.y[idx-1] + grid[1][idx-1]
        self.f = interp1d(self.x, self.y, kind=1)
    def Z_up(self, X):
        """Upper-surface height at X (linear interpolation)."""
        return self.f(X)
    def Z_lo(self, X):
        """Lower-surface height at X (mirror of the upper surface)."""
        return -self.Z_up(X)
49,687 | elipugh/aa222_project | refs/heads/master | /parameterizations/parsec.py | from __future__ import division
import numpy as np
import math
class Parameters(object):
    '''Geometric parameters defining a (vertically symmetric) PARSEC airfoil.

    Expects a 5- or 6-element array: leading-edge radius, crest x-location,
    crest half-thickness, crest curvature, trailing-edge wedge angle, and an
    optional chord-truncation fraction (capped at 1).  The lower surface
    simply mirrors the upper one.
    '''
    def __init__(self, x):
        if x.shape > (6,):
            print("5 or 6d np array expected")
        radius = x[0]
        crest_x = x[1]
        crest_z = x[2]
        crest_curv = x[3]
        wedge = x[4]
        # Optional sixth entry truncates the chord; default is full chord.
        trunc = x[5] if x.size > 5 else 1
        self.r_le = radius          # Leading edge radius
        self.X_up = crest_x         # Upper crest location X coordinate
        self.Z_up = crest_z         # Upper crest location Z coordinate
        self.Z_XX_up = -crest_curv  # Upper crest location curvature
        self.X_lo = crest_x         # Lower crest location X coordinate (mirror)
        self.Z_lo = -crest_z        # Lower crest location Z coordinate (mirror)
        self.Z_XX_lo = crest_curv   # Lower crest location curvature (mirror)
        self.Z_te = 0               # Trailing edge Z coordinate (fixed)
        self.dZ_te = 0              # Trailing edge thickness (fixed)
        self.alpha_te = 0           # Trailing edge direction angle (fixed)
        self.beta_te = wedge        # Trailing edge wedge angle (radians)
        self.P_mix = 1.0            # Blending parameter
        self.trunc = min(trunc, 1)  # Where we truncate the chord
class Coefficients(object):
    '''
    Credit for this class goes to
    https://github.com/mbodmer/libairfoil
    Solves the 6x6 linear systems that yield the half-power polynomial
    coefficients (X^0.5 .. X^5.5) for the upper and lower PARSEC surfaces.
    '''
    def __init__(self, parsec_params):
        self.params = Parameters(parsec_params)
        self._a_up = self._calc_a_up(self.params)
        self._a_lo = self._calc_a_lo(self.params)
    def a_up(self):
        '''Returns coefficient vector for upper surface'''
        return self._a_up
    def a_lo(self):
        '''Returns coefficient vector for lower surface'''
        return self._a_lo
    def _calc_a_up(self, p):
        # RHS encodes: TE height, crest height, TE slope, zero slope at the
        # crest, crest curvature, and the leading-edge radius condition.
        rhs = np.array([p.Z_te, p.Z_up,
                        math.tan(p.alpha_te - p.beta_te/2),
                        0.0, p.Z_XX_up, math.sqrt(2*p.r_le)])
        return np.linalg.solve(self._prepare_linsys_Amat(p.X_up), rhs)
    def _calc_a_lo(self, p):
        # Same constraints as the upper surface with mirrored signs.
        rhs = np.array([p.Z_te, p.Z_lo,
                        math.tan(p.alpha_te + p.beta_te/2),
                        0.0, p.Z_XX_lo, -math.sqrt(2*p.r_le)])
        return np.linalg.solve(self._prepare_linsys_Amat(p.X_lo), rhs)
    def _prepare_linsys_Amat(self, X):
        # Rows: value at 1, value at X, slope at 1, slope at X,
        # curvature at X, and the a0-only leading-edge row.
        return np.array(
            [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],
             [X**0.5, X**1.5, X**2.5, X**3.5, X**4.5, X**5.5 ],
             [0.5, 1.5, 2.5, 3.5, 4.5, 5.5 ],
             [0.5*X**-0.5, 1.5*X**0.5, 2.5*X**1.5, 3.5*X**2.5, 4.5*X**3.5, 5.5*X**4.5 ],
             [-0.25*X**-1.5, 0.75*X**-0.5, 3.75*X**0.5, 8.75*X**1.5, 15.75*X**2.5, 24.75*X**3.5],
             [1.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]])
class Airfoil(object):
    '''
    Credit for this class goes to
    https://github.com/mbodmer/libairfoil
    Airfoil surface evaluator defined by PARSEC Parameters.
    '''
    def __init__(self, parsec_params):
        self._coeff = Coefficients(parsec_params)
    def _poly(self, a, X):
        # Evaluate a0*X^0.5 + a1*X^1.5 + ... + a5*X^5.5, accumulating
        # left-to-right exactly like the original chained expression.
        total = a[0]*X**0.5
        for k in range(1, 6):
            total = total + a[k]*X**(k + 0.5)
        return total
    def Z_up(self, X):
        '''Returns Z(X) on upper surface, calculates PARSEC polynomial'''
        X = X * self._coeff.params.trunc
        return self._poly(self._coeff.a_up(), X)
    def Z_lo(self, X):
        '''Returns Z(X) on lower surface, calculates PARSEC polynomial'''
        X = X * self._coeff.params.trunc
        return self._poly(self._coeff.a_lo(), X)
# Demo intentionally disabled: the author reports matplotlib under Python 2
# crashed their machine; run the commented code under Python 3 to plot.
if __name__ == "__main__":
    # Whatever you do, DO NOT uncomment
    # and run with python 2
    # Your computer will prob crash
    # I have done this twice lol
    # Python 3 is fine
    # Python 2 does not like matplotlib
    # # # import matplotlib.pyplot as plt
    # params = np.array([0.4, 0.3, 0.3, 2, np.pi/2])
    # airfoil = Airfoil(params)
    # x = np.linspace(0.0, 1.0, 150)
    # foil_up = airfoil.Z_up(x)
    # foil_lo = airfoil.Z_lo(x)
    # topmax = np.max(foil_up)
    # foil_up = foil_up * 1/(6*topmax)
    # foil_lo = foil_lo * 1/(6*topmax)
    # plt.plot(x, foil_up, 'r--', x,foil_lo, 'b--')
    # plt.xlim(-0.2, 1.2)
    # plt.ylim(-1, 1)
    # plt.gca().set_aspect('equal', adjustable='box')
    # plt.grid(True)
    # plt.show()
    pass
| {"/parameterizations/naca_parsec_mix.py": ["/parameterizations/naca.py", "/parameterizations/parsec.py"], "/run.py": ["/parameterizations/helpers.py", "/parameterizations/naca.py", "/parameterizations/parsec.py", "/parameterizations/naca_parsec_mix.py", "/parameterizations/inter.py", "/optimizers/fib.py", "/optimizers/nelder_mead.py", "/optimizers/differential_evolution.py"]} |
49,688 | eightys3v3n/calculator | refs/heads/master | /calculator/__init__.py | from .main import *
from . import finance
| {"/calculator/__init__.py": ["/calculator/main.py"], "/calculator/main.py": ["/calculator/__init__.py", "/calculator/finance.py"]} |
49,689 | eightys3v3n/calculator | refs/heads/master | /calculator/finance.py | import unittest
import logging
import locale
# for number formatting
import sympy
# for equation rearranging
global FORMULAS
FORMULAS = {}
# contains all the cached function rearrangements
RESULT_PRECISION = 4 # How many decimals should we round results from formulas to
def num_format(num, decimals=4):
    """Format a number with thousands separators and fixed decimal places.

    Backward compatible: the default ``decimals=4`` reproduces the old
    hard-coded behavior exactly.

    Args:
        num: the number to format.
        decimals: digits to keep after the decimal point (default 4).

    Returns:
        The formatted string, e.g. ``num_format(1234.5)`` -> ``'1,234.5000'``.
    """
    # Round first (as before), then format to the same precision.
    num = round(num, decimals)
    return "{:,.{prec}f}".format(num, prec=decimals)
def all_functions(var, expr, vars):
    """Generates formulas to solve for as many variables in the given expression as the sympy module can.

    Args:
        var: sympy.Symbol the expression is currently solved for.
        expr: sympy expression equal to ``var``.
        vars: every sympy.Symbol in the equation, including ``var``.  Sorted
            in place by name so the positional argument order of the
            generated lambdas is predictable (alphabetical, minus the
            solved-for symbol).

    Returns a dict mapping each solvable variable name to a lambda for its
    rearrangement; each lambda carries the sympy.Eq it came from in its
    ``equation`` attribute, and the ``'symbols'`` entry caches the
    sympy.Symbol objects by name.
    """
    root_var = var
    root_expr = expr
    var = None
    expr = None
    functions = {}
    vars.sort(key=lambda v:v.name) # so we can predict the argument order
    root_eq = sympy.Eq(root_var, root_expr) # so we can rearrange it
    # BUG FIX: these logging calls passed the equation as a print-style extra
    # argument with no placeholder; logging expects lazy %-style formatting.
    logging.debug("Given equation: %s", root_eq)
    #sympy.pprint(root_eq)
    for var in vars:
        try:
            expr, = sympy.solve(root_eq, var)
        except NotImplementedError:
            print("No method found to solve for {} in equation".format(var))
            sympy.pprint(root_eq)
            continue
        if not expr:
            logging.warning("Couldn't solve for %s", var.name)
            continue
        eq = sympy.Eq(var, expr)
        logging.debug("Derived equation: %s", eq)
        #sympy.pprint(eq)
        new_vars = vars.copy()
        new_vars.remove(var)
        functions[var.name] = sympy.lambdify(new_vars, expr)
        # embed the equation for odd solving
        functions[var.name].equation = eq
    # embed the symbols for odd solving
    functions['symbols'] = {}
    for s in vars:
        functions['symbols'][str(s)] = s
    return functions
# Time Value of Money
def tmv(pv=None, fv=None, r=None, n=None, should_round=True):
    """Converts between present money and future money taking into account interest rates and years past.

    Exactly three of (pv, fv, r, n) must be supplied; the missing one is
    solved for from pv = fv / (1+r)**n.

    pv: present value
    fv: future value with compound interest added
    r: compound yearly interest rate
    n: years
    should_round: round the result to RESULT_PRECISION decimals (default True)

    Returns [value, label] where label names the solved variable, or None if
    all four were supplied.  Raises Exception when fewer than three are given.
    """
    global FORMULAS
    name = 'solve'
    # generate all rearrangements of the given expression
    if name not in FORMULAS:
        _pv, _fv, _r, _n = sympy.symbols("pv fv r n")
        pv_expr = _fv / ((1+_r)**_n)
        FORMULAS[name] = all_functions(_pv, pv_expr, [_pv, _fv, _r, _n])
    # insist on the right number of arguments
    supplied = sum(1 if v is not None else 0 for v in (pv, fv, r, n))
    if supplied != 3:
        raise Exception("Invalid number of arguments")
    # NOTE: all_functions sorts symbols alphabetically (fv, n, pv, r), so the
    # positional argument order below must stay alphabetical minus the
    # solved-for variable -- do not reorder these calls.
    if pv is None:
        ret = [FORMULAS[name]['pv'](fv, n, r), "PV"]
    elif fv is None:
        ret = [FORMULAS[name]['fv'](n, pv, r), "FV"]
    elif r is None:
        ret = [FORMULAS[name]['r'](fv, n, pv), "r"]
    elif n is None:
        ret = [FORMULAS[name]['n'](fv, pv, r), "n"]
    else:
        print("You supplied all the arguments, there's nothing to calculate")
        return None
    if should_round:
        ret[0] = round(ret[0], RESULT_PRECISION)
    return ret
class Test_tmv(unittest.TestCase):
    """Solve each tmv() variable from the other three and check the value."""
    def test_pv(self):
        value, _label = tmv(fv=1000, r=0.02, n=10)
        self.assertAlmostEqual(value, 820.3483)
    def test_fv(self):
        value, _label = tmv(pv=1000, r=0.02, n=10)
        self.assertAlmostEqual(value, 1218.9944)
    def test_r(self):
        value, _label = tmv(pv=1000, fv=1218.9944, n=10)
        self.assertAlmostEqual(value, 0.02)
    def test_n(self):
        value, _label = tmv(pv=1000, fv=1218.9944, r=0.02)
        self.assertAlmostEqual(value, 10)
def perpetuity(pv=None, C=None, r=None, should_round=True):
    """Solve the perpetuity relation pv = C / r for whichever value is omitted.

    pv: present value
    C: yearly payment
    r: yearly interest rate
    should_round: round the result to RESULT_PRECISION decimals

    Returns [value, label]; raises when the supplied count is not exactly 2.
    """
    global FORMULAS
    name = 'perpetuity'
    # Build (and cache) every rearrangement of pv = C/r on first use.
    if name not in FORMULAS:
        pv_, C_, r_ = sympy.symbols("pv C r")
        FORMULAS[name] = all_functions(pv_, C_ / r_, [pv_, C_, r_])
    # Exactly two of the three quantities must be given.
    supplied = sum(v is not None for v in (pv, C, r))
    if supplied != 2:
        raise Exception("Invalid number of arguments")
    # Argument order follows all_functions' name sort (C, pv, r).
    if pv is None:
        ret = [FORMULAS[name]['pv'](C, r), "PV"]
    elif C is None:
        ret = [FORMULAS[name]['C'](pv, r), "C"]
    elif r is None:
        ret = [FORMULAS[name]['r'](C, pv), "r"]
    else:
        print("You supplied all the arguments, there's nothing to calculate")
        return None
    if should_round:
        ret[0] = round(ret[0], RESULT_PRECISION)
    return ret
class Test_perpetuity(unittest.TestCase):
    """Round-trip checks for perpetuity(): pv = C / r."""
    def test_pv(self):
        value, _label = perpetuity(C=1000, r=0.02)
        self.assertAlmostEqual(value, 50_000)
    def test_C(self):
        value, _label = perpetuity(pv=50_000, r=0.02)
        self.assertAlmostEqual(value, 1000)
    def test_r(self):
        value, _label = perpetuity(pv=50_000, C=1000)
        self.assertAlmostEqual(value, 0.02)
def _annuity_pv(pv=None, C=None, r=None, n=None, should_round=True):
    """Solve the annuity present-value relation pv = C*(1/r)*(1 - 1/(1+r)**n).

    Exactly three of (pv, C, r, n) must be supplied.  Solving for r or n is
    not implemented (sympy cannot rearrange the formula for them); those
    cases raise NotImplementedError whose message includes the financial
    calculator keystrokes to use instead.

    Returns [value, label], or None when all four values were given.
    """
    global FORMULAS
    name = 'annuity_pv'
    # generate all rearrangements of the given expression
    if name not in FORMULAS:
        _pv, _C, _r, _n = sympy.symbols("pv C r n")
        pv_expr = _C * 1/_r * (1 - 1/(1+_r)**_n)
        FORMULAS[name] = all_functions(_pv, pv_expr, [_pv, _C, _r, _n])
    # insist on the right number of arguments
    supplied = sum(1 if v is not None else 0 for v in (pv, C, r, n))
    if supplied != 3:
        raise Exception("Invalid number of arguments")
    # Argument order follows all_functions' name sort (C, n, pv, r).
    if pv is None:
        ret = [FORMULAS[name]['pv'](C, n, r), "PV"]
    elif C is None:
        ret = [FORMULAS[name]['C'](n, pv, r), "C"]
    elif r is None:
        # BUG FIX: the calculator hint used to sit in unreachable print()
        # statements after the raise; fold it into the exception message.
        raise NotImplementedError(
            "Can't calculate for r because I can't rearrange the formula. "
            "Use the calculator with {}PV; {}C; {}N; CPT; I/Y".format(pv, C, n))
    elif n is None:
        raise NotImplementedError(
            "Can't calculate for n because I can't rearrange the formula. "
            "Use the calculator with {}PV; {}C; {}I/Y; CPT; N".format(pv, C, r))
    else:
        print("You supplied all the arguments, there's nothing to calculate")
        return None
    if should_round:
        ret[0] = round(ret[0], RESULT_PRECISION)
    return ret
class Test_annuity_pv(unittest.TestCase):
    """Checks _annuity_pv against a known PV/payment pair."""
    def test_pv(self):
        result = _annuity_pv(C=1000, r=0.02, n=10)
        self.assertAlmostEqual(result[0], 8982.5850)
    def test_C(self):
        result = _annuity_pv(pv=8982.5850, r=0.02, n=10)
        self.assertAlmostEqual(result[0], 1000)
def _annuity_fv(fv=None, C=None, r=None, n=None, should_round=True):
    """Future-value form of the annuity, built from _annuity_pv and tmv.

    When fv is omitted, the payment stream's present value is grown n
    periods at rate r.  Otherwise fv is discounted back to a present value
    and the remaining unknown is solved with the PV-form annuity.
    """
    if fv is None:
        present, _ = _annuity_pv(C=C, r=r, n=n, should_round=False)
        future, _ = tmv(pv=present, r=r, n=n, should_round=False)
        ret = [future, "FV"]
    else:
        present, _ = tmv(fv=fv, r=r, n=n, should_round=False)
        ret = _annuity_pv(pv=present, C=C, r=r, n=n, should_round=False)
    if should_round:
        ret[0] = round(ret[0], RESULT_PRECISION)
    return ret
# BUG FIX: this class reused the name Test_annuity_pv, silently clobbering
# the PV test class above so its tests never ran; renamed to match what it
# actually tests (_annuity_fv).
class Test_annuity_fv(unittest.TestCase):
    def test_fv(self):
        fv, _ = _annuity_fv(C=1000, r=0.02, n=10)
        self.assertAlmostEqual(fv, 10949.7210)
    def test_C(self):
        C, _ = _annuity_fv(fv=10949.7210, r=0.02, n=10)
        self.assertAlmostEqual(C, 1000)
# Time Value of Money
def annuity(pv=None, fv=None, C=None, r=None, n=None):
    """Converts between present money and future money taking into account interest rates and years past.

    Exactly three supplied values solve for a fourth via the PV or FV form
    of the annuity; supplying four is not implemented yet.

    pv: present value
    fv: future value with compound interest added
    r: compound periodic interest rate
    n: periods
    C: periodic payment

    Returns [value, label], or None when the combination is unsupported.
    Raises NotImplementedError when four values are supplied.
    """
    supplied = sum(1 if v is not None else 0 for v in (pv, fv, C, r, n))
    if supplied == 3:
        if pv is None:
            return _annuity_fv(fv=fv, C=C, r=r, n=n)
        elif fv is None:
            return _annuity_pv(pv=pv, C=C, r=r, n=n)
        else:
            print("No present or future value specified")
    elif supplied == 4:
        # BUG FIX: ``raise NotImplemented(...)`` raised a TypeError because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError("Can't do this yet")
# Placeholder test class: annuity() has no coverage yet.
class Test_annuity(unittest.TestCase): pass
# TODO: test converting from fv to pv
# TODO: test converting from pv to fv
# Bond pricing / Yield to Maturity
def ytm(ytm=None, fv=None, cpn=None, n=None, p=None, should_round=True):
    """Relate a bond's price to its yield, coupons, face value and term.

    Price formula: p = cpn*(1/ytm)*(1 - 1/(1+ytm)**n) + fv/(1+ytm)**n.
    Exactly four of the five quantities must be supplied; the fifth is
    solved for.

    ytm: yield to maturity (per period).
    fv: face value paid at maturity.
    cpn: coupon payment amount in dollars.
    n: number of coupon payment periods.
    p: current market price.
    should_round: round the result to RESULT_PRECISION decimals.

    Returns [value, label].  Solving for ytm is numeric (sympy.nsolve from a
    grid of starting guesses); raises Exception if no root is found.
    """
    global FORMULAS
    name = 'ytm'
    # generate all rearrangements of the given expression
    if name not in FORMULAS:
        _ytm, _fv, _cpn, _p, _n = sympy.symbols("ytm fv cpn p n")
        p_expr = _cpn * (1/_ytm)*(1 - ( 1/(1+_ytm)**_n )) + (_fv/(1+_ytm)**_n)
        FORMULAS[name] = all_functions(_p, p_expr, [_ytm, _fv, _cpn, _n, _p])
    # insist on the right number of arguments
    supplied = sum(1 if v is not None else 0 for v in (ytm, fv, cpn, n, p))
    if supplied != 4:
        raise Exception("Invalid number of arguments")
    if ytm is None:
        # No closed form for ytm: substitute the knowns and root-find from
        # many starting guesses in (0, 1]; the first root found is used.
        eq = FORMULAS[name]['p'].equation.subs({'p': p, 'cpn': cpn, 'n': n, 'fv': fv})
        roots = []
        for i in range(1000):
            try:
                roots.append(sympy.nsolve(eq, sympy.Symbol("ytm"), (i+1)/1000))
            except ValueError:
                # nsolve failed to converge from this guess; try the next.
                pass
        logging.debug(roots)
        # BUG FIX: roots[0] used to raise a bare IndexError when nsolve never
        # converged; fail with a meaningful error instead.
        if not roots:
            raise Exception("Could not find a yield matching the given price")
        ret = [roots[0], "YTM"]
    elif fv is None:
        ret = [FORMULAS[name]['fv'](ytm=ytm, cpn=cpn, n=n, p=p), "FV"]
    elif cpn is None:
        # BUG FIX: removed a stray debug print of the rearranged equation.
        ret = [FORMULAS[name]['cpn'](ytm=ytm, fv=fv, n=n, p=p), "CPN"]
    elif n is None:
        ret = [FORMULAS[name]['n'](ytm=ytm, fv=fv, cpn=cpn, p=p), "n"]
    elif p is None:
        ret = [FORMULAS[name]['p'](ytm=ytm, fv=fv, cpn=cpn, n=n), "P"]
    else:
        print("You supplied all the arguments, there's nothing to calculate")
        return None
    if should_round:
        ret[0] = round(ret[0], RESULT_PRECISION)
    return ret
class Test_ytm(unittest.TestCase):
    """Exercises ytm() solving for the yield, face value and coupon."""
    def test_ytm(self):
        value, label = ytm(fv=1000, cpn=25, p=957.3490, n=10)
        self.assertEqual(label, 'YTM')
        self.assertEqual(str(value), '0.0300')
    def test_fv(self):
        value, label = ytm(cpn=25, p=957.3490, n=10, ytm=0.03)
        self.assertEqual(label, 'FV')
        self.assertEqual(round(value, 4), 1000.0000)
    def test_cpn(self):
        value, label = ytm(fv=1000, p=957.3490, n=10, ytm=0.03)
        self.assertEqual(label, 'CPN')
        self.assertEqual(round(value, 4), 25.0000)
    # TODO: cover the remaining rearrangements (solving for n and for p)
"""
Things still required
Variance of an investment:
Var = 1/(T-1)((R_1-R_avg)^2+...+(R_T-R_avg)^2)
Where T is the number of periods, R_1 is the return for period 1, R_avg is the average return for all periods.
Fix number format for large numbers
Price and YTM of a coupon bond with n coupon payments. So we need to be able to use annuity to
calculate PV and FV given four other arguments.
EAR interest, APR interest rate, Nominal rate
EAR = 1 + (APR/m)**m - 1
m is the compounding periods per year (monthly means m=12)
annuities with n years of odd deposit amounts. maybe input a list?
f = lambda r: tmv.fv(400 / (1+r) + 500 / (1+r)**2 + 1000 / (1+r)**3, r, 3)
calculates the future value of an annuity with specific deposit amounts.
The price today for a stock given risk rate, expected dividends, and expected future price. Also rearranged to solve for anything else.
P_0 = (Div_1 + P_1) / (1 + r_E)
Where P_0 is the current price,
P_1 is the future sell price
r_E is the risk rate.
The capital gain rate for a stock
capital_gain_rate = (P_1-P_0)/P_0
Where P_0 is the current price and P_1 is the future expected price.
The total return of a stock
total_return = (Div_1/P_0) + capital_gain_rate(P_0, P_1)
Where Div_1 is the expected dividend at the end of the year,
P_0 is the current price,
P_1 is the expected future sell price.
Dividend Yield formula for arbitrary number of years and arbitrary dividend payments each year.
"""
| {"/calculator/__init__.py": ["/calculator/main.py"], "/calculator/main.py": ["/calculator/__init__.py", "/calculator/finance.py"]} |
49,690 | eightys3v3n/calculator | refs/heads/master | /calculator/main.py | import unittest
import logging
import math
from math import sqrt, pow
# sqrt()
# pow()
import locale
# for number formatting
import sympy
# for equation rearranging
import statistics
from statistics import stdev
# stdev(arr) for std deviation
from . import finance
from .finance import tmv,num_format
# a number of functions for my FNCE courses
# Use the platform's default locale so locale-aware number formatting
# matches the user's environment.
locale.setlocale(locale.LC_ALL, '')
# Configure root logging once at import time.
logging.basicConfig(level=logging.INFO)
"""
Things still required
Fix number format for large numbers
Price and YTM of a coupon bond with n coupon payments. So we need to be able to use annuity to
calculate PV and FV given four other arguments.
EAR interest, APR interest rate, Nominal rate
EAR = 1 + (APR/m)**m - 1
m is the compounding periods per year (monthly means m=12)
annuities with n years of odd deposit amounts. maybe input a list?
f = lambda r: tmv.fv(400 / (1+r) + 500 / (1+r)**2 + 1000 / (1+r)**3, r, 3)
calculates the future value of an annuity with specific deposit amounts.
The price today for a stock given risk rate, expected dividends, and expected future price. Also rearranged to solve for anything else.
P_0 = (Div_1 + P_1) / (1 + r_E)
Where P_0 is the current price,
P_1 is the future sell price
r_E is the risk rate.
The capital gain rate for a stock
capital_gain_rate = (P_1-P_0)/P_0
Where P_0 is the current price and P_1 is the future expected price.
The total return of a stock
total_return = (Div_1/P_0) + capital_gain_rate(P_0, P_1)
Where Div_1 is the expected dividend at the end of the year,
P_0 is the current price,
P_1 is the expected future sell price.
Dividend Yield formula for arbitrary number of years and arbitrary dividend payments each year.
"""
| {"/calculator/__init__.py": ["/calculator/main.py"], "/calculator/main.py": ["/calculator/__init__.py", "/calculator/finance.py"]} |
49,692 | bigdatasciencegroup/flightr-project | refs/heads/master | /Twitter/TwitterService.py | """Main twitter services"""
from Twitter.twitterAdapter import TwitterAdaptor
class TwitterService(object):
    """Service-layer entry points for posting Twitter notifications."""
    @staticmethod
    def send_notification(twitter_handle, message):
        """Tweet ``message`` addressed to ``twitter_handle``."""
        adapter = TwitterAdaptor()
        status_text = twitter_handle + ' ' + message
        adapter.api.update_status(status=status_text)
| {"/Twitter/TwitterService.py": ["/Twitter/twitterAdapter.py"], "/Flightaware/flightawareService.py": ["/Flightaware/models.py", "/Flightaware/restAdapter.py"], "/Presentation/views.py": ["/Presentation/forms.py", "/Presentation/watcherService.py"], "/Gmaps/tests.py": ["/Gmaps/googlemaps_service.py"], "/Presentation/watcherService.py": ["/Flightaware/flightawareService.py", "/Gmaps/googlemaps_service.py", "/Presentation/models.py", "/Twitter/TwitterService.py"], "/Twitter/tests.py": ["/Twitter/twitterAdapter.py"], "/Flightaware/tests.py": ["/Flightaware/flightawareService.py", "/Flightaware/models.py"]} |
49,693 | bigdatasciencegroup/flightr-project | refs/heads/master | /Flightaware/flightawareService.py | from Flightaware.models import Flight
from Flightaware.restAdapter import RestAdapter
import requests
class FlightawareService(object):
    """Thin service wrapper over the FlightAware FlightInfoStatus endpoint."""
    @staticmethod
    def find_flight(flight_number, response=None):
        """Fetch the most recent flight matching ``flight_number``.

        ``response`` exists only so tests can inject a canned HTTP response
        instead of hitting the live API (acknowledged mocking shortcut).
        """
        adapter = RestAdapter()
        if response is None:
            response = requests.get(adapter.url + "FlightInfoStatus",
                                    params={'ident': flight_number, 'howMany': 1},
                                    auth=(adapter.username, adapter.apiKey))
        # Only the first (most recent) flight entry is of interest.
        flight = response.json()['FlightInfoStatusResult']['flights'][0]
        return Flight(flight['ident'],
                      flight['aircrafttype'],
                      flight['origin'],
                      flight['status'],
                      flight['actual_arrival_time'],
                      flight['arrival_delay'],
                      flight['estimated_arrival_time'])
| {"/Twitter/TwitterService.py": ["/Twitter/twitterAdapter.py"], "/Flightaware/flightawareService.py": ["/Flightaware/models.py", "/Flightaware/restAdapter.py"], "/Presentation/views.py": ["/Presentation/forms.py", "/Presentation/watcherService.py"], "/Gmaps/tests.py": ["/Gmaps/googlemaps_service.py"], "/Presentation/watcherService.py": ["/Flightaware/flightawareService.py", "/Gmaps/googlemaps_service.py", "/Presentation/models.py", "/Twitter/TwitterService.py"], "/Twitter/tests.py": ["/Twitter/twitterAdapter.py"], "/Flightaware/tests.py": ["/Flightaware/flightawareService.py", "/Flightaware/models.py"]} |
49,694 | bigdatasciencegroup/flightr-project | refs/heads/master | /Presentation/models.py |
class FlightDetails:
def __init__(self,
flight_number,
flight_status,
current_flight_delay,
journey_time_to_airport,
suggested_time_to_start_journey,
time_till_leave_time):
self.flight_number = flight_number
self.flight_status = flight_status
self.current_flight_delay = current_flight_delay
self.journey_time_to_airport = journey_time_to_airport
self.suggested_time_to_start_journey = suggested_time_to_start_journey
self.time_till_leave_time = time_till_leave_time
flight_number = None
flight_status = None
current_flight_delay = None
journey_time_to_airport = None
suggested_time_to_start_journey = None
time_till_leave_time = None
| {"/Twitter/TwitterService.py": ["/Twitter/twitterAdapter.py"], "/Flightaware/flightawareService.py": ["/Flightaware/models.py", "/Flightaware/restAdapter.py"], "/Presentation/views.py": ["/Presentation/forms.py", "/Presentation/watcherService.py"], "/Gmaps/tests.py": ["/Gmaps/googlemaps_service.py"], "/Presentation/watcherService.py": ["/Flightaware/flightawareService.py", "/Gmaps/googlemaps_service.py", "/Presentation/models.py", "/Twitter/TwitterService.py"], "/Twitter/tests.py": ["/Twitter/twitterAdapter.py"], "/Flightaware/tests.py": ["/Flightaware/flightawareService.py", "/Flightaware/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.