index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
28,577
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/timeline_operate.py
|
# coding: utf-8
import logging
from pyquery import PyQuery as pq
from socrates.set import log
def timeline(content):
    """Render a xiezhua timeline HTML fragment as plain UTF-8 text.

    Each <li> entry is flattened to its text fragments; the number of
    fragments picks the formatter: 1 = bare talk, 2 = "author : text",
    3 = @-reply, 4 = retweet.  Python 2 code (unichr, list-returning
    filter/map, str/unicode mixing).
    """
    open_line_init = pq(content)
    # Strip decorative <span> nodes so itertext() yields only message text.
    open_line_init('span').remove()
    open_line_li_list = open_line_init('li')
    # Formatters keyed by how many text fragments an entry has.
    single_message = lambda talk: talk[0] + '\n\n'
    normal_message = lambda talk: talk[0] + ' : ' + \
            talk[1] + '\n\n'
    at_message = lambda talk: talk[0] + '@' + \
            talk[1] + ' : ' + \
            talk[2] + '\n\n'
    rt_message = lambda talk: talk[0] + ':' + \
            talk[3] + ' ' + \
            talk[1] + ' ' + \
            talk[2] + ':' + ' '
    talk_func = {
            1: single_message, 2: normal_message,
            3: at_message, 4: rt_message,
            }
    open_talk_list = [list(li.itertext()) for li in open_line_li_list]
    # Drop the literal ' @' fragments left behind by the removed spans.
    open_talk_list = [filter(lambda i: i != ' @', talk) for talk in open_talk_list]
    # Skip 4-fragment entries whose tail is a full-width '!' (U+FF01) marker.
    open_talk_list = filter(lambda talk: not (len(talk)==4 and talk[3].strip()==unichr(int('ff01', 16))), open_talk_list)
    open_talk_list = map(lambda talk: talk_func[len(talk)](talk), open_talk_list)
    open_line = ''.join([talk.encode('utf-8') for talk in open_talk_list])
    logging.info(open_line)
    return open_line
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,578
|
zhao-ji/elfin
|
refs/heads/master
|
/main.py
|
#!/usr/bin/env python
# coding: utf-8
# version2.0 @Nightwish
import os.path
import tornado
import tornado.web
import tornado.ioloop
import tornado.httpserver
from handlers.wechat import Wechat
from handlers.bind import bind
from handlers.userset import userset
from tornado.options import define, options
# Listening port; overridable on the command line via --port.
define('port', default=3333, type=int)
# Resolve template/static dirs relative to this file, not the CWD.
template_path = os.path.join(os.path.dirname(__file__), 'templates')
static_path = os.path.join(os.path.dirname(__file__), 'static')
# URL routing: WeChat webhook, bind/userset flows, and static assets.
handlers = [ (r'/wechat', Wechat),
        (r'/bind/(.*)', bind),
        (r'/userset/(.*)', userset),
        (r'/static/(.*)',
            tornado.web.StaticFileHandler,
            {'path': static_path}),
        ]
if __name__ == '__main__':
    tornado.options.parse_command_line()
    app = tornado.web.Application(handlers=handlers,
            template_path=template_path,)
    # xheaders=True trusts X-Real-Ip/X-Forwarded-For from a fronting proxy.
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,579
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/hot_word_get.py
|
# coding: utf-8
import re
import pickle
import logging
from urllib import urlencode
import requests
from socrates.set import log
from scripts.mongo_operate import get_user_value
# Search endpoint and the <a> wrapper used to linkify each hot word.
hot_word_url = 'http://m.weilairiji.com/index.php?op=search&{}'
hot_word_wrap = '<a href="http://m.weilairiji.com/index.php?op=search&{}">{}</a>'
# Matches the word text inside the hot-word <a> tags (Python 2 ur'' literal).
hot_word_pattern = re.compile(ur'style="padding:5px"\s>(.+?)</a>')
def hot_word():
    """Scrape the site's hot-word list and return it as blank-line-joined links.

    Reuses the pickled requests session of a fixed user (id=47637).
    NOTE(review): pickle.loads on DB-stored session data is only safe while
    the database is fully trusted.
    """
    session = pickle.loads(get_user_value('session', id=47637).get('session'))
    r = session.get(url=hot_word_url.format(urlencode({'s':'90for'})))
    hot_word_list = hot_word_pattern.findall(r.text)
    # Words arrive as HTML numeric entities (&#xNNNN;); rebuild backslash-u
    # escapes and decode them into real unicode characters (Python 2 only).
    to_unicode = lambda i: i.replace('&#x','\u').replace(';','').decode('unicode_escape')
    hot_word_unicode = map(to_unicode, hot_word_list)
    # The query string is GBK-encoded for the site; display text is UTF-8.
    to_href = lambda i: hot_word_wrap.format(urlencode({'s':i.encode('gbk')}), i.encode('utf8'))
    hot_word_href = map(to_href, hot_word_unicode)
    return '\n\n'.join(hot_word_href)
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,580
|
zhao-ji/elfin
|
refs/heads/master
|
/handlers/wechat.py
|
#!/usr/bin/env python
# coding: utf-8
import logging
import os
import sys
import time
import xml.etree.ElementTree as ET
from tornado.web import RequestHandler
from socrates import hanzi
from socrates.set import log
from scripts.mongo_operate import del_user, get_user_value
from scripts.check_sig import check_sig
from scripts.talk_send import send
from scripts.photo_send import upload_photo
from scripts.homeline_get import home
from scripts.hot_word_get import hot_word
from scripts.timeline_get import open_line, time_line
from scripts.message_get import get_message_num
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir))
class BaseHandler(RequestHandler):
    """Parse the incoming WeChat XML push and expose its fields.

    Subclasses read wechat_id / user / message_type / content, which
    initialize() fills in before get/post run.
    """
    # Sender's WeChat OpenID (FromUserName element).
    wechat_id = None
    # Bound user document from Mongo, or None when the sender is unbound.
    user = None
    # One of 'text' | 'image' | 'event'.
    message_type = None
    # Payload: text body, picture URL, or event name depending on type.
    content = None
    def initialize(self):
        xml_original = self.request.body
        try:
            xml = ET.fromstring(xml_original)
        except Exception, e:
            # Malformed push: log it and leave all fields at their defaults.
            logging.error(e)
            logging.info(str(xml_original))
            return
        self.wechat_id = xml.find('FromUserName').text
        ret = get_user_value(wechat_id=self.wechat_id)
        self.user = ret
        self.message_type = xml.find('MsgType').text
        if self.message_type == 'text':
            self.content = xml.find('Content').text
        elif self.message_type == 'image':
            self.content = xml.find("PicUrl").text
            self.media_id = xml.find("MediaId").text
        elif self.message_type == 'event':
            self.content = xml.find("Event").text
            if self.content == 'CLICK':
                # Menu clicks carry the concrete action in EventKey.
                self.eventkey = xml.find("EventKey").text
    def wechat(self, ret_str=None):
        """Reply to the current message with ret_str (default: failure text)."""
        ret_str = ret_str or hanzi.SEND_FAIL
        self.render('text.xml',
                toUser=self.wechat_id,
                time=time.time(),
                text=ret_str)
class Wechat(BaseHandler):
    """WeChat webhook endpoint: GET verifies the server, POST dispatches
    incoming messages and menu events to the matching feature."""
    def get(self):
        # Server-verification handshake: echo echostr back when the
        # signature checks out.
        signature = self.get_argument('signature')
        timestamp = self.get_argument('timestamp')
        nonce = self.get_argument('nonce')
        echostr = self.get_argument('echostr')
        if check_sig(signature, timestamp, nonce):
            self.write(echostr)
    def post(self):
        # All self.* fields below were populated by BaseHandler.initialize().
        if self.message_type == 'text':
            if self.user:
                self.wechat(send(self.user, self.content))
            else:
                # Unbound sender: reply with a bind link.
                self.wechat(hanzi.HELLO%self.wechat_id)
        elif self.message_type == 'image':
            self.wechat(upload_photo(self.user, self.content,
                self.media_id,))
        elif self.message_type == 'event':
            if self.content == 'subscribe':
                self.wechat(hanzi.HELLO%self.wechat_id)
            elif self.content == 'unsubscribe':
                # Drop the binding when the user unfollows.
                del_user(wechat_id=self.wechat_id)
            elif self.content == 'CLICK':
                # Custom-menu clicks, dispatched on EventKey.
                if self.eventkey == 'help':
                    self.wechat(hanzi.HELP)
                elif self.eventkey in ['home1', 'home2', 'home3']:
                    Feedback = home(self.user, self.eventkey)
                    self.wechat(Feedback)
                elif self.eventkey in ['tml1', 'tml2', 'tml3']:
                    time_lines = time_line(self.eventkey, self.user)
                    self.wechat(time_lines)
                elif self.eventkey in ['at_msg', 'private_msg']:
                    message_num = get_message_num(self.eventkey, self.user)
                    self.wechat(message_num)
                elif self.eventkey == 'tail':
                    self.wechat(hanzi.USET%self.wechat_id)
                elif self.eventkey == 'public_msg':
                    open_lines = open_line()
                    self.wechat(open_lines)
                elif self.eventkey == 'recent_visitor':
                    # Placeholder: feature not implemented yet.
                    self.wechat('hello')
                elif self.eventkey == 'hot_words':
                    hot_words = hot_word()
                    self.wechat(hot_words)
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,581
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/message_get.py
|
# coding: utf-8
import re
import pickle
import requests
from socrates import hanzi
from scripts.session_get import get_session
message_url = 'http://m.weilairiji.com/index.php'
at_pattern = re.compile(ur".*atreplies'>(.+?)</a>.*")
private_message_pattern = re.compile(ur".*privatemsg'>(.+?)</a>.*")
def get_message_num(message_type, user):
    """Fetch the user's unread-message summary from the mobile site.

    message_type: 'at_msg' for @-replies, 'private_msg' for private
    messages.  Returns the formatted hanzi reply string.

    Fix: an unknown message_type used to fall through both branches and
    crash with NameError on `message`; it now raises ValueError with a
    clear description instead.
    """
    if user.get('session', ''):
        # NOTE(review): pickle.loads of DB-stored session data — only safe
        # while the database is fully trusted.
        session = pickle.loads(user['session'])
    else:
        session = get_session(user['wechat_id'])
    r = session.get(url=message_url)
    if message_type == 'at_msg':
        message_init = at_pattern.findall(r.text)
        # The count is the last character of the matched link text.
        message_num = message_init[0][-1]
        message = hanzi.AT_MESSAGE.format(message_num)
    elif message_type == 'private_msg':
        message_init = private_message_pattern.findall(r.text)
        message_num = message_init[0][-1]
        message = hanzi.PRIVATE_MESSAGE.format(message_num)
    else:
        raise ValueError('unknown message_type: %r' % (message_type,))
    return message
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,582
|
zhao-ji/elfin
|
refs/heads/master
|
/handlers/bind.py
|
#!/usr/bin/env python
# coding: utf-8
import time
from tornado.web import RequestHandler
from scripts.login import login
from socrates import hanzi
from scripts.mongo_operate import save_user, del_user
class bind(RequestHandler):
    """Account-binding flow: GET renders the form, POST verifies the
    xiezhua credentials and stores the binding for wechat_id."""
    def get(self, wechat_id):
        action = '/elfin/bind/' + wechat_id
        self.render('bind.html', info=hanzi.BIND, time=time.ctime(), action=action)
    def post(self, wechat_id):
        email = self.get_argument('email')
        psw = self.get_argument('psw')
        action = '/elfin/bind/' + wechat_id
        if not all([email, psw]):
            self.render('base.html', info=hanzi.NOT_ALL, time=time.ctime(), action=action)
            return
        # login() posts a test status with basic auth; its HTTP status code
        # tells us whether the credentials are valid.
        _login = login(email=email, psw=psw)
        _login.analyses()
        if _login.status_code == 200:
            # Build the user document that will be stored in Mongo.
            elfin = {}
            elfin['wechat_id'] = wechat_id
            elfin['xiezhua_id'] = _login.xiezhua_id
            elfin['id'] = _login.id
            elfin['tail'] = hanzi.DEVICE
            elfin['hash'] = _login.hash
            elfin['ret'] = _login.ret
            try:
                elfin['session'] = _login.session
            except:
                # analyses() may not have produced a session; best-effort.
                pass
            # Re-bind: drop any stale record before saving the new one.
            del_user(wechat_id=wechat_id)
            save_user(elfin)
            self.render('return.html', info=hanzi.BIND_OK, time=time.ctime())
        elif _login.status_code == 401:
            # Bad credentials: re-render the form with an error.
            action = '/elfin/bind/' + wechat_id
            self.render('bind.html', info=hanzi.ERR_PSW, time=time.ctime(), action=action)
        elif _login.status_code in [404,500, 503]:
            self.render('return.html', info=hanzi.ERR_SERVER, time=time.ctime())
        else:
            self.render('return.html', info=hanzi.ERR_UNKOWN, time=time.ctime())
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,583
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/session_get.py
|
# coding: utf-8
import pickle
import requests
from scripts.mongo_operate import update_user
def get_session(xiezhua_id):
    """Log in to the xiezhua mobile site and return the authenticated
    requests Session.

    xiezhua_id is an (account, password) pair.  The pickled session is
    also persisted on the matching user record for later reuse.
    """
    login_url = 'http://m.weilairiji.com/index.php?op=login'
    web_session = requests.Session()
    credentials = {
        'loginaccount': xiezhua_id[0],
        'loginpass': xiezhua_id[1],
        'action': 'login',
    }
    web_session.post(login_url, data=credentials)
    update_user({'xiezhua_id': xiezhua_id},
                session=pickle.dumps(web_session))
    return web_session
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,584
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/login.py
|
# coding: utf-8
import json
import pickle
import logging
import requests
from socrates import hanzi
from socrates.set import log
from scripts.session_get import get_session
POST_URL = 'http://weilairiji.com/api/statuses/update.json'
class login:
    """Verify xiezhua credentials by posting a bind notice.

    The constructor posts BIND_OK as a status update over HTTP basic auth;
    the response status code doubles as the credential check (200 ok,
    401 bad password — consumed by handlers/bind.py).
    """
    # Defaults; id is overwritten by analyses() on success.
    xieban = 0
    user_name = ''
    protected = 0
    id = 0
    def __init__(self, email=None, psw=None):
        __data = {}
        # The site expects GB18030-encoded form fields.
        __data['status'] = hanzi.BIND_OK.decode('utf-8').encode('GB18030')
        __data['source'] = hanzi.DEVICE.decode('utf-8').encode('GB18030')
        __data['status_type'] = 'talk'
        self.bind_ret = requests.post(POST_URL, data=__data, auth=(email, psw))
        self.status_code = self.bind_ret.status_code
        self.xiezhua_id = [email, psw]
        self.hash = 0
        self.ret = ''
    def analyses(self):
        """Extract the numeric user id and build a logged-in session."""
        # The API returns '=' where JSON needs ':'; patch before parsing.
        ret_json = self.bind_ret.text.replace('=', ':')
        try:
            ret_list = json.loads(ret_json, encoding='GB18030')
        except Exception, e:
            logging.error(e)
        else:
            user_info = ret_list[0]['user']
            self.id = int(user_info['id'])
            # Pickled so it can be stored on the Mongo user document.
            self.session = pickle.dumps(get_session(self.xiezhua_id))
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,585
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/homeline_get.py
|
# coding: utf-8
import json
import logging
import requests
from socrates.set import log
home_url = "http://weilairiji.com/api/statuses/user_timeline.json"
home_dict = {'home1':1,
'home2':2,
'home3':3}
def home(user, key):
    """Return one page of the user's own timeline as blank-line-joined text.

    key is the menu EventKey 'home1'/'home2'/'home3', mapped to a page
    number via home_dict.
    """
    data = {}
    data['id'] = user['id']
    data['page'] = home_dict[key]
    r = requests.get(home_url, params=data, auth=tuple(user['xiezhua_id']))
    # NOTE(review): assert is stripped under -O; a non-200 response would
    # then fall through into JSON parsing.
    assert r.status_code==200
    # The API emits '=' instead of ':' and GB18030 text; patch and parse
    # leniently (strict=False tolerates control chars in strings).
    home_original = json.loads(r.text.replace('=',':'),
            encoding='GB18030',
            strict=False)
    ret = '\n\n'.join(
            map(lambda item: item['text'],
                home_original)
            )
    logging.info(ret)
    return ret
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,586
|
zhao-ji/elfin
|
refs/heads/master
|
/socrates/set.py
|
# coding: utf-8
import logging
# Root logger setup.  basicConfig returns None, so `log` exists only to
# give other modules something to import (`from socrates.set import log`)
# that triggers this configuration as a side effect.
log = logging.basicConfig(filename='/home/elfin/log/run.log',
        filemode='a',
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.INFO
        )
# Mobile-site endpoints used by the timeline scrapers.
open_line_url = 'http://m.weilairiji.com/index.php?op=browse'
time_line_url = 'http://m.weilairiji.com/index.php?op=index&page={}'
''' [1] list timeline of each user
[2] list recent visiters of each user
[3] string @, private message
[4] list public message, hot words
[5] string access_token
'''
# Redis connection parameters (presumably for caching — no consumer
# visible in this file; verify against callers).
REDIS = {'HOST':'localhost','PORT':6379}
''' elfin-user-info
{ wechat_id : string ,
xieban : string ,
xiezhua_id: string ,
name : string ,
protected : bool ,
}
'''
import pymongo
# Shared handle to the local `elfin` Mongo database.
mongo = pymongo.MongoClient(host='localhost', port=27017)['elfin']
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,587
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/talk_send.py
|
#! /usr/bin/python
# coding: utf-8
import logging
import requests
from gevent import spawn, joinall
from socrates import hanzi
from socrates.set import log
from scripts.simi import simi
from scripts.mongo_operate import update_user, del_user
POST_URL = 'http://weilairiji.com/api/statuses/update.json'
def diff_from_password(talk, pwd_list):
    # Refuse to broadcast the user's own password; pwd_list is [email, psw].
    # NOTE(review): assert is stripped under -O; send() catches the
    # AssertionError and uses its message as the reply text.
    assert pwd_list[1] not in talk, hanzi.SENSITIVE
def diff_from_old(talk, old_hash):
    # Reject an immediate duplicate of the previous talk; old_hash is the
    # hash() of the last talk, stored on the user document by send().
    new_hash = hash(talk)
    assert new_hash!=old_hash, hanzi.REPEAT
def less_than_300(talk):
    """Raise OverflowError when talk exceeds 300 characters.

    send() catches the error and switches to split-and-send mode.
    """
    over_limit = len(talk) > 300
    if over_limit:
        raise OverflowError
def slice_talk(talk):
    """Split talk into 295-character chunks, each prefixed with its
    1-based sequence number in full-width brackets.

    Fix: the original numbered chunks with talk_list.index(chunk), which
    returns the FIRST matching position — identical chunks (repeated
    text) all received the same number.  enumerate() numbers by position.
    """
    talk_list = [talk[i:i + 295] for i in range(0, len(talk), 295)]
    return [u'【' + str(number) + u'】' + chunk
            for number, chunk in enumerate(talk_list, 1)]
def transmit(user, talk,touser=None):
    """POST one talk to the status-update API as the given user.

    Raises RuntimeWarning on server errors (404/500/503), UserWarning on
    auth failure (401/403; the stale user binding is deleted first), and
    FutureWarning on any other unexpected status.  Returns None on 200.
    """
    data = {}
    data['status'] = talk.encode('GB18030')            # site expects GB18030
    data['source'] = user['tail'].encode('GB18030')    # "sent from" tail text
    data['in_reply_to_status_id'] = ''
    data['status_type'] = 'talk'
    transmit_ret = requests.post(POST_URL, data=data, auth=tuple(user['xiezhua_id']))
    if transmit_ret.status_code in [404, 500, 503]:
        raise RuntimeWarning
    elif transmit_ret.status_code in [401, 403]:
        # Credentials no longer valid: drop the binding, then signal caller.
        del_user(user)
        raise UserWarning
    elif transmit_ret.status_code == 200:
        # Fix: was `is 200` — an identity comparison with an int literal
        # that only worked by the accident of CPython small-int caching.
        return
    else:
        raise FutureWarning
def send(user, talk):
    """Validate and publish talk for user, returning the reply text.

    Flow: reject password leaks and immediate repeats, auto-split talks
    longer than 300 chars, otherwise transmit (optionally racing a `simi`
    lookup via gevent) and map transport warnings onto user-facing
    hanzi messages.
    """
    try:
        diff_from_password(talk, user['xiezhua_id'])
        diff_from_old(talk, user['hash'])
        less_than_300(talk)
    except AssertionError, e:
        # Validation failure: the assert message IS the reply text.
        logging.info(e)
        return e
    except OverflowError:
        # Too long: send the numbered 295-char slices one by one.
        map(lambda talk: transmit(user,talk), slice_talk(talk))
        ret = hanzi.MULTITALK
        return ret
    else:
        try:
            if user['ret']:
                # User configured a fixed reply; skip the simi lookup.
                transmit(user, talk)
                ret = user['ret']
            else:
                # Run the similarity lookup and the post concurrently.
                task_simi = spawn(simi, talk)
                task_send = spawn(transmit, user, talk)
                joinall([task_simi, task_send])
                ret = task_simi.value
                logging.info(ret)
        except RuntimeWarning:
            return hanzi.ERR_SERVER
        except UserWarning:
            return hanzi.ERR_PSW
        except FutureWarning:
            return hanzi.ERR_UNKOWN
        else:
            return ret
        finally:
            # Remember this talk's hash so an immediate repeat is rejected.
            update_user({'id':user['id']}, hash=hash(talk))
            logging.info(str(user['id']) + ' : ' + talk)
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,588
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/check_sig.py
|
#!/usr/bin/env python
# coding: utf-8
import hashlib
# Shared secret configured on the WeChat platform side.
TOKEN = 'elfin'
def check_sig(signature=None, timestamp=None, nonce=None):
    """Verify a WeChat webhook signature.

    Per the WeChat spec: sort [timestamp, nonce, TOKEN], concatenate,
    SHA-1, and compare the hex digest with `signature`.  Returns 1 on a
    match and 0 otherwise (including when any argument is missing).

    Fix: encode the joined string before hashing — hashlib requires
    bytes on Python 3 (a no-op for these ASCII inputs on Python 2).
    """
    if all([signature, timestamp, nonce]):
        tmp_list = [timestamp, nonce, TOKEN]
        tmp_list.sort()
        tmp_str = ''.join(tmp_list)
        sha1_str = hashlib.sha1(tmp_str.encode('utf-8')).hexdigest()
        return 1 if signature == sha1_str else 0
    return 0
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,589
|
zhao-ji/elfin
|
refs/heads/master
|
/socrates/hanzi.py
|
# coding=utf-8
'''wechat.py'''
HELLO = '''欢迎关注蟹爪小精灵,请点击<a href="http://chashuibiao.org/elfin/bind/%s">这里</a>绑定'''
USET = '''欢迎设置蟹爪小精灵,请点击<a href="http://chashuibiao.org/elfin/userset/%s">这里</a>设置'''
HELP = "有不懂的上蟹爪@雷锋"
SENSITIVE = "请勿发送密码"
REPEAT = "爱过"
SEND_OK = "发射成功"
SEND_FAIL = "发射失败"
WECHATPIC = '微信图片'
DEVICE = '蟹爪小精灵'
AT_MESSAGE = '您有{}条未读@'
PRIVATE_MESSAGE = '您有{}条未读私信'
MULTITALK = '您的talk已分条发送成功'
'''bind.py'''
BIND = '绑定蟹爪'
LOGIN = '请输入邮箱和密码'
NOT_ALL = '邮箱或密码不完整'
BIND_OK = '异空间传送器搭建完毕!'
ERR_PSW = '邮箱或密码错误'
ERR_SERVER = '服务器错误请稍候重试'
ERR_UNKOWN = '未知错误,联系雷锋'
'''userset.py'''
NOT_NULL = '尾巴不要为空'
TAIL_OK = '小精灵尾巴更换成功!'
RET_OK = '回复设置成功'
TAIL_ERR = '尾巴更换失败,请稍候重试'
CHANGE_TAIL = '''尾巴更改为:{}'''
USERSET = '设置小精灵'
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,590
|
zhao-ji/elfin
|
refs/heads/master
|
/scripts/mongo_operate.py
|
#! /usr/bin/python
# coding: utf-8
from socrates.set import mongo
def whether_login(wechat_id):
    # Ensure exactly one bound user exists for this wechat_id.
    # NOTE(review): assert is stripped under -O; raising explicitly
    # would be safer for a validation check.
    query = {'wechat_id':wechat_id}
    count = mongo.elfin.find(query).count()
    assert count==1
def del_user(**query):
    # Remove every user document matching the keyword query
    # (remove() is the legacy pymongo deletion API).
    mongo.elfin.remove(query)
def save_user(user):
    # Insert the user document (or replace it when _id is present);
    # save() is the legacy pymongo upsert-by-_id API.
    mongo.elfin.save(user)
def update_user(find_query_dict, **update_query):
    """Apply a $set of the given keyword fields to the matching elfin
    user document."""
    set_clause = {'$set': update_query}
    mongo.elfin.update(find_query_dict, set_clause)
def get_user_value(*keys, **query):
    """Fetch one elfin user document matching the keyword query.

    When positional *keys are given, the result is projected down to
    just those fields; otherwise the whole document is returned.
    """
    if not keys:
        return mongo.elfin.find_one(query)
    projection = {field: 1 for field in keys}
    return mongo.elfin.find_one(query, projection)
|
{"/scripts/get_access_token.py": ["/socrates/set.py"], "/scripts/photo_send.py": ["/socrates/set.py", "/scripts/mongo_operate.py", "/scripts/session_get.py"], "/scripts/timeline_get.py": ["/scripts/timeline_operate.py", "/scripts/session_get.py", "/socrates/set.py"], "/handlers/userset.py": ["/socrates/set.py", "/scripts/talk_send.py", "/scripts/mongo_operate.py"], "/scripts/timeline_operate.py": ["/socrates/set.py"], "/main.py": ["/handlers/wechat.py", "/handlers/bind.py", "/handlers/userset.py"], "/handlers/bind.py": ["/scripts/login.py", "/scripts/mongo_operate.py"], "/scripts/session_get.py": ["/scripts/mongo_operate.py"], "/scripts/homeline_get.py": ["/socrates/set.py"], "/scripts/mongo_operate.py": ["/socrates/set.py"]}
|
28,591
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/util/object_tracking_util.py
|
from __future__ import unicode_literals
import cv2
import numpy as np
from scipy import spatial
from itertools import tee, izip
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    # Classic itertools recipe (Python 2: izip).  The second tee'd copy
    # is advanced one step so zipping yields consecutive pairs.
    a, b = tee(iterable)
    next(b, None)
    return izip(a, b)
def do_kdtree(combined_x_y_arrays, points):
    """
    Nearest-neighbour lookup via a cKDTree.

    Returns (dist, indexes): for each query point in `points`, the
    distance to — and row index of — its closest point in
    combined_x_y_arrays.  (The original docstring mentioned only the
    indices; both arrays are returned.)
    e.g.
    combined_x_y_arrays <-
        array([[92.14673117, 13.77534758],
               [98.54777011, 33.85032899],
               [56.09304457,  8.00195266],
               [26.65179454, 70.34241889],
               [57.20960077,  7.37334471],
               [83.45449933, 94.33560961],
               [53.05171069, 24.39858403],
               [48.3750993 , 81.5119219 ],
               [76.22763828, 25.2616434 ],
               [65.19508197, 32.29815772]])
    points <- [[93, 14],[77,24],[66,32]]
    returns (distances, array([0, 8, 9]))
    """
    mytree = spatial.cKDTree(combined_x_y_arrays)
    dist, indexes = mytree.query(points)
    return dist, indexes
def centers(bboxes_before, bboxes_after):
    """Convert (ymin, xmin, ymax, xmax) bbox rows into (x, y) centers.

    Returns (center_before, center_after, areas_before, areas_after);
    centers are stacked as (N, 2) arrays, areas as length-N arrays.
    """
    def _centers_and_areas(boxes):
        # Columns follow the (ymin, xmin, ymax, xmax) convention.
        ymin, xmin = boxes[:, 0], boxes[:, 1]
        ymax, xmax = boxes[:, 2], boxes[:, 3]
        areas = (ymax - ymin) * (xmax - xmin)
        cx = np.expand_dims((xmin + xmax) / 2., 1)
        cy = np.expand_dims((ymin + ymax) / 2., 1)
        return np.hstack((cx, cy)), areas
    center_before, areas_before = _centers_and_areas(bboxes_before)
    center_after, areas_after = _centers_and_areas(bboxes_after)
    return center_before, center_after, areas_before, areas_after
def check_within(val, threshold):
    """Return False when val lies within threshold of 0 or of 1 (i.e.
    near the border of a normalized coordinate range), True otherwise.

    Raises ValueError for negative values.
    """
    if val < 0:
        raise ValueError("Unexpected val " + str(val))
    near_one = abs(1 - val) < threshold
    near_zero = val < threshold
    return not (near_one or near_zero)
def check_all_within(vals, threshold=0.05):
    """True iff every value passes check_within.

    all() short-circuits on the first failure, matching the original
    and-chain (later check_within calls were skipped there too).
    """
    return all(check_within(candidate, threshold) for candidate in vals)
def any_shape_entry_zero(shp):
    """True when any dimension in the shape tuple is 0 (empty array)."""
    for extent in shp:
        if not extent:
            return True
    return False
def draw_line_between_shortest(img, bboxes_before, bboxes_after, classes_before, classes_after, lo_trail):
    """
    Match each "after" detection to its nearest same-class "before"
    detection, extend/draw motion-trail arrows on img, and collect
    per-class bbox areas and area deltas.

    :param img: Image to draw on (debugging)
    :param bboxes_before: Allow for change in area calculation
    :param bboxes_after: Allow for change in area calculation and
    :param classes_before: per-detection class ids for the previous frame
    :param classes_after: per-detection class ids for the current frame
    :param lo_trail: list of trails (lists of pixel points), mutated in place
    :return: (area, change_in_area) dicts keyed by class id
    """
    change_in_area = {}
    area = {}
    if img is None:
        print('None')
        return area, change_in_area
    # One of the frames has no detections: nothing to match or draw.
    if any_shape_entry_zero(classes_before.shape) or any_shape_entry_zero(classes_after.shape): return area, change_in_area
    h,w = img.shape[:2]
    center_before, center_after, areas_before, areas_after = centers(bboxes_before, bboxes_after)
    # Classes that are present, both before and after
    drawable_classes= set(list(classes_before)).intersection(set(list(classes_after)))
    drawable_classes = np.array(list(drawable_classes))
    # if a trail wasn't appending to, pop it by checking this index set
    shouldnt_pop = set()
    for drawable_class in drawable_classes:
        to_idx_before = np.where(classes_before == drawable_class)[0]
        to_idx_after = np.where(classes_after == drawable_class)[0]
        if to_idx_before.shape[0] == 0 or (to_idx_after.shape[0] == 0):
            return area, change_in_area
        center_before_curr = center_before[to_idx_before,:]
        center_after_curr = center_after[to_idx_after,:]
        areas_before_curr = areas_before[to_idx_before]
        areas_after_curr = areas_after[to_idx_after]
        num_bbox_before = areas_before_curr.shape
        num_bbox_after = areas_after_curr.shape
        did_swap = False
        # Query from the smaller set so every queried point finds a match.
        if num_bbox_before < num_bbox_after:
            did_swap = True
            temp_center = center_after_curr.copy()
            center_after_curr = center_before_curr.copy()
            center_before_curr = temp_center
        dist, indexes = do_kdtree(center_before_curr, center_after_curr)
        closest_points_to_center_after_curr = center_before_curr[indexes]
        num_pts, _ =center_after_curr.shape
        change_in_area[drawable_class] = []
        area[drawable_class] = []
        for ii in range(num_pts):
            p1 = closest_points_to_center_after_curr[ii,:]
            p2 = center_after_curr[ii,:]
            abefore =areas_before_curr[ii]
            aafter =areas_after_curr[ii]
            # print('areas_after_curr.shape',areas_after_curr.shape)
            # print("Area Before ",abefore)
            # print("Area After ",aafter)
            # if abefore < aafter:
            #     print('increasing\n')
            # else:
            #     print('decreasing\n')
            # Continue if anything is too close to the edge
            # if not (check_all_within(p1) and check_all_within(p2)):
            #     continue
            # Set a threshold for how big a line can be TODO -> replace with how big a line can grow
            # NOTE(review): the 0.09 cutoff assumes normalized [0,1]
            # coordinates — confirm against the detector's output format.
            if dist[ii] > 0.09:
                continue
            change_in_area[drawable_class].append(aafter - abefore)
            area[drawable_class].append(aafter)
            # Scale normalized centers to pixel coordinates for drawing.
            p1 = (int(p1[0]*w),int(p1[1]*h))
            p2 = (int(p2[0]*w),int(p2[1]*h))
            if not did_swap:
                did_append = False
                # Extend an existing trail ending at p1, else start a new one.
                for it, trail in enumerate(lo_trail):
                    if p1 == trail[-1]:
                        trail.append(p2)
                        did_append = True
                        shouldnt_pop.add(it)
                if not did_append:
                    # if len(lo_trail)>0: lo_trail.pop(0)
                    lo_trail.append([p2])
                    shouldnt_pop.add(len(lo_trail)-1)
            else:
                did_append = False
                # Swapped query direction: p2 is the older point here.
                for it, trail in enumerate(lo_trail):
                    if p2 == trail[-1]:
                        trail.append(p1)
                        did_append = True
                        shouldnt_pop.add(it)
                if not did_append:
                    # if len(lo_trail)>0: lo_trail.pop(0)
                    lo_trail.append([p1])
                    shouldnt_pop.add(len(lo_trail)-1)
    # Draw each trail as a chain of arrows between consecutive points.
    for curr_trail in lo_trail:
        for p_i, p_ii in pairwise(curr_trail):
            cv2.arrowedLine(img, p_i, p_ii, (0, 255, 0), 3, tipLength=0.5)
    # new_lo_trail = []
    # Age out trails that did not grow this frame, and cap trail length;
    # empty trails are then filtered away in place.
    for curr_it in range(len(lo_trail)):
        if curr_it not in shouldnt_pop or (len(lo_trail[curr_it])>5):
            lo_trail[curr_it].pop(0)
    lo_trail[:] = (i for i in lo_trail if len(i)>0)
    return area, change_in_area
def draw_line_between_shortest_old(img, bboxes_before, bboxes_after, classes_before, classes_after):
    """
    Draw an arrow from each detection's previous center to its current one and
    accumulate per-class area statistics.

    :param img: Image to draw on (debugging); may be None
    :param bboxes_before: normalized bboxes of the previous frame
    :param bboxes_after: normalized bboxes of the current frame
    :param classes_before: class ids matching bboxes_before
    :param classes_after: class ids matching bboxes_after
    :return: (area, change_in_area) dicts keyed by class id
    """
    change_in_area = {}
    area = {}
    if img is None:
        print('None')
        return area, change_in_area
    if any_shape_entry_zero(classes_before.shape) or any_shape_entry_zero(classes_after.shape):
        return area, change_in_area
    h, w = img.shape[:2]
    center_before, center_after, areas_before, areas_after = centers(bboxes_before, bboxes_after)
    # Only classes present in both frames can be tracked
    drawable_classes = set(list(classes_before)).intersection(set(list(classes_after)))
    drawable_classes = np.array(list(drawable_classes))
    for drawable_class in drawable_classes:
        to_idx_before = np.where(classes_before == drawable_class)[0]
        to_idx_after = np.where(classes_after == drawable_class)[0]
        if to_idx_before.shape[0] == 0 or (to_idx_after.shape[0] == 0):
            print('Zero shape')
            # BUGFIX: previously a bare ``return`` (None), which broke callers
            # unpacking the (area, change_in_area) tuple
            return area, change_in_area
        center_before_curr = center_before[to_idx_before, :]
        center_after_curr = center_after[to_idx_after, :]
        areas_before_curr = areas_before[to_idx_before]
        areas_after_curr = areas_after[to_idx_after]
        # If a bbox of the same class is introduced, before/after counts can
        # disagree; swap so the KD-tree is built on the larger set
        num_bbox_before = areas_before_curr.shape
        num_bbox_after = areas_after_curr.shape
        did_swap = False
        if num_bbox_before < num_bbox_after:
            did_swap = True
            temp_center = center_after_curr.copy()
            center_after_curr = center_before_curr.copy()
            center_before_curr = temp_center
        dist, indexes = do_kdtree(center_before_curr, center_after_curr)
        closest_points_to_center_after_curr = center_before_curr[indexes]
        num_pts, _ = center_after_curr.shape
        change_in_area[drawable_class] = []
        area[drawable_class] = []
        for ii in range(num_pts):
            p1 = closest_points_to_center_after_curr[ii, :]
            p2 = center_after_curr[ii, :]
            abefore = areas_before_curr[ii]
            aafter = areas_after_curr[ii]
            # Set a threshold for how big a line can be (normalized distance);
            # skip matches that jumped too far
            if dist[ii] > 0.09:
                continue
            change_in_area[drawable_class].append(aafter - abefore)
            area[drawable_class].append(aafter)
            # Convert normalized centers to pixel coordinates
            p1 = (int(p1[0] * w), int(p1[1] * h))
            p2 = (int(p2[0] * w), int(p2[1] * h))
            lineThickness = 3
            if did_swap:
                cv2.arrowedLine(img, p2, p1, (0, 255, 0), lineThickness, tipLength=0.5)
            else:
                cv2.arrowedLine(img, p1, p2, (0, 255, 0), lineThickness, tipLength=0.5)
    return area, change_in_area
def get_bbox_area(bbox):
    """
    Assume box unpacks to normalized (ymin, xmin, ymax, xmax).

    Returns (within, area): ``within`` is False when any of the four floats is
    within THRESHOLD (0.05) of the [0, 1] boundary (delegated to
    check_all_within), and ``area`` is the normalized box area.
    """
    THRESHOLD = 0.05
    (ymin, xmin, ymax, xmax) = bbox
    ret = check_all_within(bbox, threshold=THRESHOLD)
    return ret, (xmax - xmin) * (ymax - ymin)
def write_text(img, msg):
    """Draw ``msg`` roughly centered on ``img`` in white."""
    h, w, _ = img.shape
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Shift left by ~2px per character to approximately center the text
    bottomLeftCornerOfText = (w // 2 - 2 * len(msg), h // 2)
    fontScale = 1
    fontColor = (255, 255, 255)
    # BUGFIX: the 7th positional argument of cv2.putText is ``thickness``,
    # not ``lineType``; the old code passed its "lineType" value (2) there.
    # The rendered output is unchanged: thickness stays 2.
    thickness = 2
    cv2.putText(img, msg,
                bottomLeftCornerOfText,
                font,
                fontScale,
                fontColor,
                thickness)
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,592
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/command_handler.py
|
from functools import partial
import pickle
import os
from util.ReverseCommandUtil import swap
import datetime
from FlightSimulator import FlightSimulator
def get_date():
    """Return a timestamp string YYYY-MM-DD-H-M-S (time parts unpadded)."""
    now = datetime.datetime.now()
    stamp = now.strftime("%Y-%m-%d")
    # Append hour/minute/second without zero padding, dash-separated
    for part in (now.hour, now.minute, now.second):
        stamp += '-' + str(part)
    return stamp
class CommandHandler:
    """
    Tracks, validates and persists drone commands while mirroring the
    expected remote state.

    args:
        command_poster_fn is a function that can send commands to the remote server
        history is an optional path to a previous .flightlog to resume from
        x,y,z are the expected coordinates in space in centimeters
        angle_degree is the expected number of degrees counter-clockwise the remote object is from its starting position
    """
    def __init__(self, command_poster_fn, history=None, x=0, y=0, z=0, angle_degree=0):
        self.command_history = []
        self.landed = True
        self.initialized = False
        if history is not None:
            self.history = history
            self._load_history()
        else:
            self.history_location = 'history'
            if not os.path.exists(self.history_location):
                os.mkdir(self.history_location)
            self.history = 'history/flight_' + get_date() + '.flightlog'
        # Wrap an observer around the post function so every command is logged
        self.post_command = partial(self._store_history, fn=command_poster_fn)
        # Store the expected state of the remote object
        self.x = x
        self.y = y
        self.z = z
        self.angle_degree = angle_degree

    def initialize_sdk(self):
        """Put the drone into SDK command mode."""
        self.initialized = True
        self.post_command('Command')

    def track_back(self):
        """Replay the logged flight in reverse to return to the start point."""
        cp = list(self.command_history)
        cp.reverse()
        for cmd in cp:
            swapped = swap(cmd)
            self.post_command(swapped)

    def _load_history(self):
        # Resume the command log from a previous flight
        with open(self.history, 'rb') as f:
            self.command_history = pickle.load(f)

    def _store_history(self, x, fn):
        """Send command ``x`` via ``fn``, track landed state, persist the log."""
        fn(x)
        if x == 'takeoff':
            self.landed = False
        elif x == 'land':  # BUGFIX: was ``x is 'land'`` (identity, not equality)
            self.landed = True
        self.command_history.append(x)
        with open(self.history, 'wb') as f:
            pickle.dump(self.command_history, f)

    def take_off(self):
        self.post_command('takeoff')
        self.z += 100

    def land(self):
        self.post_command('land')
        self.z -= 100

    def enable_video_stream(self):
        self.post_command('streamon')

    def disable_video_stream(self):
        self.post_command('streamoff')

    def kill_power_unsafely(self):
        self.post_command('emergency')

    def up(self, x):
        self._move(x, 'up', self.up)
        self.z += x

    def down(self, x):
        self._move(x, 'down', self.down)
        self.z -= x

    def left(self, x):
        self._move(x, 'left', self.left)
        self.x -= x

    def right(self, x):
        self._move(x, 'right', self.right)
        self.x += x

    def forward(self, x):
        self._move(x, 'forward', self.forward)
        self.y += x

    def back(self, x):
        # BUGFIX: previously sent the 'up' command instead of 'back'
        self._move(x, 'back', self.back)
        self.y -= x

    def rotate_clockwise(self, x):
        self._move(x, 'cw', self.rotate_clockwise, min_x=1, max_x=360)
        # BUGFIX: x was subtracted twice (once unconditionally and once inside
        # the modular update); subtract it exactly once
        self.angle_degree = (self.angle_degree - x) % 360

    def rotate_counter_clockwise(self, x):
        self._move(x, 'ccw', self.rotate_counter_clockwise, min_x=1, max_x=360)
        self.angle_degree = (self.angle_degree + x) % 360

    def _move(self, x, cmd_prefix, curr_func, expected_type=int, min_x=20, max_x=500):
        """Validate ``x``'s type/range and post ``'<cmd_prefix> <x>'``."""
        if not isinstance(x, expected_type):
            raise TypeError('x = ' + str(x) + " argument is not a " + str(expected_type) + ' ' + str(curr_func) + ' type: ' + str(type(x)))
        # is x a float? @TODO
        if not ((x >= min_x) and (x <= max_x)):
            raise ValueError(str(x) + " argument is out of bounds in " + str(curr_func))
        self.post_command(cmd_prefix + ' ' + str(x))
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,593
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/automous_flight.py
|
import socket
import threading
import time
import numpy as np
import libh264decoder
import cv2
import os
from object_detector import DetectorAPI
#"C:\ProgramData\Miniconda2\Scripts\pip.exe" install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-2.0.0-cp27-none-linux_x86_64.whl
class AutonomousFlight:
    """
    Controls a Tello drone over UDP: sends commands, keeps the SDK alive,
    receives/decodes the raw h264 video stream and runs object detection on
    each decoded frame.
    """
    def __init__(self, local_ip='', local_port=9000, drone_ip='192.168.10.1',
                 drone_port=8889):
        """
        Binds to the local IP/port and puts the Tello into command mode.
        :param local_ip (str): Local IP address to bind.
        :param local_port (int): Local port to bind.
        :param drone_ip (str): Tello IP.
        :param drone_port (int): Tello port.
        """
        if not os.path.exists('images'):
            os.mkdir('images')
        # Command Send & Receive Queue (receive-queue use is currently
        # commented out in _receive_thread)
        self.max_len = 5
        self.rec_q = []
        self.decoder = libh264decoder.H264Decoder()
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for sending cmd
        self.socket_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for receiving video stream
        self.tello_address = (drone_ip, drone_port)
        self.socket.bind((local_ip, local_port))
        # thread for receiving cmd ack
        self.receive_thread = threading.Thread(target=self._receive_thread)
        self.receive_thread.daemon = True
        self.receive_thread.start()
        # Load up the inference graph
        model_path = 'faster_rcnn_inception_v2_coco_2018_01_28.tar/faster_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb'
        labelmap_path = 'lblmap.pbtxt'
        self.odapi = DetectorAPI(path_to_ckpt=model_path, path_to_label_map=labelmap_path)
        self.local_video_port = 11111  # port for receiving video stream
        self.socket_video.bind((local_ip, self.local_video_port))
        # thread for receiving video
        self.receive_video_thread = threading.Thread(target=self._receive_video_thread)
        self.receive_video_thread.daemon = True
        self.receive_video_thread.start()
        # Post 'command' every 5 sec to keep the SDK session alive.
        # NOTE(review): this thread is NOT a daemon, so it keeps the process
        # alive after the main loop exits — confirm intended.
        self.send_5_sec_com = threading.Thread(target=self._command_thread)
        self.send_5_sec_com.start()
        # to receive video -- send cmd: command, streamon
        self.send_command('command')
        print('sent: command')
        self.send_command('streamon')
        print('sent: streamon')

    def __del__(self):
        """Closes the local sockets."""
        self.socket.close()
        self.socket_video.close()

    def main_loop(self):
        # Block forever; all work happens on the background threads
        while True:
            time.sleep(0.1)
            pass

    def _receive_thread(self):
        """Listen to responses from the Tello.
        Runs as a thread; prints whatever the drone last returned.
        """
        while True:
            try:
                response, ip = self.socket.recvfrom(3000)
                if response is not None:
                    response_decoded = response.decode('utf-8')
                    print('message: %s' % response_decoded)
            except socket.error as exc:
                print("Caught exception socket.error in Receive thread : %s" % exc)
                time.sleep(1)
                # Dont break

    def _command_thread(self):
        """
        Posts 'command' to Tello every N seconds to keep the SDK active.
        Runs as a separate thread.
        """
        N = 5.0
        while True:
            try:
                self.send_command('command')
                time.sleep(N)
            except socket.error as exc:
                print("Caught exception socket.error in Command thread: %s" % exc)

    def _receive_video_thread(self):
        """
        Listens for video streaming (raw h264) from the Tello.
        Runs as a thread; feeds each decoded frame to the detector.
        """
        ii = 0
        packet_data = ""
        time.sleep(4)  # give the drone time to start streaming
        while True:
            try:
                res_string, ip = self.socket_video.recvfrom(2048)
                packet_data += res_string
                # end of frame: a packet shorter than 1460 bytes (but not
                # empty) terminates the current frame — presumably 1460 is the
                # full payload size of a mid-frame packet; confirm with drone
                if len(res_string) != 1460 and len(res_string) != 0:
                    for frame in self._h264_decode(packet_data):
                        # Psas a BGR image!! @TODO BGR PASSED?
                        area, change_in_area = self.odapi.processFrame(frame, ii)
                        ii += 1
                    packet_data = ""
            except socket.error as exc:
                print("Caught exception socket.error in Video thread : %s" % exc)

    def _h264_decode(self, packet_data):
        """
        decode raw h264 format data from Tello
        :param packet_data: raw h264 data array
        :return: a list of decoded frame
        """
        res_frame_list = []
        frames = self.decoder.decode(packet_data)
        for framedata in frames:
            (frame, w, h, ls) = framedata
            if frame is not None:
                # Packed bytes -> (h, linesize/3, 3), then crop the stride
                # padding down to the true width.
                # NOTE(review): ``ls / 3`` is integer division only on
                # Python 2 — confirm the target interpreter.
                frame = np.fromstring(frame, dtype=np.ubyte, count=len(frame), sep='')
                frame = (frame.reshape((h, ls / 3, 3)))
                frame = frame[:, :w, :]
                res_frame_list.append(frame)
        return res_frame_list

    def send_command(self, command):
        # Fire-and-forget UDP send; replies arrive on _receive_thread
        self.socket.sendto(command.encode('utf-8'), self.tello_address)
if __name__ == "__main__":
    # Start the drone controller (spawns its worker threads) and block forever
    t2 = AutonomousFlight()
    t2.main_loop()
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,594
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/object_detector.py
|
import numpy as np
import tensorflow as tf
import cv2
from TensorFlowObjectDetectionUtil import label_map_util
from TensorFlowObjectDetectionUtil import visualization_utils as vis_util
import os
import time
from util import ObjectTrackingUtil as util
class DetectorAPI:
    """
    Wraps a frozen TensorFlow object-detection graph: runs inference on BGR
    frames, visualizes detections and (optionally) tracks objects between
    consecutive frames.
    """
    def __init__(self, path_to_ckpt, path_to_label_map):
        assert os.path.exists(path_to_ckpt), path_to_ckpt
        assert os.path.exists(path_to_label_map), path_to_label_map
        self.path_to_ckpt = path_to_ckpt
        if not os.path.exists('images'):
            os.mkdir('images')
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        self.default_graph = self.detection_graph.as_default()
        self.sess = tf.Session(graph=self.detection_graph)
        # Definite input and output Tensors for detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        # Open images class labelmap
        NUM_CLASSES = 601
        self.MINIMUM_CONFIDENCE = 0.30
        self.idx_to_save = 0
        label_map = label_map_util.load_labelmap(path_to_label_map)
        categories = label_map_util.convert_label_map_to_categories(
            label_map,
            max_num_classes=NUM_CLASSES,
            use_display_name=True)
        self.CATEGORY_INDEX = label_map_util.create_category_index(categories)
        self.sz = (1280, 720)          # network input size (w, h)
        self.cap = None                # optional cv2.VideoCapture
        self.old_boxes = None          # previous-frame boxes for tracking
        self.old_classes = None        # previous-frame classes for tracking
        self.visualize_object_tracking = False
        self.lo_trail = []             # trails drawn by the tracking util

    def preprocess_frame(self, frame):
        """BGR -> RGB and resize to the network input size."""
        if frame is None:
            raise ValueError("None type frame received")
        return cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), self.sz)

    def set_capture(self, cap):
        self.cap = cap

    def main_stream_loop(self, pth):
        """Run detection over every image file in directory ``pth``."""
        all_im = [os.path.join(pth, i) for i in os.listdir(pth)]
        try:
            for imp in all_im:
                img = cv2.imread(imp)
                # BUGFIX: called the module-level ``odapi`` instead of self
                self.processFrame(img)
        finally:
            self.close()

    def main_loop(self):
        """Run detection on every other frame of self.cap until exhausted."""
        assert self.cap is not None, "Must Set Capture Input"
        try:
            ret = 1
            ii = 0
            while ret:
                ret, img = self.cap.read()
                if not ret:
                    break  # BUGFIX: avoid processing the None frame at stream end
                if ii % 2 == 0:
                    # BUGFIX: called the module-level ``odapi`` instead of self
                    self.processFrame(img)
                ii += 1
        except KeyboardInterrupt:
            print("Safely handled keyboard interrupt: Exiting nicely...")
        finally:
            self.close()
            cv2.destroyAllWindows()
            self.cap.release()

    def save_frame(self, frame):
        """JPEG-encode ``frame`` and write it to images/<n>.jpg."""
        ret, frame_jpg = cv2.imencode('.jpg', frame)
        if ret:
            frame_name = "images/" + str(self.idx_to_save) + ".jpg"
            assert not os.path.exists(frame_name), "Tried to overwrite frame: " + frame_name
            with open(frame_name, 'wb') as wf:
                wf.write(frame_jpg)
            # BUGFIX: the index was never advanced, so the second save always
            # tripped the overwrite assertion
            self.idx_to_save += 1

    def processFrame(self, img, frame_idx=None):
        """
        Run detection on one BGR frame, draw the results, and return the
        per-class tracking stats.

        :param img: BGR image
        :param frame_idx: optional frame counter (accepted for callers that
            pass one; currently unused)
        :return: (area, change_in_area) dicts keyed by class id — empty when
            tracking is disabled or there is no previous frame
        """
        img_preprocessed = self.preprocess_frame(img)
        (boxes, scores, classes, num_detections) = self.sess.run(
            [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: np.expand_dims(img_preprocessed, axis=0)})
        img_temp = cv2.cvtColor(img_preprocessed, cv2.COLOR_RGB2BGR)
        # Threshold by min confidence
        idc = scores > self.MINIMUM_CONFIDENCE
        boxes = boxes[idc]
        classes = classes[idc]
        scores = scores[idc]
        area, change_in_area = {}, {}
        # Object track based on classes first if possible, then by distance to center
        if self.visualize_object_tracking and self.old_boxes is not None and self.old_classes is not None:
            # Get a pair of maps of the class to area & change in area
            area, change_in_area = util.draw_line_between_shortest(
                img_temp, self.old_boxes, boxes, self.old_classes, classes, self.lo_trail)
        self.old_boxes = boxes
        self.old_classes = classes
        vis_util.visualize_boxes_and_labels_on_image_array(
            img_temp,
            boxes,
            classes.astype(np.int32),
            scores,
            self.CATEGORY_INDEX,
            use_normalized_coordinates=True
        )
        cv2.imshow('image', img_temp)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            raise KeyboardInterrupt("Exited by q")
        # BUGFIX/generalization: return the stats so callers (e.g.
        # AutonomousFlight) can unpack them; previously returned None
        return area, change_in_area

    def close(self):
        self.sess.close()
if __name__ == "__main__":
    # Demo entry point: run the Open Images Faster R-CNN model over a local
    # video file.
    # Open Images
    model_path = r"faster_rcnn_inception_resnet_v2_atrous_oid_v4_2018_12_12\frozen_inference_graph.pb"
    labelmap_path = r'faster_rcnn_inception_resnet_v2_atrous_oid_v4_2018_12_12\open_images\label_map_v4.pbtxt'
    odapi = DetectorAPI(path_to_ckpt=model_path, path_to_label_map=labelmap_path)
    pth = r"person_increasing.mp4"
    odapi.set_capture(cv2.VideoCapture(pth))
    odapi.main_loop()
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,595
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/flight_simulator.py
|
import time


class FlightSimulator:
    """Stand-in for a real drone: validates every command against the
    commander's expected state instead of sending it over the network."""

    def __init__(self):
        self.cm = None       # commander providing the expected remote state
        self.latency = 0.1   # simulated network delay in seconds

    def set_commander(self, commander):
        self.cm = commander

    def is_statelike(self, cmd):
        """Query commands (e.g. 'battery?') end with a question mark."""
        return cmd[-1] == '?'

    def check_state(self, cmd):
        """Assert that sending ``cmd`` is legal in the current state."""
        assert self.cm is not None, "Must set commander before flying"
        assert self.cm.initialized, "Must initialize SDK before starting"
        assert (self.cm.z > 0) or self.cm.landed, "Flying below takeoff pad?"
        if not self.cm.landed:
            return
        # On the ground only queries, takeoff and SDK init are allowed
        assert self.is_statelike(cmd) or cmd == 'takeoff' or cmd == 'Command', "LANDED but sent command: " + cmd

    def post_command(self, cmd):
        """Pretend to send ``cmd``: log it, wait, then validate the state."""
        print('Posting command ' + str(cmd))
        time.sleep(self.latency)
        self.check_state(cmd)
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,596
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/util/reverse_command_util.py
|
# Maps each drone command to its opposite; the reversed copy is merged in so
# lookups work in both directions (e.g. 'down' -> 'up').
# BUGFIX: 'forward'/'back' were missing, so reversing a flight containing a
# forward or back move raised KeyError in swap().
reverse_map = {'takeoff': 'land', 'up': 'down', 'left': 'right', 'forward': 'back',
               'Command': 'Command', 'streamon': 'streamoff', 'cw': 'ccw'}
reverse_map_reversed = {reverse_map[i]: i for i in reverse_map}
reverse_map.update(reverse_map_reversed)


def swap(cmd):
    """
    :param cmd: One of the drone commands, optionally with an argument ('cw 90')
    :return: The opposite drone command
        e.g. 'up' will return 'down'
    :raises KeyError: if cmd has no known opposite
    """
    if ' ' in cmd:
        # Swap only the verb and keep the numeric argument
        s = cmd.split(' ')
        s = [i for i in s if len(i.strip()) > 0]
        return swap(s[0]) + ' ' + s[-1]
    if cmd not in reverse_map:
        raise KeyError("cmd " + cmd + " is not in the reverse map")
    return reverse_map[cmd]
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,597
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/TensorFlowObjectDetectionUtil/repeat_area_util.py
|
import sys
sys.path.append('/home/nvidia/.local/lib/python2.7/site-packages')
def iou_calculator(b1, b2):
    """
    Intersection-over-union of two boxes given as (x_min, y_min, x_max, y_max).

    BUGFIX: for non-overlapping boxes the old code multiplied two negative
    extents, yielding a bogus positive intersection (or a negative IoU when
    only one axis overlapped); the extents are now clamped at zero.
    """
    area1 = (b1[2] - b1[0]) * (b1[3] - b1[1])
    area2 = (b2[2] - b2[0]) * (b2[3] - b2[1])
    xi_min = max(b1[0], b2[0])
    yi_min = max(b1[1], b2[1])
    xi_max = min(b1[2], b2[2])
    yi_max = min(b1[3], b2[3])
    inter_area = max(0, yi_max - yi_min) * max(0, xi_max - xi_min)
    union_area = area1 + area2 - inter_area
    iou = float(inter_area) / float(union_area)
    return iou
def update_bb_recorder(bb, de_bb):
    """
    Merge freshly detected boxes ``de_bb`` into the recorder ``bb``.

    Each recorder entry is ``[bbox, show_count]``: a re-detected box
    (IoU >= 0.9 with an existing entry) gains +2 to its count, then every
    entry decays by 1, entries reaching 0 are dropped and counts are capped
    at 720.
    NOTE(review): ``bb`` is aliased (not copied), so the caller's list is
    mutated in place — confirm this is intended.
    """
    new_bb_recorder = bb
    for i in range(len(de_bb)):
        find_repeat = False
        for j in range(len(bb)):
            iou = iou_calculator(de_bb[i], bb[j][0])
            if iou >= 0.9:
                find_repeat = True
                # Same object seen again: refresh its box and boost the count
                new_bb_recorder[j] = [de_bb[i], bb[j][1]+2]
                break
        if not find_repeat:
            # Brand-new object: start it with a count of 2
            new_bb_recorder.append([de_bb[i], 2])
    new_b_r = []
    for bbox in new_bb_recorder:
        # Decay by one per call; prune dead entries and cap the rest at 720
        show_count = bbox[1] - 1
        if show_count < 1:
            continue
        if show_count > 720:
            new_b_r.append([bbox[0], 720])
        else:
            new_b_r.append([bbox[0], show_count])
    return new_b_r
def box_not_repeated(box, repeats):
    """True unless ``box`` overlaps (IoU >= 0.9) some box in ``repeats``."""
    return all(iou_calculator(known, box) < 0.9 for known in repeats)
def update_repeat_area_variables(bb_recorder, new_bboxes):
    """Advance the recorder with the new detections and return it together
    with the boxes that have persisted for more than 360 frames."""
    bb_recorder = update_bb_recorder(bb_recorder, new_bboxes)
    repeat_bbox = [entry[0] for entry in bb_recorder if entry[1] > 360]
    return bb_recorder, repeat_bbox
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,598
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/TensorFlowObjectDetectionUtil/usb_prepare_util.py
|
import time
import os
import datetime
import sys
import subprocess
sys.path.append('/home/nvidia/.local/lib/python2.7/site-packages')
def preparatory_work():
    """
    Ensure the USB drive is mounted and today's frame folders exist.

    Returns (today, path_today, framenum) where ``framenum`` is the next
    frame index to write under today's folder.
    """
    today = datetime.date.today()
    path_today = "/mnt/usb/" + str(today)
    if not os.path.ismount('/mnt/usb'):
        mount_usb()
    t = time.time()
    # Wait up to ~30s for the mounted contents to appear.
    # NOTE(review): this loop has no sleep, so it spins a CPU core while
    # waiting — confirm whether a short sleep would be acceptable here.
    while not os.path.exists(path_today):
        if int(time.time() - t) > 30:
            break
    delete_previous_day_folders(today)
    if not os.path.exists(path_today):
        os.makedirs(path_today)
        os.makedirs(path_today + "/all_frames/all_frames/")
        os.makedirs(path_today + "/all_frames/log/")
    framenum = find_its_max_frame(path_today) + 1
    return today, path_today, framenum
def _retry_shell_until_success(cmd, start_time):
    """Retry shell ``cmd`` until it exits 0, printing its output each try.

    Logs error 4 (no USB) once per failed attempt after 60s from
    ``start_time`` have elapsed; keeps retrying until the command succeeds.
    """
    while True:
        try:
            output = subprocess.check_output(cmd, shell=True)
            print(output)
            return
        except subprocess.CalledProcessError as e:
            print(e.output)
            if (time.time() - start_time) > 60:
                write_to_error_file(4)


def mount_usb():
    """(Re)mount the USB device /dev/dm-0 at /mnt/usb, retrying each step.

    Refactor: the original repeated the identical retry loop three times;
    the loop now lives in _retry_shell_until_success.
    """
    time_now = time.time()
    # Recreate the mount point from scratch
    if os.path.exists("/mnt/usb"):
        os.system("sudo rm -r /mnt/usb")
    os.system("sudo mkdir /mnt/usb")
    _retry_shell_until_success("ls /dev/dm-0", time_now)            # wait for the device node
    _retry_shell_until_success("sudo umount /dev/dm-0", time_now)   # clear any stale mount
    _retry_shell_until_success("sudo mount /dev/dm-0 /mnt/usb", time_now)
def delete_previous_day_folders(today):
    """
    Remove empty per-day folders from the USB, keeping today's folder.

    Logs error 3 when a non-empty previous-day folder is found (it should
    have been pushed off the device already).

    :param today: datetime.date of the current day
    """
    list_days = os.listdir("/mnt/usb/")
    for days in list_days:
        path_to_day = "/mnt/usb/" + days
        try:
            length = len([i for i in os.listdir(path_to_day + "/all_frames/all_frames/")])
        except OSError:
            # Folder structure missing -> treat the day as empty
            length = 0
        # BUGFIX: was ``str(days) != (today)`` — a str vs datetime.date
        # comparison that is always True, so today's (still empty) folder
        # could be deleted as well.
        if length == 0 and str(days) != str(today):
            os.system("sudo rm -r " + path_to_day)
            print("sudo rm -r " + path_to_day)
        else:
            if str(days) != str(today):
                write_to_error_file(3)
def find_usb_percent_full():
    """Parse ``df /dev/dm-0`` output and return the use percentage string."""
    df_output = subprocess.check_output("df /dev/dm-0", shell=True)
    # Penultimate whitespace token is e.g. '42%'; strip the trailing '%'
    tokens = str(df_output).split()
    return tokens[-2][:-1]
def write_to_error_file(error):
    """
    Record an error code for the external monitoring/ping script.

    ERROR 1: The USB is filling up (it is at least 75% full).
    ERROR 2: The USB is almost completely full (95%) and recording frames has stopped.
    ERROR 3: There are folders from at least one previous day that haven't been pushed for some reason.
    ERROR 4: no usb.

    NOTE: the file is overwritten, so only the most recent error is kept.
    """
    with open("/home/nvidia/ping_errors/errors.txt", 'w') as the_file:
        the_file.write("!error" + str(error) + "!")
def find_its_max_frame(path_today):
    """
    Return the highest frame number already written under ``path_today``,
    or -1 when no frame folders exist yet. Frame folders are named
    'folder<N>' and hold up to 1000 frames each.
    """
    frames_root = path_today + '/all_frames/all_frames/'
    folder_names = os.listdir(frames_root)
    if not folder_names:
        return -1
    newest = max(int(name[6:]) for name in folder_names)
    frames_in_newest = len(os.listdir(frames_root + 'folder' + str(newest) + '/'))
    return newest * 1000 + frames_in_newest - 1
def usb_storage_check():
    """Warn (via the error file) as the USB fills up; free space at >= 90%."""
    try:
        percent = find_usb_percent_full()
        print(str(percent) + "% full")
    except:
        # df can fail when the device is wedged; report unknown as 0
        percent = 0
        print("Cannot allocate memory")
    pct = int(percent)
    if 75 <= pct < 90:
        write_to_error_file(1)
    elif pct >= 90:
        write_to_error_file(2)
        remove_redundant_data("/mnt/usb/", 30)
def remove_redundant_data(path_init, num):
    """
    Delete up to ``num`` of the oldest frame folders to reclaim USB space.

    Day directories with no frame folders are removed entirely until a day
    with content is found; then either all of its frame folders (when there
    are <= num) or the ``num`` lowest-numbered ones are deleted.
    """
    path = get_oldest_folder(path_init)
    folder_list = os.listdir(path)
    # NOTE(review): path[:-22] assumes ``path`` ends with the 23-char suffix
    # '/all_frames/all_frames/', leaving the day directory plus its trailing
    # slash — confirm against get_oldest_folder's output format.
    while len(folder_list) == 0:
        os.system('sudo rm -rf {}'.format(path[:-22]))
        path = get_oldest_folder(path_init)
        folder_list = os.listdir(path)
    if len(folder_list) <= num:
        for folder in folder_list:
            os.system('sudo rm -rf {}{}'.format(path, folder))
    else:
        # Folder names are 'folder<N>'; delete the num oldest by number
        folder_list = [int(i[6:]) for i in folder_list]
        folder_list.sort()
        for i in range(num):
            os.system('sudo rm -rf {}{}{}'.format(path, 'folder', str(folder_list[i])))
def get_oldest_folder(path):
    """Return the frame-folder directory of the lexicographically oldest day
    under ``path`` (day names are ISO dates, so oldest sorts first)."""
    day_names = sorted(os.listdir(path))
    return path + day_names[0] + "/all_frames/all_frames/"
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,599
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/example_flight_simulation.py
|
from flight_simulator import FlightSimulator
from command_handler import CommandHandler
def run_simulation():
    """Fly a short scripted mission against the simulator, then retrace it."""
    sim = FlightSimulator()
    commander = CommandHandler(command_poster_fn=sim.post_command)
    sim.set_commander(commander)
    # Scripted mission: take off, move, rotate, bounce, rotate again
    commander.initialize_sdk()
    commander.take_off()
    commander.right(20)
    commander.rotate_clockwise(15)
    commander.land()
    commander.take_off()
    commander.rotate_clockwise(15)
    print('\n\n\ntracing route back...\n\n\n')
    commander.track_back()
    print('flight logs saved to logs')


if __name__ == '__main__':
    run_simulation()
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,600
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/visualize_object_detection.py
|
import numpy as np
import time
import tensorflow as tf
import cv2
import os
from TensorFlowObjectDetectionUtil import label_map_util
from TensorFlowObjectDetectionUtil import visualization_utils as vis_util
# Configuration for the COCO Faster R-CNN visualization demo.
NUM_CLASSES = 80
MINIMUM_CONFIDENCE = 0.75
height = 720
width = 1280
queue_filter = True  # NOTE(review): unused in this script — confirm it can go
PATH_TO_LABELS = 'labelmaps/label_map.pbtxt'
PATH_TO_CKPT = 'faster_rcnn_inception_v2_coco_2018_01_28.tar/faster_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb'
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map,
    max_num_classes=NUM_CLASSES,
    use_display_name=True)
# Maps class id -> category dict for the visualization util
CATEGORY_INDEX = label_map_util.create_category_index(categories)
def get_frozen_graph(graph_file):
    """Read Frozen Graph file from disk and return the parsed GraphDef."""
    with tf.Graph().as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(graph_file, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
        return graph_def
trt_graph = get_frozen_graph(PATH_TO_CKPT)
pth = r"person_increasing.mp4"
assert os.path.exists(pth)
cap = cv2.VideoCapture(pth)
# Running the tensorflow session
with tf.Session() as sess:
    tf.import_graph_def(trt_graph, name='')
    image_tensor = sess.graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = sess.graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = sess.graph.get_tensor_by_name('detection_scores:0')
    detection_classes = sess.graph.get_tensor_by_name('detection_classes:0')
    num = sess.graph.get_tensor_by_name('num_detections:0')
    while True:
        t = time.time()
        ret, img_temp = cap.read()
        # BUGFIX: stop cleanly at end of video instead of crashing when
        # cap.read() returns (False, None)
        if not ret:
            break
        frame = cv2.cvtColor(img_temp, cv2.COLOR_BGR2RGB)
        image_np_expanded = np.expand_dims(frame, axis=0)
        (boxes, scores, classes, num_detections) = sess.run(
            [detection_boxes, detection_scores, detection_classes, num],
            feed_dict={image_tensor: image_np_expanded})
        # Keep only confident detections
        keep = scores > MINIMUM_CONFIDENCE
        boxes = boxes[keep]
        classes = classes[keep]
        scores = scores[keep]
        vis_util.visualize_boxes_and_labels_on_image_array(
            img_temp,
            boxes,
            classes.astype(np.int32),
            scores,
            CATEGORY_INDEX,
            use_normalized_coordinates=True
        )
        cv2.imshow('image', img_temp)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
        print("FPS:" + str(1 / (time.time() - t)))
cap.release()
cv2.destroyAllWindows()
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,601
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/TensorFlowObjectDetectionUtil/video_decision_util.py
|
import sys
import time
sys.path.append('/home/nvidia/.local/lib/python2.7/site-packages')
def find_out_playing_video(video_counter, video_map, queue_threshold, detection):
    """
    Update per-object sliding detection windows and decide which video plays.

    :param video_counter: object name -> fixed-length list of 0/1 hit flags
    :param video_map: video name -> list of object names that trigger it
    :param queue_threshold: object name -> minimum hits in the window
    :param detection: dict with a 'names' list of objects seen this frame
    :return: (prediction string of triggered videos with a timestamp,
              the updated video_counter)
    """
    # Slide each window: push this frame's hit/miss, drop the oldest entry
    for name in video_counter:
        video_counter[name].append(1 if name in detection['names'] else 0)
        video_counter[name].pop(0)
    last_prediction = ''
    for name in video_counter:
        if sum(video_counter[name]) >= queue_threshold[name]:
            # First video mapped to this object name wins
            # (the old unused ``find_video`` flag has been removed)
            for video in video_map:
                if name in video_map[video]:
                    last_prediction = last_prediction + ' ' + video
                    break
            last_prediction = last_prediction + ' ' + str(time.time())
    return last_prediction, video_counter
def need_to_sleep(start, end):
    """
    True when the current time, encoded as the integer HHMMSS, falls after
    ``start`` or before ``end`` (supports overnight windows such as
    start=220000, end=60000).
    """
    now_hhmmss = int(time.strftime("%H%M%S", time.localtime()))
    return start < now_hhmmss or now_hhmmss < end
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,602
|
MachineLearningMaster/AutonomousDrone
|
refs/heads/master
|
/joy_stick_mode.py
|
# Guard: this module has import-time side effects (UDP socket bind, threads,
# keyboard hook), so refuse to be imported as a library.
if __name__ != '__main__':
    raise Exception("joy stick mode is not designed to be imported")
import threading
import socket
import keyboard
import time
import os
import traceback
def check_root():
    """Exit unless running as root; skipped on platforms without geteuid."""
    if hasattr(os, "geteuid") and os.geteuid() != 0:
        print("This script needs to be run as root.")
        exit()
class KeyListener:
    """Hooks keyboard events on a background thread and dispatches per-key
    callbacks registered via register_callback()."""
    def __init__(self):
        check_root()
        self.on = True            # loop flag; cleared by exit_nice()
        self.verbose = False
        # NOTE(review): under Python 2 ``1/24`` is integer division (0),
        # which would turn _main into a busy loop — confirm the interpreter.
        self.fps = 1/24
        self.listener_thread = None
        self.key_map = {}         # key name -> list of callbacks

    def exit_nice(self):
        """Stop the loop and wait for the listener thread to finish."""
        self.on = False
        if self.listener_thread is not None:
            self.listener_thread.join()

    def key_event(self, e):
        """Keyboard hook: handle quit and dispatch key-down callbacks."""
        if self.verbose:
            # if e.event_type == "up":
            #     print("Key up: " + str(e.name))
            # if e.event_type == "down":
            #     print("Key down: " + str(e.name))
            # NOTE(review): the 'q'-to-quit check only runs in verbose mode,
            # and verbose defaults to False — confirm this is intended.
            if e.name == "q":
                self.exit_nice()
                print("Quitting")
        if e.event_type == "down":
            if e.name in self.key_map:
                lo_cb = self.key_map[e.name]
                for cb in lo_cb:
                    cb(e.name)

    def register_callback(self, key, callback):
        """Attach ``callback`` to ``key``; multiple callbacks per key stack."""
        if not isinstance(key, str):
            raise TypeError('arg: "key" must be a ' + str(type("")))
        if key in self.key_map:
            keys_so_far = self.key_map[key]
            keys_so_far.append(callback)
        else:
            self.key_map[key] = [callback]

    def start(self):
        """Launch the listener loop on a background thread."""
        self.listener_thread = threading.Thread(target=self._main)
        self.listener_thread.start()

    def _main(self):
        # Hook all keyboard events, then idle until self.on is cleared.
        # NOTE(review): the final exit_nice() runs on the listener thread and
        # joins it from itself — confirm this doesn't raise at shutdown.
        keyboard.hook(self.key_event)
        while self.on:
            time.sleep(self.fps)
        self.exit_nice()
host = ''
port = 9000
locaddr = (host, port)
# Create a UDP socket for talking to the Tello
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tello_address = ('192.168.10.1', 8889)
sock.bind(locaddr)


def recv():
    """Print every response the drone sends; runs forever on its thread."""
    while True:
        data, server = sock.recvfrom(1518)
        print(data.decode(encoding="utf-8"))
        time.sleep(0.005)


# recvThread create
recvThread = threading.Thread(target=recv)
recvThread.start()

# Keyboard key -> Tello SDK command. The 'rc a b c d' form sets the virtual
# sticks: first value left/right, second forward/back, third up/down
# (fourth presumably yaw per the Tello SDK — confirm).
com_map = {
    'right': 'rc 80 0 0 0',
    'left': 'rc -80 0 0 0',
    'back': 'rc 0 -80 0 0',
    'forward': 'rc 0 80 0 0',
    'up': 'rc 0 0 80 0',
    'down': 'rc 0 0 -80 0',
    'space': 'rc 0 0 0 0',
    'c': 'command',
    't': 'takeoff',
    'l': 'land',
    'b': 'battery?',
}
def joy(e):
    """Translate key name ``e`` into a Tello command and send it over UDP."""
    print("hit " + str(e) + '\n')
    if e not in com_map:
        sock.close()
        raise ValueError(e + " not in controller map")
    msg = com_map[e]
    # Send data
    print("sent " + msg + '\n')
    sock.sendto(msg.encode(encoding="utf-8"), tello_address)
# def joy_up(e):
#     joy('up')
#
# def joy_down(e):
#     joy('up')
# (The two handlers above were already disabled in the original.)

def _joy_handler(command):
    """Build a key-callback that forwards *command* to joy()."""
    def handler(e):
        joy(command)
    return handler

joy_left = _joy_handler('left')
joy_right = _joy_handler('right')
joy_forward = _joy_handler('forward')
joy_back = _joy_handler('back')
joy_stabilize = _joy_handler('space')
joy_land = _joy_handler('l')
joy_takeoff = _joy_handler('t')
joy_command = _joy_handler('c')
joy_battery = _joy_handler('b')
# Wire each key to its drone action and start the listener thread.
kl = KeyListener()
kl.register_callback("left", joy_left)
kl.register_callback("right", joy_right)
# Arrow up/down map to forward/back translation, not altitude.
kl.register_callback("up", joy_forward)
kl.register_callback("down", joy_back)
kl.register_callback("space", joy_stabilize)   # zero all RC channels
kl.register_callback("l", joy_land)
kl.register_callback("t", joy_takeoff)
kl.register_callback("c", joy_command)         # enter SDK mode
kl.register_callback("b", joy_battery)
# kl.verbose=True
kl.start()
|
{"/automous_flight.py": ["/object_detector.py"], "/example_flight_simulation.py": ["/flight_simulator.py", "/command_handler.py"]}
|
28,603
|
AnimationInVR/avango-blender
|
refs/heads/master
|
/__init__.py
|
# Add-on metadata read by Blender before the module is executed.
# NOTE(review): description/wiki_url and "b4w_format_version" look
# copied from Blend4Web — confirm these fields are intended.
bl_info = {
    "name": "Avango",
    "author": "Avango Development Team",
    "version": (14, 11, 0),
    "blender": (2, 72, 0),
    "b4w_format_version": "5.01",
    "location": "File > Import-Export",
    "description": "Avango is a Blender-friendly 3D web framework",
    "warning": "",
    "wiki_url": "http://www.blend4web.com/doc",
    "category": "Import-Export"
}
# Reload-friendly imports: when Blender re-runs the add-on ("Reload
# Scripts"), "bpy" is already in locals(), so reload the submodules in
# place rather than importing stale cached copies.
if "bpy" in locals():
    import imp
    imp.reload(properties)
    imp.reload(interface)
    imp.reload(node_tree)
    imp.reload(field_container)
    imp.reload(exporter)
else:
    from . import properties
    from . import interface
    from . import node_tree
    from . import field_container
    from . import exporter

import bpy
import nodeitems_utils
import nodeitems_utils as nu
from bpy.props import StringProperty
import os
# Node categories for the node editor's "Add" menu: every Avango node
# currently lives under a single "avango.osg" category.
node_categories = [
    node_tree.AvangoNodeCategory("SOMENODES", "avango.osg", items=[
        nu.NodeItem("SceneGraph"),
        nu.NodeItem("Viewer"),
        nu.NodeItem("Window"),
        nu.NodeItem("Camera"),
        nu.NodeItem("Light"),
        nu.NodeItem("Screen"),
        nu.NodeItem("Transform"),
        nu.NodeItem("Mesh"),
    ]),
    #node_tree.AvangoNodeCategory("SOMENODES", "Nodes", items=[
    #]
    #node_tree.AvangoNodeCategory("SOMENODES", "Texture", items=[
    #]
    #node_tree.AvangoNodeCategory("SOMENODES", "Loader", items=[
    #]
]
def register():
    """Register every add-on submodule, then the Avango node categories."""
    for module in (properties, interface, node_tree, field_container, exporter):
        module.register()
    # Design notes (translated from the original German comments):
    # Idea: keep the root on the SceneGraph.
    # 1) Every object carries a CollectionProperty; if a hierarchy
    #    node's CollectionProperty is empty when the scene graph is
    #    exported, a default FieldContainer of the matching type is
    #    created.
    # 2) When a node is deleted, the reference to it should first be
    #    removed from any Blender objects that still point at it.
    #bpy.types.Object.avango_nodes = bpy.props.CollectionProperty(type=bpy.types.PropertyGroup)
    # Guard against double registration on script reload.
    if 'AVANGO_NODES' not in nu._node_categories:
        nu.register_node_categories('AVANGO_NODES', node_categories)
def unregister():
    """Unregister all submodules, then tear down the node categories."""
    for module in (properties, interface, node_tree, field_container, exporter):
        module.unregister()
    # Only remove the categories if they are actually registered.
    if 'AVANGO_NODES' in nu._node_categories:
        nu.unregister_node_categories('AVANGO_NODES')
# Allow running the file directly from Blender's text editor.
if __name__ == "__main__":
    register()
|
{"/field_container.py": ["/__init__.py"], "/properties.py": ["/interface.py"], "/exporter.py": ["/__init__.py"]}
|
28,604
|
AnimationInVR/avango-blender
|
refs/heads/master
|
/field_container.py
|
import bpy
from bpy.types import NodeTree, Node, NodeSocket
from bpy.props import StringProperty, IntProperty, FloatProperty, IntVectorProperty, FloatVectorProperty, BoolProperty
from . import node_tree
from bpy.app.handlers import persistent
# field connection socket color
# TODO:
class Camera(Node, node_tree.AvangoCustomTreeNode):
    """Node wrapping a Blender camera for the Avango viewer setup."""

    bl_idname = 'Camera'
    bl_label = 'Camera'

    def update_name(self, context):
        """Keep the referenced Blender object's name in sync with the node.

        Fix: dropped the stray dead local `lamp = None` that was copied
        over from the Light class.
        """
        if self.referenced_object in bpy.data.objects:
            bpy.data.objects[self.referenced_object].name = self.name
            self.referenced_object = self.name
        else:
            print("Error: failed referenced_object")

    name = StringProperty(description='name', update=update_name)
    scenegraph = StringProperty(
        default='SceneGraph',
        description='name of scenegraph'
    )
    output_window_name = StringProperty(
        default='Window',
        description=''
    )
    referenced_object = StringProperty(
        description='name of referenced blender object'
    )
    resolution = IntVectorProperty(
        name="Resolution",
        description="resolution",
        default=(1024,768),
        min=1,
        size=2
    )
    left_screen_path = StringProperty(
        description='path to screen node used for the left eye',
        default='/Screen'
    )

    def init(self, context):
        pass

    def draw_buttons(self, context, layout):
        """Draw the camera node's properties in the node editor."""
        scene = context.scene
        col = layout.column()
        col.prop(self, 'name', text='Name')
        col.prop(self, 'scenegraph', text='SceneGraph')
        col.prop(self, 'output_window_name', text='OutputWindowName')
        col.prop(self, 'resolution', text='Resolution')
        col.prop(self, 'left_screen_path', text='LeftScreenPath')
        col.label(text='Camera: '+self.referenced_object, icon='CAMERA_DATA')
        # browse cameras
        col.prop_search(self, 'referenced_object', bpy.data, 'cameras',
                        text='', icon='CAMERA_DATA')

    def process(self):
        pass

    def get_args(self):
        pass
class Screen(Node, node_tree.AvangoCustomTreeNode):
    """Node describing a projection screen; creates an empty Blender
    object on init to carry its transform."""

    bl_idname = 'Screen'
    bl_label = 'Screen'

    def update_name(self, context):
        """Keep the referenced Blender object's name in sync with the node."""
        if self.referenced_object in bpy.data.objects:
            bpy.data.objects[self.referenced_object].name = self.name
            self.referenced_object = self.name
        else:
            print("Error: failed referenced_object")

    # Screen extent — presumably in meters; TODO confirm units.
    width = FloatProperty(default=2, step=0.001, min=0.01)
    height = FloatProperty(
        default=1.5, step=0.001, min=0.01)
    referenced_object = StringProperty(
        default='',
        description='name of referenced blender object'
    )
    name = StringProperty(description='name', update=update_name)

    def init(self, context):
        # Adding the node also creates an empty to position the screen.
        bpy.ops.object.empty_add(type='PLAIN_AXES')
        obj = bpy.context.object
        self.referenced_object = obj.name
        self.name = obj.name
        # Back-reference from the Blender object to this node.
        obj["avango_nodes"] = self.name

    def draw_buttons(self, context, layout):
        scene = context.scene
        col = layout.column()
        col.prop(self, 'name', text='Name')
        col.prop(self, 'width', text='Width')
        col.prop(self, 'height', text='Heigth')  # NOTE(review): label typo for "Height"

    def process(self):
        pass

    def get_args(self):
        pass
class Light(Node, node_tree.AvangoCustomTreeNode):
    """Node wrapping a Blender lamp."""

    bl_idname = 'Light'
    bl_label = 'Light'

    def update_name(self, context):
        """Keep the referenced Blender object's name in sync with the node.

        Fix: removed the unused local `lamp = None`.
        """
        if self.referenced_object in bpy.data.objects:
            bpy.data.objects[self.referenced_object].name = self.name
            self.referenced_object = self.name
        else:
            print("Error: failed referenced_object")

    name = StringProperty(description='name', update=update_name)
    referenced_object = StringProperty(
        default='',
        description='name of referenced blender object'
        #update=update_node
    )
    #update= todo when update , add my name to blender object

    def init(self, context):
        pass

    def draw_buttons(self, context, layout):
        scene = context.scene
        col = layout.column()
        col.prop(self, 'name', text='Name')
        col.prop_search(self, 'referenced_object', bpy.data, 'lamps',
                        text='', icon='LAMP_DATA')
        col.label(text='Light: '+self.referenced_object, icon='LAMP_DATA')

    def free(self):
        """Node deleted: drop the back-reference stored on the Blender object."""
        print("Light unregister shit")
        i = bpy.data.objects.find(self.referenced_object)
        if -1 != i :
            print("Remove link to me")
            obj = bpy.data.objects[self.referenced_object]
            if obj.get("avango_nodes"):
                # BUG FIX: filter() takes (function, iterable); the original
                # passed them swapped — filter(stored.__ne__, self.name) —
                # which iterates the *characters* of this node's name.
                # Intended behavior: remove this node's name from the
                # stored references.
                obj["avango_nodes"] = list(filter(self.name.__ne__, obj["avango_nodes"]))

    def process(self):
        pass

    def get_args(self):
        pass
class Mesh(Node, node_tree.AvangoCustomTreeNode):
    """Node referencing a Blender mesh datablock for export."""

    bl_idname = 'Mesh'
    bl_label = 'Mesh'

    referenced_object = StringProperty(description='linked mesh',
        #update= todo when update , add my name to blender object
    )
    is_animation_hack = BoolProperty(
        description='is this an animation',
        default=False
    )

    def init(self, context):
        pass

    def draw_buttons(self, context, layout):
        scene = context.scene
        col = layout.column()
        col.prop(self, 'name', text='Name')
        col.prop_search(self, 'referenced_object', bpy.data, 'meshes',
                        text='', icon='MESH_DATA')
        col.prop(self, 'is_animation_hack', text='is animation')

    def process(self):
        pass

    def get_args(self):
        pass

    def free(self):
        """Node deleted: drop the back-reference stored on the Blender object."""
        print("Mesh unregister shit")
        i = bpy.data.objects.find(self.referenced_object)
        if -1 != i :
            print("Remove link to me")
            obj = bpy.data.objects[self.referenced_object]
            if obj.get("avango_nodes"):
                # BUG FIX: filter(function, iterable) — the original passed
                # the arguments swapped, iterating the characters of
                # self.name.  Remove this node's name from the stored
                # references instead (same fix as Light.free).
                obj["avango_nodes"] = list(filter(self.name.__ne__, obj["avango_nodes"]))
class SceneGraph(Node, node_tree.AvangoCustomTreeNode):
    """Node representing the Avango scene graph; adding it creates an
    'Av_root' empty that serves as the graph root."""

    bl_idname = 'SceneGraph'
    bl_label = 'SceneGraph'

    # Name of the Blender object serving as the graph root.
    root = StringProperty(description='root node', default='Av_root')

    def init(self, context):
        self.outputs.new('SceneGraphSocketType', 'SceneGraph')
        bpy.ops.object.empty_add(type='PLAIN_AXES')
        bpy.context.object.name = 'Av_root'

    def draw_buttons(self, context, layout):
        col = layout.column()
        col.prop(self, 'name', text='Name')
        col.label(text='Root: '+self.root, icon='OBJECT_DATA')
        # col.prop_search(self, 'root', bpy.data, 'objects',
        # text='', icon='OBJECT_DATA')

    def process(self):
        pass

    def get_args(self):
        pass
'''
class MatrixSocket(NodeSocket):
# 4x4 matrix Socket_type
# ref: http://urchn.org/post/nodal-transform-experiment
bl_idname = "MatrixSocket"
bl_label = "Matrix Socket"
prop_name = StringProperty(default='')
def sv_get(self, default=sentinel, deepcopy=True):
if self.is_linked and not self.is_output:
return SvGetSocket(self, deepcopy)
elif default is sentinel:
raise SvNoDataError
else:
return default
def sv_set(self, data):
SvSetSocket(self, data)
def draw(self, context, layout, node, text):
if self.is_linked:
layout.label(text + '. ' + SvGetSocketInfo(self))
else:
layout.label(text)
def draw_color(self, context, node):
#if self.is_linked:
# return(.8,.3,.75,1.0)
#else:
return(.2, .8, .8, 1.0)
'''
class Transform(Node, node_tree.AvangoCustomTreeNode):
    """Node for a transform in the scene graph; creates an empty Blender
    object on init to carry the matrix."""

    bl_idname = 'Transform'
    bl_label = 'Transform'

    def update_name(self, context):
        """Keep the referenced Blender object's name in sync with the node."""
        if self.referenced_object in bpy.data.objects:
            bpy.data.objects[self.referenced_object].name = self.name
            self.referenced_object = self.name
        else:
            print("Error: failed referenced_object")

    name = StringProperty(description='name', update=update_name)
    referenced_object = StringProperty(default='transform',
        description='identifies this FieldContainer')

    def init(self, context):
        bpy.ops.object.empty_add(type='PLAIN_AXES')
        obj = bpy.context.object
        # bpy.context.object.name = 'transform'
        # self.inputs.new('MatrixSocketType', 'Transform')
        self.referenced_object = obj.name
        self.name = obj.name

    def draw_buttons(self, context, layout):
        col = layout.column()
        col.prop(self, 'name', text='Name')
        col.prop_search(self, 'referenced_object', bpy.data, 'objects',
                        text='', icon='OBJECT_DATA')

    def process(self):
        pass

    def get_args(self):
        pass
class Viewer(Node, node_tree.AvangoCustomTreeNode):
    """Node wiring a Window, SceneGraph and Camera into a viewer."""

    bl_idname = 'Viewer'
    bl_label = 'Viewer'

    def init(self, context):
        self.color = (0.4022911489, 0.6329187, 0.841202378)
        self.inputs.new('WindowSocketType', 'Window')
        self.inputs.new('SceneGraphSocketType', 'SceneGraph')
        self.inputs.new('CameraSocketType', 'Camera')

    def draw_buttons(self, context, layout):
        col = layout.column()
        col.prop(self, 'name', text='Name')
        #col.operator("node.sp_serialize_synthdef", text='make synthdef')

    def process(self):
        pass

    def get_args(self):
        pass
class Window(Node, node_tree.AvangoCustomTreeNode):
    """Node describing an output window (display, title, size, position,
    stereo mode input)."""

    bl_idname = 'Window'
    bl_label = 'Window'

    display_field = StringProperty(description='display number', default=':0.0')
    title_field = StringProperty(description='window title', default='beautiful')
    left_size = IntVectorProperty(
        name="Resolution",
        description="size",
        default=(1024,768),
        min=1,
        size=2
    )
    left_pos = IntVectorProperty(
        name="Position",
        description="size",
        default=(0,0),
        min=0,
        size=2
    )
    enabled = BoolProperty(
        name = "enabled",
        description="enabled",
        default=True
    )

    def init(self, context):
        self.inputs.new('StereoModeSocketType', "StereoMode")
        self.outputs.new('WindowSocketType', 'Window')

    def draw_buttons(self, context, layout):
        # NOTE(review): the `enabled` property is never drawn here —
        # confirm whether it should be exposed in the UI.
        col = layout.column()
        col.prop(self, 'name', text='Name')
        col.prop(self, 'title_field', text='Title')
        col.prop(self, 'display_field', text='Display')
        col.prop(self, 'left_size', text='LeftSize')
        col.prop(self, 'left_pos', text='LeftPosition')

    def process(self):
        pass

    def get_args(self):
        pass
# Sockets - theses Correspond to the various field types
class CameraSocket(NodeSocket):
    '''Camera node socket type'''
    bl_idname = 'CameraSocketType'
    bl_label = 'Camera Socket'

    # def camera_select(self, context):
    #     cams = bpy.data.cameras
    #     return [(c.name,c.name,"") for c in cams]
    #
    # cameraProperty = bpy.props.EnumProperty(items=camera_select)

    # Optional function for drawing the socket input value
    def draw(self, context, layout, node, text):
        # if self.is_output or self.is_linked:
        layout.label(text)
        # else:
        # layout.prop(self, "cameraProperty", text=text)

    # Socket color
    def draw_color(self, context, node):
        # Blue-ish, half transparent.
        return (0.216, 0.4, 1.0, 0.5)
class WindowSocket(NodeSocket):
    '''Custom node socket type'''
    bl_idname = 'WindowSocketType'
    bl_label = 'Window Socket'

    # Optional function for drawing the socket input value
    def draw(self, context, layout, node, text):
        # if self.is_output or self.is_linked:
        layout.label(text)
        # else:
        # layout.prop(self, "stereo_mode", text=text)

    # Socket color
    def draw_color(self, context, node):
        # Orange, half transparent.
        return (1.0, 0.4, 0.216, 0.5)
class SceneGraphSocket(NodeSocket):
    '''SceneGraph node socket type'''
    bl_idname = 'SceneGraphSocketType'
    bl_label = 'SceneGraph Socket'

    # Optional function for drawing the socket input value
    def draw(self, context, layout, node, text):
        # if self.is_output or self.is_linked:
        layout.label(text)
        # else:
        # layout.prop(self, "stereo_mode", text=text)

    # Socket color
    def draw_color(self, context, node):
        # Same orange as WindowSocket.
        return (1.0, 0.4, 0.216, 0.5)
# Custom socket type
class StereoModeSocket(NodeSocket):
    # Description string
    '''Custom node socket type'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'StereoModeSocketType'
    bl_label = 'Stereo Mode Socket'

    # Enum items list: (identifier, UI name, description).
    # NOTE(review): both anaglyph entries share the UI name "Anaglyph" —
    # confirm they are distinguishable enough in the dropdown.
    modes = [
        ("MONO", "Mono", "mono"),
        ("SIDE_BY_SIDE", "SideBySide", "side by side stereo"),
        ("ANAGLYPH_RED_GREEN", "Anaglyph", "anaglyph stereo"),
        ("ANAGLYPH_RED_CYAN", "Anaglyph", "anaglyph stereo"),
        ("CHECKERBOARD", "Checkerboard", "checkerboard for 3D-TVs")
    ]
    stereo_mode = bpy.props.EnumProperty(name="StereoMode",
        description="stereo modes", items=modes, default='MONO')

    # Optional function for drawing the socket input value
    def draw(self, context, layout, node, text):
        # Show the enum dropdown only on an unconnected input socket.
        if self.is_output or self.is_linked:
            layout.label(text)
        else:
            layout.prop(self, "stereo_mode", text=text)

    # Socket color
    def draw_color(self, context, node):
        return (1.0, 0.4, 0.216, 0.5)
# bpy.app.handlers.scene_update_pre
# bpy.app.handlers.scene_update_post
#@persistent
#def scene_update_pre(dummy):
# print("scene_update_pre:")
@persistent
def scene_update_post(dummy):
    """Debug handler run after each scene update: log every object whose
    data or transform changed.  (Not currently appended to the handler
    list — see the commented lines in register().)"""
    #print("scene_update_post:")
    for obj in bpy.data.objects:
        if obj.is_updated_data:
            print("updateddata => {0}".format(obj.name))
        if obj.is_updated:
            print("updated => {0}".format(obj.name))
def register():
    """Register every socket and node class of this module with Blender."""
    print("field_container.register()")
    # Sockets first, then the nodes that use them.
    for cls in (StereoModeSocket, WindowSocket, CameraSocket,
                SceneGraphSocket, SceneGraph, Viewer, Window, Camera,
                Light, Mesh, Screen, Transform):
        bpy.utils.register_class(cls)
    # bpy.app.handlers.scene_update_pre.append(scene_update_pre)
    # bpy.app.handlers.scene_update_post.append(scene_update_post)
def unregister():
    """Unregister every socket and node class of this module."""
    print("field_container.unregister()")
    for cls in (StereoModeSocket, WindowSocket, CameraSocket,
                SceneGraphSocket, SceneGraph, Viewer, Window, Camera,
                Light, Mesh, Screen, Transform):
        bpy.utils.unregister_class(cls)
|
{"/field_container.py": ["/__init__.py"], "/properties.py": ["/interface.py"], "/exporter.py": ["/__init__.py"]}
|
28,605
|
AnimationInVR/avango-blender
|
refs/heads/master
|
/interface.py
|
import bpy
import mathutils
import math
import os
import cProfile
import bgl
# serialize data to json
# Stashes Blender's stock constraints panel class so unregister() can
# restore it after CustomConstraintsPanel replaces it in register().
_OBJECT_PT_constraints = None
class ScenePanel(bpy.types.Panel):
    """Properties-editor panel (Scene tab) exposing the add-on's
    per-scene render/display toggles and clip distances."""

    bl_label = "Avango-Blender"
    bl_idname = "SCENE_PT_b4a"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "scene"

    def draw(self, context):
        # One checkbox/field per row; the properties themselves are
        # attached to bpy.types.Scene in properties.add_scene_properties().
        layout = self.layout
        scene = context.scene
        if scene:
            row = layout.row()
            row.prop(scene, "enable_ssao", text="Enable SSAO")
            row = layout.row()
            row.prop(scene, "enable_god_rays", text="Enable God Rays")
            row = layout.row()
            row.prop(scene, "enable_bloom", text="Enable Bloom")
            row = layout.row()
            row.prop(scene, "enable_fog", text="Enable Fog")
            row = layout.row()
            row.prop(scene, "enable_vignette", text="Enable Vignette")
            row = layout.row()
            row.prop(scene, "enable_hdr", text="Enable HDR")
            row = layout.row()
            row.prop(scene, "enable_preview_display", text="Enable Preview Display")
            row = layout.row()
            row.prop(scene, "enable_fps_display", text="Enable FPS Display")
            row = layout.row()
            row.prop(scene, "enable_ray_display", text="Enable Ray Display")
            row = layout.row()
            row.prop(scene, "enable_bbox_display", text="Enable bbox Display")
            row = layout.row()
            # NOTE(review): enable_wire_frame / enable_god_rays are drawn
            # here but not defined in properties.py — confirm origin.
            row.prop(scene, "enable_wire_frame", text="Enable Wire Frame")
            row = layout.row()
            row.prop(scene, "enable_FXAA", text="Enable FXAA")
            row = layout.row()
            row.prop(scene, "enable_frustum_culling", text="Enable Frustum Culling")
            row = layout.row()
            row.prop(scene, "enable_backface_culling", text="Enable Backface Culling")
            row = layout.row()
            row.prop(scene, "near_clip", text="Near Clip")
            row = layout.row()
            row.prop(scene, "far_clip", text="Far Clip")
            split = layout.split()
            col = split.column()  # NOTE(review): split/column unused — confirm intent
class WorldPanel(bpy.types.Panel):
    """Properties-editor panel (World tab): one boxed group per effect
    settings PropertyGroup attached to bpy.types.World."""

    bl_label = "Avango-Blender"
    bl_idname = "WORLD_PT_b4a"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "world"

    def draw(self, context):
        layout = self.layout
        world = context.world
        if world:
            # SSAO
            ssao = world.ssao_settings
            row = layout.row()
            box = row.box()
            col = box.column()
            col.label("SSAO Settings:")
            row = col.row()
            row.prop(ssao, "radius", text="Radius")
            row = col.row()
            row.prop(ssao, "intensity", text="Intensity")
            row = col.row()
            row.prop(ssao, "falloff", text="Falloff")
            # Bloom
            bloom = world.bloom_settings
            row = layout.row()
            box = row.box()
            col = box.column()
            col.label("Bloom settings:")
            row = col.row()
            row.prop(bloom, "radius", text="Radius")
            row = col.row()
            row.prop(bloom, "threshold", text="Threshold")
            row = col.row()
            row.prop(bloom, "intensity", text="Intensity")
            # Fog
            fog = world.fog_settings
            row = layout.row()
            box = row.box()
            col = box.column()
            col.label("Fog settings:")
            row = col.row()
            row.prop(fog, "start", text="Start")
            row = col.row()
            row.prop(fog, "end", text="End")
            row = col.row()
            row.prop(fog, "texture", text="Texture")
            row = col.row()
            row.prop(fog, "color", text="Color")
            # Background
            background = world.background_settings
            row = layout.row()
            box = row.box()
            col = box.column()
            col.label("Background settings:")
            row = col.row()
            row.prop(background, "mode", text="Mode")
            row = col.row()
            row.prop(background, "texture", text="Texture")
            row = col.row()
            row.prop(background, "color", text="Color")
            # Vignette
            vignette = world.vignette_settings
            row = layout.row()
            box = row.box()
            col = box.column()
            col.label("Vignette settings:")
            row = col.row()
            row.prop(vignette, "color", text="Color")
            row = col.row()
            row.prop(vignette, "coverage", text="Coverage")
            row = col.row()
            row.prop(vignette, "softness", text="Softness")
            # HDR
            hdr = world.hdr_settings
            row = layout.row()
            box = row.box()
            col = box.column()
            col.label("HDR settings:")
            row = col.row()
            row.prop(hdr, "key", text="Key")
class DataPanel(bpy.types.Panel):
    """Properties-editor panel (Data tab): camera mono/stereo selector."""

    bl_label = "Avango-Blender"
    bl_idname = "DATA_PT_b4a"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "data"

    def draw(self, context):
        layout = self.layout
        cam = context.camera
        if cam:
            row = layout.row(align=True)
            # NOTE(review): "Sterio" looks like a typo for "Stereo"; the
            # `ms_style` property is not defined in this file — confirm
            # where it is attached to Camera.
            row.prop(cam, "ms_style", text="Mono/Sterio")
class CustomConstraintsPanel(bpy.types.OBJECT_PT_constraints):
    """Replacement for Blender's constraints panel: LOCKED_TRACK
    constraints (reserved by this add-on for reflection planes) are
    shown as a read-only label; everything else is delegated to the
    saved stock panel."""

    def draw_constraint(self, context, con):
        if con.type == "LOCKED_TRACK":
            layout = self.layout
            box = layout.box()
            box.label("LOCKED_TRACK constraint reserved for " + con.name)
        else:
            # Delegate to the original panel saved in register().
            global _OBJECT_PT_constraints
            _OBJECT_PT_constraints.draw_constraint(self, context, con)
def add_remove_refl_plane(obj):
    """Create or remove the fake LOCKED_TRACK constraint marking *obj*
    as a reflection plane, driven by obj.reflective."""
    # NOTE(review): get_locked_track_constraint is not defined in this
    # module — confirm where it is expected to come from.
    if obj.reflective:
        #add reflection plane
        bpy.ops.object.constraint_add(type="LOCKED_TRACK")
        lods = obj.lods
        index = len(lods)
        obj.refl_plane_index = index
        cons = get_locked_track_constraint(obj, index)
        cons.name = "REFLECTION PLANE"
        # disable fake LOCKED_TRACK constraint
        cons.mute = True
    else:
        #remove reflection plane
        index = obj.refl_plane_index
        if index >= 0:
            cons = get_locked_track_constraint(obj, index)
            obj.constraints.remove(cons)
def register():
    """Register the panels and swap Blender's constraints panel for ours.

    Order matters: the stock OBJECT_PT_constraints is saved and
    unregistered before CustomConstraintsPanel takes its place;
    unregister() reverses the swap.
    """
    global _OBJECT_PT_constraints
    bpy.utils.register_class(ScenePanel)
    bpy.utils.register_class(WorldPanel)
    bpy.utils.register_class(DataPanel)
    _OBJECT_PT_constraints = bpy.types.OBJECT_PT_constraints
    bpy.utils.unregister_class(bpy.types.OBJECT_PT_constraints)
    bpy.utils.register_class(CustomConstraintsPanel)
def unregister():
    """Unregister the panels and restore Blender's stock constraints panel."""
    global _OBJECT_PT_constraints
    bpy.utils.unregister_class(ScenePanel)
    bpy.utils.unregister_class(WorldPanel)
    bpy.utils.unregister_class(DataPanel)
    bpy.utils.unregister_class(CustomConstraintsPanel)
    # Re-install the panel class saved in register().
    bpy.utils.register_class(_OBJECT_PT_constraints)
|
{"/field_container.py": ["/__init__.py"], "/properties.py": ["/interface.py"], "/exporter.py": ["/__init__.py"]}
|
28,606
|
AnimationInVR/avango-blender
|
refs/heads/master
|
/properties.py
|
import bpy
import mathutils
import math
import os
import cProfile
from .interface import *
class SSAOSettings(bpy.types.PropertyGroup):
    """Screen-space ambient occlusion parameters; attached as
    World.ssao_settings in add_world_properties()."""

    radius = bpy.props.FloatProperty(
        name = "radius",
        description = "radius",
        default = 0.0,
        min = 0.0,
        max = 20.0,
        step = 0.01,
        precision = 2
    )
    intensity = bpy.props.FloatProperty(
        name = "intensity",
        description = "intensity",
        default = 0.0,
        min = 0.0,
        max = 20.0,
        step = 0.01,
        precision = 2
    )
    falloff = bpy.props.FloatProperty(
        name = "falloff",
        description = "falloff",
        default = 0.0,
        min = 0.0,
        max = 20.0,
        step = 0.01,
        precision = 2
    )
class BloomSettings(bpy.types.PropertyGroup):
    """Bloom post-processing parameters; attached as
    World.bloom_settings in add_world_properties()."""

    radius = bpy.props.FloatProperty(
        name = "radius",
        description = "radius",
        default = 0.0,
        min = 0.0,
        max = 20.0,
        step = 0.01,
        precision = 2
    )
    threshold = bpy.props.FloatProperty(
        name = "threshold",
        description = "threshold",
        default = 0.0,
        min = 0.0,
        max = 20.0,
        step = 0.01,
        precision = 2
    )
    intensity = bpy.props.FloatProperty(
        name = "intensity",
        description = "intensity",
        default = 0.0,
        min = 0.0,
        max = 20.0,
        step = 0.01,
        precision = 2
    )
class FogSettings(bpy.types.PropertyGroup):
    """Fog parameters (range, optional texture, color); attached as
    World.fog_settings in add_world_properties()."""

    start = bpy.props.FloatProperty(
        name = "start",
        description = "start",
        default = 1.0,
        min = 0.0,
        max = 5.0,
        step = 0.01,
        precision = 2
    )
    end = bpy.props.FloatProperty(
        name = "end",
        description = "end",
        default = 4.0,
        min = 0.0,
        max = 20.0,
        step = 0.01,
        precision = 2
    )
    texture = bpy.props.StringProperty(
        name = "texture",
        description = "texture",
        default = ""
    )
    color = bpy.props.FloatVectorProperty(
        name = "Avango-Blender: fog color",
        description = "Fog color",
        default = (0.5, 0.5, 0.5),
        min = 0.0,
        soft_min = 0.0,
        max = 1.0,
        soft_max = 1.0,
        precision = 3,
        subtype = 'COLOR',
        size = 3
    )
class BackgroundSettings(bpy.types.PropertyGroup):
    """Background parameters (mode, texture, color); attached as
    World.background_settings in add_world_properties()."""

    # NOTE(review): `mode` is a FloatProperty stepped by 1 over 0..5 —
    # looks like it should be an IntProperty or EnumProperty; confirm
    # before changing, since exporters may read the float value.
    mode = bpy.props.FloatProperty(
        name = "mode",
        description = "mode",
        default = 0,
        min = 0,
        max = 5,
        step = 1
    )
    texture = bpy.props.StringProperty(
        name = "texture",
        description = "texture",
        default = ""
    )
    color = bpy.props.FloatVectorProperty(
        name = "Avango-Blender: Background color",
        description = "Background color",
        default = (0.5, 0.5, 0.5),
        min = 0.0,
        soft_min = 0.0,
        max = 1.0,
        soft_max = 1.0,
        precision = 3,
        subtype = 'COLOR',
        size = 3
    )
class VignetteSettings(bpy.types.PropertyGroup):
    """Vignette parameters (color, coverage, softness); attached as
    World.vignette_settings in add_world_properties()."""

    color = bpy.props.FloatVectorProperty(
        name = "Avango-Blender: coverage color",
        description = "coverage color",
        default = (0.5, 0.5, 0.5),
        min = 0.0,
        soft_min = 0.0,
        max = 1.0,
        soft_max = 1.0,
        precision = 3,
        subtype = 'COLOR',
        size = 3
    )
    coverage = bpy.props.FloatProperty(
        name = "coverage",
        description = "coverage",
        default = 1.0,
        min = 0.0,
        max = 5.0,
        step = 0.01,
        precision = 2
    )
    softness = bpy.props.FloatProperty(
        name = "softness",
        description = "softness",
        default = 4.0,
        min = 0.0,
        max = 20.0,
        step = 0.01,
        precision = 2
    )
class HdrSettings(bpy.types.PropertyGroup):
    """HDR tone-mapping parameters; attached as World.hdr_settings in
    add_world_properties()."""

    key = bpy.props.FloatProperty(
        name = "key",
        description = "key",
        default = 1.0,
        min = 0.0,
        max = 5.0,
        step = 0.01,
        precision = 2
    )
def add_props():
    """Attach the add-on's export-related properties to Blender's core
    datablock types, then install the scene- and world-level settings."""
    do_not_export = bpy.props.BoolProperty(
        name = "Avango-Blender: do not export",
        description = "Check if you do NOT wish to export this component",
        default = False
    )
    # deprecated
    export_path = bpy.props.StringProperty(
        name = "Avango-Blender: component export path",
        description = "Exported file path relative to the blend file",
        default = ""
    )
    # Types that get the deprecated per-component export path.
    class_names = [
        'Action',
        'Armature',
        'Camera',
        'Curve',
        'Group',
        'Image',
        'Lamp',
        'Material',
        'Mesh',
        'Object',
        'ParticleSettings',
        'Texture',
        'Scene',
        'Speaker',
        'Sound',
        'World'
    ]
    # Subset of types that can be excluded from export.
    class_names_for_export = [
        'Action',
        'Image',
        'Material',
        'Object',
        'ParticleSettings',
        'Scene',
        'Texture',
        'World'
    ]
    for class_name in class_names_for_export:
        cl = getattr(bpy.types, class_name)
        cl.do_not_export = do_not_export
    for class_name in class_names:
        cl = getattr(bpy.types, class_name)
        # deprecated
        cl.export_path = export_path
    export_path_json = bpy.props.StringProperty(
        name = "Avango-Blender: export path json",
        description = "Exported json file path relative to the blend file",
        default = ""
    )
    export_path_html = bpy.props.StringProperty(
        name = "Avango-Blender: export path html",
        description = "Exported html file path relative to the blend file",
        default = ""
    )
    bpy.types.Scene.export_path_json = export_path_json
    bpy.types.Scene.export_path_html = export_path_html
    add_scene_properties()
    add_world_properties()
def add_scene_properties():
    """Attach all per-scene toggles and clip distances to bpy.types.Scene.

    Fix: the original defined and attached `enable_fog` twice with
    identical parameters; the duplicate has been removed (behavior is
    unchanged — the second assignment simply overwrote the first).
    NOTE(review): both `enable_fxaa` and `enable_FXAA` exist; the
    ScenePanel draws `enable_FXAA`.  Both are kept for compatibility.
    """
    scene_type = bpy.types.Scene
    enable_ssao = bpy.props.BoolProperty(
        name = "Avango-Blender: enable SSAO",
        description = "Enable screen space ambient occlusion",
        default = False
    )
    scene_type.enable_ssao = enable_ssao
    enable_preview_display = bpy.props.BoolProperty(
        name = "Avango-Blender: enable preview display",
        description = "enable preview display",
        default = False
    )
    scene_type.enable_preview_display = enable_preview_display
    enable_fps_display = bpy.props.BoolProperty(
        name = "Avango-Blender: enable fps display",
        description = "enable fps display",
        default = False
    )
    scene_type.enable_fps_display = enable_fps_display
    enable_ray_display = bpy.props.BoolProperty(
        name = "Avango-Blender: enable ray display",
        description = "enable ray display",
        default = False
    )
    scene_type.enable_ray_display = enable_ray_display
    enable_bbox_display = bpy.props.BoolProperty(
        name = "Avango-Blender: enable bbox display",
        description = "enable bbox display",
        default = False
    )
    scene_type.enable_bbox_display = enable_bbox_display
    enable_fxaa = bpy.props.BoolProperty(
        name = "Avango-Blender: enable FXAA",
        description = "Enable FXAA",
        default = False
    )
    scene_type.enable_fxaa = enable_fxaa
    enable_frustum_culling = bpy.props.BoolProperty(
        name = "Avango-Blender: enable_frustum_culling",
        description = "enable_frustum_culling",
        default = False
    )
    scene_type.enable_frustum_culling = enable_frustum_culling
    enable_backface_culling = bpy.props.BoolProperty(
        name = "Avango-Blender: enable_backface_culling",
        description = "enable_backface_culling",
        default = False
    )
    scene_type.enable_backface_culling = enable_backface_culling
    near_clip = bpy.props.FloatProperty(
        name = "Avango-Blender: near clip",
        description = "near clip",
        default = 0.1,
        min = 0.0000001,
        soft_min = 0.01,
        max = 1000.0,
        soft_max = 100.0,
        step = 0.1,
        precision = 4
    )
    scene_type.near_clip = near_clip
    far_clip = bpy.props.FloatProperty(
        name = "Avango-Blender: far clip",
        description = "far clip",
        default = 1000.0,
        min = 0.0,
        soft_min = 0.0,
        max = 1000000000.0,
        soft_max = 0.1,
        step = 0.1,
        precision = 4
    )
    scene_type.far_clip = far_clip
    enable_bloom = bpy.props.BoolProperty(
        name = "Avango-Blender: enable bloom",
        description = "Enable bloom",
        default = False
    )
    scene_type.enable_bloom = enable_bloom
    enable_fog = bpy.props.BoolProperty(
        name = "Avango-Blender: enable fog",
        description = "Enable fog",
        default = False
    )
    scene_type.enable_fog = enable_fog
    enable_vignette = bpy.props.BoolProperty(
        name = "Avango-Blender: enable vignette",
        description = "Enable vignette",
        default = False
    )
    scene_type.enable_vignette = enable_vignette
    enable_FXAA = bpy.props.BoolProperty(
        name = "Avango-Blender: enable fxaa",
        description = "Enable fxaa",
        default = False
    )
    scene_type.enable_FXAA = enable_FXAA
    enable_hdr = bpy.props.BoolProperty(
        name = "Avango-Blender: enable hdr",
        description = "Enable hdr",
        default = False
    )
    scene_type.enable_hdr = enable_hdr
def add_world_properties():
    """Attach fog color/density and the effect-settings PointerProperties
    to bpy.types.World (used by interface.WorldPanel)."""
    # for world panel
    fog_color = bpy.props.FloatVectorProperty(
        name = "Avango-Blender: fog color",
        description = "Fog color",
        default = (0.5, 0.5, 0.5),
        min = 0.0,
        soft_min = 0.0,
        max = 1.0,
        soft_max = 1.0,
        precision = 3,
        subtype = 'COLOR',
        size = 3
    )
    bpy.types.World.fog_color = fog_color
    fog_density = bpy.props.FloatProperty(
        name = "Avango-Blender: fog density",
        description = "Fog density",
        default = 0.0,
        min = 0.0,
        soft_min = 0.0,
        max = 1.0,
        soft_max = 0.1,
        step = 0.1,
        precision = 4
    )
    bpy.types.World.fog_density = fog_density
    # One PointerProperty per PropertyGroup defined above; the groups
    # must already be registered (see register()).
    bpy.types.World.ssao_settings = bpy.props.PointerProperty(
        name = "Avango-Blender: SSAO settings",
        type = SSAOSettings
    )
    bpy.types.World.bloom_settings = bpy.props.PointerProperty(
        name = "Avango-Blender: bloom settings",
        type = BloomSettings
    )
    bpy.types.World.fog_settings = bpy.props.PointerProperty(
        name = "Avango-Blender: fog settings",
        type = FogSettings
    )
    bpy.types.World.background_settings = bpy.props.PointerProperty(
        name = "Avango-Blender: background settings",
        type = BackgroundSettings
    )
    bpy.types.World.vignette_settings = bpy.props.PointerProperty(
        name = "Avango-Blender: vignette settings",
        type = VignetteSettings
    )
    bpy.types.World.hdr_settings = bpy.props.PointerProperty(
        name = "Avango-Blender: hdr settings",
        type = HdrSettings
    )
def register():
    """Register every settings PropertyGroup, then attach the add-on
    properties to Blender's types."""
    for cls in (SSAOSettings, BloomSettings, FogSettings,
                BackgroundSettings, VignetteSettings, HdrSettings):
        bpy.utils.register_class(cls)
    add_props()
def unregister():
    """Unregister every settings PropertyGroup of this module."""
    for cls in (SSAOSettings, BloomSettings, FogSettings,
                BackgroundSettings, VignetteSettings, HdrSettings):
        bpy.utils.unregister_class(cls)
|
{"/field_container.py": ["/__init__.py"], "/properties.py": ["/interface.py"], "/exporter.py": ["/__init__.py"]}
|
28,607
|
AnimationInVR/avango-blender
|
refs/heads/master
|
/exporter.py
|
import bpy
from bpy_extras.io_utils import ExportHelper
import os
import json
from . import field_container
from mathutils import Matrix
# TODO:
# write parent
def matrixToList(matrix):
    """Flatten a row-iterable matrix (e.g. a 4x4 mathutils Matrix) into
    one flat row-major list."""
    flat = []
    for row in matrix:
        flat.extend(row)
    return flat
def avangoNodeTrees():
    """Lazily yield every node group whose type is the Avango custom tree."""
    for group in bpy.data.node_groups:
        if group.bl_idname == 'AvangoCustomTreeType':
            yield group
def to_json(obj):
    """json.dump ``default=`` hook: serialize a field_container node to a dict.

    Dispatches on the concrete field_container type (SceneGraph, Window,
    Viewer, Camera, Light, Mesh, Screen, Transform) and returns a plain dict
    describing that node for the exported scene description.  Raises
    TypeError for unknown types, per the json module's ``default=`` contract.

    NOTE(review): the Mesh branch reads the module-level global ``filepath``
    set by save() before json.dump() runs — this hook only works during save().
    """
    if isinstance(obj, field_container.SceneGraph):
        name = obj.name  # NOTE(review): assigned but unused; the dict below uses obj.name directly
        root = 'null'
        i = bpy.data.objects.find(obj.root)
        if -1 != i :
            root = bpy.data.objects[obj.root].name
        return {
            'type' : 'SceneGraph',
            'name' : obj.name,
            'root' : root
        }
    if isinstance(obj, field_container.Window):
        # Stereo mode comes from the linked socket if connected, else the socket's own value.
        modeSocket = bpy.data.node_groups["NodeTree"].nodes["Window"].inputs['StereoMode']
        if modeSocket.is_linked and modeSocket.links[0].is_valid:
            mode = modeSocket.links[0].from_socket.stereo_mode
        else:
            mode = modeSocket.stereo_mode
        return {
            'type' : 'Window',
            'name' : obj.name,
            'title' : obj.title_field,
            'display' : obj.display_field,
            'left_resolution' : [obj.left_size[0], obj.left_size[1]],
            'left_position' : [obj.left_pos[0], obj.left_pos[1]],
            'mode' : mode
        }
    if isinstance(obj, field_container.Viewer):
        windowSocket = bpy.data.node_groups["NodeTree"].nodes[obj.name].inputs['Window']
        if windowSocket.is_linked and windowSocket.links[0].is_valid:
            window = windowSocket.links[0].from_node.name
        else:
            window = 'null'
        # NOTE(review): ``cameraSocket`` reads the 'SceneGraph' input and
        # ``sgSocket`` reads the 'Camera' input — the names look crossed.
        # Confirm against the node definition before "fixing"; the emitted
        # keys may rely on this exact wiring.
        cameraSocket = bpy.data.node_groups["NodeTree"].nodes[obj.name].inputs['SceneGraph']
        if cameraSocket.is_linked and cameraSocket.links[0].is_valid:
            camera = cameraSocket.links[0].from_node.name
        else:
            camera = 'null'
        sgSocket = bpy.data.node_groups["NodeTree"].nodes[obj.name].inputs['Camera']
        if sgSocket.is_linked and sgSocket.links[0].is_valid:
            scenegraph = sgSocket.links[0].from_node.name
        else:
            scenegraph = 'null'
        return {
            'name' : obj.name,
            'type' : 'Viewer',
            'window' : window,
            'scenegraph' : scenegraph,
            'camera' : camera
        }
    if isinstance(obj, field_container.Camera):
        parent = 'null'
        if obj.referenced_object in bpy.data.objects:
            if bpy.data.objects[obj.referenced_object].parent:
                parent = bpy.data.objects[obj.referenced_object].parent.name
            matrix = bpy.data.objects[obj.referenced_object].matrix_local
        return {
            'type' : 'Camera',
            'name' : obj.name,
            'scenegraph' : obj.scenegraph,
            'output_window_name' : obj.output_window_name,
            'left_screen_path' : obj.left_screen_path,
            'resolution' : [ obj.resolution[0], obj.resolution[1] ],
            'transform' : matrixToList(matrix),
            'parent' : parent
        }
    if isinstance(obj, field_container.Light):
        name = obj.name  # NOTE(review): assigned but unused
        i = bpy.data.objects.find(obj.referenced_object)
        lamp = None
        o = None
        if -1 != i :
            o = bpy.data.objects[obj.referenced_object]
            lamp = bpy.data.lamps[o.data.name]
        # Map Blender lamp types onto the exported light node types.
        ty = 'null'
        if lamp.type == 'POINT':
            ty = 'PointLight'
        if lamp.type == 'SUN':
            ty = 'SunLight'
        if lamp.type == 'SPOT':
            ty = 'SpotLight'
        if lamp.type == 'HEMI':
            ty = 'HemiLight'
        if lamp.type == 'AREA':
            ty = 'AreaLight'
        # print(" Location: ", o.location) # light radius
        # print(" Scale: ", o.scale) # light radius
        # print(" Rotation Quaternion: ", o.rotation_quaternion) # light radius
        parent = 'null'
        if obj.referenced_object in bpy.data.objects:
            if bpy.data.objects[obj.referenced_object].parent:
                parent = bpy.data.objects[obj.referenced_object].parent.name
            matrix = bpy.data.objects[obj.referenced_object].matrix_local
        if lamp is not None:
            return {
                'name' : obj.name,
                'type' : ty,
                'color' : [ lamp.color.r, lamp.color.g, lamp.color.b],
                'distance' : lamp.distance,
                'parent' : parent,
                'transform' : matrixToList(matrix),
                'energy' : lamp.energy
            }
        else:
            return {
                'name' : obj.name,
                'type' : ty
            }
    if isinstance(obj, field_container.Mesh):
        parent = 'null'
        blender_obj = None
        if obj.referenced_object in bpy.data.objects:
            blender_obj = bpy.data.objects[obj.referenced_object]
            if blender_obj.parent:
                parent = blender_obj.parent.name
            matrix = blender_obj.matrix_local
        filename = obj.name + '.obj'
        if (obj.is_animation_hack):
            # Animated meshes are only referenced as .md5mesh; not exported here.
            filename = obj.name + '.md5mesh'
        else:
            # Export the mesh as OBJ into a tmp/ dir next to the JSON file.
            # NOTE(review): ``filepath`` is the module global set by save().
            splittedPath = filepath.split('/')
            path = ''
            for x in range(1, len(splittedPath)-1):
                path += '/' + splittedPath[x]
            if not os.path.exists(path + '/tmp'):
                os.makedirs(path + '/tmp')
            path += bpy.path.abspath('/tmp/')
            bpy.ops.object.select_all(action='DESELECT')
            # scene.objects.active = blender_obj
            blender_obj.select = True
            # Temporarily reset the world matrix so the OBJ holds untransformed geometry;
            # the transform is exported separately in the JSON.
            world = blender_obj.matrix_world.copy()
            Matrix.identity(blender_obj.matrix_world)
            bpy.ops.export_scene.obj(
                filepath= path + filename,
                check_existing=False,
                use_selection=True,
                use_normals=True,
                use_triangles=True,
                use_uvs=True,
                use_materials=True,
                axis_forward='Y',
                axis_up='Z',
                path_mode='AUTO'
            )
            blender_obj.matrix_world = world
            blender_obj.select = False
        return {
            'type' : 'Mesh',
            'name' : obj.name,
            'file' : 'tmp/' + filename,
            'parent' : parent,
            'transform' : matrixToList(matrix)
        }
    if isinstance(obj, field_container.Screen):
        parent = 'null'
        matrix = []
        if obj.referenced_object in bpy.data.objects:
            if bpy.data.objects[obj.referenced_object].parent:
                parent = bpy.data.objects[obj.referenced_object].parent.name
            matrix = bpy.data.objects[obj.referenced_object].matrix_local
        return {
            'type' : 'Screen',
            'name' : obj.name,
            'parent' : parent,
            'transform' : matrixToList(matrix)
        }
    if isinstance(obj, field_container.Transform):
        parent = 'null'
        if obj.referenced_object in bpy.data.objects:
            if bpy.data.objects[obj.referenced_object].parent:
                parent = bpy.data.objects[obj.referenced_object].parent.name
            matrix = bpy.data.objects[obj.referenced_object].matrix_local
        return {
            'type' : 'Transform',
            'parent' : parent,
            'transform' : matrixToList(matrix),
            'name' : obj.name
        }
    raise TypeError(repr(obj) + ' is not JSON serializable')
def meshAsDict(mesh):
    """Serialize a field_container mesh-like node to a minimal TriMeshNode dict.

    Fix: the body referenced the undefined name ``self`` although the
    parameter is ``mesh`` — every call raised NameError.  Now uses ``mesh``.
    """
    i = bpy.data.objects.find(mesh.referenced_object)
    if -1 != i:
        obj = bpy.data.objects[mesh.referenced_object]  # looked up but not yet used (see TODOs below)
    # TODO: parent / children / transform / boundingbox
    # TODO: shadowmode  # OFF, LOW_QUALITY, HIGH_QUALITY
    # TODO: material
    return {
        'type': 'TriMeshNode',
        'name': mesh.name
        # 'transform' : matrix
    }
# Dispatch table mapping node-type names to their dict serializer.
# NOTE(review): all three entries point at meshAsDict — presumably placeholders
# for type-specific serializers; confirm before extending.
printFieldContainer = {
    'Mesh' : meshAsDict,
    'Light' : meshAsDict,
    'Transform' : meshAsDict
}
#def save(operator, context, filepath = ""):
def save(operator, context):
    """Collect all Avango nodes from the "NodeTree" node group and write the
    scene description to operator.filepath as JSON.

    Side effects: sets the module globals ``filepath`` and ``scene`` that
    to_json() reads while json.dump() serializes the document.
    Returns Blender's operator status set {'FINISHED'}.
    """
    which = "NodeTree"
    ns = bpy.data.node_groups[which].nodes
    # Group every node of the tree by its bl_label into name->node dicts,
    # plus scene-level render/debug flags.
    document = {
        #triMeshes = (x for x in ns if (x.bl_label == 'Mesh'))
        #screens = (x for x in ns if (x.bl_label == 'Screen'))
        #'transforms' : list(x for x in ns if (x.bl_label == 'Transform'))
        'viewer' : dict((x.name,x) for x in ns if (x.bl_label == 'Viewer')),
        'scenegraphs' : dict((x.name,x) for x in ns if (x.bl_label == 'SceneGraph')),
        'windows' : dict((x.name,x) for x in ns if (x.bl_label == 'Window')),
        'cameras' : dict((x.name,x) for x in ns if (x.bl_label == 'Camera')),
        'lights' : dict((x.name,x) for x in ns if (x.bl_label == 'Light')),
        'meshes' : dict((x.name,x) for x in ns if (x.bl_label == 'Mesh')),
        'screens' : dict((x.name,x) for x in ns if (x.bl_label == 'Screen')),
        'transforms' : dict((x.name,x) for x in ns if (x.bl_label == 'Transform')),
        'enable_preview_display' : str(context.scene.enable_preview_display).lower(),
        'enable_fps_display' : str(context.scene.enable_fps_display).lower(),
        'enable_ray_display' : str(context.scene.enable_ray_display).lower(),
        'enable_bbox_display' : str(context.scene.enable_bbox_display).lower(),
        'enable_FXAA' : str(context.scene.enable_FXAA).lower(),
        'enable_frustum_culling' : str(context.scene.enable_frustum_culling).lower(),
        'enable_backface_culling' : str(context.scene.enable_backface_culling).lower(),
        'near_clip' : context.scene.near_clip,
        'far_clip' : context.scene.far_clip
    }
    # Publish context for to_json() (used by the Mesh branch to place tmp/ files).
    global filepath
    filepath = operator.filepath
    global scene
    scene = context.scene
    with open(operator.filepath, 'w', encoding='utf-8') as f:
        json.dump(document, f, default=to_json, indent=4)
    return {'FINISHED'}
class ExportAvango(bpy.types.Operator, ExportHelper):
    '''Export selected object / scene for Avango (ASCII JSON format).'''
    bl_idname = "export.avango"
    bl_label = "Export Avango"
    filename_ext = ".json"

    def invoke(self, context, event):
        """Open the file-select dialog via ExportHelper."""
        #restore_settings_export(self.properties)
        return ExportHelper.invoke(self, context, event)

    @classmethod
    def poll(cls, context):
        # Only available while some object is active in the scene.
        return context.active_object != None

    def execute(self, context):
        """Validate the chosen path and run the export (delegates to save()).

        Fix: removed the dead local assignment ``filepath = self.filepath`` —
        it bound a function-local name that was never read; save() takes the
        path from operator.filepath itself.
        """
        print("Selected: " + context.active_object.name)
        if not self.properties.filepath:
            raise Exception("filename not set")
        return save(self, context)  # , **self.properties)

    def draw(self, context):
        """Minimal options panel shown in the file browser sidebar."""
        layout = self.layout
        row = layout.row()
        row.label(text="Geometry:")
def menu_func_export(self, context):
    """File > Export menu entry; defaults the target path to <blendname>.json."""
    default_path = bpy.data.filepath.replace(".blend", ".json")
    op = self.layout.operator(ExportAvango.bl_idname, text="Avango (.json)")
    op.filepath = default_path
def register():
    """Register the export operator and hook it into the File > Export menu."""
    bpy.utils.register_class(ExportAvango)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
    """Remove the exporter operator and its File > Export menu entry.

    Fix: previously this called bpy.utils.register_class again, so the
    operator was never actually unregistered (and re-registration raised).
    """
    bpy.utils.unregister_class(ExportAvango)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
|
{"/field_container.py": ["/__init__.py"], "/properties.py": ["/interface.py"], "/exporter.py": ["/__init__.py"]}
|
28,613
|
njokuifeanyigerald/drf-with-list-create-func
|
refs/heads/master
|
/app/views.py
|
from re import I
from rest_framework import generics
from .serializer import StudentSerializeer
from .models import Student
# from rest_framework.response import Response
class StudentApi(generics.ListCreateAPIView):
    """GET: list all students; POST: create one (DRF generic list/create view)."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializeer
|
{"/app/views.py": ["/app/models.py"]}
|
28,614
|
njokuifeanyigerald/drf-with-list-create-func
|
refs/heads/master
|
/app/models.py
|
from django.db import models
class Student(models.Model):
    """A student record keyed by a unique registration number."""
    # Unique registration number, stored as free text.
    student_reg_number = models.TextField(unique=True)
    student_name = models.TextField()
    # NOTE(review): plain TextField — no email format validation; EmailField may be intended.
    student_email = models.TextField()
    student_mobile = models.TextField(null=True)
    # NOTE(review): auto_now updates on every save; auto_now_add would fix creation time.
    created_at = models.DateTimeField(auto_now=True)
|
{"/app/views.py": ["/app/models.py"]}
|
28,618
|
dappstore0/dappstore
|
refs/heads/master
|
/app/migrations/0006_dapp_days_to_go.py
|
# Generated by Django 2.1.2 on 2018-10-28 10:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Dapp.days_to_go (default 10). Do not edit applied migrations."""
    dependencies = [
        ('app', '0005_feedback'),
    ]
    operations = [
        migrations.AddField(
            model_name='dapp',
            name='days_to_go',
            field=models.IntegerField(default=10),
        ),
    ]
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,619
|
dappstore0/dappstore
|
refs/heads/master
|
/app/models.py
|
from django.db import models
# Rating choices 0..5 as (value, label) pairs for Feedback.rating and FeedbackForm.
ratings = [(r, r) for r in range(0, 6)]
class Dapp(models.Model):
    """A decentralized app listed in the store, with funding/stage progress."""
    name = models.CharField(max_length=1000)
    address = models.CharField(max_length=1000, null=True, blank=True) # submission owner
    # Lifecycle stage; views.py expects prototype/mvp/alpha/beta/live.
    status = models.CharField(max_length=1000)
    category = models.CharField(max_length=1000)
    homepage = models.CharField(max_length=1000)
    icon = models.CharField(max_length=1000, null=True, blank=True)
    blockchain = models.CharField(max_length=1000)
    # Funding accumulated so far and the threshold that advances the stage.
    current_fund = models.IntegerField(default=0)
    fund_next_stage = models.IntegerField(default=10)
    days_to_go = models.IntegerField(default=10)
class State(models.Model):
    """Generic key/int-value store; the event watcher keeps its last-seen block here."""
    key = models.CharField(max_length=1000, primary_key=True)
    value = models.IntegerField()
class Feedback(models.Model):
    """User feedback text plus a 0-5 rating, attached to a Dapp."""
    dapp = models.ForeignKey(Dapp, on_delete=models.CASCADE)
    text = models.CharField(max_length=1000)
    # NOTE(review): choices are int pairs but the field is a CharField with an
    # int default — stored value types are inconsistent; verify before relying on them.
    rating = models.CharField(max_length=1, choices=ratings, default=0)
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,620
|
dappstore0/dappstore
|
refs/heads/master
|
/init.py
|
# One-off bootstrap script: configures Django outside manage.py and seeds the
# block-counter row that dappstore.event_watcher.loop() reads on startup.
import os
os.environ["DJANGO_SETTINGS_MODULE"] = "dappstore.settings"
import django
django.setup()
from app.models import State
# Start watching the chain from block 0.
d = State(key="block", value=0)
d.save()
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,621
|
dappstore0/dappstore
|
refs/heads/master
|
/app/admin.py
|
from django.contrib import admin
from .models import Dapp
# Expose Dapp in the Django admin with the default ModelAdmin.
admin.site.register(Dapp)
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,622
|
dappstore0/dappstore
|
refs/heads/master
|
/app/forms.py
|
from django import forms
from .models import ratings
class FeedbackForm(forms.Form):
    """Feedback submission form: target dapp name, text, and a 0-5 rating."""
    name = forms.CharField(max_length=1000) # dapp name
    text = forms.CharField(max_length=1000, widget=forms.Textarea(attrs={"class": "form-control"}))
    rating = forms.ChoiceField(choices=ratings, widget=forms.RadioSelect())
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,623
|
dappstore0/dappstore
|
refs/heads/master
|
/app/views.py
|
from django.shortcuts import render
from random import randint
from .forms import FeedbackForm
from .models import Dapp, Feedback
# Stage progression: each status (lowercased) maps to its successor; "live" is terminal.
st = {
    "prototype": "mvp",
    "mvp": "alpha",
    "alpha": "beta",
    "beta": "live",
    "live": None
}
def home(request):
    """Store landing page; on POST, credit funding to a dapp and maybe advance its stage.

    NOTE(review): int(request.POST.get("eth")) raises if "eth" is missing or
    non-numeric, and st["live"] is None so funding a live dapp sets status to
    None — both unhandled here; confirm intended behavior.
    """
    if request.method == "POST":
        add = int(request.POST.get("eth"))
        name = request.POST.get("name")
        print(name, add)
        # First dapp matching the exact name; IndexError if none exists.
        x = Dapp.objects.all().filter(name__exact=name)[0]
        x.current_fund += add
        #x.current_fund = 0
        if x.current_fund >= x.fund_next_stage:
            # Funding goal reached: advance stage and raise the next goal.
            x.status = st[x.status.lower()]
            x.fund_next_stage += randint(10, 30)
        x.save()
    # Bucket all dapps by stage for the template columns.
    dapps = Dapp.objects.all()
    prototypes = dapps.filter(status__iexact="prototype")
    mvps = dapps.filter(status__iexact="mvp")
    alphas = dapps.filter(status__iexact="alpha")
    betas = dapps.filter(status__iexact="beta")
    lives = dapps.filter(status__iexact="live")
    context = {
        "prototypes": prototypes,
        "mvps": mvps,
        "alphas": alphas,
        "betas": betas,
        "lives": lives,
        "title": "Welcome to The dAppMap!",
    }
    return render(request, "home.html", context)
def feedback(request, name):
    """Show and accept feedback for the dapp identified by ``name`` (URL param)."""
    # First dapp matching the name case-insensitively; IndexError if none exists.
    dapp = Dapp.objects.all().filter(name__iexact=name)[0]
    form = FeedbackForm()
    if request.method == "POST":
        form = FeedbackForm(request.POST)
        if form.is_valid():
            # NOTE(review): rebinds the URL-derived ``name`` with the form's
            # name field, which then flows into the template context — confirm
            # this shadowing is intended.
            name = form.cleaned_data["name"]
            text = form.cleaned_data["text"]
            rating = form.cleaned_data["rating"]
            f = Feedback(dapp=dapp, text=text, rating=rating)
            f.save()
    feedbacks = Feedback.objects.all().filter(dapp__exact=dapp)
    context = {
        "form": form,
        "name": name,
        "feedbacks": feedbacks,
    }
    return render(request, "feedback.html", context)
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,624
|
dappstore0/dappstore
|
refs/heads/master
|
/app/migrations/0002_auto_20181027_1340.py
|
# Generated by Django 2.1.2 on 2018-10-27 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: makes Dapp.address and Dapp.icon nullable/blankable. Do not edit."""
    dependencies = [
        ('app', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dapp',
            name='address',
            field=models.CharField(blank=True, max_length=1000, null=True),
        ),
        migrations.AlterField(
            model_name='dapp',
            name='icon',
            field=models.CharField(blank=True, max_length=1000, null=True),
        ),
    ]
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,625
|
dappstore0/dappstore
|
refs/heads/master
|
/app/urls.py
|
from django.urls import path
import dappstore.event_watcher
from . import views
from threading import Thread
app_name = "app"
urlpatterns = [
    path("", views.home, name="home"),
    path("feedback/<str:name>", views.feedback, name="feedback"),
]
# Start the blockchain event watcher as a daemon thread at import time.
# NOTE(review): this runs once per process — with Django's autoreloader it may
# start twice; confirm this side effect belongs in urls.py.
thread = Thread(target=dappstore.event_watcher.loop)
thread.daemon = True
thread.start()
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,626
|
dappstore0/dappstore
|
refs/heads/master
|
/dappstore/event_watcher.py
|
from web3 import Web3, HTTPProvider, WebsocketProvider
import json
import dappstore.settings as settings
#import os
import time
#os.environ["DJANGO_SETTINGS_MODULE"] = "dappstore.settings"
#import django
#django.setup()
from app.models import State, Dapp
class DappStore:
    """Handlers for DappStore contract events (dispatched via the ``match`` mapping)."""

    @staticmethod
    def sign(app, name, category, homepage, icon, blockchain):
        """Handle a Sign event: persist a newly registered dapp at the prototype stage.

        Fix: status was saved as the misspelled "protoype", which never matched
        the "prototype" key of ``st`` or the status filters in app/views.py,
        so newly signed dapps could never be listed or advanced.
        """
        print(app)
        Dapp(
            address=app,
            name=name,
            category=category,
            homepage=homepage,
            icon=icon,
            status="prototype",  # was "protoype"
            blockchain=blockchain
        ).save()

    @staticmethod
    def update(app, name, stage):
        """Handle a StageUpdate event: advance the dapp's stage if the sender owns it."""
        dp = Dapp.objects.filter(name__exact=name)[0]
        if dp and dp.address == app:
            dp.status = stage
            dp.save()
# Map each watched contract event to its handler; loop_items() iterates this.
match = {
    settings.CONTRACT_WSS.events.Sign: DappStore.sign,
    settings.CONTRACT_WSS.events.StageUpdate: DappStore.update,
}
def loop_items(block, min_block, max_block, first_loop):
    """Process all watched contract events from ``block`` up to ``max_block``.

    Iterates the module-level ``match`` mapping, dispatching each entry's
    args to its handler.  On the first event type scanned (``first_loop``)
    it also establishes ``max_block`` so every event type sees the same
    window.  Returns the updated (min_block, max_block, first_loop) tuple;
    min_block tracks the newest block seen and becomes next cycle's start.
    """
    for event, func in match.items():
        # Get all events starting from last block
        data = event.createFilter(fromBlock=block, toBlock=max_block)
        for entry in data.get_all_entries():
            # Call corresponding function
            func(**entry["args"])
            print("Event: {}".format(event))
            # If first time looping through dict
            if first_loop:
                # The first entry defines the max block, so all events go in sync
                if max_block is None:
                    max_block = entry["blockNumber"]
                elif entry["blockNumber"] > max_block:
                    max_block = entry["blockNumber"]
            # Define the latest block that gets found, this will be the fromBlock next loop
            if entry["blockNumber"] > min_block:
                min_block = entry["blockNumber"]
        first_loop = False
    return min_block, max_block, first_loop
def loop():
    """Poll the chain forever, dispatching new contract events every ~4 seconds.

    Persists the last processed block number in the State row with key
    "block" (seeded by init.py) so restarts resume where they left off.
    """
    # get block from database
    db_block = State.objects.get(key="block")
    block = db_block.value
    firstTime = True
    while True:
        first_loop = True
        # Max block is to make every loop go in sync
        max_block = None
        min_block = -2147483647  # sentinel: "no events seen this cycle"
        # Loop over every event
        min_block, max_block, first_loop = loop_items(block, min_block, max_block, first_loop)
        # commit transaction queries
        # Update database min_block
        if min_block != -2147483647:
            # Search from the block +=1
            block = db_block.value = min_block+1
            db_block.value = block  # NOTE(review): redundant re-assignment of the same value
            db_block.save()
            #db_block.value = min_block+1
        if firstTime:
            print("Loop working")
            firstTime = False
        time.sleep(4)
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,627
|
dappstore0/dappstore
|
refs/heads/master
|
/app/migrations/0004_auto_20181027_2104.py
|
# Generated by Django 2.1.2 on 2018-10-27 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Dapp.blockchain, current_fund and fund_next_stage. Do not edit."""
    dependencies = [
        ('app', '0003_state'),
    ]
    operations = [
        migrations.AddField(
            model_name='dapp',
            name='blockchain',
            field=models.CharField(default='ethereum', max_length=1000),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='dapp',
            name='current_fund',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='dapp',
            name='fund_next_stage',
            field=models.IntegerField(default=10),
        ),
    ]
|
{"/init.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/forms.py": ["/app/models.py"], "/app/views.py": ["/app/forms.py", "/app/models.py"], "/app/urls.py": ["/dappstore/event_watcher.py"], "/dappstore/event_watcher.py": ["/app/models.py"]}
|
28,628
|
ksteigerwald/aws-health-event-to-slack-message
|
refs/heads/master
|
/tests/unit/handlers/test_aws_health_event_publisher.py
|
'''Test health_event_publisher'''
# pylint: disable=protected-access
# pylint: disable=wrong-import-position
# pylint: disable=redefined-outer-name
import json
import os
import boto3
import jsonschema
from moto import mock_sns, mock_sts
import pytest
import handlers.aws_health_event_publisher as h # noqa
# Sample CloudWatch event fixture used by the tests below.
EVENT_FILE = os.path.join(
    os.path.dirname(__file__),
    '..',
    '..',
    'events',
    'aws_health_event_publisher.json'
)
# JSON schema the formatted Slack message must validate against.
SLACK_SCHEMA_FILE_PATH = os.path.join(
    os.path.dirname(__file__),
    '../../../slack-message-schema.json'
)
# Throwaway topic name for the moto-mocked SNS publish test.
SNS_TOPIC_NAME = "mock-aws-health-event-to-slack-message"
@pytest.fixture()
def event(event_file=EVENT_FILE):
    '''Trigger event loaded from the JSON fixture file.'''
    with open(event_file) as f:
        return json.load(f)
@pytest.fixture()
def slack_message_schema():
    '''Slack message JSON schema loaded from the repo root.'''
    with open(SLACK_SCHEMA_FILE_PATH) as f:
        return json.load(f)
@pytest.fixture()
def sns_client():
    '''Fresh boto3 SNS client (mocked by moto in the tests that use it).'''
    return boto3.client('sns')
@pytest.fixture()
def sns_message(event):
    '''Slack-formatted message built from the event fixture.'''
    return h._format_slack_message(event)
@pytest.fixture
def sns_topic_name():
    '''SNS topic name used for the mocked publish.'''
    return SNS_TOPIC_NAME
def test__format_slack_message(event, slack_message_schema):
    '''The formatted message must validate against the Slack message schema.'''
    slack_message = h._format_slack_message(event)
    jsonschema.validate(slack_message, slack_message_schema)
@mock_sts
@mock_sns
def test__publish_sns_message(sns_client, sns_message, sns_topic_name):
    '''Publishing to a moto-mocked SNS topic should return HTTP 200.'''
    sns_create_topic_resp = sns_client.create_topic(Name=sns_topic_name)
    sns_publish_resp = h._publish_sns_message(
        sns_create_topic_resp.get('TopicArn'),
        sns_message
    )
    assert sns_publish_resp.get('ResponseMetadata').get('HTTPStatusCode') == 200
|
{"/tests/unit/handlers/test_aws_health_event_publisher.py": ["/handlers/aws_health_event_publisher.py"]}
|
28,629
|
ksteigerwald/aws-health-event-to-slack-message
|
refs/heads/master
|
/handlers/aws_health_event_publisher.py
|
'''Publish AWS health events to SNS'''
import json
import logging
import os
import boto3
# Log level comes from the environment so deployments can tune verbosity.
log_level = os.environ.get('LOG_LEVEL', 'INFO')
logging.root.setLevel(logging.getLevelName(log_level))  # type: ignore
_logger = logging.getLogger(__name__)
# Target topic ARN; None when the variable is unset.
AWS_SNS_TOPIC_ARN = os.environ.get('AWS_SNS_TOPIC_ARN')
sns_client = boto3.client('sns')
def _create_event_data_field(k: str, v: str, short=True):
'''Return an event data field based on a key value pair'''
field = {
"title": k,
"value": v,
"short": short
}
return field
def _format_slack_message(event: dict) -> dict:
    '''Return a slack message for publishing.

    Builds one attachment summarizing the health event (with per-key fields)
    followed by one attachment per event description.
    '''
    detail = event.get('detail')
    event_data = {
        "title": event.get('detail-type'),
        "author": "Amazon Web Services",
        "text": detail.get('eventArn')
    }
    # (title, value, short) for each summary field of the first attachment.
    field_specs = [
        ('Type', event.get('detail-type'), True),
        ('Source', event.get('source'), True),
        ('Account', event.get('account'), True),
        ('Service', detail.get('service'), True),
        ('Event Type Category', detail.get('eventTypeCategory'), True),
        ('Event Type Code', detail.get('eventTypeCode'), False),
    ]
    fields = [_create_event_data_field(title, value, short)
              for title, value, short in field_specs]
    if event.get('resources'):
        fields.append(
            _create_event_data_field('Resources', ','.join(event.get('resources')))
        )
    event_data['fields'] = fields
    attachments = [event_data]
    for description in detail.get('eventDescription'):
        attachments.append({
            'title': 'Description',
            'text': description.get('latestDescription')
        })
    msg = {'text': "AWS Health Event Notification", 'attachments': attachments}
    _logger.debug('Slack message: {}'.format(json.dumps(msg)))
    return msg
def _publish_sns_message(sns_topic_arn: str, message: dict) -> dict:
    '''Publish *message* as JSON to the given SNS topic.

    Returns the boto3 publish response dict.

    Fix: the return annotation said ``None`` although the response is
    returned (and asserted on by the unit tests).
    '''
    _logger.debug('SNS message: {}'.format(json.dumps(message)))
    r = sns_client.publish(
        TopicArn=sns_topic_arn,
        Message=json.dumps(message)
    )
    return r
def handler(event, context):
    '''Function entry: format the incoming health event and publish it to SNS.'''
    _logger.debug('Event received: {}'.format(json.dumps(event)))
    _publish_sns_message(AWS_SNS_TOPIC_ARN, _format_slack_message(event))
    resp = {'Status': 'OK'}
    _logger.debug('Response: {}'.format(json.dumps(resp)))
    return resp
|
{"/tests/unit/handlers/test_aws_health_event_publisher.py": ["/handlers/aws_health_event_publisher.py"]}
|
28,646
|
vkandoba/words2numbers
|
refs/heads/master
|
/w2n.py
|
# Russian number-word vocabulary.  Each entry describes one token:
#   num   - numeric value (int) or literal digit string (for 'self' tokens)
#   type  - 'final'  : terminates a number (units / teens)
#           'degree' : can be followed by lower-level tokens (tens, hundreds, thousands)
#           'self'   : emitted verbatim as its digit string (zero, "two zeros", ...)
#   level - magnitude rank used by the greedy parser (1=units .. 4=thousands)
tokens_config = {
    # units (level 1)
    'ноль': {'num': 0, 'type': 'self', 'level': 1},
    'один': {'num': 1, 'type': 'final', 'level': 1},
    'одна': {'num': 1, 'type': 'final', 'level': 1},
    'два': {'num': 2, 'type': 'final', 'level': 1},
    'две': {'num': 2, 'type': 'final', 'level': 1},
    'три': {'num': 3, 'type': 'final', 'level': 1},
    'четыре': {'num': 4, 'type': 'final', 'level': 1},
    'пять': {'num': 5, 'type': 'final', 'level': 1},
    'шесть': {'num': 6, 'type': 'final', 'level': 1},
    'семь': {'num': 7, 'type': 'final', 'level': 1},
    'восемь': {'num': 8, 'type': 'final', 'level': 1},
    'девять': {'num': 9, 'type': 'final', 'level': 1},
    # teens (level 1, final)
    "десять": {'num': 10, 'type': 'final', 'level': 1},
    "одиннадцать": {'num': 11, 'type': 'final', 'level': 1},
    "двенадцать": {'num': 12, 'type': 'final', 'level': 1},
    "тринадцать": {'num': 13, 'type': 'final', 'level': 1},
    "четырнадцать": {'num': 14, 'type': 'final', 'level': 1},
    "пятнадцать": {'num': 15, 'type': 'final', 'level': 1},
    "шестнадцать": {'num': 16, 'type': 'final', 'level': 1},
    "семнадцать": {'num': 17, 'type': 'final', 'level': 1},
    "восемнадцать": {'num': 18, 'type': 'final', 'level': 1},
    "девятнадцать": {'num': 19, 'type': 'final', 'level': 1},
    # tens (level 2)
    "двадцать": {'num': 20, 'type': 'degree', 'level': 2},
    "тридцать": {'num': 30, 'type': 'degree', 'level': 2},
    "сорок": {'num': 40, 'type': 'degree', 'level': 2},
    "пятьдесят": {'num': 50, 'type': 'degree', 'level': 2},
    "шестьдесят": {'num': 60, 'type': 'degree', 'level': 2},
    "семьдесят": {'num': 70, 'type': 'degree', 'level': 2},
    "восемьдесят": {'num': 80, 'type': 'degree', 'level': 2},
    "девяносто": {'num': 90, 'type': 'degree', 'level': 2},
    # hundreds (level 3)
    "сто": {'num': 100, 'type': 'degree', 'level': 3},
    "двести": {'num': 200, 'type': 'degree', 'level': 3},
    "триста": {'num': 300, 'type': 'degree', 'level': 3},
    "четыреста": {'num': 400, 'type': 'degree', 'level': 3},
    "пятьсот": {'num': 500, 'type': 'degree', 'level': 3},
    "шестьсот": {'num': 600, 'type': 'degree', 'level': 3},
    "семьсот": {'num': 700, 'type': 'degree', 'level': 3},
    "восемьсот": {'num': 800, 'type': 'degree', 'level': 3},
    "девятьсот": {'num': 900, 'type': 'degree', 'level': 3},
    # thousands (level 4); two-word keys are fused by weave_double_complex_tokens()
    "тысяча": {'num': 1000, 'type': 'degree', 'level': 4},
    "одна тысяча": {'num': 1000, 'type': 'degree', 'level': 4},
    "две тысячи": {'num': 2000, 'type': 'degree', 'level': 4},
    "три тысячи": {'num': 3000, 'type': 'degree', 'level': 4},
    "четыре тысячи": {'num': 4000, 'type': 'degree', 'level': 4},
    "пять тысяч": {'num': 5000, 'type': 'degree', 'level': 4},
    "шесть тысяч": {'num': 6000, 'type': 'degree', 'level': 4},
    "семь тысяч": {'num': 7000, 'type': 'degree', 'level': 4},
    "восемь тысяч": {'num': 8000, 'type': 'degree', 'level': 4},
    "девять тысяч": {'num': 9000, 'type': 'degree', 'level': 4},
    # literal digit runs ('self'): emitted verbatim, preserving leading zeros
    'два ноля': {'num': '00', 'type': 'self', 'level': 1},
    'два нуля': {'num': '00', 'type': 'self', 'level': 1},
    'три ноля': {'num': '000', 'type': 'self', 'level': 1},
    'три нуля': {'num': '000', 'type': 'self', 'level': 1},
    'две единицы': {'num': '11', 'type': 'self', 'level': 1},
    'три единицы': {'num': '111', 'type': 'self', 'level': 1}
}
def make_num(text):
    """Convert a Russian number-word phrase to its digit string, ignoring unknown words."""
    remaining = filter_tokens(weave_double_complex_tokens(text.split()))
    parts = []
    while remaining:
        cfg = tokens_config[remaining[0]]
        if cfg['type'] == 'self':
            value, remaining = cfg['num'], remaining[1:]
        else:
            value, remaining = make_num_greedy(remaining)
        parts.append(str(value))
    return ''.join(parts)
def make_num_x(text):
    """Like make_num(), but each unknown word becomes a literal 'x' in the output."""
    remaining = weave_double_complex_tokens(text.split())
    parts = []
    while remaining:
        head = remaining[0]
        if head not in tokens_config:
            parts.append('x')
            remaining = remaining[1:]
            continue
        cfg = tokens_config[head]
        if cfg['type'] == 'self':
            value, remaining = cfg['num'], remaining[1:]
        else:
            value, remaining = make_num_greedy(remaining)
        parts.append(str(value))
    return ''.join(parts)
# TODO: add limitation by length or prefix condition
def make_num_versions(text):
    """Return every plausible digit-string reading of the phrase (greedy-depth variants)."""
    tokens = filter_tokens(weave_double_complex_tokens(text.split()))
    return [''.join(str(n) for n in combo)
            for combo in make_num_versions_internal(tokens)]
def filter_tokens(words):
    """Drop every word that is not a known number token."""
    return [word for word in words if word in tokens_config]
def weave_double_complex_tokens(words):
    """Fuse adjacent word pairs that form a known two-word token (e.g. "две тысячи").

    For each word, checks whether "<word> <next_word>" is a key of
    tokens_config; such pairs are merged into one token and the second word
    is dropped.  Other words pass through unchanged.
    """
    if not words:
        return words
    # Pair every word with its successor ("" for the last word) and flag pairs
    # that form a known two-word token.
    words_with_next = [((w, next_w), f"{w} {next_w}" in tokens_config) for (w, next_w) in zip(words, words[1:] + [""])]
    w_is_complex = [is_complex for (_, is_complex) in words_with_next]
    # Keep a word unless it was consumed as the second half of the previous
    # complex pair — the zip below offsets the flags by one position.
    words_filtered = [words_with_next[0]] + \
        [w_with_next for (w_with_next, previous_is_complex) in zip(words_with_next[1:], w_is_complex)
         if not previous_is_complex]
    # Emit the fused "w w_next" token for complex pairs, the lone word otherwise.
    return [w if not is_complex else f"{w} {w_next}" for ((w, w_next), is_complex) in words_filtered]
def make_num_versions_internal(words):
    """Recursively enumerate all distinct readings of the token list.

    A 'self' token contributes its literal digits to every reading of the
    remainder.  Otherwise each greedy depth d (from the token's level down
    to 1) yields one candidate number; candidates with a value already
    produced at this position are skipped.  Returns a list of number lists.
    """
    if not words:
        return [[]]
    num_token = tokens_config[words[0]]
    if num_token['type'] == 'self':
        num = num_token['num']
        return [[num] + v for v in make_num_versions_internal(words[1:])]
    num_versions = set()  # values already emitted at this position
    versions = []
    for d in range(num_token['level'], 0, -1):
        num, rest = make_num_one_greedy(words, num_token['level'], d)
        if num not in num_versions:
            num_versions.add(num)
            versions.extend([[num] + v for v in make_num_versions_internal(rest)])
    return versions
def make_num_greedy(words):
    """Greedily consume one number starting at words[0], using its full level as depth."""
    lvl = tokens_config[words[0]]['level']
    return make_num_one_greedy(words, lvl, lvl)
def make_num_one_greedy(words, level, depth):
    """Consume at most ``depth`` tokens of strictly descending level (<= ``level``)
    and sum their values.  Returns (number, remaining_words)."""
    if not words or depth == 0 or level == 0:
        return 0, words
    cfg = tokens_config[words[0]]
    if cfg['level'] > level or cfg['type'] == 'self':
        # Token too large for this position, or a literal digit run: stop here.
        return 0, words
    if cfg['type'] == 'final':
        return cfg['num'], words[1:]
    tail_value, rest = make_num_one_greedy(words[1:], cfg['level'] - 1, depth - 1)
    return cfg['num'] + tail_value, rest
|
{"/test_w2n.py": ["/w2n.py"]}
|
28,647
|
vkandoba/words2numbers
|
refs/heads/master
|
/test_w2n.py
|
from unittest import TestCase
from w2n import make_num, make_num_x, make_num_greedy, make_num_one_greedy, make_num_versions
class TestMakeNumbers(TestCase):
    """Tests for make_num / make_num_x and the greedy single-number parsers."""
    def test_make_with_empty(self):
        self.assertEqual('', make_num(''))
    def test_make_with_spaces(self):
        self.assertEqual('12', make_num(' один два '))
    def test_make_digits(self):
        self.assertEqual('9876543210', make_num('девять восемь семь шесть пять четыре три два один ноль'))
    def test_make_num(self):
        self.assertEqual('5076078845', make_num('пятьсот семь шестьсот семь восемь восемь четыре пять'))
    def test_make_num_with_zero(self):
        self.assertEqual('5000045', make_num('пятьсот ноль ноль четыре пять'))
    def test_make_num_with_double_zero(self):
        self.assertEqual('5000000111', make_num('пятьсот два ноля два нуля три единицы'))
    def test_make_num_with_complex_token(self):
        # Two-word thousand tokens must be fused before parsing.
        self.assertEqual('2070', make_num('две тысячи семьдесят'))
        self.assertEqual('20003000', make_num('две тысячи три тысячи'))
    def test_make_num_with_two_level_final(self):
        self.assertEqual('5115', make_num('пятьсот одиннадцать пять'))
    def test_make_num_with_no_numerical_words(self):
        # Non-number words are silently dropped by make_num.
        self.assertEqual('222641', make_num('номер двести двадцать два добавить шестьсот сорок и один'))
    def test_make_one_greedy(self):
        self.assertEqual((5, []), make_num_one_greedy(['пять'], 1, 1))
        self.assertEqual((45, []), make_num_one_greedy(['сорок', 'пять'], 2, 2))
        self.assertEqual((205, []), make_num_one_greedy(['двести', 'пять'], 3, 3))
        self.assertEqual((245, []), make_num_one_greedy(['двести', 'сорок', 'пять'], 3, 3))
    def test_make_one_partial(self):
        # Limiting depth stops consumption early and returns the leftover words.
        self.assertEqual((200, ['сорок', 'пять']), make_num_one_greedy(['двести', 'сорок', 'пять'], 3, 1))
        self.assertEqual((240, ['пять']), make_num_one_greedy(['двести', 'сорок', 'пять'], 3, 2))
    def test_make_one_greedy_with_another_num_at_end(self):
        self.assertEqual((1, ['два']), make_num_greedy(['один', 'два']))
        self.assertEqual((200, ['триста']), make_num_greedy(['двести', 'триста']))
        self.assertEqual((200, ['ноль']), make_num_greedy(['двести', 'ноль']))
    def test_make_num_x(self):
        # Unknown words are rendered as a literal 'x'.
        self.assertEqual('x10603478702',
                         make_num_x('всего десять шестьсот три четыре семьдесят восемь семьсот два'))
        self.assertEqual('234600x47851',
                         make_num_x('двадцать три сорок шесть два ноля на четыре семьдесят восемь пятьдесят один'))
class TestMakeNumVersions(TestCase):
    """Tests for make_num_versions, which enumerates every plausible digit-string reading."""
    def test_make_versions_digit(self):
        # pure digit words have exactly one reading
        self.assertEqual(['9876543210'], make_num_versions('девять восемь семь шесть пять четыре три два один ноль'))
    def test_make_versions_for_one(self):
        # compound numerals are ambiguous: e.g. "forty five" = 45 or 40,5
        self.assertEqual(['45', '405'], make_num_versions('сорок пять'))
        self.assertEqual(['245', '2405', '20045', '200405'], make_num_versions('двести сорок пять'))
        self.assertEqual(['205', '2005'], make_num_versions('двести пять'))
    def test_make_versions(self):
        # ambiguity multiplies across consecutive compound numbers
        self.assertEqual(['24552', '245502', '240552', '2405502', '2004552', '20045502', '20040552', '200405502'],
                         make_num_versions('двести сорок пять пятьдесят два'))
    def test_test_make_versions_with_zero(self):
        # explicit zero disambiguates to a single reading
        self.assertEqual(['5000115'], make_num_versions('пятьсот ноль одиннадцать пять'))
    def test_make_num_versions_with_complex_token(self):
        self.assertEqual(['2371', '23701', '230071', '2300701', '2000371', '20003701', '200030071', '2000300701'],
                         make_num_versions('две тысячи триста семьдесят один'))
    def test_test_make_versions_no_numerical_words(self):
        # filler words are ignored, ambiguity of the numeral remains
        self.assertEqual(['240', '20040'], make_num_versions('номер двести и сорок закончил'))
    def test_test_make_versions_with_two_level_final(self):
        self.assertEqual(['5115', '500115'], make_num_versions('пятьсот одиннадцать пять'))
|
{"/test_w2n.py": ["/w2n.py"]}
|
28,665
|
Garima16/experiments-with-GAN
|
refs/heads/main
|
/utils.py
|
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torchvision.utils as vutils
def load_data(dataroot, img_size, bs, workers=1, dset_name='mnist'):
    """
    Create a DataLoader over the requested dataset with images resized,
    converted to tensors and normalised into [-1, 1].
    :param dataroot: root directory the dataset is stored in / downloaded to
    :param img_size: target side length passed to transforms.Resize
    :param bs: batch size
    :param workers: number of DataLoader worker processes
    :param dset_name: 'mnist' (1 channel) or 'celeb' (3 channels)
    :return: torch.utils.data.DataLoader yielding shuffled batches
    :raises ValueError: if dset_name names an unsupported dataset
    """
    '''
    transforms.Normalize():
    specify mean and stddev per channel: img_pixel = (img_pixel - mean) / stddev,
    so mean=std=0.5 maps pixels into [-1, 1] (matches a Tanh generator output).
    '''
    if dset_name == 'mnist':
        data = datasets.MNIST(root=dataroot,
                              download=True,
                              transform=transforms.Compose([
                                  transforms.Resize(img_size),
                                  transforms.ToTensor(),
                                  transforms.Normalize((0.5,), (0.5,)),
                              ]))
    elif dset_name == 'celeb':
        data = datasets.CelebA(root=dataroot,
                               download=True,
                               transform=transforms.Compose([
                                   transforms.Resize(img_size),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))
    else:
        # Fail fast: previously an unknown name fell through and raised a
        # confusing NameError on the unbound `data` below.
        raise ValueError("unsupported dset_name: {!r}".format(dset_name))
    dataloader = torch.utils.data.DataLoader(data, batch_size=bs, shuffle=True, num_workers=workers)
    return dataloader
def print_sample_data(dataloader, bs, device):
    """Display a padded grid of up to `bs` images taken from the first batch."""
    images, *_ = next(iter(dataloader))
    grid = vutils.make_grid(images.to(device)[:bs], padding=2, normalize=True)
    plt.figure(figsize=(8, 8))
    plt.axis("off")
    plt.title("{} Training Images".format(bs))
    # make_grid returns (C, H, W); imshow wants (H, W, C)
    plt.imshow(np.transpose(grid.cpu(), (1, 2, 0)))
def initialise_weights(model):
    """
    DCGAN-style weight init, intended to be used via model.apply():
    Conv layers get weights ~ N(0.0, 0.02); BatchNorm layers get
    weights ~ N(1.0, 0.02) and zero bias. Other layer types are untouched.
    :param model: a single submodule (apply() calls this once per module)
    :return: None
    """
    layer_name = type(model).__name__
    if 'Conv' in layer_name:
        nn.init.normal_(tensor=model.weight, mean=0.0, std=0.02)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(tensor=model.weight, mean=1.0, std=0.02)
        nn.init.zeros_(tensor=model.bias)
    print("Initialised weights.")
def save_wts(generator, discriminator, epoch, wts_file):
    """
    Save Discriminator and Generator weights after an epoch in given filepath.
    :param generator: model whose state_dict is written as Gwts_epoch<epoch>
    :param discriminator: model whose state_dict is written as Dwts_epoch<epoch>
    :param epoch: epoch number, embedded in each checkpoint file name
    :param wts_file: directory path the checkpoint files are written into
    :return: None
    """
    # File names encode network (G/D) and epoch, e.g. <wts_file>/Gwts_epoch3.
    torch.save(generator.state_dict(), '%s/Gwts_epoch%d' % (wts_file, epoch))
    torch.save(discriminator.state_dict(), '%s/Dwts_epoch%d' % (wts_file, epoch))
    print("Saved weights.")
def load_wts(generator, discriminator, gen_wts_file, dis_wts_file):
    """
    Load Discriminator and Generator models with previously saved weights.
    The models are mutated in place and also returned for convenience.
    :param generator: model to receive the generator weights
    :param discriminator: model to receive the discriminator weights
    :param gen_wts_file: file containing G wts (as written by save_wts)
    :param dis_wts_file: file containing D wts (as written by save_wts)
    :return: (generator, discriminator) with weights loaded
    """
    generator.load_state_dict(torch.load(gen_wts_file))
    discriminator.load_state_dict(torch.load(dis_wts_file))
    print("Loaded weights.")
    return generator, discriminator
def load_weights(models, filepaths):
    """
    Load each model's weights from its corresponding checkpoint file.
    :param models: sequence of nn.Module instances, mutated in place
    :param filepaths: checkpoint paths aligned index-by-index with models
    :return: None
    """
    # zip pairs each model with its file directly, replacing the
    # index-based range(len(...)) loop.
    for model, path in zip(models, filepaths):
        model.load_state_dict(torch.load(path))
    print("Loaded weights for models.")
def initialise_wts(generator, discriminator):
    """
    Apply the DCGAN weight-init scheme (see initialise_weights) to both
    networks, recursively over all their submodules.
    :param generator: G network, initialised in place
    :param discriminator: D network, initialised in place
    :return: the same (generator, discriminator) pair
    """
    for network in (generator, discriminator):
        network.apply(initialise_weights)
    return generator, discriminator
def generate_img_from_pretrained_generator(generator, bs, nz, device, imgs_dir=None):
    """
    Generate images from a trained Generator model, plot them, and
    optionally save them to disk.
    :param generator: pre-trained model
    :param bs: batch size
    :param nz: size of latent variable
    :param device: cuda or cpu
    :param imgs_dir: directory to save images in, or None (default) to skip saving
    :return: None
    """
    fixed_noise = torch.randn(bs, nz, 1, 1, device=device)
    fake = generator(fixed_noise).detach().cpu()
    grid = vutils.make_grid(fake, padding=2, normalize=True)
    # Bug fix: the old default was the *string* 'None', which is truthy, so
    # the save branch ran (into a './None'-style path) even when no directory
    # was supplied. A real None default makes saving genuinely optional.
    if imgs_dir:
        vutils.save_image(
            fake,
            '%s/%d_fake_samples.png' % (imgs_dir, bs),
            normalize=True
        )
    plt.figure(figsize=(8, 8))
    plt.axis("off")
    # Render the generated grid (it was previously built but never shown).
    plt.imshow(np.transpose(grid, (1, 2, 0)))
    plt.show()
def plot_losses(g_losses, d_losses):
    """
    Plot G and D training losses on a shared axis.
    :param g_losses: List of G's loss values, one per logged iteration
    :param d_losses: List of D's loss values, one per logged iteration
    :return: None (opens a matplotlib window via plt.show())
    """
    plt.figure(figsize=(10, 5))
    plt.title("Generator and Discriminator Loss During Training")
    # two curves on the same axes, distinguished by legend labels
    plt.plot(g_losses, label="G")
    plt.plot(d_losses, label="D")
    plt.xlabel("iterations")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
def total_parameters(model):
    """
    Find total parameters (trainable or non-trainable) of a model.
    :param model: an nn.Module
    :return: total number of parameter elements across all parameter tensors
    """
    # numel() returns the element count of each parameter tensor; summing a
    # generator expression replaces the manual accumulator loop.
    return sum(param.numel() for param in model.parameters())
|
{"/main.py": ["/infogan.py", "/utils.py"]}
|
28,666
|
Garima16/experiments-with-GAN
|
refs/heads/main
|
/animate_images.py
|
from pathlib import Path
import imageio

# Collect the generated sample frames. Sort them so the animation order is
# stable and reproducible — Path.glob order is filesystem-dependent.
img_dir = Path('../dcgan-mnist-generated-images')
images = sorted(img_dir.glob('*.png'))
# Read every frame, then write them out as a single animated GIF.
image_list = [imageio.imread(file_name) for file_name in images]
imageio.mimwrite('dcgan_animation.gif', image_list)
|
{"/main.py": ["/infogan.py", "/utils.py"]}
|
28,667
|
Garima16/experiments-with-GAN
|
refs/heads/main
|
/infogan.py
|
import numpy as np
import os
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.utils import save_image
class Generator(nn.Module):
    """InfoGAN generator: latent vector -> (1, 28, 28) image in [-1, 1]."""
    def __init__(self, latent_var_size=74):
        super(Generator, self).__init__()
        # Fully-connected stem: latent -> 1024 -> 128*7*7 feature map.
        self.fc = nn.Sequential(
            nn.Linear(in_features=latent_var_size, out_features=1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
            nn.Linear(in_features=1024, out_features=128 * 7 * 7),
            nn.BatchNorm1d(128 * 7 * 7),
            nn.ReLU(),
        )
        # Two transposed convolutions upsample 7x7 -> 14x14 -> 28x28.
        self.conv = nn.Sequential(
            nn.ConvTranspose2d(in_channels=128, out_channels=64,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),  # -> (64, 14, 14)
            nn.ConvTranspose2d(in_channels=64, out_channels=1,
                               kernel_size=4, stride=2, padding=1),
            nn.Tanh()  # -> (1, 28, 28), values squashed into [-1, 1]
        )
    def forward(self, x):
        """Map a batch of latent vectors to a batch of images."""
        features = self.fc(x.float())
        return self.conv(features.view(-1, 128, 7, 7))
# Shared network part between Recognition Network and Discriminator
class SharedNetwork(nn.Module):
    """Convolutional trunk mapping a (1, 28, 28) image to a 1024-d feature vector."""
    def __init__(self):
        super(SharedNetwork, self).__init__()
        # (1, 28, 28) -> (64, 14, 14)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=64,
                      kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
        # (64, 14, 14) -> (128, 7, 7)
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128,
                      kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
        )
        # flattened (128 * 7 * 7) -> 1024
        self.fc = nn.Sequential(
            nn.Linear(in_features=128 * 7 * 7, out_features=1024),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
    def forward(self, x):
        """Return the shared 1024-d feature vector for a batch of images."""
        features = self.conv2(self.conv1(x))
        return self.fc(features.view(-1, 128 * 7 * 7))
class RecognitionNetwork(nn.Module):
    """
    Q network of InfoGAN: recovers the latent codes from the shared 1024-d
    feature. Outputs a categorical distribution over n_cats classes plus the
    mean and (positive) variance of cont_codes_size continuous codes.
    """
    def __init__(self, n_cats=10, cont_codes_size=2):  # values for MNIST dataset
        super(RecognitionNetwork, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(in_features=1024,
                      out_features=128),
            nn.BatchNorm1d(128),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
        # categorical codes - softmax will be applied to get final output
        self.cat_codes = nn.Linear(in_features=128, out_features=n_cats)
        # continuous codes - network outputs mean and variance per code
        self.cont_codes_mu = nn.Linear(in_features=128, out_features=cont_codes_size)
        self.cont_codes_var = nn.Linear(in_features=128, out_features=cont_codes_size)
    def forward(self, x):
        """
        :param x: (batch, 1024) shared features
        :return: (cat_probs, cont_mu, cont_var)
        """
        # Compute the shared 128-d projection once. The original called
        # self.fc(x) three times, tripling the work (and, in train mode,
        # updating the BatchNorm running statistics three times per step).
        hidden = self.fc(x)
        cat_codes = F.softmax(self.cat_codes(hidden), dim=1)
        cont_codes_mu = self.cont_codes_mu(hidden).squeeze()
        # taking exponent, so that variance is positive
        cont_codes_var = self.cont_codes_var(hidden).squeeze().exp()
        return cat_codes, cont_codes_mu, cont_codes_var
class Discriminator(nn.Module):
    """Real/fake head on top of SharedNetwork: 1024-d feature -> probability."""
    def __init__(self):
        super(Discriminator, self).__init__()
        self.last = nn.Linear(in_features=1024, out_features=1)
    def forward(self, x):
        """Return the probability that each feature vector comes from real data."""
        logits = self.last(x)
        return torch.sigmoid(logits)
class LogGaussian(object):
    """Callable computing the negative mean log-likelihood of x under N(mu, var)."""
    # instances are used like functions: loss = LogGaussian()(x, mu, var)
    def __call__(self, x, mu, var):
        # log N(x; mu, var) = -0.5 * log(2*pi*var) - (x - mu)^2 / (2*var);
        # the 1e-6 terms guard against log(0) and division by zero
        log_term = (var.mul(2 * np.pi) + 1e-6).log().mul(-0.5)
        quad_term = (x - mu).pow(2).div(var.mul(2) + 1e-6)
        log_likelihood = log_term - quad_term
        # sum over code dims, average over the batch, then negate so that
        # minimising this loss maximises the likelihood
        return log_likelihood.sum(1).mean().mul(-1)
class InfoGAN(object):
    """
    InfoGAN training harness for MNIST-sized images. Bundles the
    hyper-parameters, the three loss criteria and the training loop; the
    four networks themselves are passed in to train().
    """
    def __init__(self, noise_dim, disc_codes_dim, cont_code1_dim, cont_code2_dim, bs, image_size, epochs, dis_lr,
                 gen_lr):
        """
        :param noise_dim: size of the incompressible noise part of z
        :param disc_codes_dim: number of categories for the discrete latent code
        :param cont_code1_dim: size of the first continuous latent code
        :param cont_code2_dim: size of the second continuous latent code
        :param bs: batch size
        :param image_size: image side length (28 for MNIST)
        :param epochs: number of training epochs
        :param dis_lr: discriminator learning rate
        :param gen_lr: generator (+ recognition network) learning rate
        """
        # generator_net = Generator().to(self.device)
        # self.discriminator_net = Discriminator().to(self.device)
        # recognition_net = RecognitionNetwork().to(self.device)
        # shared_nw = SharedNetwork().to(self.device)
        self.bs = bs
        self.image_size = image_size
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.epochs = epochs
        self.dis_lr = dis_lr  # D's learning rate
        self.gen_lr = gen_lr  # G's learning rate
        self.noise_dim = noise_dim
        self.disc_codes_dim = disc_codes_dim
        self.cont_code1_dim = cont_code1_dim
        self.cont_code2_dim = cont_code2_dim
        # total generator input size: noise + discrete code + both continuous codes
        self.z_dim = self.noise_dim + self.disc_codes_dim + self.cont_code1_dim + self.cont_code2_dim
        self.criterionD = nn.BCELoss()
        self.criterionRN_disc = nn.CrossEntropyLoss()
        self.criterionRN_cont = LogGaussian()
    def generate_noise_input(self, noise, disc_code, cont_code):
        """
        Fill the provided tensors in place with a fresh random generator input.
        :param noise: (bs, noise_dim) tensor, refilled with U(-1, 1)
        :param disc_code: (bs, disc_codes_dim) tensor, refilled with one-hot rows
        :param cont_code: (bs, cont dims) tensor, refilled with U(-1, 1)
        :return: (z, idx, cont_code) where z is the concatenated generator input
                 and idx holds the sampled class index per sample
        """
        # generate a random integer from the range [0, 10) of size batch size
        # this integer will be fed as input to G as one-hot code.
        idx = np.random.randint(low=10, size=self.bs)
        c = np.zeros((self.bs, self.disc_codes_dim))
        c[range(self.bs), idx] = 1.0  # create one hot encoding
        disc_code.data.copy_(torch.Tensor(c))
        cont_code.data.uniform_(-1, 1)
        noise.data.uniform_(-1, 1)
        z = torch.cat([noise, disc_code, cont_code], 1).view(-1, self.z_dim)
        return z, idx, cont_code
    def train(self, dataloader, discriminator_net, generator_net, recognition_net, shared_net, img_save_filepath,
              wts_file):
        """
        Run the InfoGAN training loop. D and the shared trunk are updated on
        real+fake batches first; then G and the recognition network Q are
        updated with the adversarial loss plus the discrete and continuous
        code losses. Every 100 batches a fixed 10x10 sample grid is saved for
        each continuous code; weights are checkpointed after every epoch.
        :param dataloader: training data loader (images, labels)
        :param discriminator_net: D head (feature -> real/fake probability)
        :param generator_net: G (z -> image)
        :param recognition_net: Q (feature -> latent code posteriors)
        :param shared_net: trunk shared by D and Q
        :param img_save_filepath: directory for the sample image grids
        :param wts_file: directory for the per-epoch weight checkpoints
        :return: None
        """
        # real_x = torch.FloatTensor(self.bs, 1, self.image_size, self.image_size).to(self.device)
        torch.autograd.set_detect_anomaly(True)
        # optimizers: G is trained jointly with Q, D jointly with the shared trunk
        optimG = optim.Adam([{'params': generator_net.parameters()}, {'params': recognition_net.parameters()}],
                            lr=self.gen_lr, betas=(0.5, 0.999))
        optimD = optim.Adam([{'params': discriminator_net.parameters()}, {'params': shared_net.parameters()}],
                            lr=self.dis_lr, betas=(0.5, 0.999))
        # reusable buffers, refilled in place every iteration
        noise = torch.FloatTensor(self.bs, self.noise_dim).to(self.device)
        disc_code = torch.FloatTensor(self.bs, self.disc_codes_dim).to(self.device)
        cont_code = torch.FloatTensor(self.bs, self.cont_code1_dim + self.cont_code2_dim).to(self.device)
        real_label = 1
        fake_label = 0
        # fixed random variables, to generate same 100 images after every specific number of iterations
        # c sweeps each continuous code from -1 to 1 in 10 steps, repeated per digit
        c = np.linspace(start=-1, stop=1, num=10).reshape(1, -1)
        c = np.repeat(c, 10, 0).reshape(-1, 1)
        c1 = torch.from_numpy(np.hstack([c, np.zeros_like(c)]))
        c2 = torch.from_numpy(np.hstack([np.zeros_like(c), c]))
        # 10 one-hot codes for 10 classes(10 digits). Thus, attempting to generate 10 images of each of the 10 digits
        idx = np.arange(10).repeat(10)
        one_hot = torch.from_numpy(np.zeros((100, 10)))
        one_hot[range(100), idx] = 1
        fix_noise = torch.Tensor(100, self.noise_dim).uniform_(-1, 1)
        img_count = 1  # iteratively increase count to name image files differently
        for epoch in range(self.epochs):
            for i, data in enumerate(dataloader):
                # ---- updating D (and the shared trunk) ----
                optimD.zero_grad()
                # real image input to D
                real_img = data[0].to(self.device)
                batch_size = real_img.size(0)
                label = torch.full((batch_size,), real_label, dtype=torch.float, device=self.device)
                # resize noise and codes according to batch size of current batch
                # (the last batch of an epoch may be smaller than self.bs)
                noise.data.resize_(batch_size, self.noise_dim)
                disc_code.data.resize_(batch_size, self.disc_codes_dim)
                cont_code.data.resize_(batch_size, self.cont_code1_dim + self.cont_code2_dim)
                shared_obj = shared_net(real_img.detach())
                output = discriminator_net(shared_obj).view(-1)
                d_error_real = self.criterionD(output, label)
                d_error_real.backward(retain_graph=True)
                d_x = output.mean().item()
                # fake image input to D
                z, idx, cont_code = self.generate_noise_input(noise, disc_code, cont_code)
                # idx is the digit class whose discrete code is fed to G.
                # idx will be used to calculate discrete code loss to update recognition network parameters
                fake_img = generator_net(z)
                label.fill_(fake_label)
                shared_nw_out = shared_net(fake_img)
                disc_fake_output = discriminator_net(shared_nw_out).view(-1)
                d_error_fake = self.criterionD(disc_fake_output, label)
                d_error_fake.backward(retain_graph=True)
                d_error = d_error_real + d_error_fake
                optimD.step()
                # ---- updating G and Q ----
                optimG.zero_grad()
                label.fill_(real_label)
                # disc_fake_output computed above can't be used here again, since backward() has already
                # been called upon it and has changed. so, calculate it again.
                shared_nw_out = shared_net(fake_img)
                disc_fake_output = discriminator_net(shared_nw_out).view(-1)
                d_error_fake2 = self.criterionD(disc_fake_output, label)
                q_logits, q_mu, q_var = recognition_net(shared_nw_out)
                class_label = torch.LongTensor(idx).to(self.device)
                # mutual-information surrogate losses for the two code types
                discrete_loss = self.criterionRN_disc(q_logits, class_label)
                cont_loss = self.criterionRN_cont(cont_code, q_mu, q_var)
                g_error = d_error_fake2 + discrete_loss + cont_loss
                g_error.backward(retain_graph=True)
                optimG.step()
                # print results and generate 100 fake images after model has seen 100 batches of data in each epoch
                if i % 100 == 0:
                    print("Epoch:{}/{}\tBatch:{}/{}\tD-error:{}\tG-error:{}".
                          format(epoch, self.epochs, i, len(dataloader), d_error, g_error))
                    # noise.data.copy_(fix_noise)
                    # disc_code.data.copy_(torch.Tensor(one_hot))
                    #
                    # cont_code.data.copy_(torch.from_numpy(c1))
                    # NOTE(review): fix_noise is float32 while one_hot/c1/c2 come
                    # from float64 numpy arrays; torch.cat requires matching
                    # dtypes — confirm this path runs on the target torch version.
                    z = torch.cat([fix_noise, one_hot, c1], 1).view(-1, 74)  # size=(100, 74, 1, 1)
                    print("z size: ", z.size())
                    # z = torch.cat([noise, disc_code, cont_code], 1).view(-1, 74, 1, 1)
                    fake_img = generator_net(z).detach()
                    save_image(fake_img.data, '%s/image_c1_%d.png' % (img_save_filepath, img_count), nrow=10,
                               normalize=True)
                    # cont_code.data.copy_(torch.from_numpy(c2))
                    z = torch.cat([fix_noise, one_hot, c2], 1).view(-1, 74)  # size=(100, 74, 1, 1)
                    fake_img = generator_net(z).detach()
                    save_image(fake_img.data, '%s/image_c2_%d.png' % (img_save_filepath, img_count), nrow=10,
                               normalize=True)
                    img_count += 1
            # checkpoint all four networks once per epoch
            torch.save(generator_net.state_dict(), '%s/Gwts_epoch%d' % (wts_file, epoch))
            torch.save(discriminator_net.state_dict(), '%s/Dwts_epoch%d' % (wts_file, epoch))
            torch.save(recognition_net.state_dict(), '%s/RNwts_epoch%d' % (wts_file, epoch))
            torch.save(shared_net.state_dict(), '%s/SNwts_epoch%d' % (wts_file, epoch))
|
{"/main.py": ["/infogan.py", "/utils.py"]}
|
28,668
|
Garima16/experiments-with-GAN
|
refs/heads/main
|
/dcgan.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
    """DCGAN generator: (nz, 1, 1) latent -> (nc, 64, 64) image in [-1, 1]."""
    def __init__(self, ngpu, nc, nz, ngf):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        # (nz, 1, 1) -> (ngf*8, 4, 4)
        self.layer1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=nz, out_channels=ngf * 8,
                               kernel_size=4, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(ngf * 8),
        )
        # (ngf*8, 4, 4) -> (ngf*4, 8, 8)
        self.layer2 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=ngf * 8, out_channels=ngf * 4,
                               kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ngf * 4)
        )
        # (ngf*4, 8, 8) -> (ngf*2, 16, 16)
        self.layer3 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=ngf * 4, out_channels=ngf * 2,
                               kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ngf * 2)
        )
        # (ngf*2, 16, 16) -> (ngf, 32, 32)
        self.layer4 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=ngf * 2, out_channels=ngf,
                               kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ngf)
        )
        # (ngf, 32, 32) -> (nc, 64, 64)
        self.layer5 = nn.ConvTranspose2d(in_channels=ngf, out_channels=nc,
                                         kernel_size=4, stride=2, padding=1, bias=False)
    def forward(self, x):
        """Upsample the latent batch through four ReLU blocks, finish with tanh."""
        out = x
        for block in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = F.relu(input=block(out), inplace=True)
        return torch.tanh(input=self.layer5(out))
class Discriminator(nn.Module):
    """DCGAN discriminator: (nc, 64, 64) image -> scalar real/fake probability."""
    def __init__(self, nc, ndf):
        super(Discriminator, self).__init__()
        # (nc, 64, 64) -> (ndf, 32, 32); ndf=64, ngf=64 in the reference setup
        self.layer1 = nn.Conv2d(in_channels=nc, out_channels=ndf, kernel_size=4,
                                stride=2, padding=1, bias=False)
        # (ndf, 32, 32) -> (ndf*2, 16, 16)
        self.layer2 = nn.Sequential(
            nn.Conv2d(in_channels=ndf, out_channels=ndf * 2, kernel_size=4,
                      stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 2)
        )
        # (ndf*2, 16, 16) -> (ndf*4, 8, 8)
        self.layer3 = nn.Sequential(
            nn.Conv2d(in_channels=ndf * 2, out_channels=ndf * 4, kernel_size=4,
                      stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 4)
        )
        # (ndf*4, 8, 8) -> (ndf*8, 4, 4)
        self.layer4 = nn.Sequential(
            nn.Conv2d(in_channels=ndf * 4, out_channels=ndf * 8, kernel_size=4,
                      stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 8)
        )
        # (ndf*8, 4, 4) -> (1, 1, 1)
        self.layer5 = nn.Conv2d(in_channels=ndf * 8, out_channels=1, kernel_size=4,
                                stride=1, padding=0, bias=False)
    def forward(self, x):
        """Score a batch of images; output is a sigmoid probability in (0, 1)."""
        out = x
        for block in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = F.leaky_relu(input=block(out), negative_slope=0.2, inplace=True)
        return torch.sigmoid(input=self.layer5(out))
|
{"/main.py": ["/infogan.py", "/utils.py"]}
|
28,669
|
Garima16/experiments-with-GAN
|
refs/heads/main
|
/main.py
|
import infogan
from torch import cuda
import utils
# Entry point: wire up the MNIST data, the four InfoGAN networks and training.
if __name__ == "__main__":
    # directories for dataset download, generated sample grids and checkpoints
    root_dir = '../Datasets'
    img_dir = '../Images'
    wts_dir = '../Weights'
    # declare constants for InfoGAN training
    bs = 100
    noise_dim = 62        # incompressible noise part of z
    disc_codes_dim = 10   # one discrete code category per MNIST digit
    cont_code1_dim = 1
    cont_code2_dim = 1
    img_size = 28
    epochs = 1
    device = 'cuda' if cuda.is_available() else 'cpu'
    dis_lr = 0.0002
    gen_lr = 0.001
    dataloader = utils.load_data(dataroot=root_dir, img_size=img_size, bs=bs)
    utils.print_sample_data(dataloader=dataloader,
                            bs=bs,
                            device=device)
    infogan_obj = infogan.InfoGAN(noise_dim=noise_dim,
                                  disc_codes_dim=disc_codes_dim,
                                  cont_code1_dim=cont_code1_dim,
                                  cont_code2_dim=cont_code2_dim,
                                  bs=bs,
                                  image_size=img_size,
                                  epochs=epochs,
                                  dis_lr=dis_lr,
                                  gen_lr=gen_lr)
    disc_net = infogan.Discriminator().to(device)
    gen_net = infogan.Generator().to(device)
    rn_net = infogan.RecognitionNetwork().to(device)
    shared_net = infogan.SharedNetwork().to(device)
    # initialise weights of parameters in all models
    # NOTE(review): the networks were already moved to `device` above, so the
    # .to(device) in this loop is redundant (though harmless).
    for i in [disc_net, gen_net, rn_net, shared_net]:
        i.to(device)
        i.apply(utils.initialise_weights)
    infogan_obj.train(dataloader=dataloader,
                      discriminator_net=disc_net,
                      generator_net=gen_net,
                      recognition_net=rn_net,
                      shared_net=shared_net,
                      img_save_filepath=img_dir,
                      wts_file=wts_dir)
|
{"/main.py": ["/infogan.py", "/utils.py"]}
|
28,670
|
akshay9494/Inceptioner
|
refs/heads/master
|
/tests/test_inceptioner.py
|
from core.inceptioner import Inceptioner
import pytest
def test_inceptioner():
    """End-to-end smoke test: classify a bundled dog photo with InceptionV3."""
    sut = Inceptioner()
    res = sut.predict(img_path='dog.jpg')
    # pinned top-1 class and approximate confidence (±10%) for the fixture image
    assert res['prediction'] == 'golden_retriever'
    assert res['confidence'] == pytest.approx(0.65, 0.1)
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,671
|
akshay9494/Inceptioner
|
refs/heads/master
|
/configuration/logging.py
|
import logging
from logging import handlers
from datetime import datetime
from .instance import config
import os
# Log line layout: level, timestamp, logger name, function, line number, message.
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -10s %(funcName) -20s %(lineno) -5d: %(message)s')
# One log file per process start, timestamped to avoid name collisions.
log_name = 'logs_{}.log'.format(datetime.now().strftime('%Y-%m-%d--%H-%M-%S'))
# Rotate at ~100 kB, keeping up to 100 rotated files in the configured log dir.
handler = handlers.RotatingFileHandler(os.path.join(config.log_dir, log_name), maxBytes=100000, backupCount=100)
logging.basicConfig(
    level=logging.DEBUG,
    format=LOG_FORMAT,
    handlers=[
        handler
    ]
)
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,672
|
akshay9494/Inceptioner
|
refs/heads/master
|
/configuration/instance.py
|
import os
class Configuration:
    """Filesystem layout and runtime settings, resolved relative to this module."""
    def __init__(self):
        # HTTP port: honour the PORT environment variable, default 8080.
        self.port = os.environ.get('PORT', 8080)
        base = os.path.dirname(__file__)
        # Working area for uploads/artifacts; created eagerly if missing.
        self.home_dir = os.path.join(base, '..', 'home')
        os.makedirs(self.home_dir, exist_ok=True)
        # Log files live underneath the home directory.
        self.log_dir = os.path.join(self.home_dir, 'logs')
        os.makedirs(self.log_dir, exist_ok=True)
        # Pre-trained InceptionV3 weights checkpoint shipped with the repo.
        self.model_path = os.path.join(base, '..', 'dl_models', 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
# module-level singleton used throughout the service
config = Configuration()
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,673
|
akshay9494/Inceptioner
|
refs/heads/master
|
/main.py
|
# from services.nameko_service import Service, InceptionerService
# from services.http_exceptions import Service
# Importing configuration.logging configures the root logger as a side effect.
from configuration import logging
# from flask_server.instance import server
from flask_server import server
# The wildcard import registers the routes and brings `app` into scope.
from services.flask_restplus_service import *
from configuration.instance import config
if __name__ == '__main__':
    app.run()
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,674
|
akshay9494/Inceptioner
|
refs/heads/master
|
/services/flask_restplus_service.py
|
from flask_restplus import Resource, Namespace, reqparse
# from flask_server.instance import server
from flask_server import server
from api_models.recogntion import recognition_request, recognition_response
import logging
import base64
from flask import abort
from configuration.instance import config
from core.inceptioner import Inceptioner
import tensorflow as tf
import os
import werkzeug
import uuid
import socket
# Request parser for the multipart file-upload endpoint.
file_upload = reqparse.RequestParser()
file_upload.add_argument(
    'image_file',
    type=werkzeug.datastructures.FileStorage,
    location='files',
    required=True,
    help='image file'
)
# The model is loaded once at import time; all requests reuse this instance.
inceptioner_instance = Inceptioner()
# Capture the TF graph the Keras model was built in, so request handlers
# running on other threads can re-enter it via graph.as_default().
graph = tf.get_default_graph()
# NOTE(review): `ns` appears unused — routes below are registered on
# server.inceptioner_ns instead; confirm before removing.
ns = Namespace('inceptioner', description='Image recognition with Inception V3')
app, api = server.app, server.inceptioner_ns
@api.route('/recognize/base64')
class RecognizeBase64(Resource):
    """Recognise an image submitted as a base64-encoded JSON payload."""
    @api.expect(recognition_request)
    @api.marshal_with(recognition_response)
    def post(self):
        """Decode the payload to a file under home_dir, run InceptionV3, return the prediction."""
        logging.info('received post request for classification with base64')
        uid = self.api.payload['id']
        file_name = uid+'.jpg'
        file_path = os.path.join(config.home_dir, file_name)
        base64_string = self.api.payload['base64String']
        try:
            image = base64.decodebytes(str.encode(base64_string))
            with open(file_path, 'wb') as img:
                img.write(image)
        except Exception as e:
            # bad base64 / unwritable path is the client's fault -> 400
            logging.error(e)
            abort(400, str(e))
        try:
            # graph.as_default() pins the TF graph the Keras model was built in.
            with graph.as_default():
                # renamed from `recognition_response` so the imported marshal
                # model of that name is not shadowed inside this method
                response = inceptioner_instance.predict(file_path)
            response['id'] = uid
            response['hostname'] = socket.gethostname()
            return response
        except Exception as e:
            logging.error(e)
            abort(500, str(e))
@api.route('/recognize/file')
class RecognizeFile(Resource):
    """Recognise an image submitted as a multipart file upload."""
    @api.expect(file_upload)
    @api.marshal_with(recognition_response)
    def post(self):
        """Save the upload under a fresh UUID, run InceptionV3, return the prediction."""
        logging.info('received post request for classification with file')
        args = file_upload.parse_args()
        # keep the client's file extension, but name the file by UUID
        file_extension = args['image_file'].filename.split('.')[-1]
        uid = uuid.uuid4()
        file_name = str(uid) + '.{}'.format(file_extension)
        file_path = os.path.join(config.home_dir, file_name)
        args['image_file'].save(file_path)
        try:
            # graph.as_default() pins the TF graph the Keras model was built in.
            with graph.as_default():
                # renamed from `recognition_response` so the imported marshal
                # model of that name is not shadowed inside this method
                response = inceptioner_instance.predict(file_path)
            response['id'] = uid
            response['hostname'] = socket.gethostname()
            return response
        except Exception as e:
            logging.error(e)
            abort(500, str(e))
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,675
|
akshay9494/Inceptioner
|
refs/heads/master
|
/api_models/recogntion.py
|
from flask_restplus import fields
# from flask_server.instance import server
from flask_server import server
# Swagger/marshalling model for the base64 recognition request body.
recognition_request = server.api.model('RecognitionRequest', {
    'id': fields.String(description='UID for maintaining traceability', required=True),
    'base64String': fields.String(description='Base64 encoded image string for the image', required=True)
})
# Swagger/marshalling model for the recognition response body.
recognition_response = server.api.model('RecognitionResponse', {
    'id': fields.String(description='UID for maintaining traceability'),
    'prediction': fields.String(description='Prediction from the image using inception'),
    'confidence': fields.Float(description='Prediction Confidence'),
    'hostname': fields.String(description='hostname from where the execution happened to check for load balancing')
})
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,676
|
akshay9494/Inceptioner
|
refs/heads/master
|
/entities/request_payloads.py
|
from marshmallow import Schema, fields, post_load
class RecognitionRequestSchema(Schema):
    """Marshmallow schema validating the nameko recognition request payload."""
    id = fields.Str(required=True)
    # base64-encoded image bytes
    base64String = fields.Str(required=True)
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,677
|
akshay9494/Inceptioner
|
refs/heads/master
|
/core/inceptioner.py
|
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
from keras.preprocessing import image
import numpy as np
import os
from configuration.instance import config
class Inceptioner:
    """Thin wrapper around Keras InceptionV3 using locally stored weights."""
    def __init__(self):
        # Build the architecture without downloading weights, then load the
        # checkpoint from the path resolved by the configuration module.
        self.model = InceptionV3(weights=None)
        self.model.load_weights(config.model_path)
    def predict(self, img_path):
        """
        Classify a single image file.
        :param img_path: path to an image readable by keras.preprocessing.image
        :return: dict with 'prediction' (ImageNet class name) and 'confidence' (score)
        """
        # InceptionV3 expects 299x299 input
        img = image.load_img(img_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = self.model.predict(x)
        # decode_predictions yields (class_id, class_name, score) triples;
        # take the top-1 entry of the first (only) sample
        decoded = decode_predictions(preds)[0][0]
        res = {
            'prediction': decoded[1],
            'confidence': decoded[2]
        }
        return res
if __name__ == '__main__':
    pass
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,678
|
akshay9494/Inceptioner
|
refs/heads/master
|
/flask_server/server.py
|
from flask import Flask
from flask_restplus import Api, Namespace
# Standalone module-level Flask + flask-restplus application (the
# class-based variant lives in flask_server/instance.py).
app = Flask(__name__)
# Swagger UI is served at /api/swagger.
api = Api(
    app,
    version='1.0',
    title='Inception Image Recognition API',
    description='Recognize images based on the inception model trained on imagenet data',
    doc='/api/swagger'
)
# Namespace grouping the recognition endpoints.
inceptioner_ns = Namespace('inceptioner', description='Image recognition with Inception V3')
api.add_namespace(inceptioner_ns)
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,679
|
akshay9494/Inceptioner
|
refs/heads/master
|
/core/utilities.py
|
from configuration.instance import config
import base64
import os
from entities.request_payloads import RecognitionRequestSchema
import logging
import tensorflow as tf
from core.inceptioner import Inceptioner
import socket
# Load the model once at import time and capture the TF graph so worker
# threads can run predictions against it later.
inceptioner_instance = Inceptioner()
graph = tf.get_default_graph()


def process_request(json_request):
    """Validate a recognition request, persist its image and classify it.

    Parameters:
        json_request: JSON string matching RecognitionRequestSchema
            (keys 'id' and 'base64String').

    Returns:
        dict with 'prediction', 'confidence', 'id' and 'hostname' on
        success, or an error string on failure (callers treat a plain
        str result as an error message).
    """
    schema = RecognitionRequestSchema()
    result = schema.loads(json_request)
    logging.debug('Unmarshalling result: %s', result)
    if result.errors:
        return "wrong payload supplied"
    uid = result.data['id']
    # Security: the id comes from the client — strip any directory
    # components so a crafted id such as '../../x' cannot write outside
    # the configured home directory.
    file_name = os.path.basename(str(uid)) + '.jpg'
    file_path = os.path.join(config.home_dir, file_name)
    base64_string = result.data['base64String']
    try:
        image = base64.b64decode(base64_string)
        with open(file_path, 'wb') as img:
            img.write(image)
    except Exception as e:
        logging.error(e)
        return str(e)
    try:
        # Predictions must run inside the graph captured at import time,
        # otherwise Keras/TF raises when invoked from another thread.
        with graph.as_default():
            recognition_response = inceptioner_instance.predict(file_path)
        recognition_response['id'] = uid
        recognition_response['hostname'] = socket.gethostname()
        return recognition_response
    except Exception as e:
        logging.error(e)
        return str(e)
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,680
|
akshay9494/Inceptioner
|
refs/heads/master
|
/services/nameko_service.py
|
from nameko.web.handlers import http
from nameko.messaging import consume
from kombu.messaging import Exchange, Queue
import logging
import json
from core.utilities import process_request
class InceptionerService:
    """Nameko service exposing image recognition over HTTP and AMQP."""
    name = "inceptioner_service"

    test_exchange = Exchange('nameko_test_exchange', type='direct')
    test_queue = Queue('nameko_test_queue', exchange=test_exchange)

    @http('GET', '/get/<int:value>')
    def get_method_for_test(self, request, value):
        """Health-check style endpoint: echoes *value* back as JSON."""
        return json.dumps({'value': value})

    @http('POST', '/recognize/base64')
    def do_post(self, request):
        """Recognize an image posted as a base64 JSON payload."""
        logging.info('Received Request on recognition from base64')
        request_data = request.data
        logging.debug('Data Received: {}'.format(request_data))
        res = process_request(request_data)
        # Use the logger instead of bare print() so output honours the
        # configured handlers and levels.
        logging.debug('Recognition result: %s', res)
        return str(res)

    @consume(test_queue)
    def handle_event(self, payload):
        """Recognize an image received on the test queue."""
        logging.info('Received request on recognition on the queue')
        logging.debug('Data received: {}'.format(payload))
        res = process_request(payload)
        logging.debug('Recognition result: %s', res)
        return res
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,681
|
akshay9494/Inceptioner
|
refs/heads/master
|
/flask_server/instance.py
|
from flask import Flask
from flask_restplus import Api, Namespace
from configuration.instance import config
from waitress import serve
class Server:
    """Holds the Flask app, its flask-restplus API object and the
    namespace used by the recognition endpoints."""

    def __init__(self):
        self.app = Flask(__name__)
        # Swagger UI is exposed at /api/swagger.
        self.api = Api(
            self.app,
            version='1.0',
            title='Inception Image Recognition API',
            description='Recognize images based on the inception model trained on imagenet data',
            doc='/api/swagger'
        )
        self.inceptioner_ns = Namespace('inceptioner', description='Image recognition with Inception V3')
        self.api.add_namespace(self.inceptioner_ns)

    def run(self):
        """Serve the app with waitress on all interfaces at the
        configured port."""
        serve(self.app, host='0.0.0.0', port=config.port)


# Module-level singleton imported by the rest of the application.
server = Server()
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,682
|
akshay9494/Inceptioner
|
refs/heads/master
|
/main_nameko.py
|
from services.nameko_service import InceptionerService
|
{"/tests/test_inceptioner.py": ["/core/inceptioner.py"], "/configuration/logging.py": ["/configuration/instance.py"], "/main.py": ["/services/flask_restplus_service.py", "/configuration/instance.py"], "/services/flask_restplus_service.py": ["/api_models/recogntion.py", "/configuration/instance.py", "/core/inceptioner.py"], "/core/inceptioner.py": ["/configuration/instance.py"], "/core/utilities.py": ["/configuration/instance.py", "/entities/request_payloads.py", "/core/inceptioner.py"], "/services/nameko_service.py": ["/core/utilities.py"], "/flask_server/instance.py": ["/configuration/instance.py"], "/main_nameko.py": ["/services/nameko_service.py"]}
|
28,683
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/goods/views.py
|
from django.shortcuts import render
from django.views.generic import View
from apps.goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner
from django.core.cache import cache
from django_redis import get_redis_connection
import os
# Create your views here.
# class Test(object):
# def __init__(self):
# self.name = 'abc'
# t = Test()
# t.age = 10
# print(t.age)
# http://127.0.0.1:8000
class IndexView(View):
    """Front page."""

    def get(self, request):
        """Render the front page, caching the shared catalogue data.

        The catalogue (types, banners, promotions) is identical for all
        users and cached for an hour; the cart count is per-user and is
        recomputed on every request.
        """
        # Use the same plain key for get and set: cache.get/set apply
        # the backend key prefix (e.g. ':1:') automatically, so passing
        # the prefixed key back in would never hit.
        context = cache.get('index_page_data')
        if context is None:
            # Cache miss: build the catalogue context from the DB.
            types = GoodsType.objects.all()
            # Carousel banners, ordered for display.
            goods_banners = IndexGoodsBanner.objects.all().order_by('index')
            # Promotion banners.
            promotion_banners = IndexPromotionBanner.objects.all().order_by('index')
            for type_name in types:
                image_banners = IndexTypeGoodsBanner.objects.filter(
                    type=type_name, display_type=1).order_by('index')
                title_banners = IndexTypeGoodsBanner.objects.filter(
                    type=type_name, display_type=0).order_by('index')
                # Attach the banner querysets to the type instance so
                # the template can iterate them per category.
                type_name.image_banners = image_banners
                type_name.title_banners = title_banners
            context = {'types': types,
                       'goods_banners': goods_banners,
                       'promotion_banners': promotion_banners,
                       'data': 'This is a message!!'
                       }
            cache.set('index_page_data', context, 3600)
        # Per-user cart count — computed outside the cached block so one
        # user's count is never served to another.
        user = request.user
        cart_count = 0
        if user.is_authenticated:
            conn = get_redis_connection('default')
            cart_key = 'cart_%d' % user.id
            cart_count = conn.hlen(cart_key)
        # dict.update() returns None — never rebind context to its result.
        context.update(cart_count=cart_count)
        return render(request, 'templates/index.html', context)
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,684
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/goods/migrations/0003_auto_20201221_1851.py
|
# Generated by Django 2.2 on 2020-12-21 10:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch every goods-app create_time column to auto_now."""

    dependencies = [
        ('goods', '0002_auto_20201103_2334'),
    ]

    # All seven models receive the identical field change; generate the
    # AlterField operations from one list instead of repeating them.
    operations = [
        migrations.AlterField(
            model_name=model,
            name='create_time',
            field=models.DateTimeField(auto_now=True, verbose_name='创建时间'),
        )
        for model in (
            'goods', 'goodsimage', 'goodssku', 'goodstype',
            'indexgoodsbanner', 'indexpromotionbanner', 'indextypegoodsbanner',
        )
    ]
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,685
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/user/views.py
|
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model, authenticate, login, logout
from django.views.generic import View
from django.conf import settings
from django.http import HttpResponse
from django.urls import reverse
from apps.user.models import Address
from apps.goods.models import GoodsSKU
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import SignatureExpired
import re
from utils.mixin import LoginRequireMixin
from celery_tasks import tasks
from django_redis import get_redis_connection
User = get_user_model()
# Create your views here.
# /user/register
class RegisterView(View):
    """User registration: show the form and create inactive accounts."""

    def get(self, request):
        """Render the registration form."""
        return render(request, 'templates/register.html')

    def post(self, request):
        """Validate the submitted form, create a disabled user and send
        the activation e-mail."""
        username = request.POST.get('user_name')
        password = request.POST.get('pwd')
        email = request.POST.get('email')
        allow = request.POST.get('allow')
        # All three fields are required.
        if not all((username, password, email)):
            return render(request, 'templates/register.html', {'errmsg': '数据不完整,请完善注册信息。'})
        # Basic e-mail format check.
        if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
            return render(request, 'templates/register.html', {'errmsg': '邮箱格式不合法。'})
        # The terms checkbox must be ticked.
        if allow != 'on':
            return render(request, 'templates/register.html', {'errmsg': '请同意协议。'})
        # Reject duplicate usernames with a single EXISTS query instead
        # of fetching the whole row via try/get.
        if User.objects.filter(username=username).exists():
            return render(request, 'templates/register.html', {'errmsg': '用户名已存在,请重新输入。'})
        # Create the account disabled until the e-mailed link is clicked.
        user = User.objects.create_user(username, email, password)
        user.is_active = 0
        user.save()
        # Sign the user id into a 1-hour activation token so the
        # activation link cannot be forged or replayed after expiry.
        # Link shape: http://127.0.0.1:8000/user/active/<token>
        serializer = Serializer(settings.SECRET_KEY, 3600)
        info = {'confirm': user.id}
        token = serializer.dumps(info).decode('utf-8')
        to_email = [email]
        # Delivery is delegated to the celery task module.
        tasks.send_register_active_email(to_email, username, token)
        # Registration done — back to the front page.
        return redirect(reverse('goods:index'))
class ActiveView(View):
    """Activate a freshly registered account from the e-mailed token."""

    def get(self, request, token):
        """Decode the activation token and enable the matching user."""
        serializer = Serializer(settings.SECRET_KEY, 3600)
        try:
            info = serializer.loads(token)
            # The token payload carries the user id under 'confirm'.
            user_id = info['confirm']
            user = User.objects.get(id=user_id)
            user.is_active = 1
            user.save()
            # Activation done — send the user to the login page.
            return redirect(reverse('user:login'))
        except SignatureExpired:
            # NOTE(review): a tampered token raises BadSignature, which
            # is not handled here and would surface as a 500 — confirm
            # whether that is intended.
            return HttpResponse('链接已过期,请重新注册。')
# /user/login
class LoginView(View):
    """Login page and credential check."""

    def get(self, request):
        """Render the login form, pre-filling the username when the
        'remember me' cookie is present."""
        if 'username' in request.COOKIES:
            username = request.COOKIES.get('username')
            checked = 'checked'
        else:
            username = ''
            checked = ''
        return render(request, 'templates/login.html', {'username': username, 'checked': checked})

    def post(self, request):
        """Validate credentials, log the user in and honour the
        'remember me' checkbox."""
        username = request.POST.get('username')
        password = request.POST.get('pwd')
        if not all((username, password)):
            return render(request, 'templates/login.html', {'errmsg': '数据不完整。'})
        user = authenticate(username=username, password=password)
        if user is None:
            # Wrong username or password.
            return render(request, 'templates/login.html', {'errmsg': '用户名或密码不正确。'})
        if not user.is_active:
            # Account exists but was never activated.
            return render(request, 'templates/login.html', {'errmsg': '账号未激活,请先激活账户。'})
        # Record the session.
        login(request, user)
        # Honour ?next=... set by the login_required machinery; fall
        # back to the front page.
        next_url = request.GET.get('next', reverse('goods:index'))
        response = redirect(next_url)
        # Bug fix: this cookie handling used to sit after an early
        # `return redirect(next_url)` and never executed; it is now
        # attached to the response that is actually returned.
        remember = request.POST.get('remember')
        if remember == 'on':
            response.set_cookie('username', username, max_age=7 * 24 * 3600)
        else:
            response.delete_cookie('username')
        return response
class LogoutView(View):
    """Log the current user out and return to the front page."""

    def get(self, request):
        # Django's auth system clears the session for us.
        logout(request)
        return redirect(reverse('goods:index'))
# /user/info/
class UserInfoView(LoginRequireMixin, View):
    """User centre — profile page with default address and browsing
    history."""

    def get(self, request):
        user = request.user
        address = Address.objects.get_default_address(user=user)
        # Browsing history lives in redis as list 'history_<user id>' of
        # GoodsSKU ids, most recent first.
        con = get_redis_connection('default')
        history_key = 'history_%d' % user.id
        # Latest five viewed products.
        sku_ids = con.lrange(history_key, 0, 4)
        # Fetch each SKU individually so the redis ordering is kept (a
        # single filter(id__in=...) query would lose it).
        goods_li = []
        for sku_id in sku_ids:  # renamed from 'id' — don't shadow the builtin
            goods = GoodsSKU.objects.get(id=sku_id)
            goods_li.append(goods)
        context = {'user': user,
                   'address': address,
                   'goods_li': goods_li,
                   'page': 'info'
                   }
        # Besides the explicit context, django also passes request.user
        # to templates automatically as `user`.
        return render(request, 'templates/user_center_info.html', context)
# /user/order/
class UserOrderView(LoginRequireMixin, View):
    """User centre — order list page."""

    def get(self, request):
        # Orders are not loaded yet; only the active-tab marker is passed.
        return render(request, 'templates/user_center_order.html', {'page': 'order'})
# /user/address/
class AddressView(LoginRequireMixin, View):
    """User centre — shipping address page."""

    def get(self, request):
        """Show the user's default shipping address (None when absent)."""
        user = request.user
        # The custom manager wraps the get/DoesNotExist dance.
        address = Address.objects.get_default_address(user=user)
        context = {'address': address,
                   'page': 'address'}
        return render(request, 'templates/user_center_site.html', context)

    def post(self, request):
        """Add a shipping address; the user's first address becomes the
        default."""
        receiver = request.POST.get('receiver')
        addr = request.POST.get('addr')
        zip_code = request.POST.get('zip_code')
        phone = request.POST.get('phone')
        # Receiver, address and phone are required (zip code is not).
        if not all([receiver, addr, phone]):
            return render(request, 'templates/user_center_site.html', {'errmsg': '数据不完整。'})
        # Mainland mobile number format.
        if not re.match('^1[3|4|5|7|8][0-9]{9}$', phone):
            return render(request, 'templates/user_center_site.html', {'errmsg': '手机号码格式不正确。'})
        user = request.user
        address = Address.objects.get_default_address(user=user)
        # Only the first address a user adds becomes the default.
        is_default = not address
        # Bug fix: 'receiver' was validated above but never saved.
        Address.objects.create(user=user,
                               receiver=receiver,
                               addr=addr,
                               zip_code=zip_code,
                               phone=phone,
                               is_default=is_default)
        # Redirect (GET) so refreshing does not resubmit the form.
        return redirect(reverse('user:address'))
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,686
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/order/migrations/0003_auto_20201103_2334.py
|
# Generated by Django 2.2 on 2020-11-03 15:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Mark order-app create_time columns auto_created + blank."""

    dependencies = [
        ('order', '0002_auto_20201103_1834'),
    ]

    # Both models receive the identical field change.
    operations = [
        migrations.AlterField(
            model_name=model,
            name='create_time',
            field=models.DateTimeField(auto_created=True, blank=True, verbose_name='创建时间'),
        )
        for model in ('ordergoods', 'orderinfo')
    ]
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,687
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/order/migrations/0004_auto_20201221_1851.py
|
# Generated by Django 2.2 on 2020-12-21 10:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch order-app create_time columns to auto_now."""

    dependencies = [
        ('order', '0003_auto_20201103_2334'),
    ]

    # Both models receive the identical field change.
    operations = [
        migrations.AlterField(
            model_name=model,
            name='create_time',
            field=models.DateTimeField(auto_now=True, verbose_name='创建时间'),
        )
        for model in ('ordergoods', 'orderinfo')
    ]
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,688
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/user/urls.py
|
from django.urls import re_path
# from django.contrib.auth.decorators import login_required
from apps.user import views
from apps.user.views import RegisterView, ActiveView, LoginView, UserInfoView, UserOrderView, AddressView, LogoutView
urlpatterns = [
    # re_path(r'^register$', views.register, name='register'),  # register (old function-based view, superseded)
    # re_path(r'^register_handle$', views.register_handle, name='register_handle'),  # register handler (superseded)
    re_path(r'^register$', RegisterView.as_view(), name='register'),  # sign up
    re_path(r'^logout$', LogoutView.as_view(), name='logout'),  # log out
    re_path(r'^active/(?P<token>.*)$', ActiveView.as_view(), name='active'),  # activate account
    re_path(r'^login$', LoginView.as_view(), name='login'),  # login page shown after activation
    re_path(r'^info$', UserInfoView.as_view(), name='info'),  # user centre - profile
    re_path(r'^order$', UserOrderView.as_view(), name='order'),  # user centre - orders
    re_path(r'^address$', AddressView.as_view(), name='address'),  # user centre - addresses
]
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,689
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/goods/urls.py
|
from django.urls import re_path
from apps.goods.views import IndexView
urlpatterns = [
    # Anchor the pattern with '$' so it matches only the app root; a
    # bare r'^' would match every path routed into this app and shadow
    # any pattern added after it.
    re_path(r'^$', IndexView.as_view(), name='index'),
]
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,690
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/goods/migrations/0002_auto_20201103_2334.py
|
# Generated by Django 2.2 on 2020-11-03 15:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='goods',
name='create_time',
field=models.DateTimeField(auto_created=True, blank=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='goodsimage',
name='create_time',
field=models.DateTimeField(auto_created=True, blank=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='goodssku',
name='create_time',
field=models.DateTimeField(auto_created=True, blank=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='goodstype',
name='create_time',
field=models.DateTimeField(auto_created=True, blank=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='indexgoodsbanner',
name='create_time',
field=models.DateTimeField(auto_created=True, blank=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='indexpromotionbanner',
name='create_time',
field=models.DateTimeField(auto_created=True, blank=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='indextypegoodsbanner',
name='create_time',
field=models.DateTimeField(auto_created=True, blank=True, verbose_name='创建时间'),
),
]
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,691
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/order/urls.py
|
from django.urls import re_path
# Order app routes — nothing wired up yet.
urlpatterns = [
    # re_path(r'user'),  # user module (placeholder)
]
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,692
|
JingSiHan/daily_fresh
|
refs/heads/main
|
/apps/cart/urls.py
|
from django.urls import re_path
# Cart app routes — nothing wired up yet.
urlpatterns = [
    # re_path(r'user', ),  # user module (placeholder)
]
|
{"/apps/user/urls.py": ["/apps/user/views.py"], "/apps/goods/urls.py": ["/apps/goods/views.py"]}
|
28,693
|
MathewLech/sandbox
|
refs/heads/master
|
/djangoprojects/backendwithjwt/MyAPI/utils.py
|
from channels.db import database_sync_to_async
from .exceptions import ClientError
from .models import StreamChannel
# This decorator turns this function from a synchronous function into an async one
# we can call from our async consumers, that handles Django DBs correctly.
# For more, see http://channels.readthedocs.io/en/latest/topics/databases.html
@database_sync_to_async
def get_streamchannel_or_error(room_id, user):
    """Fetch the StreamChannel for *room_id*, enforcing permissions.

    Raises ClientError when the user is anonymous or the channel id is
    unknown. Wrapped in database_sync_to_async so async consumers can
    await it while the ORM work runs on a thread.
    See http://channels.readthedocs.io/en/latest/topics/databases.html
    """
    # Anonymous connections are rejected outright.
    if not user.is_authenticated:
        raise ClientError("USER_HAS_TO_LOGIN")
    # Resolve the requested stream channel by primary key.
    try:
        return StreamChannel.objects.get(pk=room_id)
    except StreamChannel.DoesNotExist:
        raise ClientError("STREAMCHANNEL_INVALID")
|
{"/djangoprojects/backendwithjwt/MyAPI/utils.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/views.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/admin.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/consumers.py": ["/djangoprojects/backendwithjwt/MyAPI/utils.py"]}
|
28,694
|
MathewLech/sandbox
|
refs/heads/master
|
/djangoprojects/backendwithjwt/backend/routing.py
|
from django.urls import path
from channels.http import AsgiHandler
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from MyAPI.consumers import StreamConsumer
from backend.token_auth import TokenAuthMiddlewareStack
from backend.jwttoken_auth import JwtTokenAuthMiddlewareStack
# The channel routing defines what connections get handled by what consumers,
# selecting on either the connection type (ProtocolTypeRouter) or properties
# of the connection's scope (like URLRouter, which looks at scope["path"])
# For more, see http://channels.readthedocs.io/en/latest/topics/routing.html
# Top-level ASGI router: picks a handler by connection type.
# See http://channels.readthedocs.io/en/latest/topics/routing.html
application = ProtocolTypeRouter({
    # Channels will do this for you automatically. It's included here as an example.
    # "http": AsgiHandler,
    # Route all WebSocket requests through the custom JWT auth
    # middleware so scope["user"] is populated before the consumer
    # runs; the URLRouter is illustrative (one route today).
    "websocket": JwtTokenAuthMiddlewareStack(
        URLRouter([
            # URLRouter just takes standard Django path() or url() entries.
            path("myapi/stream/", StreamConsumer),
        ]),
    ),
})
|
{"/djangoprojects/backendwithjwt/MyAPI/utils.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/views.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/admin.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/consumers.py": ["/djangoprojects/backendwithjwt/MyAPI/utils.py"]}
|
28,695
|
MathewLech/sandbox
|
refs/heads/master
|
/djangoprojects/backendwithjwt/MyAPI/views.py
|
from django.shortcuts import render
import datetime
import os
import sys
from django.views import generic
import json
from django.core import serializers
from django.shortcuts import HttpResponse
from rest_framework import views, serializers, status
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from django.http import JsonResponse
from django.forms.models import model_to_dict
from django.utils import timezone
from .models import StreamChannel
import random
def index(request):
    """Render the demo index page."""
    return render(request, "index.html")
class MessageSerializer(serializers.Serializer):
    """Single-field serializer used by the demo endpoints."""
    # Free-form text payload.
    AllData = serializers.CharField()
#this is auth by JWT
class TestData(views.APIView):
    """Demo endpoints protected by JWT authentication."""

    def post(self, request, *args, **kwargs):
        """Echo a canned message through the serializer round-trip."""
        payload = MessageSerializer(data={'Message': 'This is a protected post'})
        payload.is_valid(raise_exception=True)
        return Response(payload.data, status=status.HTTP_201_CREATED)

    def get(self, request, *args, **kwargs):
        """Return a canned message with a random suffix."""
        body = {'Message': 'This is a protected get' + str(random.randint(0, 100))}
        return JsonResponse(body, status=status.HTTP_201_CREATED, safe=False)
|
{"/djangoprojects/backendwithjwt/MyAPI/utils.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/views.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/admin.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/consumers.py": ["/djangoprojects/backendwithjwt/MyAPI/utils.py"]}
|
28,696
|
MathewLech/sandbox
|
refs/heads/master
|
/djangoprojects/backendwithjwt/backend/token_auth.py
|
from channels.auth import AuthMiddlewareStack
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import AnonymousUser
from channels.middleware import BaseMiddleware
class TokenAuthMiddleware(BaseMiddleware):
    """
    Token authorization middleware for Django Channels 2.

    Reads a DRF auth token from the websocket query string (?token=...)
    and puts the matching user on the scope; any failure leaves the
    connection anonymous.
    """

    def __init__(self, inner):
        self.inner = inner

    def __call__(self, scope):
        try:
            # For now the token travels as a url query parameter.
            query = dict((x.split('=') for x in scope['query_string'].decode().split("&")))
            # Pull the token from the url params.
            token = query['token']
            # Look up the token row and its associated fk user.
            token = Token.objects.get(key=token)
            # Channels inspects scope['user'] to decide authentication.
            scope['user'] = token.user
        except (KeyError, ValueError, UnicodeDecodeError, Token.DoesNotExist):
            # Narrowed from a bare 'except:' so programming errors are
            # no longer silently swallowed; any auth failure falls back
            # to an anonymous user.
            scope['user'] = AnonymousUser()
        return self.inner(scope)


# Convenience stack: token auth layered over the standard auth middleware.
TokenAuthMiddlewareStack = lambda inner: TokenAuthMiddleware(AuthMiddlewareStack(inner))
|
{"/djangoprojects/backendwithjwt/MyAPI/utils.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/views.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/admin.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/consumers.py": ["/djangoprojects/backendwithjwt/MyAPI/utils.py"]}
|
28,697
|
MathewLech/sandbox
|
refs/heads/master
|
/djangoprojects/backendwithjwt/MyAPI/models.py
|
from django.db import models
class StreamChannel(models.Model):
    """
    A Channel for clients to subscribe to.
    """
    # Human-readable channel title (also shown in the admin).
    title = models.CharField(max_length=255)

    def __str__(self):
        return self.title

    @property
    def group_name(self):
        """
        Returns the Channels Group name that sockets should subscribe to to get sent
        messages as they are generated.
        """
        # One group per database row, keyed by primary key.
        return "streamchannel-%s" % self.id
|
{"/djangoprojects/backendwithjwt/MyAPI/utils.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/views.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/admin.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/consumers.py": ["/djangoprojects/backendwithjwt/MyAPI/utils.py"]}
|
28,698
|
MathewLech/sandbox
|
refs/heads/master
|
/djangoprojects/backendwithjwt/testexternalserver.py
|
from channels.layers import get_channel_layer
from django.conf import settings
import asyncio
import os
import time
async def main():
    """Publish an incrementing message to streamchannel-1 every 3 s.

    Configures a minimal standalone Django settings object (only the
    channel layer is needed) and then loops forever.
    """
    redis_host = os.environ.get('REDIS_HOST', 'localhost')
    settings.configure(CHANNEL_LAYERS={
        "default": {
            # This example app uses the Redis channel layer implementation channels_redis
            "BACKEND": "channels_redis.core.RedisChannelLayer",
            "CONFIG": {
                "hosts": [(redis_host, 6379)],
            },
        },
    })
    channel_layer = get_channel_layer()
    init_text = "HI FROM THE ADMIN"
    incr = 0
    while True:
        await channel_layer.group_send(
            "streamchannel-1",
            {
                "type": "stream.message",
                "stream_id": 1,
                "username": "Admin",
                "message": init_text,
            }
        )
        # Bug fix: time.sleep() blocks the event loop inside a
        # coroutine; use the awaitable asyncio sleep instead.
        await asyncio.sleep(3)
        incr += 1
        init_text = str(incr)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
|
{"/djangoprojects/backendwithjwt/MyAPI/utils.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/views.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/admin.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/consumers.py": ["/djangoprojects/backendwithjwt/MyAPI/utils.py"]}
|
28,699
|
MathewLech/sandbox
|
refs/heads/master
|
/djangoprojects/backendwithjwt/MyAPI/admin.py
|
from django.contrib import admin
from .models import StreamChannel
# Register StreamChannel with a dynamically built ModelAdmin: keyword
# options passed to register() become attributes of the generated
# admin class.
admin.site.register(
    StreamChannel,
    list_display=["id", "title"],
    list_display_links=["id", "title"],
)
|
{"/djangoprojects/backendwithjwt/MyAPI/utils.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/views.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/admin.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/consumers.py": ["/djangoprojects/backendwithjwt/MyAPI/utils.py"]}
|
28,700
|
MathewLech/sandbox
|
refs/heads/master
|
/djangoprojects/backendwithjwt/backend/jwttoken_auth.py
|
from channels.auth import AuthMiddlewareStack
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import AnonymousUser
from channels.middleware import BaseMiddleware
from rest_framework_simplejwt.state import token_backend
from django.contrib.auth import authenticate, get_user_model
User = get_user_model()
class JwtTokenAuthMiddleware(BaseMiddleware):
    """
    JWT token authorization middleware for Django Channels 2.

    Decodes a simplejwt access token passed as a websocket query
    parameter (?token=...) and attaches the matching user to the scope;
    any failure downgrades the connection to an anonymous user.
    """

    def __init__(self, inner):
        self.inner = inner

    def __call__(self, scope):
        try:
            # For now the token travels as a url query parameter.
            query = dict((x.split('=') for x in scope['query_string'].decode().split("&")))
            token = query['token']
            # Decode and verify the JWT, then resolve the user id claim.
            payload = token_backend.decode(token, verify=True)
            user_id = payload.get('user_id')
            # Bug fix: the resolved user was never stored on the scope,
            # so every connection ended up anonymous even with a valid
            # token.
            scope['user'] = User.objects.get(id=user_id)
        except Exception:
            # Bad/expired token, missing param, unknown user — fall
            # back to anonymous (narrowed from a bare 'except:').
            scope['user'] = AnonymousUser()
        return self.inner(scope)


# Convenience stack: JWT auth layered over the standard auth middleware.
JwtTokenAuthMiddlewareStack = lambda inner: JwtTokenAuthMiddleware(AuthMiddlewareStack(inner))
|
{"/djangoprojects/backendwithjwt/MyAPI/utils.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/views.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/admin.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/consumers.py": ["/djangoprojects/backendwithjwt/MyAPI/utils.py"]}
|
28,701
|
MathewLech/sandbox
|
refs/heads/master
|
/djangoprojects/backendwithjwt/MyAPI/consumers.py
|
from django.conf import settings
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from .exceptions import ClientError
from .utils import get_streamchannel_or_error
class StreamConsumer(AsyncJsonWebsocketConsumer):
"""
This Stream consumer handles websocket connections for
clients subsribing to stream and also any processes who want to
publish to channel.
It uses AsyncJsonWebsocketConsumer, which means all the handling functions
must be async functions, and any sync work (like ORM access) has to be
behind database_sync_to_async or sync_to_async. For more, read
http://channels.readthedocs.io/en/latest/topics/consumers.html
"""
##### WebSocket event handlers
async def connect(self):
"""
Called when the websocket is handshaking as part of initial connection.
"""
# Are they logged in?
if self.scope["user"].is_anonymous:
# Reject the connection
await self.close()
else:
# Accept the connection
await self.accept()
# Store which stream_channels the user has joined on this connection
self.stream_channels = set()
await self.subscribe_client_to_channel()
async def subscribe_client_to_channel(self):
stream_channel_id = 1 #we need to look this up from DB
# The logged-in user is in our scope thanks to the authentication ASGI middleware
stream_channel = await get_streamchannel_or_error(stream_channel_id, self.scope["user"])
#make note they have joined this stream
self.stream_channels.add(stream_channel_id)
# Add them to the group so they get stream_channel messages
await self.channel_layer.group_add(
stream_channel.group_name,
self.channel_name,
)
async def receive_json(self, content):
"""
Called when we get a text frame. Channels will JSON-decode the payload
for us and pass it as the first argument.
"""
try:
# Messages need to have a "command" key we can switch on
command = content.get("command", None)
if command == "send":
await self.send_stream(content["stream_id"], content["message"])
elif command == "leave":
# Leave the stream
await self.leave_stream(content["stream_id"])
except ClientError as e:
# Catch any errors and send it back
await self.send_json({"error": e.code})
async def disconnect(self, code):
"""
Called when the WebSocket closes for any reason.
"""
# Leave all the stream channels we are still in
for stream_id in list(self.stream_channels):
try:
await self.leave_stream(stream_id)
except ClientError:
pass
##### Command helper methods called by receive_json
async def leave_stream(self, stream_id):
"""
Called by receive_json when someone sent a leave command.
"""
# The logged-in user is in our scope thanks to the authentication ASGI middleware
stream_channel = await get_streamchannel_or_error(stream_id, self.scope["user"])
# Remove that we're in the stream
self.stream_channels.discard(stream_id)
# Remove them from the group so they no longer get stream messages
await self.channel_layer.group_discard(
stream_channel.group_name,
self.channel_name,
)
# Instruct their client to finish closing the stream
await self.send_json({
"leave": str(stream_channel.id),
})
async def send_stream(self, stream_id, message):
"""
Called by receive_json when a process sends a message to a stream.
"""
# Check they are in this stream
if stream_id not in self.stream_channels:
raise ClientError("STREAM_ACCESS_DENIED")
# Get the stream and send to the group about it
stream_channel = await get_streamchannel_or_error(stream_id, self.scope["user"])
await self.channel_layer.group_send(
stream_channel.group_name,
{
"type": "stream.message",
"stream_id": stream_id,
"username": self.scope["user"].username,
"message": message,
}
)
##### Handlers for messages sent over the channel layer
# These helper methods are named by the types we send - so stream.message becomes stream_message
async def stream_leave(self, event):
"""
Called when someone has left the stream.
"""
# Send a message down to the client
await self.send_json(
{
"msg_type": settings.MSG_TYPE_LEAVE,
"stream": event["stream_id"],
"username": event["username"],
},
)
async def stream_message(self, event):
"""
Called when a process has sent something to the stream.
"""
# Send a message down to the client
await self.send_json(
{
"msg_type": "msg",
"stream": event["stream_id"],
"username": event["username"],
"message": event["message"],
},
)
|
{"/djangoprojects/backendwithjwt/MyAPI/utils.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/views.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/admin.py": ["/djangoprojects/backendwithjwt/MyAPI/models.py"], "/djangoprojects/backendwithjwt/MyAPI/consumers.py": ["/djangoprojects/backendwithjwt/MyAPI/utils.py"]}
|
28,705
|
certik/fwrap
|
refs/heads/master
|
/fwrap/tests/tutils.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
from nose.tools import eq_
def remove_common_indent(s):
if not s:
return s
lines_ = s.splitlines()
# remove spaces from ws lines
lines = []
for line in lines_:
if line.rstrip():
lines.append(line.rstrip())
else:
lines.append("")
fst_line = None
for line in lines:
if line:
fst_line = line
break
ws_count = 0
for ch in fst_line:
if ch == ' ':
ws_count += 1
else:
break
if not ws_count: return s
ret = []
for line in lines:
line = line.rstrip()
if line:
assert line.startswith(' '*ws_count)
ret.append(line[ws_count:])
else:
ret.append('')
return '\n'.join(ret)
def compare(s1, s2):
ss1 = remove_common_indent(s1.rstrip())
ss2 = remove_common_indent(s2.rstrip())
for idx, lines in enumerate(zip(ss1.splitlines(), ss2.splitlines())):
L1, L2 = lines
assert L1 == L2, "\n%s\n != \n%s\nat line %d: '%s' != '%s'" % (ss1, ss2, idx, L1, L2)
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,706
|
certik/fwrap
|
refs/heads/master
|
/fwrap/version.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
import os
from os import path
# Set isrelease = True for release version.
isrelease = False
base_version = "0.2.0"
def get_version():
if isrelease: return base_version
from subprocess import Popen, PIPE
git_dir = path.join(path.dirname(path.dirname(__file__)), '.git')
cmd = "git --git-dir=%s rev-parse --short HEAD" % git_dir
try:
pp = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)
pp.wait()
global_id = pp.stdout.read().strip()
err_txt = pp.stderr.read()
except OSError:
global_id = "unknown"
return "%s_dev%s" % (base_version, global_id)
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,707
|
certik/fwrap
|
refs/heads/master
|
/fwrap/constants.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
PROC_SUFFIX_TMPL = "%s_c"
KTP_MOD_NAME = "fwrap_ktp_mod"
KTP_MOD_SRC = "%s.f90" % KTP_MOD_NAME
KTP_HEADER_SRC = "fwrap_ktp_header.h"
KTP_PXD_HEADER_SRC = "fwrap_ktp.pxd"
FC_HDR_TMPL = "%s_fc.h"
FC_PXD_TMPL = "%s_fc.pxd"
FC_F_TMPL = "%s_fc.f90"
CY_PXD_TMPL = "%s.pxd"
CY_PYX_TMPL = "%s.pyx"
GENCONFIG_SRC = "genconfig.f90"
TYPE_SPECS_SRC = "fwrap_type_specs.in"
MAP_SRC = "fwrap_type_map.out"
RETURN_ARG_NAME = "fw_ret_arg"
ERR_NAME = "fw_iserr__"
ERRSTR_NAME = "fw_errstr__"
ERRSTR_LEN = "fw_errstr_len"
FORT_MAX_ARG_NAME_LEN = 63
ERR_CODES = {
ERRSTR_LEN : FORT_MAX_ARG_NAME_LEN,
'FW_NO_ERR__' : 0,
'FW_INIT_ERR__' : -1,
'FW_CHAR_SIZE__' : 1,
'FW_ARR_DIM__' : 2,
}
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,708
|
certik/fwrap
|
refs/heads/master
|
/fwrap/tests/test_gen_config.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
from fwrap import pyf_iface
from fwrap import gen_config as gc
from fwrap.code import CodeBuffer
from nose.tools import assert_raises, ok_, eq_, set_trace
from tutils import compare
def mock_f2c_types(ctps, *args):
mp = {'fwrap_default_integer' : 'c_int',
'fwrap_default_real' : 'c_float',
'fwrap_default_logical' : 'c_int',
'fwrap_default_complex' : 'c_float_complex',
'fwrap_default_character' : 'c_char'
}
for ctp in ctps:
ctp.fc_type = mp[ctp.fwrap_name]
class test_genconfig(object):
def setup(self):
self.ctps = [
gc.ConfigTypeParam(basetype="integer",
odecl="integer(kind=kind(0))",
fwrap_name="fwrap_default_integer",
npy_enum="fwrap_default_integer_enum"),
gc.ConfigTypeParam(basetype="real",
odecl="real(kind=kind(0.0))",
fwrap_name="fwrap_default_real",
npy_enum="fwrap_default_real_enum"),
gc.ConfigTypeParam(basetype="logical",
odecl="logical(kind=kind(.true.))",
fwrap_name="fwrap_default_logical",
npy_enum="fwrap_default_logical_enum"),
gc.ConfigTypeParam(basetype="complex",
odecl="complex(kind=kind((0.0,0.0)))",
fwrap_name="fwrap_default_complex",
npy_enum="fwrap_default_complex_enum"),
gc.ConfigTypeParam(basetype="character",
odecl="character(kind=kind('a'))",
fwrap_name="fwrap_default_character",
npy_enum="fwrap_default_character_enum")
]
self.int, self.real, self.log, self.cmplx, self.char = self.ctps
mock_f2c_types(self.ctps)
def test_gen_f_mod(self):
eq_(self.int.gen_f_mod(),
['integer, parameter :: fwrap_default_integer = c_int'])
eq_(self.cmplx.gen_f_mod(),
['integer, parameter :: '
'fwrap_default_complex = c_float_complex'])
def test_gen_header(self):
eq_(self.int.gen_c_typedef(), ['typedef int fwrap_default_integer;'])
eq_(self.cmplx.gen_c_typedef(), ['typedef float _Complex fwrap_default_complex;'])
def test_gen_pxd(self):
eq_(self.int.gen_pxd_extern_typedef(),
['ctypedef int fwrap_default_integer'])
eq_(self.cmplx.gen_pxd_extern_typedef(), [])
eq_(self.int.gen_pxd_intern_typedef(), [])
eq_(self.cmplx.gen_pxd_intern_typedef(),
['ctypedef float complex fwrap_default_complex'])
eq_(self.int.gen_pxd_extern_extra(), [])
eq_(self.cmplx.gen_pxd_extern_extra(), [])
def test_gen_type_spec(self):
def _compare(ctp_dict, ctp):
cd = ctp_dict
x_ = gc.ConfigTypeParam(cd['basetype'],
cd['odecl'], cd['fwrap_name'], cd['npy_enum'])
eq_(x_,y)
from cPickle import loads
buf = CodeBuffer()
gc._generate_type_specs(self.ctps[:2], buf)
ctps = loads(buf.getvalue())
for x,y in zip(ctps, self.ctps[:2]):
_compare(x,y)
buf = CodeBuffer()
gc._generate_type_specs(self.ctps[2:], buf)
ctps = loads(buf.getvalue())
for x,y in zip(ctps, self.ctps[2:]):
_compare(x,y)
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,709
|
certik/fwrap
|
refs/heads/master
|
/fwrap/intrinsics.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
intrinsics = """\
ABS
ACHAR
ACOS
ADJUSTL
ADJUSTR
AIMAG
AINT
ALL
ALLOCATED
ANINT
ANY
ASIN
ASSOCIATED
ATAN
ATAN2
BIT_SIZE
BTEST
CEILING
CHAR
CHARACTER
CMPLX
COMMAND_ARGUMENT_COUNT
COMPLEX
CONJG
COS
COSH
COUNT
CPU_TIME
CSHIFT
DATE_AND_TIME
DBLE
DIGITS
DIM
DOT_PRODUCT
DPROD
EOSHIFT
EPSILON
EXP
EXPONENT
EXTENDS
FLOOR
FRACTION
GET_COMMAND
GET_COMMAND_ARGUMENT
GET_ENVIRONMENT_VARIABLE
HUGE
IACHAR
IAND
IBCLR
IBITS
IBSET
ICHAR
IEOR
INDEX
INT
INTEGER
IOR
ISHFT
ISHFTC
IS_IOSTAT_END
IS_IOSTAT_EOR
KIND
LBOUND
LEN
LEN_TRIM
LGE
LGT
LLE
LLT
LOG
LOG10
LOGICAL
MATMUL
MAX
MAXEXPONENT
MAXLOC
MAXVAL
MERGE
MIN
MINEXPONENT
MINLOC
MINVAL
MOD
MODULO
MOVE_ALLOC
MVBITS
NEAREST
NEW_LINE
NINT
NOT
NULL
PACK
PRECISION
PRESENT
PRODUCT
RADIX
RANDOM_NUMBER
RANDOM_SEED
RANGE
REAL
REPEAT
RESHAPE
RRSPACING
SAME_TYPE_AS
SCALE
SCAN
SELECTED_CHAR_KIND
SELECTED_INT_KIND
SELECTED_REAL_KIND
SET
SHAPE
SIGN
SIN
SINH
SIZE
SPACING
SPREAD
SQRT
SUM
SYSTEM_CLOCK
TAN
TANH
TINY
TRANSFER
TRANSPOSE
TRIM
UBOUND
UNPACK
VERIFY\
"""
intrinsics = set(intrinsics.split('\n'))
intrinsics = set([intrins.lower() for intrins in intrinsics])
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,710
|
certik/fwrap
|
refs/heads/master
|
/fwrap/tests/test_parameters.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
from fwrap import pyf_iface as pyf
from nose.tools import eq_, ok_
class test_parameter(object):
def setup(self):
self.lit_int = pyf.Parameter('int_param',
dtype=pyf.default_integer, expr="10+20")
self.var_param = pyf.Parameter("var_param",
dtype=pyf.default_integer, expr="int_param + 30")
self.call_func = pyf.Parameter("call_func",
dtype=pyf.default_integer, expr="selected_int_kind(10)")
def test_depends(self):
eq_(self.lit_int.depends(), set())
eq_(self.var_param.depends(), set(["int_param"]))
eq_(self.call_func.depends(), set([]))
class test_proc_params(object):
def setup(self):
self.lit_int = pyf.Parameter("lit_int",
dtype=pyf.default_integer, expr="30-10")
self.sik = pyf.Parameter("sik_10",
dtype=pyf.default_integer, expr="selected_int_kind(10)")
self.srk = pyf.Parameter(
"srk_10_20",
dtype=pyf.default_integer,
expr="selected_real_kind(10, lit_int)")
srk_real = pyf.RealType("srk_10_20", kind="srk_10_20")
self.real_arg = pyf.Argument("real_arg",
dtype=srk_real, intent='inout')
sik_int = pyf.IntegerType("sik_10", kind="sik_10")
self.int_arg = pyf.Argument("int_arg",
dtype=sik_int, dimension=[("lit_int",)])
subr = pyf.Subroutine(
name="subr",
args=[self.real_arg, self.int_arg],
params=[self.lit_int, self.sik, self.srk])
self.ret_arg = pyf.Argument("func", dtype=srk_real)
func = pyf.Function(
name="func",
args=[self.real_arg, self.int_arg],
params=[self.lit_int, self.sik, self.srk],
return_arg=self.ret_arg)
self.args = [self.real_arg, self.int_arg]
self.params = [self.lit_int, self.sik, self.srk]
def test_arg_man(self):
sub_am = pyf.ArgManager(args=self.args, params=self.params)
func_am = pyf.ArgManager(args=self.args,
return_arg=self.ret_arg, params=self.params)
eq_(sub_am.arg_declarations(),
['integer(kind=fwi_integer_t), '
'parameter :: lit_int = 30-10',
'integer(kind=fwi_integer_t), '
'parameter :: sik_10 = selected_int_kind(10)',
'integer(kind=fwi_integer_t), '
'parameter :: srk_10_20 = '
'selected_real_kind(10, lit_int)',
'real(kind=fwr_srk_10_20_t), '
'intent(inout) :: real_arg',
'integer(kind=fwi_sik_10_t), '
'dimension(lit_int) :: int_arg'])
def test_unneeded_param(self):
unp = pyf.Parameter("unneeded", dtype=pyf.default_integer, expr="srk_10_20 + lit_int")
sub_am = pyf.ArgManager(args=self.args, params=self.params+[unp])
eq_(sub_am.arg_declarations(),
['integer(kind=fwi_integer_t), '
'parameter :: lit_int = 30-10',
'integer(kind=fwi_integer_t), '
'parameter :: sik_10 = selected_int_kind(10)',
'integer(kind=fwi_integer_t), '
'parameter :: srk_10_20 = '
'selected_real_kind(10, lit_int)',
'real(kind=fwr_srk_10_20_t), '
'intent(inout) :: real_arg',
'integer(kind=fwi_sik_10_t), '
'dimension(lit_int) :: int_arg'])
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,711
|
certik/fwrap
|
refs/heads/master
|
/setup.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
import os
from distutils.core import setup
scripts = []
if os.name == 'posix':
scripts = ['bin/fwrapc']
else:
scripts = ['fwrapc.py']
from fwrap.version import get_version
setup(name="fwrap",
version=get_version(),
description="Tool to wrap Fortran 77/90/95 code in C, Cython & Python.",
author="Kurt W. Smith & contributors",
author_email="kwmsmith@gmail.com",
url="http://fwrap.sourceforge.net/",
packages=[
"fwrap",
"fwrap.fparser"
],
package_data = {
"fwrap" : ["default.config", "log.config"],
"fwrap.fparser" : ["log.config"],
},
scripts=scripts,
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Fortran",
"Programming Language :: C",
"Programming Language :: Cython",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Libraries :: Python Modules"
],
)
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,712
|
certik/fwrap
|
refs/heads/master
|
/fwrap/tests/test_code.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
from fwrap import code
from nose.tools import ok_, eq_, set_trace
import sys
from pprint import pprint
# 1) Any non-comment line can be broken anywhere -- in the middle of words,
# etc.
# 2) Comments are stripped out of the source and are to be ignored in reflowing
# text.
def test_breakup():
line = ("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa "
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
for chunk in (1, 2, 3, 5, 10, 20, 50, len(line)):
yield breakup_gen, line, chunk
def breakup_gen(line, chunk):
ret = code.reflow_line(line, 0, chunk)
eq_(simple_break(line, chunk), ret)
for part in ret[1:-1]:
eq_(len(part), chunk+2)
if len(ret) == 1:
eq_(len(ret[0]), len(line))
else:
eq_(len(ret[0]), chunk+1)
ok_(len(ret[-1]) <= chunk+1)
orig = ''.join(ret)
orig = orig.replace('&', '')
eq_(orig, line)
def simple_break(text, chunk):
i = 0
test_ret = []
while True:
test_ret.append('&'+text[i*chunk:(i+1)*chunk]+'&')
if (i+1)*chunk >= len(text):
break
i += 1
test_ret[0] = test_ret[0][1:]
test_ret[-1] = test_ret[-1][:-1]
# set_trace()
return test_ret
def test_nobreak():
line = ("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
ret = code.reflow_line(line, 0, 100)
eq_(ret, [line])
def test_indent():
line = "12345678901234567890"
ret = code.reflow_line(line, 1, 100)
eq_(ret, [code.INDENT+line])
ret = code.reflow_line(line, 1, 10)
eq_(ret, [code.INDENT+line[:8]+'&',
code.INDENT+'&'+line[8:16]+'&',
code.INDENT+'&'+line[16:]])
def test_reflow():
reflow_src = ("subroutine many_args(a0, a1, a2, a3, a4, a5, a6, a7, a8, "
"a9, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30"
", a31, a32, a33, a34, a35, a36, a37, a38, a39, a40, a41, "
"a42, a43, a44, a45, a46, a47, a48, a49)\n"
" implicit none\n"
" integer, intent(in) :: a0, a1, a2, a3, a4, a5, a6, "
"a7, a8, a9, a20, a21, a22, a23, a24, a25, a26, a27, a28, "
"a29, a30, a31, a32, a33, a34, a35, a36, a37, a38, a39, "
"a40, a41, a42, a43, a44, a45, a46, a47, a48, a49\n"
"end subroutine many_args")
buf = code.CodeBuffer()
buf.putline(code.reflow_fort(reflow_src))
for line in buf.getvalue().splitlines():
ok_(len(line) <= 79, "len('%s') > 79" % line)
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,713
|
certik/fwrap
|
refs/heads/master
|
/fwrap/fwrap_parse.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
from fwrap import pyf_iface as pyf
from fparser import api
def generate_ast(fsrcs):
    """Parse the Fortran source files *fsrcs* into a list of pyf nodes.

    Each top-level procedure becomes a pyf.Subroutine or pyf.Function
    (functions additionally carry a return argument).

    Raises RuntimeError for any top-level construct that is not a
    subroutine or function (modules etc. are unsupported here).
    """
    ast = []
    for src in fsrcs:
        # analyze=True makes fparser resolve variables/typedecls, which
        # _get_args/_get_params rely on below.
        block = api.parse(src, analyze=True)
        tree = block.content
        for proc in tree:
            if not is_proc(proc):
                raise RuntimeError(
                        "unsupported Fortran construct %r." % proc)
            args = _get_args(proc)
            params = _get_params(proc)
            if proc.blocktype == 'subroutine':
                ast.append(pyf.Subroutine(
                                name=proc.name,
                                args=args,
                                params=params))
            elif proc.blocktype == 'function':
                ast.append(pyf.Function(
                                name=proc.name,
                                args=args,
                                params=params,
                                return_arg=_get_ret_arg(proc)))
    return ast
def is_proc(proc):
    """Return True when *proc* is a wrappable procedure block."""
    return proc.blocktype == 'subroutine' or proc.blocktype == 'function'
def _get_ret_arg(proc):
    """Build the return-value Argument for a Fortran function.

    Converts the function's result variable via _get_arg and clears the
    intent, since a return value has no intent attribute.
    """
    ret_var = proc.get_variable(proc.result)
    ret_arg = _get_arg(ret_var)
    ret_arg.intent = None
    return ret_arg
def _get_param(p_param):
    """Convert an fparser PARAMETER variable into a pyf.Parameter.

    Raises ValueError when *p_param* is not a parameter or lacks an
    initialization expression; RuntimeError for array or derived-type
    parameters, which are unsupported.
    """
    if not p_param.is_parameter():
        raise ValueError("argument %r is not a parameter" % p_param)
    if not p_param.init:
        raise ValueError("parameter %r does not have an initialization "
                         "expression." % p_param)
    p_typedecl = p_param.get_typedecl()
    dtype = _get_dtype(p_typedecl)
    # BUG FIX: the original also computed _get_intent(p_param) here and
    # discarded the result; the call is side-effect free, so it is removed.
    if not p_param.is_scalar():
        raise RuntimeError("do not support array or derived-type "
                           "parameters at the moment...")
    return pyf.Parameter(name=p_param.name, dtype=dtype, expr=p_param.init)
def _get_arg(p_arg):
    """Convert an fparser variable into a pyf.Argument (scalar or array)."""
    dtype = _get_dtype(p_arg.get_typedecl())
    name = p_arg.name
    intent = _get_intent(p_arg)
    if p_arg.is_scalar():
        return pyf.Argument(name=name, dtype=dtype, intent=intent)
    if p_arg.is_array():
        dimspec = pyf.Dimension(p_arg.get_array_spec())
        return pyf.Argument(name=name, dtype=dtype,
                            intent=intent, dimension=dimspec)
    # Neither scalar nor array: most likely a derived type, which fwrap
    # cannot wrap.
    raise RuntimeError(
            "argument %s is neither "
            "a scalar or an array (derived type?)" % p_arg)
def _get_args(proc):
    """Return pyf.Argument objects for every dummy argument of *proc*, in order."""
    return [_get_arg(proc.get_variable(argname)) for argname in proc.args]
def _get_params(proc):
    """Collect pyf.Parameter objects for all PARAMETER variables of *proc*."""
    variables = proc.a.variables
    return [_get_param(variables[varname])
            for varname in variables
            if variables[varname].is_parameter()]
def _get_intent(arg):
intents = []
if not arg.intent:
intents.append("inout")
else:
if arg.is_intent_in():
intents.append("in")
if arg.is_intent_inout():
intents.append("inout")
if arg.is_intent_out():
intents.append("out")
if not intents:
raise RuntimeError("argument has no intent specified, '%s'" % arg)
if len(intents) > 1:
raise RuntimeError(
"argument has multiple "
"intents specified, '%s', %s" % (arg, intents))
return intents[0]
# Fortran intrinsic type name -> pyf dtype INSTANCE used when the
# declaration specifies neither kind nor length.
name2default = {
    'integer' : pyf.default_integer,
    'real'    : pyf.default_real,
    'doubleprecision' : pyf.default_dbl,
    'complex' : pyf.default_complex,
    'character' : pyf.default_character,
    'logical' : pyf.default_logical,
    }
# Fortran intrinsic type name -> pyf dtype CLASS instantiated when an
# explicit kind or length selector must be honored.  'doubleprecision'
# has no entry here; _get_dtype handles it specially.
name2type = {
    'integer' : pyf.IntegerType,
    'real' : pyf.RealType,
    'complex' : pyf.ComplexType,
    'character' : pyf.CharacterType,
    'logical' : pyf.LogicalType,
    }
def _get_dtype(typedecl):
    """Map an fparser intrinsic type declaration to a pyf dtype.

    Handles the four selector cases: no selector (defaults), character
    with length and/or kind, length-only, and kind-only; double
    precision with an explicit kind collapses to the default double.
    Raises RuntimeError for non-intrinsic types, for length+kind on
    non-character types, and for non-integer-constant kinds.
    """
    if not typedecl.is_intrinsic():
        raise RuntimeError(
                "only intrinsic types supported ATM... [%s]" % str(typedecl))
    length, kind = typedecl.selector
    if not (kind or length):
        # Plain declaration: use the compiler-default dtype instance.
        return name2default[typedecl.name]
    if length and kind and typedecl.name != 'character':
        raise RuntimeError("both length and kind specified for "
                               "non-character intrinsic type: "
                               "length: %s kind: %s" % (length, kind))
    if typedecl.name == 'character':
        # '*' length produces the generic character ktp name.
        fw_ktp = ('%s_xX' % typedecl.name if length == '*'
                  else '%s_x%s' % (typedecl.name, length))
        return pyf.CharacterType(fw_ktp=fw_ktp,
                        len=length, kind=kind)
    if length and not kind:
        return name2type[typedecl.name](fw_ktp="%s_x%s" %
                (typedecl.name, length),
                length=length)
    try:
        int(kind)
    except ValueError:
        raise RuntimeError(
                "only integer constant kind "
                "parameters supported ATM, given '%s'" % kind)
    if typedecl.name == 'doubleprecision':
        return pyf.default_dbl
    return name2type[typedecl.name](fw_ktp="%s_%s" %
            (typedecl.name, kind), kind=kind)
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,714
|
certik/fwrap
|
refs/heads/master
|
/fwrap/fwrap_setup.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
# The monkey patches, method overrides, etc. in this file are here to patch
# numpy.distutils for fwrap's purposes.
import os, sys
import tempfile
import logging
from fwrap import gen_config as gc
from numpy.distutils import exec_command as np_exec_command
orig_exec_command = np_exec_command.exec_command
def fw_exec_command( command,
                     execute_in='', use_shell=None, use_tee = None,
                     _with_python = 1,
                     **env ):
    """Delegate to numpy.distutils' original exec_command with use_tee
    forced to 0, so fwrap alone controls what reaches the console."""
    return orig_exec_command(
        command,
        execute_in=execute_in,
        use_shell=use_shell,
        use_tee=0,  # fwrap manages output; never tee to the console
        _with_python=_with_python,
        **env)
# Monkey-patch every copy of exec_command that numpy.distutils modules
# have already bound, so all compile/link invocations route through
# fw_exec_command (and thus have their console tee-ing suppressed).
np_exec_command.exec_command = fw_exec_command
from numpy.distutils import ccompiler
ccompiler.exec_command = fw_exec_command
from numpy.distutils import unixccompiler
unixccompiler.exec_command = fw_exec_command
from numpy.distutils.core import DistutilsError
from numpy.distutils.command.config import config as np_config, old_config
from numpy.distutils.command.build_src import build_src as np_build_src
from numpy.distutils.command.build_ext import build_ext as np_build_ext
from numpy.distutils.command.scons import scons as npscons
from Cython.Distutils import build_ext as cy_build_ext
from numpy.distutils.core import setup as np_setup
def setup(log='fwrap_setup.log', *args, **kwargs):
    """Run numpy.distutils' setup(), optionally capturing all of its
    stdout/stderr output into the file named by *log*.

    When *log* is falsy, setup() runs with the streams untouched.
    """
    if not log:
        np_setup(*args, **kwargs)
        return
    saved_out, saved_err = sys.stdout, sys.stderr
    log_file = open(log, 'w')
    sys.stdout = log_file
    sys.stderr = log_file
    try:
        np_setup(*args, **kwargs)
    finally:
        # Always close the log and restore the real streams, even when
        # the build raises.
        log_file.flush()
        log_file.close()
        sys.stdout, sys.stderr = saved_out, saved_err
def configuration(projname, extra_sources=None, **kw):
    """Return a numpy.distutils configuration factory for *projname*.

    The produced Configuration's extension sources are: a build-time
    type-config generator step, any *extra_sources*, the generated
    '<projname>_fc.f90' Fortran wrapper, and the '<projname>.pyx' Cython
    wrapper.  Extra keyword args go to Configuration.add_extension().
    """
    def _configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration(None, parent_package, top_path)
        def generate_type_config(ext, build_dir):
            # Runs during build_src: probes the compilers and writes the
            # kind-type-parameter support files.
            config_cmd = config.get_config_cmd()
            return gen_type_map_files(config_cmd)
        sources = [generate_type_config] + \
                (extra_sources or []) + \
                [ '%s_fc.f90' % projname,
                  '%s.pyx' % projname,]
        config.add_extension(projname, sources=sources, **kw)
        return config
    return _configuration
def gen_type_map_files(config_cmd):
    """Resolve kind-type-parameters and write the ktp support files.

    Reads 'fwrap_type_specs.in', probes the compilers via *config_cmd*,
    then writes the Fortran ktp module, C header, and Cython pxd/pxi.
    Returns the generated Fortran module source name so it can be added
    to the extension's source list.
    """
    ctps = gc.read_type_spec('fwrap_type_specs.in')
    find_types(ctps, config_cmd)
    gc.write_f_mod('fwrap_ktp_mod.f90', ctps)
    gc.write_header('fwrap_ktp_header.h', ctps)
    gc.write_pxd('fwrap_ktp.pxd', 'fwrap_ktp_header.h', ctps)
    gc.write_pxi('fwrap_ktp.pxi', ctps)
    return 'fwrap_ktp_mod.f90'
def find_types(ctps, config_cmd):
    """Resolve and store the interoperable fc_type for every entry of
    *ctps* in place, raising RuntimeError when none can be found."""
    for ctp in ctps:
        if ctp.lang == 'fortran':
            resolved = find_fc_type(ctp.basetype,
                        ctp.odecl, config_cmd)
        elif ctp.lang == 'c':
            resolved = find_c_type(ctp, config_cmd)
        else:
            # Unknown language: fall through to the error below.
            resolved = None
        if not resolved:
            raise RuntimeError(
                    "unable to find C type for type %s" % ctp.odecl)
        ctp.fc_type = resolved
def find_c_type(ctp, config_cmd):
    """Determine the Fortran ISO-C kind matching a C typedef.

    Only integer base types are supported.  Uses check_type_size()
    against the Python/numpy headers to find which basic C integer type
    has the same size as *ctp.odecl*, then maps that C type onto its
    iso_c_binding kind through gc.c2f.
    """
    import numpy
    from distutils.sysconfig import get_python_inc
    if ctp.lang != 'c':
        raise ValueError("wrong language, given %s, expected 'c'" % ctp.lang)
    if ctp.basetype != 'integer':
        raise ValueError(
                "only integer basetype supported for C type discovery.")
    basetypes = ('signed char', 'short int',
                 'int', 'long int', 'long long int')
    expected = ['sizeof(%s)' % basetype for basetype in basetypes]
    # NOTE(review): the return value of check_type_size is used directly
    # as a key of the expected->basetype map below — verify this matches
    # numpy.distutils' check_type_size semantics for the installed version.
    result = config_cmd.check_type_size(type_name=ctp.odecl,
                        headers=['Python.h', 'numpy/arrayobject.h'],
                        include_dirs=[get_python_inc(), numpy.get_include()],
                        expected=expected)
    c_type = dict(zip(expected, basetypes))[result]
    return gc.c2f[c_type]
# Memo of previously probed (base_type, decl) -> iso_c_binding kind.
fc_type_memo = {}
def find_fc_type(base_type, decl, config_cmd):
    """Return the iso_c_binding kind interoperable with *decl*.

    Probes the Fortran compiler by try_compile()-ing fsrc_tmpl once per
    candidate kind from gc.type_dict; results (including the '' failure
    marker) are memoized in fc_type_memo.
    """
    res = fc_type_memo.get((base_type, decl), None)
    if res is not None:
        return res
    #XXX: test to see if it works for gfortran
    if base_type == 'logical':
        # Logicals interoperate as integers of the same kind.
        base_type = 'integer'
        decl = decl.replace('logical', 'integer')
    for ctype in gc.type_dict[base_type]:
        test_decl = '%s(kind=%s)' % (base_type, ctype)
        fsrc = fsrc_tmpl % {'TYPE_DECL' : decl,
                            'TEST_DECL' : test_decl}
        # BUG FIX: removed a stray debug ``print fsrc`` that dumped every
        # probe program to stdout; route it through logging instead.
        logging.debug(fsrc)
        if config_cmd.try_compile(body=fsrc, lang='f90'):
            res = ctype
            break
    else:
        # No candidate kind compiled: signal failure with ''.
        res = ''
    fc_type_memo[base_type, decl] = res
    return res
fsrc_tmpl = '''
subroutine outer(a)
use, intrinsic :: iso_c_binding
implicit none
%(TEST_DECL)s, intent(inout) :: a
interface
subroutine inner(a)
use, intrinsic :: iso_c_binding
implicit none
%(TYPE_DECL)s, intent(inout) :: a
end subroutine inner
end interface
call inner(a)
end subroutine outer
'''
class fw_build_ext(np_build_ext):
    """build_ext override adapted from numpy.distutils.

    The key deviation from upstream (see the commented block near the
    linker selection) is that the Fortran compiler is never used as the
    linker; fwrap links with the C or C++ compiler instead.
    """
    def build_extension(self, ext):
        # Compile one extension: C, C++, Fortran 77/90 and F90-module
        # sources, then link everything into the shared object.
        from numpy.distutils.command.build_ext import (is_sequence,
                newer_group, log, filter_sources, get_numpy_include_dirs)
        sources = ext.sources
        if sources is None or not is_sequence(sources):
            # NOTE(review): DistutilsSetupError is not imported in this
            # module — raising here would itself be a NameError; confirm.
            raise DistutilsSetupError(
                ("in 'ext_modules' option (extension '%s'), " +
                 "'sources' must be present and must be " +
                 "a list of source filenames") % ext.name)
        sources = list(sources)
        if not sources:
            return
        fullname = self.get_ext_fullname(ext.name)
        if self.inplace:
            # Build directly next to the package sources.
            modpath = fullname.split('.')
            package = '.'.join(modpath[0:-1])
            base = modpath[-1]
            build_py = self.get_finalized_command('build_py')
            package_dir = build_py.get_package_dir(package)
            ext_filename = os.path.join(package_dir,
                                        self.get_ext_filename(base))
        else:
            ext_filename = os.path.join(self.build_lib,
                                        self.get_ext_filename(fullname))
        depends = sources + ext.depends
        if not (self.force or newer_group(depends, ext_filename, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)
        extra_args = ext.extra_compile_args or []
        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            # 1-tuples in the macro list mark undefines for the compiler.
            macros.append((undef,))
        c_sources, cxx_sources, f_sources, fmodule_sources = \
                filter_sources(ext.sources)
        if self.compiler.compiler_type=='msvc':
            if cxx_sources:
                # Needed to compile kiva.agg._agg extension.
                extra_args.append('/Zm1000')
            # this hack works around the msvc compiler attributes
            # problem, msvc uses its own convention :(
            c_sources += cxx_sources
            cxx_sources = []
        # Set Fortran/C++ compilers for compilation and linking.
        if ext.language=='f90':
            fcompiler = self._f90_compiler
        elif ext.language=='f77':
            fcompiler = self._f77_compiler
        else: # in case ext.language is c++, for instance
            fcompiler = self._f90_compiler or self._f77_compiler
        cxx_compiler = self._cxx_compiler
        # check for the availability of required compilers
        if cxx_sources and cxx_compiler is None:
            raise DistutilsError, "extension %r has C++ sources" \
                  "but no C++ compiler found" % (ext.name)
        if (f_sources or fmodule_sources) and fcompiler is None:
            raise DistutilsError, "extension %r has Fortran sources " \
                  "but no Fortran compiler found" % (ext.name)
        if ext.language in ['f77','f90'] and fcompiler is None:
            self.warn("extension %r has Fortran libraries " \
                      "but no Fortran linker "
                      "found, using default linker" % (ext.name))
        if ext.language=='c++' and cxx_compiler is None:
            self.warn("extension %r has C++ libraries " \
                      "but no C++ linker "
                      "found, using default linker" % (ext.name))
        kws = {'depends':ext.depends}
        output_dir = self.build_temp
        include_dirs = ext.include_dirs + get_numpy_include_dirs()
        c_objects = []
        if c_sources:
            log.info("compiling C sources")
            c_objects = self.compiler.compile(c_sources,
                                              output_dir=output_dir,
                                              macros=macros,
                                              include_dirs=include_dirs,
                                              debug=self.debug,
                                              extra_postargs=extra_args,
                                              **kws)
        if cxx_sources:
            log.info("compiling C++ sources")
            c_objects += cxx_compiler.compile(cxx_sources,
                                              output_dir=output_dir,
                                              macros=macros,
                                              include_dirs=include_dirs,
                                              debug=self.debug,
                                              extra_postargs=extra_args,
                                              **kws)
        extra_postargs = []
        f_objects = []
        if fmodule_sources:
            log.info("compiling Fortran 90 module sources")
            module_dirs = ext.module_dirs[:]
            module_build_dir = os.path.join(
                self.build_temp,os.path.dirname(
                    self.get_ext_filename(fullname)))
            self.mkpath(module_build_dir)
            if fcompiler.module_dir_switch is None:
                # Compiler drops .mod files in the cwd; remember what was
                # there so freshly generated ones can be moved afterwards.
                # NOTE(review): ``glob`` is not imported at module level
                # here — likely a latent NameError; confirm.
                existing_modules = glob('*.mod')
            extra_postargs += fcompiler.module_options(
                module_dirs,module_build_dir)
            #-----------------------------------------------------------------
            #XXX: hack, but the same can be said for this ENTIRE MODULE!
            # since fwrap only works with F90 compilers, fcompiler.compiler_f77
            # is None, so we replace it with fcompiler.compiler_fix, which is
            # an F90 compiler.
            #-----------------------------------------------------------------
            if fcompiler.compiler_f77 is None:
                fcompiler.compiler_f77 = fcompiler.compiler_fix
            f_objects += fcompiler.compile(fmodule_sources,
                                           output_dir=self.build_temp,
                                           macros=macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_postargs,
                                           depends=ext.depends)
            if fcompiler.module_dir_switch is None:
                # Relocate newly produced .mod files into the build tree.
                for f in glob('*.mod'):
                    if f in existing_modules:
                        continue
                    t = os.path.join(module_build_dir, f)
                    if os.path.abspath(f)==os.path.abspath(t):
                        continue
                    if os.path.isfile(t):
                        os.remove(t)
                    try:
                        self.move_file(f, module_build_dir)
                    except DistutilsFileError:
                        # NOTE(review): DistutilsFileError is also not
                        # imported in this module — confirm.
                        log.warn('failed to move %r to %r' %
                                 (f, module_build_dir))
        if f_sources:
            log.info("compiling Fortran sources")
            f_objects += fcompiler.compile(f_sources,
                                           output_dir=self.build_temp,
                                           macros=macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_postargs,
                                           depends=ext.depends)
        objects = c_objects + f_objects
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        libraries = self.get_libraries(ext)[:]
        library_dirs = ext.library_dirs[:]
        linker = self.compiler.link_shared_object
        # Always use system linker when using MSVC compiler.
        if self.compiler.compiler_type=='msvc':
            # expand libraries with fcompiler libraries as we are
            # not using fcompiler linker
            self._libs_with_msvc_and_fortran(fcompiler,
                                             libraries, library_dirs)
        # Deliberately disabled relative to upstream numpy.distutils:
        # fwrap never links with the Fortran compiler.
        # elif ext.language in ['f77','f90'] and fcompiler is not None:
            # linker = fcompiler.link_shared_object
        if ext.language=='c++' and cxx_compiler is not None:
            linker = cxx_compiler.link_shared_object
        if sys.version[:3]>='2.3':
            kws = {'target_lang':ext.language}
        else:
            kws = {}
        linker(objects, ext_filename,
               libraries=libraries,
               library_dirs=library_dirs,
               runtime_library_dirs=ext.runtime_library_dirs,
               extra_postargs=extra_args,
               export_symbols=self.get_export_symbols(ext),
               debug=self.debug,
               build_temp=self.build_temp,**kws)
class fw_build_src(np_build_src):
    """build_src that compiles .pyx files with Cython and bypasses f2py."""

    def pyrex_sources(self, sources, extension):
        # Delegate Cython compilation to Cython.Distutils' build_ext.
        cython_cmd = cy_build_ext(self.distribution)
        cython_cmd.finalize_options()
        return cython_cmd.cython_sources(sources, extension)

    def f2py_sources(self, sources, extension):
        # fwrap generates its own wrappers, so f2py must not run;
        # return the sources untouched.
        return sources
class fw_config(np_config):
    """config command that insists on an F90-capable Fortran compiler."""
    def _check_compiler(self):
        # Let plain distutils configure the C compiler first.
        old_config._check_compiler(self)
        from numpy.distutils.fcompiler import FCompiler, new_fcompiler
        if not isinstance(self.fcompiler, FCompiler):
            # requiref90=True: the fwrap-generated wrappers are Fortran 90,
            # so an F77-only compiler is useless here.
            self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                           dry_run=self.dry_run,
                                           force=1,
                                           requiref90=True,
                                           c_compiler=self.compiler)
            if self.fcompiler is not None:
                self.fcompiler.customize(self.distribution)
                if self.fcompiler.get_version():
                    self.fcompiler.customize_cmd(self)
                    self.fcompiler.show_customization()
                else:
                    # A compiler object without a version is not usable.
                    self.warn('f90_compiler=%s is not available.' %
                              self.fcompiler.compiler_type)
                    self.fcompiler = None
class _dummy_scons(npscons):
    """No-op replacement for numpy.distutils' scons command; fwrap drives
    the build itself, so the scons step must do nothing."""
    def run(self):
        pass
# distutils command overrides installed by fwrap-generated setup.py files
# (passed as cmdclass= to setup() in generate_setup's template).
fwrap_cmdclass = {'config' : fw_config,
                  'build_src' : fw_build_src,
                  'build_ext' : fw_build_ext,
                  'scons' : _dummy_scons}
def show_fcompilers(dist=None):
    """Print list of available compilers (used by the "--help-fcompiler"
    option to "config_fc").

    Returns a dict mapping compiler name -> customized fcompiler instance
    for every compiler found on this platform.  Adapted from
    numpy.distutils.fcompiler.show_fcompilers; the pretty-printing part
    is disabled (see the commented-out code below).
    """
    from numpy.distutils import fcompiler, log
    if dist is None:
        # Build a throw-away Distribution mirroring the current command line.
        from distutils.dist import Distribution
        from numpy.distutils.command.config_compiler import config_fc
        dist = Distribution()
        dist.script_name = os.path.basename(sys.argv[0])
        dist.script_args = ['config_fc'] + sys.argv[1:]
        try:
            dist.script_args.remove('--help-fcompiler')
        except ValueError:
            pass
        dist.cmdclass['config_fc'] = config_fc
        dist.parse_config_files()
        dist.parse_command_line()
    compilers = {}
    compilers_na = []
    compilers_ni = []
    if not fcompiler.fcompiler_class:
        fcompiler.load_all_fcompiler_classes()
    platform_compilers = fcompiler.available_fcompilers_for_platform()
    for compiler in platform_compilers:
        v = None
        log.set_verbosity(-2)
        try:
            c = fcompiler.new_fcompiler(compiler=compiler, verbose=dist.verbose)
            c.customize(dist)
            v = c.get_version()
        # Python 2 except syntax: the compiler class exists but cannot be
        # instantiated/located on this machine.
        except (fcompiler.DistutilsModuleError, fcompiler.CompilerNotFound), e:
            log.debug("show_fcompilers: %s not found" % (compiler,))
            log.debug(repr(e))
        if v is None:
            # Known to numpy.distutils but not usable here.
            compilers_na.append(("fcompiler="+compiler, None,
                                 fcompiler.fcompiler_class[compiler][2]))
        else:
            c.dump_properties()
            compilers[compiler] = c
            # compilers.append(("fcompiler="+compiler, None,
            #                  fcompiler.fcompiler_class[compiler][2] + ' (%s)' % v))
    # compilers_ni = list(set(fcompiler.fcompiler_class.keys()) - set(platform_compilers))
    # compilers_ni = [("fcompiler="+fc, None, fcompiler.fcompiler_class[fc][2])
    #                for fc in compilers_ni]
    # compilers.sort()
    # compilers_na.sort()
    # compilers_ni.sort()
    return compilers
    # pretty_printer = fcompiler.FancyGetopt(compilers)
    # pretty_printer.print_help("Fortran compilers found:")
    # pretty_printer = fcompiler.FancyGetopt(compilers_na)
    # pretty_printer.print_help("Compilers available for this "
    #                          "platform, but not found:")
    # if compilers_ni:
    #    pretty_printer = fcompiler.FancyGetopt(compilers_ni)
    #    pretty_printer.print_help("Compilers not available on this platform:")
    # print "For compiler details, run 'config_fc --verbose' setup command."
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,715
|
certik/fwrap
|
refs/heads/master
|
/fwrap/main.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
# encoding: utf-8
import os
import sys
import shutil
import logging
import tempfile
from optparse import OptionParser
from fwrap.code import CodeBuffer, reflow_fort
from numpy.distutils.fcompiler import CompilerNotFound
from numpy.distutils.command import config_compiler
from fwrap.version import get_version
from fwrap import constants
from fwrap import gen_config as gc
from fwrap import fc_wrap
from fwrap import cy_wrap
import logging, logging.config
def _setup_config():
    """Placeholder for config-file option loading; the ConfigParser-based
    implementation is currently disabled (kept commented out below)."""
    pass
# Available options parsed from default config files
#    _config_parser = ConfigParser.SafeConfigParser()
#    fp = open(os.path.join(os.path.dirname(__file__), 'default.config'),'r')
#    _config_parser.readfp(fp)
#    fp.close()
#    _available_options = {}
#    for section in _config_parser.sections():
#        for opt in _config_parser.options(section):
#            _available_options[opt] = section
# Remove source option
#    _available_options.pop('source')
# Add config option
#    _available_options['config'] = None
def get_projectpath(out_dir, name):
    """Return the absolute path of project directory *name* under *out_dir*."""
    joined = os.path.join(out_dir, name)
    return os.path.abspath(joined)
def shutdown_logging(projectpath):
    """Flush and close all logging handlers, then move any *.log files
    found in the current directory into *projectpath*."""
    logging.shutdown()
    log_names = [f for f in os.listdir(os.path.curdir) if f.endswith('.log')]
    for fname in log_names:
        full = os.path.abspath(os.path.join(os.path.curdir, fname))
        shutil.move(full, projectpath)
def wrap(source=None,**kargs):
    r"""Wrap the source given and compile if requested

    This function is the main driving routine for fwrap and for most use
    cases should be sufficient.  It performs argument validation, parses the
    source, writes out the necessary fortran, c, and cython files for
    wrapping, and compiles those into a module if requested.

    :Input:
     - *source* - (id) Path to source or a list of paths to source to be
       wrapped.  It can also be a piece of raw source in a string (assumed if
       the single string does not lead to a valid file).  If you give a list
       of source files, make sure that they are in the order that they must
       be compiled due to dependencies like modules.
     - *name* - (string) Name of the project and the name of the resulting
       python module
     - *out_dir* - (string) Path where project build is placed
     - *fcompiler* - (string) Class name of fortran compiler requested, this
       name is the one that distutils recognizes.
     - *libraries* - (list)
     - *library_dirs* - (list)
     - *extra_objects* - (list)
     - *override* - (bool) If a project directory already exists in the
       out_dir specified, remove it and create a fresh project.
     - *help_fcompiler* - (bool) Print the available Fortran compilers and
       return without wrapping anything.
    """
    # NOTE: config-file option parsing is currently disabled (see
    # _setup_config); options come only from **kargs.
    out_dir = kargs.get('out_dir')
    out_dir = out_dir.strip()
    name = kargs.get('name')
    # BUG FIX: the original called name.strip() and discarded the result.
    name = name.strip()
    logger.debug("Running with following options:")
    help_fcompiler = kargs.get('help_fcompiler')
    if help_fcompiler:
        # BUG FIX: the original referenced an undefined ``file_name`` here,
        # which raised NameError.  Delegate to numpy.distutils' own
        # compiler listing instead.
        from numpy.distutils.fcompiler import show_fcompilers
        show_fcompilers()
        return
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    project_path = get_projectpath(out_dir, name)
    if os.path.exists(project_path):
        override = kargs.get('override')
        if override:
            logger.debug("Removing %s" % project_path)
            shutil.rmtree(project_path)
            os.mkdir(project_path)
        else:
            raise ValueError("Project directory %s already exists" \
                                % os.path.join(out_dir, name))
    else:
        os.mkdir(project_path)
    # *** TODO: Check if distutils can use this fcompiler and f90
    # Check to see if each source exists and expand to full paths
    raw_source = False
    source_files = []
    if source is not None:
        if isinstance(source, basestring):
            if os.path.exists(source):
                source_files = [source]
            else:
                # Assume this is raw source, put in temporary file.
                raw_source = True
                # BUG FIX: mkstemp returns (fd, path) where fd is an OS-level
                # integer; the original called .write() on the fd directly.
                # Also use a real '.f90' extension (the original's 'f90'
                # suffix produced names without an extension).
                fd, source_path = tempfile.mkstemp(suffix='.f90', text=True)
                fh = os.fdopen(fd, 'w')
                try:
                    fh.write(source)
                finally:
                    fh.close()
                source_files.append(source_path)
        elif isinstance(source, (list, tuple)):
            source_files.extend(source)
        else:
            raise ValueError("Must provide either a string or list of source")
    # Validate and parse source list
    if not source_files:
        raise ValueError("Must provide at least one source to wrap.")
    for (i, src) in enumerate(source_files):
        # Expand variables and path
        source_files[i] = os.path.expanduser(os.path.expandvars(src.strip()))
        if not os.path.exists(source_files[i]):
            raise ValueError("The source file %s does not exist." % source_files[i])
    logger.debug("Wrapping the following source:")
    for src in source_files:
        logger.debug("  %s" % src)
    # Parse fortran using fparser
    logger.info("Parsing source files.")
    f_ast = parse(source_files)
    # XXX: total hack: turns out that when fparser sets up its logging with
    # logger.configFile(...), **the logging module disables all existing
    # loggers**, which includes fwrap's logger.  This completely breaks our
    # logging functionality; thankfully it's simple to turn it back on again.
    logger.disabled = 0
    logger.info("Parsing was successful.")
    # Generate wrapper files
    logger.info("Wrapping fortran...")
    generate(f_ast, name, project_path)
    logger.info("Wrapping was successful.")
    # generate setup.py file
    libraries = kargs.get('libraries')
    library_dirs = kargs.get('library_dirs')
    extra_objects = kargs.get('extra_objects')
    log_name = 'fwrap_setup.log'
    if logging.DEBUG >= fwlogging.console_handler.level:
        # Console already shows debug output; skip the setup log file.
        log_name = ""
    file_name, buf = generate_setup(name, log_name, source_files,
                                    libraries, library_dirs, extra_objects)
    write_to_project_dir(project_path, file_name, buf)
    # Generate library module if requested
    logger.info("Compiling sources and generating extension module...")
    odir = os.path.abspath(os.curdir)
    try:
        os.chdir(project_path)
        logger.info("Changing to project directory %s" % project_path)
        from distutils.core import run_setup
        run_setup(file_name, script_args=make_scriptargs(kargs))
    finally:
        if os.path.abspath(os.curdir) != odir:
            logger.info("Returning to %s" % odir)
            os.chdir(odir)
    logger.info("Compiling was successful.")
    # If raw source was passed in, we need to delete the temp file we created
    if raw_source:
        os.remove(source_files[0])
def check_fcompiler(fcompiler):
    """Return True if *fcompiler* names a compiler class known to numpy.distutils."""
    return fcompiler in allowed_fcompilers()
def allowed_fcompilers():
    """Return the names of every Fortran compiler class numpy.distutils knows."""
    from numpy.distutils import fcompiler
    # Ensure the registry is populated before reading it.
    fcompiler.load_all_fcompiler_classes()
    return fcompiler.fcompiler_class.keys()
def parse(source_files):
    r"""Parse fortran code returning parse tree

    :Input:
     - *source_files* - (list) List of valid source files

    Returns the list of pyf procedure nodes produced by
    fwrap_parse.generate_ast.
    """
    from fwrap import fwrap_parse
    ast = fwrap_parse.generate_ast(source_files)
    return ast
def generate(fort_ast, name, project_path):
    r"""Given a fortran abstract syntax tree ast, generate wrapper files

    :Input:
     - *fort_ast* - (`fparser.ProgramBlock`) Abstract syntax tree from parser
     - *name* - (string) Name of the library module
     - *project_path* - (string) Directory the generated files are written to

    Raises `Exception.IOError` if writing the generated code fails.
    """
    logger.info("Generating abstract syntax tress for c and cython.")
    c_ast = fc_wrap.wrap_pyf_iface(fort_ast)
    cython_ast = cy_wrap.wrap_fc(c_ast)
    # Each generator returns (file_name, buffer); run them in order and
    # write every result into the project directory.
    for gen in (generate_type_specs, generate_fc_f,
                generate_fc_h, generate_fc_pxd):
        out_name, out_buf = gen(c_ast, name)
        write_to_project_dir(project_path, out_name, out_buf)
    for gen in (generate_cy_pxd, generate_cy_pyx):
        out_name, out_buf = gen(cython_ast, name)
        write_to_project_dir(project_path, out_name, out_buf)
def write_to_project_dir(project_path, file_name, buf):
    """Write *buf* (a plain string or a CodeBuffer) to *file_name*
    inside *project_path*."""
    target = os.path.join(project_path, file_name)
    fh = open(target, 'w')
    try:
        if isinstance(buf, basestring):
            text = buf
        else:
            text = buf.getvalue()
        fh.write(text)
    finally:
        fh.close()
def generate_setup(name, log_file,
                   sources,
                   libraries=None,
                   library_dirs=None,
                   extra_objects=None):
    """Render the generated project's setup.py source.

    :Input:
     - *name* - (string) project/extension-module name
     - *log_file* - (string) log file name passed to fwrap_setup.setup()
     - *sources* - (list) source file paths; converted to absolute here
     - *libraries*, *library_dirs*, *extra_objects* - optional build inputs

    Returns the tuple ('setup.py', rendered_source_text).
    """
    tmpl = '''\
from fwrap.fwrap_setup import setup, fwrap_cmdclass, configuration
cfg_args = %(CFG_ARGS)s
cfg = configuration(projname='%(PROJNAME)s', **cfg_args)
setup(log='%(LOG_FILE)s', cmdclass=fwrap_cmdclass, configuration=cfg)
'''
    sources = [os.path.abspath(source) for source in sources]
    # BUG FIX: extra_objects defaults to None; the original comprehension
    # iterated it unconditionally and raised TypeError.
    extra_objects = [os.path.abspath(eo) for eo in (extra_objects or [])]
    cfg_args = {'extra_sources' : sources,
                'libraries' : libraries or [],
                'library_dirs' : library_dirs or [],
                'extra_objects' : extra_objects,
                }
    dd = {'PROJNAME': name,
          'LOG_FILE': log_file,
          'CFG_ARGS': repr(cfg_args)}
    return 'setup.py', (tmpl % dd)
def generate_genconfig(f_ast, name):
    """Generate the genconfig source; returns (file_name, CodeBuffer)."""
    buf = CodeBuffer()
    gc.generate_genconfig(f_ast, buf)
    return constants.GENCONFIG_SRC, buf
def generate_type_specs(f_ast, name):
    """Generate the type-spec input file; returns (file_name, CodeBuffer)."""
    buf = CodeBuffer()
    gc.generate_type_specs(f_ast, buf)
    return constants.TYPE_SPECS_SRC, buf
def generate_cy_pxd(cy_ast, name):
    """Generate the Cython .pxd for the wrapper; returns (file_name, CodeBuffer)."""
    buf = CodeBuffer()
    # The pxd cimports the fc pxd by module name (no extension).
    fc_pxd_name = (constants.FC_PXD_TMPL % name).split('.')[0]
    cy_wrap.generate_cy_pxd(cy_ast, fc_pxd_name, buf)
    return constants.CY_PXD_TMPL % name, buf
def generate_cy_pyx(cy_ast, name):
    """Generate the Cython .pyx wrapper source; returns (file_name, CodeBuffer)."""
    buf = CodeBuffer()
    cy_wrap.generate_cy_pyx(cy_ast, name, buf)
    return constants.CY_PYX_TMPL % name, buf
def generate_fc_pxd(fc_ast, name):
    """Generate the .pxd exposing the C header; returns (file_name, CodeBuffer)."""
    buf = CodeBuffer()
    fc_header_name = constants.FC_HDR_TMPL % name
    fc_wrap.generate_fc_pxd(fc_ast, fc_header_name, buf)
    return constants.FC_PXD_TMPL % name, buf
def generate_fc_f(fc_ast, name):
    """Generate the Fortran wrapper source; returns (file_name, CodeBuffer).

    The generated code is reflowed afterwards so that no line exceeds
    Fortran's line-length limits.
    """
    buf = CodeBuffer()
    for proc in fc_ast:
        proc.generate_wrapper(buf)
    ret_buf = CodeBuffer()
    ret_buf.putlines(reflow_fort(buf.getvalue()))
    return constants.FC_F_TMPL % name, ret_buf
def generate_fc_h(fc_ast, name):
    """Generate the C header for the wrapper; returns (file_name, CodeBuffer)."""
    buf = CodeBuffer()
    fc_wrap.generate_fc_h(fc_ast, constants.KTP_HEADER_SRC, buf)
    return constants.FC_HDR_TMPL % name, buf
def varargs_cb(option, opt_str, value, parser):
    """optparse callback that gathers every following non-flag argument
    into option.dest, stopping at the next flag."""
    assert value is None
    collected = []
    # Iterate a snapshot because we consume parser.rargs as we go.
    for arg in list(parser.rargs):
        if arg.startswith('-'):
            # Next flag reached ('-' also covers '--' prefixes).
            break
        collected.append(arg)
        del parser.rargs[0]
    setattr(parser.values, option.dest, collected)
def make_scriptargs(kargs):
    """Translate the fwrap option dict *kargs* into distutils script args.

    Returns a list like ``['config', '--fcompiler=...', 'config_fc', ...,
    'build_src']`` suitable for numpy.distutils' setup(script_args=...).
    check_fcompiler() is expected to reject unknown compiler names.
    """
    # Explicit lookups replace the original exec-based loop: exec cannot
    # rebind function locals on Python 3, and the explicit form makes the
    # consumed keys visible.
    fcompiler = kargs.get('fcompiler')
    f90flags = kargs.get('f90flags')
    f90exec = kargs.get('f90exec')
    debug = kargs.get('debug')
    noopt = kargs.get('noopt')
    noarch = kargs.get('noarch')
    opt = kargs.get('opt')
    arch = kargs.get('arch')
    build_ext = kargs.get('build_ext')
    check_fcompiler(fcompiler)
    fcopt = '--fcompiler=%s' % fcompiler
    scargs = ['config', fcopt, 'config_fc']
    if debug:
        scargs += ['--debug']
    if noopt:
        scargs += ['--noopt']
    if noarch:
        scargs += ['--noarch']
    if opt:
        scargs += ['--opt=%s' % opt]
    if arch:
        scargs += ['--arch=%s' % arch]
    if f90exec:
        scargs += ['--f90exec=%s' % f90exec]
    if f90flags:
        scargs += ['--f90flags=%s' % f90flags]
        # Mirror the f90 flags for fixed-form (f77) sources as well.
        # NOTE(review): reconstructed as being inside this 'if' -- the
        # dump's indentation was lost; confirm against upstream.
        scargs += ['--f77flags=%s' % f90flags]
    scargs += ['build_src']
    if build_ext:
        scargs += ['build_ext', fcopt, '--inplace']
    return scargs
class fwlogging(object):
    """Helper namespace around the module-level 'fwrap' logger."""

    # Verbosity levels, least to most chatty.
    ERROR, WARN, INFO, DEBUG = range(4)
    # Map each numeric level to the logging-module level name.
    log_levels = {ERROR: 'ERROR', WARN: 'WARN', INFO: 'INFO', DEBUG: 'DEBUG'}
    # Remembers the first stdout handler seen by set_console_level.
    console_handler = None

    @staticmethod
    def set_console_level(verbose):
        """Clamp *verbose* into [ERROR, DEBUG] and apply it to every
        stdout-backed handler of the global logger."""
        clamped = max(fwlogging.ERROR, min(verbose, fwlogging.DEBUG))
        level = getattr(logging, fwlogging.log_levels[clamped])
        for handler in logger.handlers:
            if handler.stream == sys.stdout:
                if fwlogging.console_handler is None:
                    fwlogging.console_handler = handler
                handler.setLevel(level)

    @staticmethod
    def setup_logging():
        """Configure and publish the global 'fwrap' logger.

        Reads the default configuration from log.config next to this file;
        see that file for handler/formatter details.
        """
        global logger
        default_config = os.path.join(os.path.dirname(__file__), 'log.config')
        logging.config.fileConfig(default_config)
        logger = logging.getLogger('fwrap')
def print_version():
    """Print fwrap's version and license blurb to stdout."""
    vandl = '''\
fwrap v%s
Copyright (C) 2010 Kurt W. Smith
Fwrap is distributed under an open-source license. See the source for
licensing information. There is NO warranty, not even for MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
''' % get_version()
    # Parenthesized form behaves identically on Python 2 and stays valid
    # on Python 3 (the original bare 'print vandl' is Py2-only syntax).
    print(vandl)
def main(use_cmdline, sources=None, logging=True, **options):
    """fwrap entry point: parse options, then wrap the given Fortran sources.

    use_cmdline -- when True, options come from sys.argv via optparse;
                   when False, *sources* and **options supply them directly.
    sources     -- Fortran source files (used when use_cmdline is False).
    logging     -- NOTE(review): this parameter shadows the stdlib 'logging'
                   module inside this function and is otherwise unused here;
                   confirm intended.
    Returns a process exit status: 0 on success, 1 on CompilerNotFound.

    Relies on module-level names defined elsewhere in this file/package:
    fwlogging, print_version, wrap, CompilerNotFound, shutdown_logging,
    get_projectpath, config_compiler, logger.
    """
    fwlogging.setup_logging()
    if sources is None:
        sources = []
    # Defaults for every option; **options (programmatic use) overrides them.
    defaults = dict(name='fwproj',
                    version=False,
                    build_ext=False,
                    out_dir=os.path.curdir,
                    f90flags='',
                    f90exec='',
                    extra_objects=[],
                    verbose=fwlogging.ERROR,
                    override=False,
                    help_fcompiler=False,
                    debug=False,
                    noopt=False,
                    noarch=False,
                    opt='',
                    arch='')
    if options:
        defaults.update(options)
    usage ='''\
Usage: %prog [options] fortran-source [fortran-source ...]
%prog --build [compiler-specific options] fortran-source [fortran-source ...]
%prog --help-fcompiler [compiler-specific options]
%prog --help | --version
'''
    description = '''\
%prog is a commandline utility that automatically wraps Fortran code in C, Cython,
& Python, optionally building a Python extension module.
'''
    parser = OptionParser(usage=usage, description=description)
    parser.set_defaults(**defaults)
    # Options are only registered for real command-line use; programmatic
    # callers pass everything through **options instead.
    if use_cmdline:
        parser.add_option('--help-fcompiler', action='store_true', dest='help_fcompiler',
                          help='output information about fortran compilers and exit')
        parser.add_option('-V', '--version', dest="version",
                          action="store_true",
                          help="get version and license info and exit")
        parser.add_option('-v', '--verbose', dest="verbose",
                          action='count',
                          help='the more v\'s (up to 3), the more chatty it gets')
        parser.add_option('-n', '--name', dest='name',
                          help='name for the project directory and extension module '
                          '[default: %default]')
        parser.add_option('-b', '--build', dest='build_ext', action='store_true',
                          help='create the extension module after generating wrappers [default: off]')
        parser.add_option('-o', '--out_dir', dest='out_dir',
                          help='specify where the project directory is to be placed, '
                          '[default: current directory]')
        parser.add_option('--override', action="store_true", dest='override',
                          help='clobber an existing project with the same name [default: off]')
        parser.add_option('--fcompiler', dest='fcompiler',
                          help='specify the fortran compiler to use, see \'--help-fcompiler\'')
        parser.add_option('--f90exec', dest='f90exec',
                          help=('Full path of the specified Fortran 90 compiler, '
                                'not necessary if numpy distutils can discover it (see --help-fcompiler)'))
        parser.add_option('--f90flags', dest='f90flags',
                          help='extra fortran compilation flags')
        parser.add_option('--objects', dest='extra_objects', action='callback',
                          callback=varargs_cb,
                          metavar='<object list>',
                          help='extra object files, archives, etc. to include in the extension module')
        parser.add_option('-L', dest='library_dirs', action='append',
                          metavar='libdir',
                          help='add directory libdir to ld search path')
        parser.add_option('-l', dest='libraries', action='append',
                          metavar='libname',
                          help='runtime library name to include during linking,'
                          ' e.g. -lgfortran or -lg95')
        parser.add_option('--debug', dest='debug', action='store_true',
                          help='include debug flags during compilation')
        parser.add_option('--noopt', dest='noopt', action='store_true',
                          help='remove all optimization flags during compilation')
        parser.add_option('--noarch', dest='noarch', action='store_true',
                          help='do not include architecture-specific flags during compilation')
        parser.add_option('--opt', dest='opt',
                          help='extra optimization flags to include during compilation')
        parser.add_option('--arch', dest='arch',
                          help='include target architecture during compilation')
        # args=None makes parse_args read sys.argv.
        args = None
    else:
        args = sources
    parsed_options, source_files = parser.parse_args(args=args)
    if parsed_options.version:
        print_version()
        return 0
    fwlogging.set_console_level(parsed_options.verbose)
    out_dir, name = parsed_options.out_dir, parsed_options.name
    # Loop over options and put in a dictionary for passing into wrap
    logger.debug("Command line arguments: ")
    # for opt in _available_options.iterkeys():
    # try:
    # if getattr(parsed_options,opt) is not None:
    # kargs[opt] = getattr(parsed_options,opt)
    # logger.debug(" %s = %s" % (opt,kargs[opt]))
    # except:
    # pass
    retval = 0
    # Call main routine
    if parsed_options.help_fcompiler:
        config_compiler.show_fortran_compilers()
        return 0
    if not source_files:
        parser.error("no source files")
    try:
        # wrap() does the actual code generation / build.
        wrap(source_files, **parsed_options.__dict__)
    except CompilerNotFound, m:
        print >>sys.stdout, m
        retval = 1
    finally:
        # Always flush/close the project log, success or failure.
        shutdown_logging(get_projectpath(out_dir, name))
    return retval
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,716
|
certik/fwrap
|
refs/heads/master
|
/runtests.py
|
#!/usr/bin/python
# fwrap test runner, adapted from Cython's runtests.py.
import os, sys, re, shutil, unittest, doctest
# Cython-era toggle; reassigned further below only inside dead 'if 0:' code.
WITH_CYTHON = True
from distutils.dist import Distribution
from distutils.core import Extension
from distutils.command.build_ext import build_ext as _build_ext
distutils_distro = Distribution()
# Absolute path to the fwrap_setup helper shipped with the package.
FWRAP_SETUP = os.path.abspath(os.path.join('fwrap', 'fwrap_setup.py'))
# Test directories scanned for sources; TEST_RUN_DIRS additionally run them.
TEST_DIRS = ['compile', 'errors', 'run', 'pyregr']
TEST_RUN_DIRS = ['run', 'pyregr']
# Lists external modules, and a matcher matching tests
# which should be excluded if the module is not present.
EXT_DEP_MODULES = {
    'numpy' : re.compile('.*\.numpy_.*').match
}
def get_numpy_include_dirs():
    """Return the include directories needed to compile against numpy."""
    from numpy import get_include
    return [get_include()]
EXT_DEP_INCLUDES = [
    # test name matcher , callable returning list
    (re.compile('numpy_.*').match, get_numpy_include_dirs),
]
# Tests excluded when running under an older Python version.
VER_DEP_MODULES = {
    # such as:
    # (2,4) : lambda x: x in ['run.set']
}
# Extra include dirs / C flags picked up from the environment.
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
class build_ext(_build_ext):
    """build_ext variant that strips -Wstrict-prototypes for C++ sources."""

    def build_extension(self, ext):
        if ext.language == 'c++':
            # -Wstrict-prototypes is a C-only flag; drop it if present so
            # the C++ compile stays quiet.  Missing flag is not an error.
            try:
                self.compiler.compiler_so.remove('-Wstrict-prototypes')
            except Exception:
                pass
        _build_ext.build_extension(self, ext)
class ErrorWriter(object):
    """File-like sink that parses compiler-style 'line:col: msg' output.

    Captured text can be queried back as sorted, normalized
    '<line>:<col>: <message>' strings, split into errors and warnings.
    """

    # Optional 'warning:' prefix, optional leading context ending in ':',
    # then '<line> : <column> : <message>'.
    match_error = re.compile('(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match

    def __init__(self):
        self.output = []
        # Duck-type as a writable stream.
        self.write = self.output.append

    def _collect(self, collect_errors, collect_warnings):
        """Return sorted formatted entries of the requested kinds."""
        collected = []
        for line in ''.join(self.output).split('\n'):
            hit = self.match_error(line)
            if not hit:
                continue
            is_warning, lineno, column, message = hit.groups()
            wanted = collect_warnings if is_warning else collect_errors
            if wanted:
                collected.append((int(lineno), int(column), message.strip()))
        collected.sort()
        return ['%d:%d: %s' % triple for triple in collected]

    def geterrors(self):
        return self._collect(True, False)

    def getwarnings(self):
        return self._collect(False, True)

    def getall(self):
        return self._collect(True, True)
class TestBuilderBase(object):
    """Do-nothing base class for test-suite builders."""

    def __init__(self, *args, **kwargs):
        # Accepts and ignores any construction arguments.
        pass

    def build_suite(self):
        """Subclasses return a unittest.TestSuite; the base returns None."""
        return None
class FwrapOptions(object):
    # Placeholder namespace for fwrap option values; attributes are set
    # dynamically by callers.
    pass
class FwrapTestBuilder(object):
    """Build a unittest suite from the Fortran sources under *rootdir*."""

    def __init__(self, rootdir, workdir, selectors, exclude_selectors,
                 cleanup_workdir, cleanup_sharedlibs, fcompiler, verbosity=0):
        self.rootdir = rootdir
        self.workdir = workdir
        self.selectors = selectors
        self.exclude_selectors = exclude_selectors
        self.cleanup_workdir = cleanup_workdir
        self.cleanup_sharedlibs = cleanup_sharedlibs
        self.fcompiler = fcompiler
        self.verbosity = verbosity

    def build_suite(self):
        """Collect tests from every known test directory under rootdir."""
        suite = unittest.TestSuite()
        for entry in sorted(os.listdir(self.rootdir)):
            full_path = os.path.join(self.rootdir, entry)
            if os.path.isdir(full_path) and entry in TEST_DIRS:
                suite.addTest(self.handle_directory(full_path, entry))
        return suite

    def handle_directory(self, path, context):
        """Build the sub-suite for one test directory (*context* = its name)."""
        workdir = os.path.join(self.workdir, context)
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        suite = unittest.TestSuite()
        for filename in sorted(os.listdir(path)):
            basename, ext = os.path.splitext(filename)
            if ext.lower() not in (".f", ".f77", ".f90", ".f95"):
                continue
            if filename.startswith('.'):
                continue  # certain emacs backup files
            fqbasename = "%s.%s" % (context, basename)
            if not any(match(fqbasename) for match in self.selectors):
                continue
            if self.exclude_selectors and any(
                    match(fqbasename) for match in self.exclude_selectors):
                continue
            # Directories in TEST_RUN_DIRS get compile-and-run tests;
            # everything else only gets compiled.
            if context in TEST_RUN_DIRS:
                test_class = FwrapRunTestCase
            else:
                test_class = FwrapCompileTestCase
            suite.addTest(self.build_test(test_class, path, workdir, filename))
        return suite

    def build_test(self, test_class, path, workdir, filename):
        return test_class(path, workdir, filename,
                          cleanup_workdir=self.cleanup_workdir,
                          cleanup_sharedlibs=self.cleanup_sharedlibs,
                          fcompiler=self.fcompiler,
                          verbosity=self.verbosity)
class _devnull(object):
def flush(self): pass
def write(self, s): pass
def read(self): return ''
class FwrapCompileTestCase(unittest.TestCase):
def __init__(self, directory, workdir, filename,
cleanup_workdir=True, cleanup_sharedlibs=True, fcompiler=None,
verbosity=0):
self.directory = directory
self.workdir = workdir
self.filename = filename
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.fcompiler = fcompiler
self.verbosity = verbosity
unittest.TestCase.__init__(self)
def shortDescription(self):
return "wrapping %s" % self.filename
def setUp(self):
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
try:
sys.path.remove(self.workdir)
except ValueError:
pass
if os.path.exists(self.workdir):
if self.cleanup_workdir:
for rmfile in os.listdir(self.workdir):
# if not self.cleanup_workdir:
# if rmfile.lower().startswith("wrap") or rmfile.lower().startswith("autoconfig"):
# continue
# if not self.cleanup_sharedlibs and rmfile.endswith(".so") or rmfile.endswith(".dll"):
# continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
else:
os.remove(rmfile)
except IOError:
pass
else:
os.makedirs(self.workdirs)
def runTest(self):
self.projname = os.path.splitext(self.filename)[0] + '_fwrap'
self.projdir = os.path.join(self.workdir, self.projname)
fq_fname = os.path.join(os.path.abspath(self.directory), self.filename)
main(use_cmdline=False,
sources=[fq_fname],
name=self.projname,
out_dir=self.workdir,
fcompiler=(self.fcompiler or 'gnu95'),
verbose=self.verbosity-1, # bit of a hack here.
build_ext=True)
self.runCompileTest_distutils()
def runCompileTest_distutils(self):
thisdir = os.path.abspath(os.curdir)
try:
os.chdir(self.projdir)
if self.projdir not in sys.path:
sys.path.insert(0, self.projdir)
# try to import the compiled extension module
__import__(self.projname)
del sys.modules[self.projname]
finally:
if self.projdir in sys.path:
sys.path.remove(self.projdir)
os.chdir(thisdir)
def compile(self, directory, filename, workdir, incdir):
self.run_wrapper(directory, filename, workdir, incdir)
def run_wrapper(self, directory, filename, workdir, incdir):
wrap(filename, directory, workdir)
class FwrapRunTestCase(FwrapCompileTestCase):
    """Compile a wrapped module, then run its companion doctest module."""

    def shortDescription(self):
        return "compiling and running %s" % self.filename

    def run(self, result=None):
        """Custom run(): wrap/build the project, then execute the doctests
        from '<projname>_doctest.py' against the same TestResult.

        Follows unittest's result protocol (startTest/addError/stopTest).
        """
        if result is None:
            result = self.defaultTestResult()
        result.startTest(self)
        try:
            try:
                self.setUp()
                self.runTest()
                if self.projdir not in sys.path:
                    sys.path.insert(0, self.projdir)
                doctest_mod_base = self.projname+'_doctest'
                doctest_mod_fqpath = os.path.join(self.directory, doctest_mod_base+'.py')
                shutil.copy(doctest_mod_fqpath, self.projdir)
                doctest.DocTestSuite(self.projname+'_doctest').run(result)
            except Exception:
                result.addError(self, sys.exc_info())
        finally:
            # BUG FIX: the original called result.stopTest() only on the
            # error path, leaving startTest() unbalanced on success.
            result.stopTest(self)
            try:
                self.tearDown()
            except Exception:
                pass
class TestBuilder(object):
    # NOTE(review): leftover from Cython's runtests.py.  The
    # CythonUnitTestCase / CythonRunTestCase / CythonCompileTestCase classes
    # referenced in handle_directory are NOT defined in this file, so
    # building tests from this class would raise NameError.  It is only
    # reachable from dead 'if 0:' code in the script body below.
    def __init__(self, rootdir, workdir, selectors, exclude_selectors, annotate,
                 cleanup_workdir, cleanup_sharedlibs, with_pyregr, cython_only,
                 languages, test_bugs):
        self.rootdir = rootdir
        self.workdir = workdir
        self.selectors = selectors
        self.exclude_selectors = exclude_selectors
        self.annotate = annotate
        self.cleanup_workdir = cleanup_workdir
        self.cleanup_sharedlibs = cleanup_sharedlibs
        self.with_pyregr = with_pyregr
        self.cython_only = cython_only
        self.languages = languages
        self.test_bugs = test_bugs

    def build_suite(self):
        # Collect tests from every known test directory under rootdir.
        suite = unittest.TestSuite()
        test_dirs = TEST_DIRS
        filenames = os.listdir(self.rootdir)
        filenames.sort()
        for filename in filenames:
            if not WITH_CYTHON and filename == "errors":
                # we won't get any errors without running Cython
                continue
            path = os.path.join(self.rootdir, filename)
            if os.path.isdir(path) and filename in test_dirs:
                if filename == 'pyregr' and not self.with_pyregr:
                    continue
                suite.addTest(
                    self.handle_directory(path, filename))
        return suite

    def handle_directory(self, path, context):
        # Build a sub-suite for one test directory (*context* is its name).
        workdir = os.path.join(self.workdir, context)
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        expect_errors = (context == 'errors')
        suite = unittest.TestSuite()
        filenames = os.listdir(path)
        filenames.sort()
        for filename in filenames:
            if not (filename.endswith(".pyx") or filename.endswith(".py")):
                continue
            if filename.startswith('.'): continue # certain emacs backup files
            if context == 'pyregr' and not filename.startswith('test_'):
                continue
            module = os.path.splitext(filename)[0]
            fqmodule = "%s.%s" % (context, module)
            if not [ 1 for match in self.selectors
                     if match(fqmodule) ]:
                continue
            if self.exclude_selectors:
                if [1 for match in self.exclude_selectors if match(fqmodule)]:
                    continue
            if context in TEST_RUN_DIRS:
                if module.startswith("test_"):
                    test_class = CythonUnitTestCase
                else:
                    test_class = CythonRunTestCase
            else:
                test_class = CythonCompileTestCase
            for test in self.build_tests(test_class, path, workdir,
                                         module, expect_errors):
                suite.addTest(test)
        return suite

    def build_tests(self, test_class, path, workdir, module, expect_errors):
        # Error tests only run with the first language; 'cpp' modules skip C.
        if expect_errors:
            languages = self.languages[:1]
        else:
            languages = self.languages
        if 'cpp' in module and 'c' in languages:
            languages = list(languages)
            languages.remove('c')
        tests = [ self.build_test(test_class, path, workdir, module,
                                  language, expect_errors)
                  for language in languages ]
        return tests

    def build_test(self, test_class, path, workdir, module,
                   language, expect_errors):
        # Each language gets its own sub-workdir.
        workdir = os.path.join(workdir, language)
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        return test_class(path, workdir, module,
                          language=language,
                          expect_errors=expect_errors,
                          annotate=self.annotate,
                          cleanup_workdir=self.cleanup_workdir,
                          cleanup_sharedlibs=self.cleanup_sharedlibs,
                          cython_only=self.cython_only)
def collect_unittests(path, module_prefix, suite, selectors):
    """Walk *path*, import Test*.py modules living in 'Tests' packages, and
    add their unittest cases to *suite* when a selector matches the module
    name (prefixed with *module_prefix*)."""
    def file_matches(filename):
        return filename.startswith("Test") and filename.endswith(".py")

    def package_matches(dirname):
        return dirname == "Tests"

    loader = unittest.TestLoader()
    skipped_dirs = []
    for dirpath, dirnames, filenames in os.walk(path):
        # Subtrees without __init__.py are not packages; remember them so
        # their descendants are skipped too.
        if dirpath != path and "__init__.py" not in filenames:
            skipped_dirs.append(dirpath + os.path.sep)
            continue
        if any(dirpath.startswith(skipped) for skipped in skipped_dirs):
            continue
        parentname = os.path.split(dirpath)[-1]
        if not package_matches(parentname):
            continue
        for fname in filenames:
            if not file_matches(fname):
                continue
            filepath = os.path.join(dirpath, fname)[:-len(".py")]
            modulename = module_prefix + filepath[len(path) + 1:].replace(os.path.sep, '.')
            if not any(match(modulename) for match in selectors):
                continue
            # __import__ returns the top-level package; descend to the leaf.
            module = __import__(modulename)
            for part in modulename.split('.')[1:]:
                module = getattr(module, part)
            suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors):
    """Walk *path* and add a DocTestSuite for every selected, importable
    module that exposes __doc__ or __test__."""
    def package_matches(dirname):
        return dirname not in ("Mac", "Distutils", "Plex")

    def file_matches(filename):
        return (filename.endswith(".py") and not ('~' in filename
                or '#' in filename or filename.startswith('.')))

    import doctest, types
    for dirpath, dirnames, filenames in os.walk(path):
        parentname = os.path.split(dirpath)[-1]
        if not package_matches(parentname):
            continue
        for fname in filenames:
            if not file_matches(fname):
                continue
            if not fname.endswith('.py'):
                continue  # redundant with file_matches; kept from original
            filepath = os.path.join(dirpath, fname)[:-len(".py")]
            modulename = module_prefix + filepath[len(path) + 1:].replace(os.path.sep, '.')
            if not any(match(modulename) for match in selectors):
                continue
            # __import__ returns the top-level package; descend to the leaf.
            module = __import__(modulename)
            for part in modulename.split('.')[1:]:
                module = getattr(module, part)
            if hasattr(module, "__doc__") or hasattr(module, "__test__"):
                try:
                    suite.addTest(doctest.DocTestSuite(module))
                except ValueError:
                    # Module defines no doctests.
                    pass
class MissingDependencyExcluder:
    """Callable excluder matching tests whose external module is missing.

    Constructed from a {module name: matcher} mapping; only matchers for
    modules that fail to import are kept active.
    """

    def __init__(self, deps):
        self.exclude_matchers = []
        for modname, matcher in deps.items():
            try:
                __import__(modname)
            except ImportError:
                self.exclude_matchers.append(matcher)
        # Names of tests actually excluded, for reporting at exit.
        self.tests_missing_deps = []

    def __call__(self, testname):
        for matcher in self.exclude_matchers:
            if matcher(testname):
                self.tests_missing_deps.append(testname)
                return True
        return False
class VersionDependencyExcluder:
    """Callable excluder matching tests that need a newer Python version.

    Constructed from a {version tuple: matcher} mapping; only matchers whose
    version exceeds the running interpreter's are kept active.
    """

    def __init__(self, deps):
        from sys import version_info
        self.exclude_matchers = [matcher for ver, matcher in deps.items()
                                 if version_info < ver]
        # Names of tests actually excluded, for reporting at exit.
        self.tests_missing_deps = []

    def __call__(self, testname):
        for matcher in self.exclude_matchers:
            if matcher(testname):
                self.tests_missing_deps.append(testname)
                return True
        return False
class FileListExcluder:
    """Callable excluder driven by a text file of test names.

    Blank lines and '#' comment lines in *list_file* are ignored; only the
    first whitespace-separated token of each remaining line is kept.  A
    test matches when the last dotted component of its name is listed.
    """

    def __init__(self, list_file):
        self.excludes = {}
        for raw in open(list_file).readlines():
            entry = raw.strip()
            if entry and not entry.startswith('#'):
                self.excludes[entry.split()[0]] = True

    def __call__(self, testname):
        return testname.split('.')[-1] in self.excludes
if __name__ == '__main__':
    # Script entry point.  NOTE(review): indentation was lost in the dump;
    # nesting below is reconstructed from the '# if 0' end-markers -- the
    # boundaries of the 'if 0:' dead blocks should be confirmed upstream.
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--no-cleanup", dest="cleanup_workdir",
                      action="store_false", default=True,
                      help="do not delete the generated C files (allows passing --no-cython on next run)")
    parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
                      action="store_false", default=True,
                      help="do not delete the generated shared libary files (allows manual module experimentation)")
    # Cython-era options kept for reference; the dead 'if 0:' blocks below
    # still reference some of their dests.
    # parser.add_option("--no-cython", dest="with_cython",
    # action="store_false", default=True,
    # help="do not run the Cython compiler, only the C compiler")
    # parser.add_option("--no-c", dest="use_c",
    # action="store_false", default=True,
    # help="do not test C compilation")
    # parser.add_option("--no-cpp", dest="use_cpp",
    # action="store_false", default=True,
    # help="do not test C++ compilation")
    # parser.add_option("--no-unit", dest="unittests",
    # action="store_false", default=True,
    # help="do not run the unit tests")
    # parser.add_option("--no-doctest", dest="doctests",
    # action="store_false", default=True,
    # help="do not run the doctests")
    # parser.add_option("--no-file", dest="filetests",
    # action="store_false", default=True,
    # help="do not run the file based tests")
    # parser.add_option("--no-pyregr", dest="pyregr",
    # action="store_false", default=True,
    # help="do not run the regression tests of CPython in tests/pyregr/")
    # parser.add_option("--cython-only", dest="cython_only",
    # action="store_true", default=False,
    # help="only compile pyx to c, do not run C compiler or run the tests")
    # parser.add_option("--no-refnanny", dest="with_refnanny",
    # action="store_false", default=True,
    # help="do not regression test reference counting")
    # parser.add_option("--sys-pyregr", dest="system_pyregr",
    # action="store_true", default=False,
    # help="run the regression tests of the CPython installation")
    parser.add_option("-x", "--exclude", dest="exclude",
                      action="append", metavar="PATTERN",
                      help="exclude tests matching the PATTERN")
    # parser.add_option("-C", "--coverage", dest="coverage",
    # action="store_true", default=False,
    # help="collect source coverage data for the Compiler")
    # parser.add_option("-A", "--annotate", dest="annotate_source",
    # action="store_true", default=True,
    # help="generate annotated HTML versions of the test source files")
    # parser.add_option("--no-annotate", dest="annotate_source",
    # action="store_false",
    # help="do not generate annotated HTML versions of the test source files")
    parser.add_option("-v", "--verbose", dest="verbosity",
                      action="count",
                      default=0,
                      help="display test progress, more v's for more output")
    parser.add_option("-T", "--ticket", dest="tickets",
                      action="append",
                      help="a bug ticket number to run the respective test in 'tests/bugs'")
    parser.add_option('--fcompiler', dest="fcompiler",
                      default="gnu95",
                      help="specify the fortran compiler to use in tests")
    options, cmd_args = parser.parse_args()
    # Dead Cython-era setup: references option dests that are no longer
    # registered (with_cython, coverage, ...); never executed.
    if 0:
        if sys.version_info[0] >= 3:
            # make sure we do not import (or run) Cython itself
            options.doctests = False
            options.with_cython = False
            options.unittests = False
            options.pyregr = False
        if options.coverage:
            import coverage
            coverage.erase()
            coverage.start()
        WITH_CYTHON = options.with_cython
        if WITH_CYTHON:
            from Cython.Compiler.Main import \
                CompilationOptions, \
                default_options as pyrex_default_options, \
                compile as cython_compile
            from Cython.Compiler import Errors
            Errors.LEVEL = 0 # show all warnings
    # if 0
    # RUN ALL TESTS!
    ROOTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]), 'tests')
    WORKDIR = os.path.join(os.getcwd(), 'BUILD')
    # UNITTEST_MODULE = "Cython"
    # UNITTEST_ROOT = os.path.join(os.getcwd(), UNITTEST_MODULE)
    # Clear everything in BUILD except the 'support' directory.
    if os.path.exists(WORKDIR):
        for path in os.listdir(WORKDIR):
            if path in ("support",): continue
            shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
    if not os.path.exists(WORKDIR):
        os.makedirs(WORKDIR)
    # Dead Cython-era banner output.
    if 0:
        if WITH_CYTHON:
            from Cython.Compiler.Version import version
            sys.stderr.write("Running tests against Cython %s\n" % version)
            from Cython.Compiler import DebugFlags
            DebugFlags.debug_temp_code_comments = 1
        else:
            sys.stderr.write("Running tests without Cython.\n")
    #if 0
    # from fwrap.main import wrap
    from fwrap.main import main
    sys.stderr.write("Python %s\n" % sys.version)
    sys.stderr.write("\n")
    # insert cython.py/Cython source directory into sys.path
    cython_dir = os.path.abspath(os.path.join(os.path.pardir, os.path.pardir))
    sys.path.insert(0, cython_dir)
    # Dead refnanny support.
    if 0:
        if options.with_refnanny:
            from pyximport.pyxbuild import pyx_to_dll
            libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
                                 build_in_temp=True,
                                 pyxbuild_dir=os.path.join(WORKDIR, "support"))
            sys.path.insert(0, os.path.split(libpath)[0])
            CFLAGS.append("-DCYTHON_REFNANNY")
    #if 0
    # Ticket numbers (-T) become regexes selecting tests/bugs entries.
    test_bugs = False
    if options.tickets:
        for ticket_number in options.tickets:
            test_bugs = True
            cmd_args.append('.*T%s$' % ticket_number)
    if not test_bugs:
        for selector in cmd_args:
            if selector.startswith('bugs'):
                test_bugs = True
    import re
    # Positional args are case-insensitive regex selectors; default: all.
    selectors = [ re.compile(r, re.I|re.U).search for r in cmd_args ]
    if not selectors:
        selectors = [ lambda x:True ]
    # Chech which external modules are not present and exclude tests
    # which depends on them (by prefix)
    exclude_selectors = []
    if options.exclude:
        exclude_selectors += [ re.compile(r, re.I|re.U).search for r in options.exclude ]
    # Dead dependency-based exclusion (would have replaced the list above).
    if 0:
        missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
        version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
        exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
    # NOTE(review): placement relative to the 'if 0:' above is ambiguous in
    # the dump; reconstructed as active, matching Cython's original.
    if not test_bugs:
        exclude_selectors += [ FileListExcluder("tests/bugs.txt") ]
    if 0:
        languages = []
        if options.use_c:
            languages.append('c')
        if options.use_cpp:
            languages.append('cpp')
    # if 0
    test_suite = unittest.TestSuite()
    if 0:
        if options.unittests:
            collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors)
        if options.doctests:
            collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors)
    # if 0
    # The live fwrap path: build and run the Fortran wrapping tests.
    filetests = FwrapTestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                 options.cleanup_workdir, options.cleanup_sharedlibs, options.fcompiler, options.verbosity)
    test_suite.addTest(filetests.build_suite())
    # Dead Cython file-test / pyregr paths.
    if 0:
        if options.filetests and languages:
            filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                    options.annotate_source, options.cleanup_workdir,
                                    options.cleanup_sharedlibs, options.pyregr,
                                    options.cython_only, languages, test_bugs)
            test_suite.addTest(filetests.build_suite())
        if options.system_pyregr and languages:
            filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                    options.annotate_source, options.cleanup_workdir,
                                    options.cleanup_sharedlibs, True,
                                    options.cython_only, languages, test_bugs)
            test_suite.addTest(
                filetests.handle_directory(
                    os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test'),
                    'pyregr'))
    unittest.TextTestRunner(verbosity=options.verbosity).run(test_suite)
    # Dead coverage / refnanny reporting.
    if 0:
        if options.coverage:
            coverage.stop()
            ignored_modules = ('Options', 'Version', 'DebugFlags', 'CmdLine')
            modules = [ module for name, module in sys.modules.items()
                        if module is not None and
                        name.startswith('Cython.Compiler.') and
                        name[len('Cython.Compiler.'):] not in ignored_modules ]
            coverage.report(modules, show_missing=0)
        if missing_dep_excluder.tests_missing_deps:
            sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
            for test in missing_dep_excluder.tests_missing_deps:
                sys.stderr.write("   %s\n" % test)
        if options.with_refnanny:
            import refnanny
            sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,717
|
certik/fwrap
|
refs/heads/master
|
/fwrap/tests/test_dimension.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
from fwrap import pyf_iface as pyf
from nose.tools import raises, ok_, eq_
def _test_scal_int_expr():
    # Leading underscore keeps nose from collecting this (disabled test).
    expr = pyf.ScalarIntExpr("kind('a')")
    eq_(expr.find_names(), set(['kind']))
def _setup(self):
    """Attach the shared pyf.Dim fixtures used by the test classes below."""
    # Assumed-shape dimensions (no computable extent).
    self.colon = pyf.Dim(':')
    self.colon_parsed = pyf.Dim(('',''))
    self.lbound = pyf.Dim('n:')
    # Explicit lower:upper bounds.
    self.lubound = pyf.Dim('10:20')
    self.lubound_parsed = pyf.Dim(('1', '1'))
    # Arbitrary scalar-int expressions as bounds.
    self.e1, self.e2 =('nx%y + 1', 'size(a, 3-N) + kind(NX%Z)')
    self.expr = pyf.Dim((self.e1, self.e2))
    # Explicit single-extent dimensions.
    self.explicit1 = pyf.Dim('10')
    self.explicit2 = pyf.Dim('anteohusatnheuo%asnotehusaeontuh')
    self.explicit3 = pyf.Dim(('LDIM',))
    # Assumed-size ('*') dimensions.
    self.assumed_size1 = pyf.Dim(('0:*'))
    self.assumed_size2 = pyf.Dim(('n','*'))
class test_dim(object):
    """Exercise pyf.Dim parsing, size expressions, and classification."""

    def setup(self):
        _setup(self)

    @raises(ValueError)
    def test_ubound(self):
        # An upper bound without a lower bound must be rejected.
        pyf.Dim(':10')

    def test_sizeexpr(self):
        # Assumed-shape and assumed-size dims have no computable size.
        for dim in (self.colon, self.colon_parsed, self.lbound,
                    self.assumed_size1, self.assumed_size2):
            eq_(dim.sizeexpr, None)
        eq_(self.lubound.sizeexpr, '((20) - (10) + 1)')
        eq_(self.lubound_parsed.sizeexpr, '((1) - (1) + 1)')
        eq_(self.expr.sizeexpr, "((%s) - (%s) + 1)" % (self.e2.lower(), self.e1.lower()))
        eq_(self.explicit1.sizeexpr, "(10)")

    def test_names(self):
        empty = set()
        eq_(self.colon.depnames, empty)
        eq_(self.colon_parsed.depnames, empty)
        eq_(self.lbound.depnames, set(['n']))
        eq_(self.lubound.depnames, empty)
        eq_(self.expr.depnames, set(['nx', 'size', 'a', 'n', 'kind']))

    def test_isassumedshape(self):
        for dim in (self.colon, self.colon_parsed, self.lbound):
            ok_(dim.is_assumed_shape)
        for dim in (self.lubound, self.lubound_parsed, self.expr):
            ok_(not dim.is_assumed_shape)
class test_dimension(object):
    """Exercise pyf.Dimension aggregation over several Dim objects."""

    def setup(self):
        _setup(self)
        self.dims = pyf.Dimension((self.colon, self.lbound, self.lubound, self.expr))

    def test_names(self):
        # Dependency names are the union over the constituent dims.
        eq_(self.dims.depnames, self.lbound.depnames.union(self.expr.depnames))

    def test_dimspec(self):
        expected = ('dimension(:, n:, 10:20, %s:%s)' %
                    (self.e1.lower(), self.e2.lower()))
        eq_(self.dims.attrspec, expected)

    def test_len(self):
        eq_(len(self.dims), 4)
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,718
|
certik/fwrap
|
refs/heads/master
|
/fwrap/tests/test_fwrap_parse.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fwrap project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
from fwrap import fwrap_parse as fp
from fwrap import pyf_iface as pyf
from cStringIO import StringIO
from nose.tools import ok_, eq_, set_trace
def test_parse_many():
    # Parse a buffer holding one subroutine and one function and check that
    # generate_ast yields one node per program unit, in source order, with
    # names, argument names and argument dtypes preserved.
    # Fix: removed a commented-out duplicate of the subr.args name assertion.
    buf = '''\
subroutine subr1(a, b, c)
implicit none
integer, intent(in) :: a
complex :: b
double precision, intent(out) :: c
c = a + aimag(b)
end subroutine subr1
function func1(a, b, c)
implicit none
integer, intent(in) :: a
real :: b
complex, intent(in) :: c
double precision :: func1
func1 = a + aimag(c) - b
end function func1
'''
    subr, func = fp.generate_ast([buf])
    eq_(subr.name, 'subr1')
    eq_(func.name, 'func1')
    eq_([arg.name for arg in subr.args], ['a', 'b', 'c'])
    # Default-kind Fortran types map onto the pyf default dtype singletons.
    eq_([arg.dtype for arg in subr.args], [pyf.default_integer,
                                           pyf.default_complex,
                                           pyf.default_dbl])
def test_parse_array_args():
    # Parse a fixed-form LAPACK routine (DGESDD) and check that the AST
    # keeps the routine name (lower-cased by the parser, per the eq_ below)
    # and the full argument list, including assumed-size arrays like A(LDA,*).
    # NOTE(review): fixed-form Fortran continuation markers ('$') are
    # column-sensitive; the leading whitespace of the literal below may have
    # been mangled in transit -- verify against the upstream source file.
    buf = '''\
SUBROUTINE DGESDD( JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, WORK,
$ LWORK, IWORK, INFO )
*
* -- LAPACK driver routine (version 3.2.1) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
* March 2009
*
* .. Scalar Arguments ..
CHARACTER JOBZ
INTEGER INFO, LDA, LDU, LDVT, LWORK, M, N
* ..
* .. Array Arguments ..
INTEGER IWORK( * )
DOUBLE PRECISION A( LDA, * ), S( * ), U( LDU, * ),
$ VT( LDVT, * ), WORK( * )
END SUBROUTINE DGESDD'''
    # generate_ast returns a list of program-unit nodes; only one unit here.
    subr = fp.generate_ast([buf])[0]
    eq_(subr.name, 'dgesdd')
    eq_([arg.name for arg in subr.args],
        "jobz m n a lda s u ldu vt ldvt work lwork iwork info".split())
def test_parse_kind_args():
    # Check that explicit kind selectors (kind=1,2,4,8) on integer arguments
    # are carried through parsing into each argument's declared dtype.
    fcode = '''\
function int_args_func(a,b,c,d)
integer(kind=8) :: int_args_func
integer(kind=1), intent(in) :: a
integer(kind=2), intent(in) :: b
integer(kind=4), intent(in) :: c
integer(kind=8), intent(out) :: d
d = a + b + c
int_args_func = 10
end function int_args_func
'''
    func = fp.generate_ast([fcode])[0]
    # dtype.odecl is compared to the literal Fortran declaration text, one
    # per argument a..d in declaration order.
    eq_([arg.dtype.odecl
         for arg in func.args],
        ["integer(kind=%d)" % i
         for i in (1,2,4,8)])
|
{"/setup.py": ["/fwrap/version.py"], "/runtests.py": ["/fwrap/main.py"]}
|
28,741
|
housz77/off_parser
|
refs/heads/master
|
/off_parser/data/__init__.py
|
"""
=============================
Data (:mod:`off_parser.data`)
=============================
This module contains functions for loading built-in and publicly
available 3D model data.
Functions
=========
.. autosummary::
:toctree: generated/
load_data
download_dataset
load_modelnet10
load_modelnet40
"""
from .loader import *
|
{"/off_parser/data/__init__.py": ["/off_parser/data/loader.py"], "/off_parser/data/loader_test.py": ["/off_parser/data/loader.py"], "/examples/toilet_ex1.py": ["/off_parser/__init__.py"], "/off_parser/__init__.py": ["/off_parser/parser/__init__.py", "/off_parser/data/__init__.py"], "/off_parser/parser/off_parser_test.py": ["/off_parser/data/__init__.py"], "/off_parser/data/loader.py": ["/off_parser/parser/__init__.py"], "/off_parser/parser/__init__.py": ["/off_parser/parser/off_parser.py"]}
|
28,742
|
housz77/off_parser
|
refs/heads/master
|
/off_parser/data/loader_test.py
|
from .loader import *
def test_load_modelnet10():
    """Smoke-test: both ModelNet10 splits yield at least one item."""
    for split in ('train', 'test'):
        models = load_modelnet10(split)
        next(models)
def test_load_modelnet40():
    """Smoke-test: both ModelNet40 splits yield at least one item."""
    for split in ('train', 'test'):
        models = load_modelnet40(split)
        next(models)
|
{"/off_parser/data/__init__.py": ["/off_parser/data/loader.py"], "/off_parser/data/loader_test.py": ["/off_parser/data/loader.py"], "/examples/toilet_ex1.py": ["/off_parser/__init__.py"], "/off_parser/__init__.py": ["/off_parser/parser/__init__.py", "/off_parser/data/__init__.py"], "/off_parser/parser/off_parser_test.py": ["/off_parser/data/__init__.py"], "/off_parser/data/loader.py": ["/off_parser/parser/__init__.py"], "/off_parser/parser/__init__.py": ["/off_parser/parser/off_parser.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.