index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
19,479
|
qsq-dm/mff
|
refs/heads/master
|
/udp_server.py
|
# -*- coding: utf-8 -*-
from errno import EWOULDBLOCK, EAGAIN
import logging
import os
import socket
from tornado.ioloop import IOLoop
from tornado.netutil import set_close_exec
from logging.handlers import RotatingFileHandler
from settings import LOG_FILE_NAME
from settings import LOG_PORT
def create_client():
    """Build the UDP socket used to ship log lines to the local collector."""
    return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Module-level UDP socket shared by send_msg() below.
udp_sock = create_client()
def send_msg(msg):
    """Fire-and-forget *msg* as a datagram to the local log collector port."""
    dest = ('localhost', LOG_PORT)
    udp_sock.sendto(msg, dest)
#----------------------------------------------------------------------
def create_logger(path):
    """Create the "api" file logger that persists received log messages.

    path: log file path; rotated at 1 GiB with up to 1000 backup files.
    Returns the configured ``logging.Logger``.
    """
    logger = logging.getLogger("api")
    logger.setLevel(logging.INFO)
    handler = RotatingFileHandler(path, maxBytes=1024 * 1024 * 1024,
                                  backupCount=1000)
    # Bug fix: the Formatter was previously created and discarded, so it was
    # never applied to the handler; attach it explicitly.
    handler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(handler)
    return logger
logger = create_logger(LOG_FILE_NAME)
logger.propagate=0 # do not propagate to ancestor loggers (no console echo)
class UDPServer(object):
    """Minimal UDP server driven by a Tornado IOLoop.

    name: human-readable label (not otherwise used by the server).
    port: UDP port to bind.
    on_receive: callback ``on_receive(data, address)`` invoked per datagram.
    address / family: resolved via getaddrinfo to the set of bind addresses.
    io_loop: Tornado IOLoop; defaults to the singleton instance.
    """

    def __init__(self, name, port, on_receive, address=None, family=socket.AF_INET, io_loop=None):
        self.io_loop = io_loop or IOLoop.instance()
        self._on_receive = on_receive
        self._sockets = []
        flags = socket.AI_PASSIVE
        if hasattr(socket, "AI_ADDRCONFIG"):
            # Skip address families not configured on this host.
            flags |= socket.AI_ADDRCONFIG
        # find all addresses to bind, bind and register the "READ" callback
        for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_DGRAM, 0, flags)):
            af, sock_type, proto, canon_name, sock_addr = res
            self._open_and_register(af, sock_type, proto, sock_addr)
        print('Started')

    def _open_and_register(self, af, sock_type, proto, sock_addr):
        """Open one non-blocking UDP socket, bind it, and watch for READ."""
        sock = socket.socket(af, sock_type, proto)
        set_close_exec(sock.fileno())
        if os.name != 'nt':
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setblocking(0)
        # Bug fix: print() is not printf-style; the address used to be printed
        # as a separate argument instead of interpolated into the message.
        print('Binding to %s...' % repr(sock_addr))
        sock.bind(sock_addr)

        def read_handler(fd, events):
            # Drain the socket until recvfrom would block.
            while True:
                try:
                    data, address = sock.recvfrom(65536)
                except socket.error as e:
                    if e.args[0] in (EWOULDBLOCK, EAGAIN):
                        return
                    raise
                self._on_receive(data, address)

        self.io_loop.add_handler(sock.fileno(), read_handler, IOLoop.READ)
        self._sockets.append(sock)

    def stop(self):
        """Unregister and close every bound socket."""
        # Bug fix: interpolate the count instead of passing it to print().
        print('Closing %d socket(s)...' % len(self._sockets))
        for sock in self._sockets:
            self.io_loop.remove_handler(sock.fileno())
            sock.close()
def custom_on_receive(data, address):
    # Persist every received datagram verbatim to the rotating "api" log.
    logger.info(data)
def main():
    """Start the UDP log server on LOG_PORT and run the IOLoop forever."""
    # The server registers itself on the IOLoop in __init__; no local
    # reference is needed (dead commented-out shutdown code removed).
    UDPServer('meifenfen_api_logger_on_8008', LOG_PORT, on_receive=custom_on_receive)
    IOLoop.instance().start()

if __name__ == '__main__':
    main()
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,480
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/4eefa5b6eb51_.py
|
"""empty message
Revision ID: 4eefa5b6eb51
Revises: 55f4c256c989
Create Date: 2015-11-28 10:09:38.336732
"""
# revision identifiers, used by Alembic.
revision = '4eefa5b6eb51'
down_revision = '55f4c256c989'
from alembic import op
import sqlalchemy as sa
import models
def upgrade():
    """Schema upgrade: add NOT NULL money column ``total`` to ``pay_log_order_no``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('pay_log_order_no', sa.Column('total', models.MoneyField(precision=10, scale=2), nullable=False))
    ### end Alembic commands ###
def downgrade():
    """Schema downgrade: drop the ``total`` column again."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('pay_log_order_no', 'total')
    ### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,481
|
qsq-dm/mff
|
refs/heads/master
|
/demo.py
|
from flask import Flask
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class HelloWorld(Resource):
    """Trivial demo REST resource returning a fixed greeting."""

    def get(self):
        greeting = {'hello': 'world'}
        return greeting
from flask import Blueprint, render_template, abort
from jinja2 import TemplateNotFound
from thirdparty.wechat import wechat
server_verify = Blueprint('server_verify', __name__,
                          template_folder='templates')
# NOTE(review): this rebinds `app` and `api`, discarding the instances
# created at the top of this demo file — confirm this shadowing is intended.
app = Flask(__name__)
api_bp = Blueprint('api', __name__)
api = Api(api_bp)
class TodoItem(Resource):
    """Demo resource echoing a fixed task for any ``id``."""

    def get(self, id):
        task = {'task': 'Say "Hello, World!"'}
        return task
api.add_resource(TodoItem, '/todos/<int:id>')
app.register_blueprint(api_bp)
# NOTE(review): added after register_blueprint — with flask-restful on a
# blueprint, resources registered afterwards may not be routed; verify.
api.add_resource(HelloWorld, '/')
if __name__ == '__main__':
    app.run(debug=True)
from flask_inputs import Inputs
from wtforms.validators import DataRequired
class CustomerInputs(Inputs):
    """flask-inputs validator: URL rule arguments must include a non-empty ``id``."""
    rule = {
        'id': [DataRequired()]
    }
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,482
|
qsq-dm/mff
|
refs/heads/master
|
/user/common.py
|
# -*- coding: utf-8 -*-
import os
import time
import json
from itertools import chain
from models import Order
from models import UserCoupon
from ops.order import OrderService
from ops.coupon import CouponService
from ops.credit import CreditService
from constants import ORDER_STATUS
def cancel_order(order_id):
    """Cancel an order (original docstring: 'cancel a paid order').

    Unpaid orders (NEW_ORDER/TO_PAY) become CANCEL_BEFORE_PAY; paid orders
    (PAY_SUCCESS) become CANCELED.  When the status update succeeds, used
    credit is returned and a locked coupon is released (status reset to 0).
    NOTE(review): indentation reconstructed from a whitespace-stripped dump —
    confirm the exact nesting of cancel_pay_logs against the original file.
    """
    order = OrderService.get_order_by_id(order_id)
    assert order, '订单不存在'
    count = 0
    if order.status in [ORDER_STATUS.NEW_ORDER, ORDER_STATUS.TO_PAY]:
        # Guard the UPDATE with a WHERE on the same statuses, so a concurrent
        # status change yields count == 0 and no rollback is performed.
        where = Order.status.in_([ORDER_STATUS.NEW_ORDER, ORDER_STATUS.TO_PAY])
        count = OrderService.update_order_status(order_id, ORDER_STATUS.CANCEL_BEFORE_PAY, order.user_id, where)
        if count:
            if order.credit_amount:
                # Return the locked credit to the user.
                CreditService.modify_credit(order.user_id, -(order.credit_amount))
            if order.coupon_id:
                # Release the coupon (0 = unused).
                CouponService.update_user_coupon_status(UserCoupon.id==order.coupon_id, 0)
    elif order.status==ORDER_STATUS.PAY_SUCCESS:
        where = Order.status==ORDER_STATUS.PAY_SUCCESS
        count = OrderService.update_order_status(order_id, ORDER_STATUS.CANCELED, order.user_id, where)
        if count:
            if order.credit_amount:
                # Only the not-yet-repaid share of the credit is returned.
                repayment_amount = OrderService.order_repayment_logs_amount(order_id)
                remain_to_repayment = order.credit_amount - repayment_amount
                CreditService.modify_credit(order.user_id, -remain_to_repayment)
                CreditService.cancel_pay_logs(order_id)
            if order.coupon_id:
                CouponService.update_user_coupon_status(UserCoupon.id==order.coupon_id, 0)
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,483
|
qsq-dm/mff
|
refs/heads/master
|
/thirdparty/sms.py
|
# -*- coding: utf-8 -*-
import time
import hashlib
from datetime import datetime
import random
from functools import wraps
from flask import request
from flask import url_for
from thirdparty.SendTemplateSMS import sendTemplateSMS
import settings
def today_remain_seconds():
    """Return the number of seconds remaining until local midnight."""
    now = datetime.now()
    start_of_day = datetime(now.year, now.month, now.day)
    elapsed = int(time.mktime(now.timetuple()) - time.mktime(start_of_day.timetuple()))
    return 86400 - elapsed
def gen_vcode():
    """Return a random 6-digit numeric verification code as a string.

    Bug fix: ``randrange``'s upper bound is exclusive, so 999999 could
    never be generated; the bound is now 1000000 to cover every 6-digit code.
    """
    code = random.randrange(100000, 1000000)
    return str(code)
def gen_complex_vcode():
    """Return a random 8-digit numeric verification code as a string.

    Bug fix: ``randrange``'s upper bound is exclusive, so 99999999 could
    never be generated; the bound now covers every 8-digit code.
    """
    code = random.randrange(10000000, 100000000)
    return str(code)
def _send_sms(phone, data, tpl_id):
    """Send a template SMS; returns the gateway result, or None on any error.

    phone: recipient number; data: list of template parameters (mutated to
    str in place); tpl_id: template id at the SMS provider.
    """
    try:
        # The gateway cannot parse unicode in the request packet, so coerce
        # everything to str first (translated from the original comment).
        phone = str(phone)
        for i in range(len(data)):
            data[i] = str(data[i])
        print
        print phone, data, tpl_id, '发送短信'
        result = sendTemplateSMS(phone, data, tpl_id)
        return result
    except:
        # NOTE(review): bare except makes SMS delivery best-effort; failures
        # are printed and swallowed, the caller receives None.
        import traceback
        traceback.print_exc()
@settings.celery.task
def send_sms(phone, vcode):
    """Registration SMS, template 44515; params [vcode, 5] — 5 presumably validity in minutes, TODO confirm."""
    print '发送注册短信', phone, vcode
    return _send_sms(phone, [vcode,5], 44515)
@settings.celery.task
def send_sms_apply_success(phone, amount):
    """Credit-application approved SMS, template 44988."""
    print '发送审核通过短信'
    return _send_sms(phone, [amount], 44988)
@settings.celery.task
def send_sms_apply_reject(phone):
    """Credit-application rejected SMS, template 44990 (no parameters)."""
    print '发送审核被拒短信'
    return _send_sms(phone, [], 44990)
@settings.celery.task
def send_sms_new_order(phone, name, desc, service_code):
    """New-order SMS, template 44994."""
    print '下单短信'
    return _send_sms(phone, [name, desc, service_code], 44994)
@settings.celery.task
def send_sms_refund(phone, name, price, period):
    """Refund SMS, template 52093."""
    print '退款短信'
    return _send_sms(phone, [name, price, period], 52093)
@settings.celery.task
def send_room_one(phone):
    """Existing-user SMS (original docstring: 'old user'), template 71623."""
    return _send_sms(phone, [], 71623)
@settings.celery.task
def send_room_two(phone):
    """Vote-canvassing SMS (original docstring: 'canvass votes'), template 71638."""
    return _send_sms(phone, [], 71638)
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,484
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/2eb48ce629a0_.py
|
"""empty message
Revision ID: 2eb48ce629a0
Revises: d5045d5ecf4
Create Date: 2015-12-16 15:59:24.978212
"""
# revision identifiers, used by Alembic.
revision = '2eb48ce629a0'
down_revision = 'd5045d5ecf4'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Schema upgrade: create the ``image_size`` lookup table (key -> width/height)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('image_size',
    sa.Column('key', sa.String(length=32), nullable=False),
    sa.Column('width', sa.Integer(), nullable=True),
    sa.Column('height', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('key')
    )
    ### end Alembic commands ###
def downgrade():
    """Schema downgrade: drop the ``image_size`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('image_size')
    ### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,485
|
qsq-dm/mff
|
refs/heads/master
|
/models.py
|
# -*- coding: utf-8 -*-
'''
'''
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from sqlalchemy import TypeDecorator
from sqlalchemy import UniqueConstraint
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy.ext import mutable
from sqlalchemy.sql.sqltypes import String
from sqlalchemy.sql.sqltypes import Text
from sqlalchemy.sql.sqltypes import Integer
from sqlalchemy.sql.sqltypes import UnicodeText
from sqlalchemy.sql.sqltypes import DateTime
from sqlalchemy.sql.sqltypes import Float
from sqlalchemy.sql.sqltypes import Boolean
from sqlalchemy.dialects.mysql import TINYINT,DECIMAL,CHAR,INTEGER
from sqlalchemy.sql.expression import cast
from util.utils import prefix_http
from util.utils import dt_obj
from util.utils import format_price
from util.utils import format_rate
from util.utils import prefix_img_domain
from util.utils import prefix_img_list
from util.utils import prefix_img_list_thumb
from util.utils import str_to_int_list
from util.utils import comma_str_to_list
from util.utils import imgs_to_list
from settings import MAIN_MYSQL_URI
from settings import DEFAULT_IMAGE
from constants import CREDIT_STATUS
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = MAIN_MYSQL_URI
db = SQLAlchemy(app)
Column = db.Column
Table = db.Table
ForeignKey = db.ForeignKey
class Model(db.Model):
    """Abstract base class shared by every ORM model in this module."""
    __abstract__ = True

    @staticmethod
    def show_status():
        """Models are visible by default; subclasses may override."""
        return True
class MoneyField(TypeDecorator):
    """DECIMAL(10, 2) column type that is read back as a plain float.

    NULL values coerce to 0.0 in ``process_result_value``.
    """
    impl = DECIMAL(10, 2)

    def column_expression(self, col):
        # SELECTs wrap the column in CAST(col AS FLOAT) so the driver returns
        # a number rather than a Decimal/string.
        return cast(col, Float)

    def process_result_value(self, value, dialect):
        # NULL -> 0.0
        return float(value or 0)
class User(Model):
    """User account (original docstring: 'user')."""
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(80), unique=True)
    avatar = db.Column(String(1000))
    phone = db.Column(String(80), unique=True)
    passwd = db.Column(String(80))
    city_id = Column(Integer, ForeignKey('city.id'))
    create_time = Column(DateTime, default=dt_obj.now)

    def as_dict(self):
        """Serialize for API output; avatar falls back to DEFAULT_IMAGE."""
        return dict(
            id = self.id,
            name = self.name,
            avatar = prefix_img_domain(self.avatar or DEFAULT_IMAGE),
            phone = self.phone,
            create_time = self.create_time
        )
class Wechat(Model):
    """WeChat identity keyed by open_id, optionally bound to a User."""
    __tablename__ = 'wechat'
    __table_args__ = (
        PrimaryKeyConstraint('open_id'),
    )
    open_id = Column(String(32), autoincrement=False)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=True)
    create_time = Column(DateTime, default=dt_obj.now)
    status = Column(TINYINT(1), nullable=False, default=0) # 1 logged in, 0 newly registered / not yet bound to a user, -1 logged out
class Order(Model):
    """Order.

    Translated from the original docstring: when an order is submitted, the
    coupon and the credit amount are both locked.  ``credit_choice_id`` is
    stored at ordering time; per-installment PeriodPayLog rows are only
    created once payment actually succeeds.
    """
    id = db.Column(Integer, primary_key=True)
    pay_method = Column(TINYINT(1), nullable=False, default=0) # 0 no cash paid (possibly fully covered by coupon/credit), 1 WeChat account, 2 WeChat app, 3 Alipay
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    hospital_id = Column(Integer, ForeignKey('hospital.id'), nullable=False)
    item_id = Column(Integer, ForeignKey('item.id'), nullable=False)
    order_no = db.Column(String(30), unique=True)
    transaction_id = db.Column(String(100))
    credit_choice_id = Column(Integer, ForeignKey('period_pay_choice.id'), nullable=True) # only used for the order-payment preview display
    coupon_id = Column(Integer, ForeignKey('user_coupon.id'), nullable=True, unique=True)
    coupon_amount = Column(MoneyField, nullable=False, default=0) # coupon face value applied
    credit_amount = Column(MoneyField, nullable=False, default=0) # credit used (installment total + installment fees)
    total_fee = Column(MoneyField, nullable=False, default=0) # installment fees
    price = Column(MoneyField, nullable=False, default=0) # amount actually paid, excluding credit
    total = Column(MoneyField, nullable=False, default=0) # full order price without any coupon
    create_time = Column(DateTime, default=dt_obj.now)
    status = Column(TINYINT(1), nullable=False, default=0) # 0 awaiting payment (payment state of the non-credit portion)
    refund = Column(TINYINT(1), nullable=False, default=0) # 0 not refunded, 1 refunded
    credit_verified = Column(TINYINT(1), nullable=False, default=0) # credit review: 0 pending, 1 approved, 2 rejected (re-apply)
    user_finished = Column(Boolean, default=False) # user confirmed completion
    remark = db.Column(String(300))

    def as_dict(self):
        """Serialize for API output; money fields go through format_price."""
        return dict(
            id = self.id,
            user_id = self.user_id,
            item_id = self.item_id,
            order_no = self.order_no,
            transaction_id = self.transaction_id,
            hospital_id = self.hospital_id,
            coupon_id = self.coupon_id or 0,
            price = format_price(self.price or 0),
            total_fee = format_price(self.total_fee or 0),
            total = format_price(self.total or 0),
            credit_amount = format_price(self.credit_amount or 0),
            coupon_amount = format_price(self.coupon_amount or 0),
            create_time = str(self.create_time),
            status = self.status,
            credit_choice_id = self.credit_choice_id or 0,
            refund = self.refund,
            credit_verified = self.credit_verified,
            user_finished = self.user_finished,
            remark = self.remark or ''
        )
class Coupon(Model):
    """Coupon template (original docstring: 'coupon')."""
    id = db.Column(Integer, primary_key=True)
    item_id = Column(Integer, ForeignKey('item.id'), nullable=True)
    title = Column(String(300), default='')
    price = Column(MoneyField, nullable=False, default=0) # discount amount (original: 'amount actually paid')
    need = Column(MoneyField, nullable=False, default=0) # minimum order amount required to use
    coupon_cat = Column(TINYINT(1), nullable=False, default=0) # coupon type
    cat_id = Column(Integer, ForeignKey('item_cat.id'), nullable=True) # category
    sub_cat_id = Column(Integer, ForeignKey('item_sub_cat.id'), nullable=True)
    effective = Column(Integer,nullable=False,default=0)
    remark = Column(String(100), default='')
    is_trial = Column(Boolean, default=False) # whether this is a trial coupon

    def as_dict(self):
        """Serialize for API output; ``effective`` is seconds (days = effective/86400)."""
        need_cat = 1 if self.need else 2 # 1 spend-threshold coupon, 2 ordinary
        return dict(
            id = self.id,
            coupon_cat = self.coupon_cat,
            is_trial = 1 if self.is_trial else 0,
            item_id = self.item_id,
            need = format_price(self.need),
            title = self.title,
            price = format_price(self.price),
            cat_id = self.cat_id,
            need_cat = need_cat,
            sub_cat_id = self.sub_cat_id,
            effective = self.effective,
            effective_days = self.effective/86400,
            remark = self.remark,
        )
class UserCoupon(Model):
    """A coupon instance held by a user (original docstring: 'user coupon')."""
    id = db.Column(Integer, primary_key=True)
    coupon_id = Column(Integer, ForeignKey('coupon.id'), autoincrement=False)
    title = Column(String(300), default='')
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    item_id = Column(Integer, ForeignKey('item.id'), nullable=True)
    need = Column(MoneyField, nullable=False, default=0) # minimum order amount required to use
    coupon_cat = Column(TINYINT(1), nullable=False, default=0) # coupon scope: 0 any, 1 category, 2 sub-category, 3 specific item
    cat_id = Column(Integer, ForeignKey('item_cat.id'), nullable=True) # category
    sub_cat_id = Column(Integer, ForeignKey('item_sub_cat.id'), nullable=True)
    price = Column(MoneyField, nullable=False, default=0) # discount amount (original: 'amount actually paid')
    status = Column(TINYINT(1), nullable=False, default=0) # 0 unused, 1 used
    end_time = Column(DateTime, nullable=False)
    create_time = Column(DateTime, nullable=False, default=dt_obj.now)
    remark = Column(String(100), default='')
    is_trial = Column(Boolean, default=False) # whether this is a trial coupon

    def as_dict(self):
        """Serialize for API output."""
        return dict(
            id = self.id,
            coupon_cat = self.coupon_cat,
            cat_id = self.cat_id,
            is_trial = 1 if self.is_trial else 0,
            title = self.title,
            sub_cat_id = self.sub_cat_id,
            user_id = self.user_id,
            need = format_price(self.need),
            item_id = self.item_id,
            price = format_price(self.price),
            status = self.status,
            end_time = str(self.end_time),
            create_time = str(self.create_time),
            coupon_id = self.coupon_id,
            remark = self.remark,
        )
class PeriodPayChoice(Model):
    """Installment fee-rate table (original docstring: 'installment rate table')."""
    id = db.Column(Integer, primary_key=True)
    period_count = Column(Integer, nullable=False, unique=True) # number of installments
    period_fee = Column(Float, nullable=False) # installment fee rate

    def as_dict(self):
        """Serialize for API output."""
        return dict(
            id = self.id,
            period_count = self.period_count,
            period_fee = self.period_fee,
        )
class PeriodPayLog(Model):
    '''
    One row per instalment of an order's repayment schedule.
    *Late fees are computed dynamically* (the `punish` column holds the expected amount).
    '''
    id = db.Column(Integer, primary_key=True)
    amount = Column(MoneyField, nullable=False, default=0) # amount due for this instalment
    fee = Column(MoneyField, nullable=False, default=0) # handling fee for this instalment
    punish = Column(MoneyField, nullable=False, default=0) # expected late fee
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    order_id = Column(Integer, ForeignKey('order.id'), nullable=True)
    period_pay_index = Column(Integer, nullable=True) # which instalment this row is (1-based index, per comment convention elsewhere — TODO confirm)
    period_count = Column(Integer, nullable=True) # total number of instalments
    create_time = Column(DateTime, default=dt_obj.now)
    deadline = Column(DateTime)# due date
    repayment_time = Column(DateTime)# actual repayment date
    status = Column(TINYINT(1), nullable=False, default=0)# 0 pending, 1 repaid, 2 cancelled
    def as_dict(self):
        """Serialize to a plain dict. None money values are treated as 0;
        repayment_time becomes '' when not yet repaid."""
        return dict(
            id          = self.id,
            amount      = format_price(self.amount or 0),
            punish      = format_price(self.punish or 0),
            period_count= self.period_count,
            fee         = float(self.fee or 0),
            user_id     = self.user_id,
            order_id    = self.order_id,
            period_pay_index = self.period_pay_index,
            deadline    = str(self.deadline),
            repayment_time = str(self.repayment_time or ''),
            create_time = self.create_time,
            status      = self.status,
            )
class PunishLog(Model):
    ''' History of late fees accrued against instalment rows. '''
    id = db.Column(Integer, primary_key=True)
    log_id = Column(Integer, ForeignKey('period_pay_log.id'), nullable=True) # the instalment this fee belongs to
    amount = Column(MoneyField, nullable=False, default=0) # late-fee amount accrued
    create_time = Column(DateTime, default=dt_obj.now)
class CreditUseLog(Model):
    ''' Usage history of a user's available credit. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    amount = Column(MoneyField, nullable=False, default=0) # credit amount used
    order_id = Column(Integer, ForeignKey('order.id'), nullable=True)
    status = Column(TINYINT(1), nullable=False, default=0)# current status of this credit usage
    create_time = Column(DateTime, default=dt_obj.now)
class CreditChangeLog(Model):
    ''' Change history of a user's total credit limit. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    amount = Column(MoneyField, nullable=False, default=0) # change amount
    create_time = Column(DateTime, default=dt_obj.now)
class UserCredit(Model):
    ''' A user's credit limit (total vs. used). '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    total = Column(MoneyField, nullable=False, default=0)# total credit limit
    used = Column(MoneyField, nullable=False, default=0) # credit already used
    status = Column(TINYINT(1), nullable=False, default=CREDIT_STATUS.DEFAULT)# 0 default, 1 under review, 2 approved, 3 rejected
    def as_dict(self):
        """Serialize to a plain dict; None money values are treated as 0."""
        return dict(
            id      = self.id,
            user_id = self.user_id,
            total   = format_price(self.total or 0),
            used    = format_price(self.used or 0),
            status  = self.status
            )
class Hospital(Model):
    ''' A hospital/clinic offering items. '''
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(100))
    city_id = Column(Integer, ForeignKey('city.id'), nullable=True)
    image = db.Column(String(100))
    phone = db.Column(String(100))
    desc = db.Column(String(10000))
    tags = db.Column(String(1000)) # comma-separated hospital tags
    addr = db.Column(String(300)) # address
    working_time = db.Column(String(100)) # opening hours
    long_lat = db.Column(String(30)) # "lng,lat" coordinate pair
    photos = db.Column(String(1000))
    rate = Column(Float, default=5) # rating score
    sold_count = db.Column(Integer, default=0) # number of items sold
    status = Column(TINYINT(1), nullable=False, default=0)# 0 offline, 1 online
    def as_dict(self):
        """Serialize to a plain dict; image paths get the CDN domain prefix,
        long_lat is split into separate lng/lat strings ('' when unset)."""
        return dict(
            id          = self.id,
            status      = self.status,
            city_id     = self.city_id,
            sold_count  = self.sold_count or 0,
            photo_list  = prefix_img_list(self.photos),
            image       = prefix_img_domain(self.image),
            photos      = self.photos,
            name        = self.name,
            rate        = format_rate(self.rate or 5),
            phone       = self.phone,
            desc        = self.desc,
            working_time = self.working_time,
            tag_list    = comma_str_to_list(self.tags),
            tags        = self.tags,
            addr        = self.addr,
            long_lat    = self.long_lat,
            lng         = self.long_lat.split(',')[0] if self.long_lat else '',
            lat         = self.long_lat.split(',')[1] if self.long_lat else '',
            )
class ItemCat(Model):
    ''' Item category (top level). '''
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(100), unique=True)
    sort_order = Column(Integer, default=0) # smaller values sort first
    status = Column(TINYINT(1), nullable=False, default=0)# 0 not online, 1 online
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            name        = self.name,
            status      = self.status,
            sort_order  = self.sort_order
            )
class ItemSubCat(Model):
    ''' Item sub-category, belonging to one or more parent categories. '''
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(100))
    desc = db.Column(String(1000))
    icon = db.Column(String(100))
    cat_id = Column(Integer, ForeignKey('item_cat.id'), nullable=False)# primary parent category id
    cat_ids = db.Column(String(500)) # comma-separated additional parent category ids — TODO confirm against callers
    status = Column(TINYINT(1), nullable=False, default=0)# 0 not online, 1 online
    def as_dict(self):
        """Serialize to a plain dict; cat_ids is also exposed as a parsed int list."""
        return dict(
            id      = self.id,
            name    = self.name,
            desc    = self.desc,
            cat_ids = self.cat_ids,
            cat_id_list = str_to_int_list(self.cat_ids),
            icon    = prefix_img_domain(self.icon),
            cat_id  = self.cat_id,
            status  = self.status
            )
class Item(Model):
    ''' A purchasable item (medical/beauty procedure) offered by a hospital. '''
    id = db.Column(Integer, primary_key=True)
    orig_price = Column(MoneyField, nullable=False, default=0)
    price = Column(MoneyField, nullable=False, default=0)
    sub_cat_id = Column(Integer, ForeignKey('item_sub_cat.id'), nullable=False)# primary sub-category id
    hospital_id = Column(Integer, ForeignKey('hospital.id'), nullable=False)
    sub_cat_ids = db.Column(String(100)) # comma-separated additional sub-category ids
    image = db.Column(String(300))
    photos = db.Column(String(1000))
    title = db.Column(String(500))
    item_no = db.Column(String(100), index=True) # item number (note: as_dict exposes str(self.id) instead)
    support_choices = db.Column(String(50)) # comma-separated supported instalment counts
    sold_count = db.Column(Integer, default=0) # number sold
    has_fee = Column(Boolean, default=True) # lit. "interest-free or not" — name suggests the opposite; NOTE(review): confirm polarity against callers
    direct_buy = Column(Boolean) # whether the item can be bought directly
    status = Column(TINYINT(1), nullable=False, default=0)# 0 not online, 1 online, 2 hospital taken offline
    surgery_desc = Column(Text)
    doctor_desc = Column(Text)
    create_time = Column(DateTime, default=dt_obj.now)
    use_time = db.Column(String(300))
    note = db.Column(String(500)) # hint text shown to users
    def as_dict(self):
        """Serialize to a plain dict: prices formatted, comma lists parsed,
        image paths prefixed with the CDN domain."""
        return dict(
            id          = self.id,
            sub_cat_id  = self.sub_cat_id,
            title       = self.title,
            sub_cat_ids = self.sub_cat_ids,
            sub_cat_id_list = map(int, filter(bool, (self.sub_cat_ids or '').split(','))),
            direct_buy  = bool(self.direct_buy),
            price       = format_price(self.price or 0),
            orig_price  = format_price(self.orig_price or 0),
            photos      = self.photos,
            item_no     = str(self.id),
            hospital_id = self.hospital_id,
            sold_count  = self.sold_count or 0,
            image       = prefix_img_domain(self.image),
            photo_list  = prefix_img_list(self.photos) if self.photos else [],
            support_choices = self.support_choices,
            support_choice_list = str_to_int_list(self.support_choices),
            status      = self.status,
            surgery_desc  = self.surgery_desc,
            use_time    = self.use_time,
            note        = self.note,
            doctor_desc = self.doctor_desc,
            has_fee     = bool(self.has_fee),
            create_time = self.create_time,
            )
class ItemComment(Model):
    ''' A user's review of an item, tied to the order it was bought in. '''
    id = db.Column(Integer, primary_key=True)
    item_id = Column(Integer, ForeignKey('item.id'))
    user_id = Column(Integer, ForeignKey('user.id'))
    order_id = Column(Integer, ForeignKey('order.id'))
    photos = db.Column(String(1000))
    content = db.Column(String(10000))
    rate = Column(Float, default=0) # rating score
    is_anonymous = Column(Boolean, default=False)
    is_re_comment = Column(Boolean, default=False) # whether this is a follow-up review — TODO confirm
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict.
        NOTE(review): is_re_comment is coerced with bool() but is_anonymous is
        returned raw (may be None for NULL rows) — confirm whether clients rely on that."""
        return dict(
            id          = self.id,
            is_anonymous  = self.is_anonymous,
            is_re_comment = bool(self.is_re_comment),
            item_id     = self.item_id,
            order_id    = self.order_id,
            user_id     = self.user_id,
            rate        = self.rate or 0,
            photos      = self.photos,
            photo_list  = prefix_img_list(self.photos) if self.photos else [],
            thumb_photo_list= prefix_img_list_thumb(self.photos) if self.photos else [],
            content     = self.content,
            create_time = str(self.create_time)
            )
class ItemFav(Model):
    ''' Wishlist entry: a user's favorited item (one row per user/item pair). '''
    __table_args__ = (
        UniqueConstraint('user_id', 'item_id'),
    )
    id = db.Column(Integer, primary_key=True)
    item_id = Column(Integer, ForeignKey('item.id'))
    user_id = Column(Integer, ForeignKey('user.id'))
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            item_id     = self.item_id,
            user_id     = self.user_id,
            create_time = str(self.create_time)
            )
class UserAdvice(Model):
    ''' User feedback submission. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=True)
    content = db.Column(String(10000))
    contact = db.Column(String(100)) # optional contact info left by the user
    create_time = Column(DateTime, default=dt_obj.now)
    remark = db.Column(String(300)) # internal remark
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            user_id     = self.user_id,
            content     = self.content,
            contact     = self.contact,
            create_time = self.create_time,
            remark      = self.remark
            )
class ServiceCode(Model):
    ''' Booking service code: one redemption code per order. '''
    id = db.Column(Integer, primary_key=True)
    order_id = Column(Integer, ForeignKey('order.id'), unique=True)
    code = Column(String(100), index=True, unique=True)
    status = Column(TINYINT(1), nullable=False, default=0) # 0 unused, 1 booked, 2 confirmed
    book_time = Column(DateTime) # booked appointment time
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            order_id    = self.order_id,
            code        = self.code,
            book_time   = self.book_time,
            status      = self.status
            )
class PayNotifyLog(Model):
    ''' Raw payment-gateway callback log. '''
    id = db.Column(Integer, primary_key=True)
    pay_type = Column(TINYINT(1), nullable=False, default=0) # 1 WeChat official account, 2 WeChat app, 3 Alipay
    content = db.Column(String(10000)) # raw callback payload
    create_time = Column(DateTime, default=dt_obj.now)
class OrderLog(Model):
    ''' Order status-change audit log. '''
    id = db.Column(Integer, primary_key=True)
    order_id = Column(Integer, ForeignKey('order.id'))
    status = Column(TINYINT(1), nullable=False) # order status at the time of this entry
    remark = db.Column(String(100))
    create_time = Column(DateTime, default=dt_obj.now)
class CreditApply(Model):
    ''' Credit-limit application. Note: students who move from undergraduate to
    graduate programs must be prompted to update their education info / graduation time. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), unique=True)
    name = db.Column(String(100)) # applicant name
    id_no = db.Column(String(18)) # national ID card number
    school = db.Column(String(100)) # school name
    enrollment_time = Column(DateTime) # enrollment date
    major = db.Column(String(100)) # major
    stu_no = db.Column(String(20)) # student number
    stu_education = db.Column(String(20)) # education level
    stu_years = Column(Float, default=4) # program length in years
    addr = db.Column(String(100)) # address
    parent_contact = db.Column(String(100)) # parents' contact info
    chsi_name = db.Column(String(100)) # CHSI (chsi.com.cn) account name
    chsi_passwd = db.Column(String(100)) # CHSI password
    id_card_photo = db.Column(String(100)) # ID card photo
    stu_card_photo = db.Column(String(100)) # student card photo
    body_choice_ids = db.Column(String(100)) # selected body-part ids
    body_choice_text = db.Column(String(100)) # free-text "other" content
    create_time = Column(DateTime, default=dt_obj.now)
    update_time = Column(DateTime, default=dt_obj.now)
    graduate_time = Column(DateTime)
    has_supply = Column(Boolean, default=False) # whether data has been supplemented from CHSI
    reason = db.Column(String(500)) # rejection reason
    status = Column(TINYINT(1), nullable=False, default=1) # 1 step one, 2 step two, 3 approved, 4 rejected
    remark = db.Column(String(500)) # internal remark
    remark_img = db.Column(String(500)) # internal remark image
    def as_dict(self):
        """Serialize to a plain dict: nullable strings default to '', photo keys are
        exposed both raw (*_key) and with the CDN domain prefix."""
        return dict(
            id          = self.id,
            id_no       = self.id_no or '',
            stu_education   = self.stu_education,
            create_time = self.create_time,
            update_time = self.update_time,
            status      = self.status,
            name        = self.name or '',
            stu_no      = self.stu_no,
            user_id     = self.user_id,
            school      = self.school,
            enrollment_time = self.enrollment_time,
            major       = self.major,
            addr        = self.addr,
            graduate_time   = self.graduate_time,
            chsi_name   = self.chsi_name or '',
            chsi_passwd = self.chsi_passwd or '',
            parent_contact  = self.parent_contact or '',
            stu_years   = self.stu_years,
            reason      = self.reason or '',
            id_card_photo   = prefix_img_domain(self.id_card_photo),
            stu_card_photo  = prefix_img_domain(self.stu_card_photo),
            id_card_photo_key = self.id_card_photo,
            stu_card_photo_key= self.stu_card_photo,
            has_supply  = self.has_supply,
            body_choice_ids = self.body_choice_ids,
            body_choice_text    = self.body_choice_text,
            remark      = self.remark,
            remark_img  = prefix_img_domain(self.remark_img),
            )
class School(Model):
    ''' A school. '''
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(100), unique=True) # school name
    city_name = db.Column(String(100)) # city name
    link = db.Column(String(100)) # link URL
    pics_count = db.Column(Integer, default=0, index=True) # number of pictures
    def as_dict(self):
        """Serialize to a plain dict; link gets an http:// prefix if missing."""
        return dict(
            id          = self.id,
            name        = self.name,
            link        = prefix_http(self.link),
            city_name   = self.city_name,
            pics_count  = self.pics_count or 0
            )
class AdminUser(Model):
    ''' Back-office administrator account. '''
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(100), unique=True)
    city_id = Column(Integer, ForeignKey('city.id'))
    passwd = db.Column(String(100))
    cat = Column(TINYINT(1), nullable=False, default=0)# role: 0 all permissions, 1 editor, 2 promotion
    create_time = Column(DateTime, default=dt_obj.now)
class City(Model):
    ''' A city, with codes for third-party map providers. '''
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(100), unique=True)
    city_code = db.Column(String(30), unique=True) # Baidu Maps cityCode
    amap_code = db.Column(String(30), unique=True) # AMap (Gaode) cityCode
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            name        = self.name,
            amap_code   = self.amap_code,
            city_code   = self.city_code
            )
class Repayment(Model):
    ''' Repayment order: one payment covering one or more instalments. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    pay_method = Column(TINYINT(1), nullable=False, default=0)# 0 no payment needed (fully covered by coupon/credit), 1 WeChat, 2 Alipay
    coupon_id = Column(Integer, ForeignKey('user_coupon.id'), nullable=True, unique=True)
    price = Column(MoneyField, nullable=False, default=0) # amount paid — original comment said "per-instalment fee"; NOTE(review): looks copy-pasted, confirm
    data = db.Column(String(10000)) # which instalments were repaid + per-instalment amounts at repayment time
    order_no = db.Column(String(30), unique=True)
    transaction_id = db.Column(String(100)) # gateway transaction id
    create_time = Column(DateTime, default=dt_obj.now)
    update_time = Column(DateTime, default=dt_obj.now)
    status = Column(TINYINT(1), nullable=False, default=0) # 0 awaiting payment, 1 paying, 2 paid
    def as_dict(self):
        """Serialize to a plain dict with the price formatted."""
        return dict(
            id          = self.id,
            pay_method  = self.pay_method,
            coupon_id   = self.coupon_id,
            data        = self.data,
            price       = format_price(self.price),
            order_no    = self.order_no,
            create_time = self.create_time,
            update_time = self.update_time,
            status      = self.status,
            transaction_id  = self.transaction_id
            )
class HospitalUser(Model):
    ''' Hospital administrator account. '''
    id = db.Column(Integer, primary_key=True)
    hospital_id = Column(Integer, ForeignKey('hospital.id'))
    name = db.Column(String(100), unique=True)
    passwd = db.Column(String(100))
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict (password intentionally omitted)."""
        return dict(
            id          = self.id,
            hospital_id = self.hospital_id,
            name        = self.name,
            create_time = self.create_time
            )
class HelpCat(Model):
    ''' Help-center category. '''
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(100), unique=True)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id      = self.id,
            name    = self.name,
            )
class HelpEntry(Model):
    ''' Help-center article within a category. '''
    id = db.Column(Integer, primary_key=True)
    title = db.Column(String(100))
    cat_id = Column(Integer, ForeignKey('help_cat.id'))
    content = db.Column(String(10000))
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id      = self.id,
            title   = self.title,
            cat_id  = self.cat_id,
            content = self.content
            )
class Activity(Model):
    ''' Marketing activity/campaign, scoped to a city and a time window. '''
    id = db.Column(Integer, primary_key=True)
    title = db.Column(String(300))
    city_id = Column(Integer, ForeignKey('city.id'))
    desc = db.Column(String(1000))
    start_time = Column(DateTime)
    end_time = Column(DateTime)
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            title       = self.title,
            city_id     = self.city_id,
            desc        = self.desc,
            start_time  = self.start_time,
            end_time    = self.end_time,
            create_time = self.create_time
            )
class ActivityItem(Model):
    ''' An item featured in an activity, with an activity-specific price. '''
    id = db.Column(Integer, primary_key=True)
    activity_id = Column(Integer, ForeignKey('activity.id'))
    item_id = Column(Integer, ForeignKey('item.id'))
    sort_order = Column(Integer, default=0) # smaller values sort first
    price = Column(MoneyField, nullable=False, default=0) # activity price
    image = db.Column(String(300))
    def as_dict(self):
        """Serialize to a plain dict with the price formatted and image prefixed."""
        return dict(
            id          = self.id,
            image       = prefix_img_domain(self.image),
            activity_id = self.activity_id,
            item_id     = self.item_id,
            price       = format_price(self.price),
            sort_order  = self.sort_order
            )
class RecommendItem(Model):
    ''' A recommended (featured) item. '''
    id = db.Column(Integer, primary_key=True)
    item_id = Column(Integer, ForeignKey('item.id'), unique=True)
    sort_order = Column(Integer, default=0) # smaller values sort first
    image = db.Column(String(300))
    desc = db.Column(String(500))
    def as_dict(self):
        """Serialize to a plain dict with the image prefixed."""
        return dict(
            id          = self.id,
            sort_order  = self.sort_order,
            item_id     = self.item_id,
            image       = prefix_img_domain(self.image),
            desc        = self.desc
            )
class RecommendSubcat(Model):
    ''' A recommended (featured) item sub-category. '''
    id = db.Column(Integer, primary_key=True)
    sub_cat_id = Column(Integer, ForeignKey('item_sub_cat.id'), unique=True)
    sort_order = Column(Integer, default=0) # smaller values sort first
    icon = db.Column(String(300))
    def as_dict(self):
        """Serialize to a plain dict with the icon prefixed."""
        return dict(
            id          = self.id,
            sort_order  = self.sort_order,
            sub_cat_id  = self.sub_cat_id,
            icon        = prefix_img_domain(self.icon)
            )
class EditNameLog(Model):
    ''' Record of a user renaming themselves. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            user_id     = self.user_id,
            create_time = self.create_time
            )
class PayLogOrderNo(Model):
    ''' Maps repayment instalment records to the payment order number.
    Cancelling an order after (partial) repayment means: refund what was
    already repaid and set the unpaid logs to status 2 (cancelled).
    '''
    id = db.Column(Integer, primary_key=True)
    order_no = db.Column(String(30), index=True)
    period_pay_log_id = Column(Integer, ForeignKey('period_pay_log.id'), unique=True)
    price = Column(MoneyField, nullable=False, default=0) # amount repaid for this instalment
    total = Column(MoneyField, nullable=False, default=0) # total repayment amount of the order
    create_time = Column(DateTime, default=dt_obj.now)
class QrCodeUser(Model):
    ''' A WeChat user who followed via scanning a promotional QR code.
    '''
    id = db.Column(Integer, primary_key=True)
    open_id = db.Column(String(50), unique=True) # WeChat open_id, unique index
    qrcode_id = Column(Integer, ForeignKey('qrcode.id'), nullable=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=True)
    sex = Column(Integer, default=0)
    city = db.Column(String(100))
    headimgurl = db.Column(String(300))
    nickname = db.Column(String(100))
    location = db.Column(String(100))
    lnglat = db.Column(String(100))
    create_time = Column(DateTime, default=dt_obj.now)
    status = Column(TINYINT(1), default=1, index=True) # 0 unfollowed, 1 following, -1 never followed
    def as_dict(self):
        """Serialize to a plain dict; missing avatar falls back to DEFAULT_IMAGE."""
        return dict(
            id          = self.id,
            open_id     = self.open_id,
            qrcode_id   = self.qrcode_id,
            user_id     = self.user_id,
            sex         = self.sex,
            headimgurl  = self.headimgurl or DEFAULT_IMAGE,
            city        = self.city,
            nickname    = self.nickname,
            location    = self.location,
            lnglat      = self.lnglat,
            create_time = self.create_time,
            status      = self.status,
            )
class Promoter(Model):
    ''' A promoter (field-marketing account) with aggregated funnel counters. '''
    id = db.Column(Integer, primary_key=True)
    phone = db.Column(String(20), unique=True)
    name = db.Column(String(50))
    passwd = db.Column(String(50))
    follow_count = Column(Integer, default=0, index=True) # number of follows attributed
    reg_count = Column(Integer, default=0, index=True) # number of registrations attributed
    dup_count = Column(Integer, default=0, index=True) # number of duplicate registrations
    unfollow_count = Column(Integer, default=0, index=True) # number of unfollows
    create_time = Column(DateTime, default=dt_obj.now)
    create_by = Column(Integer, ForeignKey('promoter.id'), nullable=True) # promoter who created this one
    status = Column(TINYINT(1), nullable=False, default=1) # 0 offline, 1 may create QR codes, 2 may not create QR codes
    def as_dict(self):
        """Serialize to a plain dict (note: includes the plaintext passwd field)."""
        return dict(
            id          = self.id,
            dup_count   = self.dup_count,
            phone       = self.phone,
            name        = self.name,
            passwd      = self.passwd,
            create_by   = self.create_by,
            follow_count= self.follow_count,
            reg_count   = self.reg_count,
            unfollow_count= self.unfollow_count,
            status      = self.status
            )
class Qrcode(Model):
    ''' A promotional WeChat QR code created by a promoter. '''
    id = db.Column(Integer, primary_key=True)
    ticket = db.Column(String(100)) # WeChat QR ticket
    image = db.Column(String(300))
    act_type = db.Column(Integer, default=0) # promotion activity type; 9 = cash activity
    promoter_id = Column(Integer, ForeignKey('promoter.id'), nullable=False)
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict with the image prefixed."""
        return dict(
            id          = self.id,
            ticket      = self.ticket,
            image       = prefix_img_domain(self.image),
            promoter_id = self.promoter_id,
            create_time = self.create_time,
            act_type    = self.act_type
            )
class WechatLocation(Model):
    ''' Geolocation report received from a WeChat user. '''
    id = db.Column(Integer, primary_key=True)
    open_id = db.Column(String(50), index=True) # user's WeChat open_id
    lng = db.Column(String(50))
    lat = db.Column(String(50))
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            open_id     = self.open_id,
            lng         = self.lng,
            lat         = self.lat,
            create_time = self.create_time
            )
class FakeUser(Model):
    ''' Marks a user account as fake (e.g. seeded/test account — TODO confirm usage). '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
class Trial(Model):
    ''' A trial (free sample / voucher giveaway) campaign. '''
    id = db.Column(Integer, primary_key=True)
    title = db.Column(String(300))
    image = db.Column(String(300)) # image
    cat = Column(Integer, default=0) # trial type: 0 free gift, 1 voucher for a specific item
    coupon_id = Column(Integer, ForeignKey('coupon.id'), nullable=True)
    total = Column(Integer, default=0) # number of slots (lit. "application count")
    sent = Column(Integer, default=0) # number already granted
    sort_order = Column(Integer, default=0) # trial sort order
    apply_count = Column(Integer, default=0) # popularity (applicant count)
    rules = db.Column(Text) # trial rules
    process = db.Column(Text) # trial process description
    create_time = Column(DateTime, default=dt_obj.now)
    start_time = Column(DateTime)
    end_time = Column(DateTime)
    def as_dict(self):
        """Serialize to a plain dict; cat_str is a user-facing Chinese label
        ("free, free shipping" for gifts, "free" otherwise)."""
        return dict(
            id          = self.id,
            title       = self.title,
            image       = prefix_img_domain(self.image),
            cat         = self.cat,
            cat_str     = '免费, 包邮' if self.cat==0 else '免费',
            total       = self.total,
            coupon_id   = self.coupon_id,
            sent        = self.sent,
            sort_order  = self.sort_order,
            apply_count = self.apply_count,
            rules       = self.rules,
            process     = self.process,
            create_time = self.create_time,
            end_time    = self.end_time,
            start_time  = self.start_time,
            )
class TrialApply(Model):
    ''' A user's application to a trial campaign (one per user/trial pair). '''
    __table_args__ = (
        UniqueConstraint('user_id', 'trial_id'),
    )
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    trial_id = Column(Integer, ForeignKey('trial.id'), nullable=False)
    cat = Column(Integer, default=0) # trial type: 0 free gift, 1 voucher for a specific item
    coupon_id = Column(Integer, ForeignKey('user_coupon.id'), nullable=True)
    name = db.Column(String(100))
    phone = db.Column(String(30))
    school = db.Column(String(100))
    sex = Column(TINYINT(1), nullable=False, default=0) # 0 undisclosed, 1 male, 2 female
    addr = db.Column(String(100))
    content = db.Column(String(1000))
    create_time = Column(DateTime, default=dt_obj.now) # creation time
    status = Column(TINYINT(1), nullable=False, default=0) # 0 awaiting review, 1 qualified
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            sex         = self.sex,
            cat         = self.cat,
            coupon_id   = self.coupon_id,
            user_id     = self.user_id,
            trial_id    = self.trial_id,
            name        = self.name,
            phone       = self.phone,
            school      = self.school,
            addr        = self.addr,
            content     = self.content,
            create_time = self.create_time,
            status      = self.status
            )
class TrialComment(Model):
    ''' A user's experience review of a trial. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    trial_id = Column(Integer, ForeignKey('trial.id'), nullable=False)
    photos = db.Column(String(1000))
    content = db.Column(String(10000))
    create_time = Column(DateTime, default=dt_obj.now) # creation time
    def as_dict(self):
        """Serialize to a plain dict; photos also exposed as a prefixed list."""
        return dict(
            id          = self.id,
            user_id     = self.user_id,
            trial_id    = self.trial_id,
            photos      = self.photos,
            content     = self.content,
            create_time = self.create_time,
            photo_list  = prefix_img_list(self.photos)
            )
class ImageSize(Model):
    """Cached pixel dimensions of an uploaded image, keyed by its storage key."""
    __tablename__ = 'image_size'
    __table_args__ = (
        PrimaryKeyConstraint('key'),
    )
    key = Column(String(32)) # image storage key (primary key)
    width = Column(Integer, default=0)
    height = Column(Integer, default=0)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            key     = self.key,
            width   = self.width,
            height  = self.height
            )
class WechatReg(Model):
    ''' Links a registered user to a WeChat open_id.
    (Original docstring was copy-pasted from TrialComment.) '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    open_id = db.Column(String(100))
    create_time = Column(DateTime, default=dt_obj.now) # creation time
class RecommendBeautyItem(Model):
    ''' An item recommended in the "beauty guide" section. '''
    id = db.Column(Integer, primary_key=True)
    item_id = Column(Integer, ForeignKey('item.id'), nullable=False)
    create_time = Column(DateTime, default=dt_obj.now) # creation time
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            item_id     = self.item_id,
            create_time = self.create_time
            )
class BeautyEntry(Model):
    ''' A "beauty guide" editorial entry referencing a list of items. '''
    id = db.Column(Integer, primary_key=True)
    title = db.Column(String(100))
    icon = db.Column(String(100)) # list-view image
    image = db.Column(String(100)) # homepage image
    photo = db.Column(String(100)) # large detail-page image
    items = db.Column(String(100)) # comma-separated item ids
    view_count = Column(Integer, default=0)
    create_time = Column(DateTime, default=dt_obj.now)
    status = Column(TINYINT(1), nullable=False, default=0)# 0 not online, 1 online
    def as_dict(self):
        """Serialize to a plain dict; items parsed into item_id_list, images prefixed."""
        return dict(
            id          = self.id,
            icon        = prefix_img_domain(self.icon),
            view_count  = self.view_count,
            title       = self.title,
            image       = prefix_img_domain(self.image),
            photo       = prefix_img_domain(self.photo),
            items       = self.items,
            item_id_list= map(int, filter(bool, (self.items or '').split(','))),
            status      = self.status,
            create_time = self.create_time
            )
class DailyCoupon(Model):
    ''' Daily coupon giveaway: a limited batch of a coupon available in a time window. '''
    id = db.Column(Integer, primary_key=True)
    coupon_id = Column(Integer, ForeignKey('coupon.id'), nullable=False)
    start_time = Column(DateTime)
    end_time = Column(DateTime)
    total = Column(Integer, default=0) # total number available
    sent = Column(Integer, default=0) # number already handed out
    title = db.Column(String(100))
    use_condition = db.Column(String(100)) # usage-condition text
    use_time = db.Column(String(100)) # usage-time text
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict.

        `sent`/`total` may be NULL in the database; every derived field treats
        NULL as 0 — including `remain`, which previously raised TypeError when
        either operand was None.
        """
        return dict(
            id          = self.id,
            title       = self.title,
            coupon_id   = self.coupon_id,
            start_time  = self.start_time,
            use_time    = self.use_time or '',
            use_condition   = self.use_condition or '',
            end_time    = self.end_time,
            sent        = self.sent or 0,
            total       = self.total or 0,
            # guard both operands: columns default to 0 but existing rows may hold NULL
            remain      = (self.total or 0) - (self.sent or 0),
            create_time = self.create_time
            )
class DailyUser(Model):
    ''' Records that a user claimed a daily coupon (one per user/daily pair). '''
    __table_args__ = (
        UniqueConstraint('daily_id', 'user_id'),
    )
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    daily_id = Column(Integer, ForeignKey('daily_coupon.id'), nullable=False)
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id      = self.id,
            user_id = self.user_id,
            daily_id= self.daily_id,
            create_time=self.create_time
            )
class AlipayOrderUser(Model):
    ''' Maps an Alipay payment order to the buyer's Alipay account. '''
    id = db.Column(Integer, primary_key=True)
    order_no = db.Column(String(100), unique=True)
    buyer_email = db.Column(String(100), index=True) # buyer's Alipay account (email)
    create_time = Column(DateTime, default=dt_obj.now)
class RecommendHospital(Model):
    ''' A recommended (featured) hospital with display tag/color. '''
    id = db.Column(Integer, primary_key=True)
    hospital_id = Column(Integer, ForeignKey('hospital.id'), unique=True)
    sort_order = Column(Integer, default=0) # smaller values sort first
    tag = db.Column(String(50))
    color = db.Column(String(50))
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            hospital_id = self.hospital_id,
            sort_order  = self.sort_order,
            tag         = self.tag,
            color       = self.color
            )
class Article(Model):
    ''' A notification article pushed to users. '''
    id = db.Column(Integer, primary_key=True)
    title = db.Column(String(300))
    desc = db.Column(String(1000))
    image = db.Column(String(300))
    link = db.Column(String(300))
    create_time = Column(DateTime, default=dt_obj.now)
    status = Column(TINYINT(1), nullable=False, default=0) # 0 not online, 1 online
    def as_dict(self):
        """Serialize to a plain dict (image is returned raw, without CDN prefix)."""
        return dict(
            id          = self.id,
            title       = self.title,
            desc        = self.desc,
            image       = self.image,
            link        = self.link,
            create_time = self.create_time,
            status      = self.status
            )
class Notification(Model):
    ''' Per-user delivery/read state of a notification article. '''
    id = db.Column(Integer, primary_key=True)
    article_id = Column(Integer, ForeignKey('article.id'))
    user_id = Column(Integer, ForeignKey('user.id'))
    create_time = Column(DateTime, default=dt_obj.now)
    status = Column(TINYINT(1), nullable=False, default=0) # 0 unread, 1 read
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            user_id     = self.user_id,
            article_id  = self.article_id,
            create_time = self.create_time,
            status      = self.status
            )
class RoomDesignDetail(Model):
    ''' Dorm-room design contest entry. '''
    id = Column(Integer, primary_key=True)
    room_name = Column(String(30), unique=True)
    applyer_name = Column(String(30))
    addr = Column(String(30))
    phone = Column(String(30), unique=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    school_id = Column(Integer, ForeignKey('school.id'))
    apply_no = Column(String(30), unique=True) # application number
    pics = Column(String(500))
    vote_count = db.Column(Integer, default=0) # number of votes received
    pics_count = db.Column(Integer, default=0, index=True) # number of pictures
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict.

        pic_list / thumb_pic_list are padded with '' entries up to length 4 so
        the client can render a fixed 4-slot grid.
        """
        result = dict(
            id          = self.id,
            room_name   = self.room_name,
            apply_no    = self.apply_no,
            addr        = self.addr,
            phone       = self.phone,
            applyer_name= self.applyer_name,
            school_id   = self.school_id,
            vote_count  = self.vote_count,
            pics_count  = self.pics_count,
            user_id     = self.user_id,
            pics        = self.pics,
            orig_pics   = imgs_to_list(self.pics),
            create_time = self.create_time,
            pic_list    = prefix_img_list_thumb(self.pics, width=720),
            thumb_pic_list = prefix_img_list_thumb(self.pics),
            )
        # pad both lists to exactly 4 slots with empty strings
        if len(result['pic_list'])<4:
            for i in range(4-len(result['pic_list'])):
                result['pic_list'].append('')
        if len(result['thumb_pic_list'])<4:
            for i in range(4-len(result['thumb_pic_list'])):
                result['thumb_pic_list'].append('')
        return result
class RoomDesignVotePrivilege(Model):
    ''' A voting privilege earned by a user for the room-design contest. '''
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    status = Column(TINYINT(1), nullable=False, default=0) # 0 unused, 1 used
    source = Column(TINYINT(1), nullable=False, default=0) # 1 completed credit application (20 votes), 2 completed an order (200 votes)
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            user_id     = self.user_id,
            status      = self.status,
            source      = self.source,
            create_time = self.create_time
            )
class RoomDesignVoteLog(Model):
    ''' A cast vote for a room-design entry. '''
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    room_id = Column(Integer, ForeignKey('room_design_detail.id'))
    source = Column(TINYINT(1), nullable=False, default=0) # 1 completed credit application (20 votes), 2 completed an order (200 votes)
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id      = self.id,
            user_id = self.user_id,
            room_id = self.room_id,
            source  = self.source,
            create_time= self.create_time
            )
class RedpackQuestion(Model):
    ''' A suggested question for the red-packet (redpack) Q&A feature. '''
    id = Column(Integer, primary_key=True)
    content = Column(String(1000))
    create_time = Column(DateTime, default=dt_obj.now)
    status = Column(TINYINT(1), nullable=False, default=0) # 0 offline, 1 online
    def as_dict(self):
        """Serialize to a plain dict."""
        return dict(
            id          = self.id,
            content     = self.content,
            create_time = self.create_time,
            status      = self.status
            )
class RedpackUserQuestion(Model):
    ''' Red-packet Q&A posted by a WeChat follower; viewers pay `price` to see the answer. '''
    id = Column(Integer, primary_key=True)
    qr_user_id = Column(Integer, ForeignKey('qr_code_user.id'))
    question_id = Column(Integer, ForeignKey('redpack_question.id'))
    question = Column(String(1000))
    answer = Column(String(1000))
    is_custom = Column(TINYINT(1), nullable=False, default=0) # 0 platform-provided question, 1 user-defined question
    is_random = Column(TINYINT(1), nullable=False, default=0) # 0 not random, 1 random
    # BUG FIX: `price` was declared twice (once above is_custom, once here);
    # the second class-body assignment silently discarded the first Column
    # object. Declared exactly once, keeping the later (effective) position.
    price = Column(MoneyField) # price required to view the answer
    money = Column(MoneyField, default=0) # total amount received
    status = Column(TINYINT(1), nullable=False, default=0) # 0 newly created, 1 paying, 2 paid
    view_count = db.Column(Integer, default=0) # number of views
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict with money fields formatted."""
        return dict(
            id          = self.id,
            view_count  = self.view_count or 0,
            qr_user_id  = self.qr_user_id,
            question_id = self.question_id,
            is_custom   = self.is_custom,
            is_random   = self.is_random,
            question    = self.question,
            answer      = self.answer,
            price       = format_price(self.price),
            money       = format_price(self.money),
            status      = self.status,
            create_time = self.create_time
            )
class RedpackPay(Model):
    ''' A payment made to view a red-packet Q&A answer. '''
    id = Column(Integer, primary_key=True)
    qr_user_id = Column(Integer, ForeignKey('qr_code_user.id')) # the paying user
    user_question_id = Column(Integer, ForeignKey('redpack_user_question.id'))
    order_no = db.Column(String(30), unique=True)
    transaction_id = db.Column(String(100)) # gateway transaction id
    price = Column(MoneyField) # amount to pay
    status = Column(TINYINT(1), nullable=False, default=0) # 0 newly created, 1 paying, 2 paid
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict with the price formatted."""
        return dict(
            id          = self.id,
            order_no    = self.order_no,
            qr_user_id  = self.qr_user_id,
            transaction_id  = self.transaction_id,
            user_question_id    = self.user_question_id,
            price       = format_price(self.price),
            status      = self.status,
            create_time = self.create_time
            )
class RedpackPayUser(Model):
    ''' A user granted view access to a red-packet question (linked to the payment). '''
    id = Column(Integer, primary_key=True)
    qr_user_id = Column(Integer, ForeignKey('qr_code_user.id'))
    price = Column(MoneyField) # price paid for access
    user_question_id = Column(Integer, ForeignKey('redpack_user_question.id'))
    pay_id = Column(Integer, ForeignKey('redpack_pay.id'))
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        """Serialize to a plain dict with the price formatted."""
        return dict(
            id          = self.id,
            qr_user_id  = self.qr_user_id,
            price       = format_price(self.price),
            pay_id      = self.pay_id,
            user_question_id = self.user_question_id,
            create_time = self.create_time,
            )
class UserDevice(Model):
    ''' A user's current device. '''
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=True)
    device_id = db.Column(String(50), unique=True)
    push_token = db.Column(String(50))
    os_version = db.Column(String(10))
    app_version = db.Column(String(10))
    device_name = db.Column(String(100))
    cat = Column(TINYINT(1), nullable=False, default=0)  # 1 iOS, 2 Android
    create_time = Column(DateTime, default=dt_obj.now)
    update_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        ''' Serialize this row into a plain dict for API output. '''
        return {
            'id': self.id,
            'user_id': self.user_id,
            'device_id': self.device_id,
            'push_token': self.push_token,
            'os_version': self.os_version,
            'app_version': self.app_version,
            'device_name': self.device_name,
            'cat': self.cat,
            'create_time': self.create_time,
            'update_time': self.update_time,
        }
class UserDeviceLog(Model):
    ''' History of devices a user has logged in from. '''
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    device_id = db.Column(String(50), index=True)
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        ''' Serialize this row into a plain dict for API output. '''
        return {
            'id': self.id,
            'user_id': self.user_id,
            'device_id': self.device_id,
            'create_time': self.create_time,
        }
class RdUserQrcode(Model):
    ''' Per-user share QR code for the cash-reward campaign. '''
    __table_args__ = (
        UniqueConstraint('user_id', 'qrcode_id'),
    )
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    qrcode_id = Column(Integer, ForeignKey('qrcode.id'))
    follow_count = Column(Integer, default=0)
    reg_count = Column(Integer, default=0)
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        ''' Serialize this row into a plain dict for API output. '''
        return {
            'id': self.id,
            'qrcode_id': self.qrcode_id,
            'user_id': self.user_id,
            'follow_count': self.follow_count,
            'reg_count': self.reg_count,
            'create_time': str(self.create_time),
        }
class RdQrcodeUser(Model):
    ''' A user who registered via a QR code. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    qrcode_id = Column(Integer, ForeignKey('qrcode.id'))
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        ''' Serialize this row into a plain dict for API output. '''
        return {
            'id': self.id,
            'qrcode_id': self.qrcode_id,
            'user_id': self.user_id,
            'create_time': str(self.create_time),
        }
class RdMoneyPrize(Model):
    ''' Cash-reward prize pool: amount per prize, sent vs. total counts. '''
    id = db.Column(Integer, primary_key=True)
    amount = Column(Integer, default=0)
    sent = Column(Integer, default=0)
    total = Column(Integer, default=0)
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        ''' Serialize this row into a plain dict for API output. '''
        # create_time is intentionally omitted here (matches existing API shape).
        return {
            'id': self.id,
            'amount': self.amount,
            'sent': self.sent,
            'total': self.total,
        }
class RdDrawCounter(Model):
    ''' Cash-reward lottery draw counter: chances used vs. granted per user. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    used = Column(Integer, default=0)
    total = Column(Integer, default=0)
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        ''' Serialize this row into a plain dict for API output. '''
        return {
            'id': self.id,
            'user_id': self.user_id,
            'used': self.used,
            'total': self.total,
        }
class RdDrawCounterLog(Model):
    ''' Change history of a user's cash-reward draw chances. '''
    id = db.Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    count = Column(Integer, default=0)
    source = Column(TINYINT(1), nullable=False, default=1)  # 1 credit application, 2 invitation, 3 completed order
    create_time = Column(DateTime, default=dt_obj.now)
    def as_dict(self):
        ''' Serialize this row into a plain dict for API output. '''
        return {
            'id': self.id,
            'user_id': self.user_id,
            'count': self.count,
            'source': self.source,
        }
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,486
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/5adc2c5e2c4f_.py
|
"""empty message
Revision ID: 5adc2c5e2c4f
Revises: 498586bf16c2
Create Date: 2016-03-03 13:59:11.264954
"""
# revision identifiers, used by Alembic.
revision = '5adc2c5e2c4f'
down_revision = '498586bf16c2'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ''' Apply: add nullable, indexed column promoter.unfollow_count. '''
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('promoter', sa.Column('unfollow_count', sa.Integer(), nullable=True))
    op.create_index(op.f('ix_promoter_unfollow_count'), 'promoter', ['unfollow_count'], unique=False)
    ### end Alembic commands ###
def downgrade():
    ''' Revert: drop index and column promoter.unfollow_count. '''
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_promoter_unfollow_count'), table_name='promoter')
    op.drop_column('promoter', 'unfollow_count')
    ### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,487
|
qsq-dm/mff
|
refs/heads/master
|
/util/sqlerr.py
|
# -*- coding: utf-8 -*-
import re
# Compiled matchers for recognizing MySQL error message fragments.
SQL_DUPLICATE = re.compile(r"Duplicate entry .*? for key")  # duplicate on any key
_DUPLICATE_PRIMARY = re.compile(r"Duplicate entry '.*?' for key 'PRIMARY'")  # duplicate on the PK only

class RegDup(object):
    ''' Duplicate-entry matcher that ignores PRIMARY-key duplicates.

    Exposes the same ``search(string)`` interface as a compiled regex,
    but returns a bool instead of a match object.
    '''
    @staticmethod
    def search(string):
        # True when some key is duplicated, but not the primary key.
        return bool(SQL_DUPLICATE.search(string)) and not(bool(_DUPLICATE_PRIMARY.search(string)))

SQL_REF_NOT_EXIST_ERR = re.compile(r"a foreign key constraint fails")
SQL_DUPLICATE_ENTRY = RegDup
SQL_MONEY_NOT_ENOUGH = re.compile(r'BIGINT UNSIGNED value is out of range in')
SQL_DUPLICATE_NAME = re.compile(r"Duplicate entry '.*?' for key 'name'")
SQL_DUPLICATE_PHONE = re.compile(r"Duplicate entry '.*?' for key 'phone'")
SQL_DUPLICATE_WECHAT = re.compile(r"Duplicate entry '.*?' for key 'wx_id'")
SQL_DUPLICATE_BIND_WECHAT = re.compile(r"with identity key")
SQL_DUPLICATE_ORDER_NO = re.compile(r"Duplicate entry '.*?' for key 'order_no'")
SQL_DUPLICATE_COUPON = re.compile(r"Duplicate entry '.*?' for key 'coupon_id'")
# Raw string fix: the original non-raw literal relied on Python preserving the
# invalid escape sequences "\(" and "\`" (a SyntaxWarning on modern Python).
# The compiled pattern is byte-identical to the original's.
SQL_REF_COUPON_NOT_EXIST = re.compile(r"a foreign key constraint fails .*? FOREIGN KEY \(\`coupon_id")
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,488
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/75f96105f81_.py
|
"""empty message
Revision ID: 75f96105f81
Revises: 41e40e694b32
Create Date: 2015-11-27 15:04:57.429923
"""
# revision identifiers, used by Alembic.
revision = '75f96105f81'
down_revision = '41e40e694b32'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    ''' Apply: drop the repayment.data column. '''
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('repayment', 'data')
    ### end Alembic commands ###
def downgrade():
    ''' Revert: restore repayment.data as VARCHAR(1000), nullable. '''
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('repayment', sa.Column('data', mysql.VARCHAR(length=1000), nullable=True))
    ### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,489
|
qsq-dm/mff
|
refs/heads/master
|
/ops/order.py
|
# -*- coding: utf-8 -*-
import json
from collections import defaultdict
from sqlalchemy import and_
from models import db
from models import Order
from models import UserCoupon
from models import UserCredit
from models import ServiceCode
from models import OrderLog
from models import Repayment
from models import Repayment
from models import PayLogOrderNo
from models import PeriodPayLog
from util.utils import random_str
from util.utils import random_no
from util.utils import get_time_str_from_dt
from util.utils import dt_obj
from util.utils import format_price
from util.sqlerr import SQL_DUPLICATE
from util.sqlerr import SQL_DUPLICATE_ORDER_NO
from util.sqlerr import SQL_DUPLICATE_COUPON
from util.sqlerr import SQL_REF_COUPON_NOT_EXIST
from ops.utils import get_page
from ops.utils import get_items
from ops.utils import count_items
from constants import ORDER_STATUS
from constants import SERVICE_STATUS
class OrderService(object):
    ''' Data-access helpers for orders, service codes and repayments.

    All methods are staticmethods operating on the shared SQLAlchemy
    session; mutating methods commit themselves and roll back on error.
    '''
    @staticmethod
    def add_order(user_id, item_id, hospital_id, \
            price, credit_amount, total_fee, coupon_amount, total, \
            credit_choice_id, user_coupon_id, order_no, credit_verified, \
            status=ORDER_STATUS.NEW_ORDER):
        ''' Insert a new Order row and return its id.

        On failure the session is rolled back; a duplicate order_no is
        reported via the final assert ("server busy").
        '''
        try:
            coupon_id = user_coupon_id or None #FK constraint: must be None, not 0
            credit_choice_id = credit_choice_id or None
            order = Order(
                total_fee = total_fee,
                user_id = user_id,
                item_id = item_id,
                hospital_id = hospital_id,
                total = total,
                credit_choice_id = credit_choice_id,
                coupon_id = coupon_id,
                order_no = order_no,
                credit_amount = credit_amount,
                price = price,
                status = status,
                coupon_amount = coupon_amount,
                credit_verified = credit_verified
            )
            db.session.add(order)
            db.session.commit()
            return order.id
        except Exception as e:
            db.session.rollback()
            import traceback
            traceback.print_exc()
            if SQL_DUPLICATE_ORDER_NO.search(str(e)):
                print 'duplicate order no'
            assert 0, '服务器忙'
    @staticmethod
    def update_order(where, commit=True, **kw):
        ''' Bulk-update orders matching *where*; returns affected row count.

        NOTE(review): the *commit* flag is currently ignored -- the
        session is always committed.
        '''
        count = Order.query.filter(where).update(kw, synchronize_session=False)
        db.session.commit()
        return count
    @staticmethod
    def get_user_order(order_id, user_id):
        ''' Fetch one order owned by *user_id*, or None. '''
        query = and_(
            Order.id==order_id,
            Order.user_id==user_id
        )
        return Order.query.filter(query).first()
    @staticmethod
    def create_servicecode(order_id):
        ''' Create a unique random service code for an order.

        Re-rolls until the code is unused, then inserts it.  Returns the
        code string, or None if the insert fails (session rolled back).
        '''
        random_code = random_str()
        service_code = ServiceCode.query.filter(ServiceCode.code==random_code).first()
        while service_code:
            random_code = random_str()
            service_code = ServiceCode.query.filter(ServiceCode.code==random_code).first()
        try:
            service = ServiceCode(order_id=order_id, code=random_code)
            db.session.add(service)
            db.session.commit()
            return random_code
        except Exception as e:
            db.session.rollback()
    @staticmethod
    def get_servicecode(order_id):
        ''' Fetch the service code row for an order, or None. '''
        return ServiceCode.query.filter(ServiceCode.order_id==order_id).first()
    @staticmethod
    def get_paged_orders(**kw):
        ''' Paged order listing; pagination kwargs are passed to get_page. '''
        return get_page(Order, {}, **kw)
    @staticmethod
    def get_orders(where):
        ''' Return all orders matching *where*. '''
        return Order.query.filter(where).all()
    @staticmethod
    def create_no():
        ''' Randomly generate an order number.

        Timestamp string with a 4-digit random part inserted at position 12.
        '''
        now = dt_obj.now()
        timestr = get_time_str_from_dt(now, format='%Y%m%d%H%M%S%f')
        random_number = random_no(4)
        print now, timestr, random_number
        return timestr[:12] + random_number + timestr[12:]
    @staticmethod
    def get_order_by_orderno(order_no):
        ''' Fetch an order by its order_no, or None. '''
        return Order.query.filter(Order.order_no==order_no).first()
    @staticmethod
    def update_order_status(order_id, status, user_id=None, where=None):
        ''' Set an order's status and append an OrderLog row if it changed.

        Optional *user_id* and *where* narrow the match; returns the
        number of rows updated.
        '''
        query = and_()
        query.append(Order.id==order_id)
        if user_id: query.append(Order.user_id==user_id)
        if where is not None: query.append(where)
        count = Order.query.filter(query).update({'status':status},synchronize_session=False)
        if count:
            log = OrderLog(order_id=order_id, status=status)
            db.session.add(log)
        db.session.commit()
        return count
    @staticmethod
    def repayment(user_id, pay_method, coupon_id, price, data, order_no):
        ''' Insert a Repayment row and return its id.

        On constraint errors the session is rolled back and the specific
        cause (missing coupon / duplicate order_no / used coupon) is
        printed; returns None in that case.
        '''
        try:
            repayment = Repayment(
                pay_method=pay_method, coupon_id=coupon_id,
                user_id=user_id, price=price, order_no=order_no,
                data=data)
            db.session.add(repayment)
            db.session.commit()
            return repayment.id
        except Exception as e:
            print 'except'
            print str(e)
            db.session.rollback()
            if SQL_REF_COUPON_NOT_EXIST.search(str(e)):
                print '优惠券不存在'
            elif SQL_DUPLICATE_ORDER_NO.search(str(e)):
                print '订单号已存在'
            elif SQL_DUPLICATE_COUPON.search(str(e)):
                print '优惠券已被使用'
    @staticmethod
    def update_repayment(where, **kw):
        ''' Update repayment rows matching *where*; returns row count. '''
        count = Repayment.query.filter(where).update(kw, synchronize_session=False)
        db.session.commit()
        return count
    @staticmethod
    def book_surgery(order_id, book_time):
        ''' Book a surgery time: STANDBY -> BOOKED; returns rows updated. '''
        query = and_(
            ServiceCode.order_id==order_id,
            ServiceCode.status==SERVICE_STATUS.STANDBY
        )
        data = {
            'status' : SERVICE_STATUS.BOOKED,
            'book_time' : book_time
        }
        count = ServiceCode.query.filter(query).update(data)
        db.session.commit()
        return count
    @staticmethod
    def cancel_book(order_id):
        ''' Cancel a booking: BOOKED -> STANDBY; returns rows updated. '''
        query = and_(
            ServiceCode.order_id==order_id,
            ServiceCode.status==SERVICE_STATUS.BOOKED
        )
        data = {
            'status' : SERVICE_STATUS.STANDBY,
        }
        count = ServiceCode.query.filter(query).update(data)
        db.session.commit()
        return count
    @staticmethod
    def verify_servicecode(order_id, service_code):
        ''' Verify the service code and confirm surgery: BOOKED -> VERIFYED. '''
        query = and_(
            ServiceCode.order_id==order_id,
            ServiceCode.code==service_code,
            ServiceCode.status==SERVICE_STATUS.BOOKED
        )
        count = ServiceCode.query.filter(query).update({'status':SERVICE_STATUS.VERIFYED})
        db.session.commit()
        if count:
            print '确认手术'
        else:
            print '服务码找不到'
        return count
    @staticmethod
    def cancel_surgery(order_id):
        ''' Cancel a confirmed surgery: VERIFYED -> BOOKED. '''
        query = and_(
            ServiceCode.order_id==order_id,
            ServiceCode.status==SERVICE_STATUS.VERIFYED
        )
        count = ServiceCode.query.filter(query).update({'status':SERVICE_STATUS.BOOKED})
        db.session.commit()
        if count:
            print '已取消手术'
        else:
            print '服务码找不到'
        return count
    @staticmethod
    def get_user_repayment(repayment_id, user_id):
        ''' Fetch one repayment owned by *user_id*, or None. '''
        query = and_(
            Repayment.id==repayment_id,
            Repayment.user_id==user_id
        )
        repayment = Repayment.query.filter(query).first()
        return repayment
    @staticmethod
    def get_repayment_by_orderno(order_no):
        ''' Fetch a repayment by its order_no, or None. '''
        query = and_(
            Repayment.order_no==order_no
        )
        repayment = Repayment.query.filter(query).first()
        return repayment
    @staticmethod
    def count_order(where=None):
        ''' Count orders, optionally filtered by *where*. '''
        return count_items(Order, where=where)
    @staticmethod
    def get_order_by_id(order_id):
        ''' Fetch an order by primary key, or None. '''
        order = Order.query.filter(Order.id==order_id).first()
        return order
    @staticmethod
    def get_service_codes_by_order_ids(order_ids):
        ''' Map order_id -> service-code status for the given orders. '''
        rows = ServiceCode.query.filter(ServiceCode.order_id.in_(order_ids)).all()
        return {i.order_id:i.status for i in rows}
    @staticmethod
    def get_servicecodes_by_order_ids(order_ids, **kw):
        ''' Service code rows (as dicts) for the given orders.

        NOTE(review): **kw is accepted but unused.
        '''
        rows = ServiceCode.query.filter(ServiceCode.order_id.in_(order_ids)).all()
        return [i.as_dict() for i in rows]
    @staticmethod
    def get_orders_by_ids(order_ids):
        ''' Fetch orders by a list of ids. '''
        return get_items(Order, order_ids)
    @staticmethod
    def add_repayment_log(period_pay_log_id, price, total, order_no):
        ''' Record a per-period repayment; duplicate (already repaid) asserts. '''
        try:
            log = PayLogOrderNo(period_pay_log_id=period_pay_log_id, price=price, total=total, order_no=order_no)
            db.session.add(log)
            db.session.commit()
            return log.id
        except Exception as e:
            import traceback
            traceback.print_exc()
            db.session.rollback()
            if SQL_DUPLICATE.search(str(e)):
                assert 0, '分期{}已还{}'.format(period_pay_log_id, price)
    @staticmethod
    def gen_repayment_log(repayment):
        ''' Split repayment.data (a JSON list of periods) into per-period logs.

        Each entry contributes amount + fee to the logged price;
        NOTE(review): punish is parsed but excluded (see commented line).
        '''
        log_list = json.loads(repayment.data)
        print log_list, 'log_list'
        for data in log_list:
            print data,'...'
            period_pay_log_id = data['id']
            amount = data['amount']
            fee = data['fee']
            punish = data['punish']
            #total = format_price(float(amount)+float(fee or 0)+float(punish or 0))
            price = format_price(float(amount)+float(fee or 0))
            OrderService.add_repayment_log(period_pay_log_id, price, repayment.price, repayment.order_no)
    @staticmethod
    def order_repayment_logs_amount(order_id):
        ''' Total amount already repaid for this order. '''
        subquery = db.session.query(PeriodPayLog.id).filter(PeriodPayLog.order_id==order_id).subquery()
        logs = PayLogOrderNo.query.filter(PayLogOrderNo.period_pay_log_id.in_(subquery)).all()
        return sum(log.price for log in logs)
    @staticmethod
    def get_order_repayment_logs_amount(order_id):
        ''' Repaid totals for this order, broken down per repayment order_no.

        Returns {order_no: {price, pay_method, total, transaction_id}}.
        '''
        subquery = db.session.query(PeriodPayLog.id).filter(PeriodPayLog.order_id==order_id).subquery()
        logs = PayLogOrderNo.query.filter(PayLogOrderNo.period_pay_log_id.in_(subquery)).all()
        order_no_map = defaultdict(lambda:0)
        order_no_total_map = {}
        for log in logs:
            order_no_total_map[log.order_no] = format_price(log.total)
            order_no_map[log.order_no] += format_price(log.price)
        data = {}
        for order_no, price in order_no_map.items():
            repayment = Repayment.query.filter(Repayment.order_no==order_no).first()
            assert repayment, '还款不存在'
            data[order_no] = {
                'price': format_price(price),
                'pay_method': repayment.pay_method,
                'total': order_no_total_map[order_no],
                'transaction_id': repayment.transaction_id
            }
        return data
    @staticmethod
    def get_order_by_coupon_id(coupon_id):
        ''' Fetch the order that used the given coupon, or None. '''
        return Order.query.filter(Order.coupon_id==coupon_id).first()
def set_order_status(order, comment=None, servicecode=None):
    ''' Recompute order['status'] in place from service-code state and
    whether the order has been commented.

    *order* and *servicecode* are plain dicts (as_dict output).
    CAUTION: the chain below re-reads order['status'] after the first
    if/elif may have rewritten it, so branch order is significant.
    '''
    if order['user_finished']:
        order['status'] = ORDER_STATUS.FINISH
    elif order['status']==ORDER_STATUS.FINISH:
        # Not user-finished: downgrade a stored FINISH back to PAY_SUCCESS.
        order['status'] = ORDER_STATUS.PAY_SUCCESS
    if order['credit_verified']==0 and order['status'] in [ORDER_STATUS.PAY_SUCCESS]:
        order['status'] = ORDER_STATUS.VERIFYING
    elif order['credit_verified']==2:
        order['status'] = ORDER_STATUS.REJECTED
    elif order['status']==ORDER_STATUS.PAY_SUCCESS:
        # Credit OK: refine by service-code state (1=booked, 2=confirmed).
        if servicecode['status'] == 1:
            order['status'] = ORDER_STATUS.BOOKED
        elif servicecode['status'] == 2:
            order['status'] = ORDER_STATUS.CONFIRMED
    elif order['status'] == ORDER_STATUS.FINISH and not comment:
        order['status'] = ORDER_STATUS.TO_COMMENT
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,490
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/2b3331ab4b9d_.py
|
"""empty message
Revision ID: 2b3331ab4b9d
Revises: f1412ee78a9
Create Date: 2015-12-05 10:28:51.755265
"""
# revision identifiers, used by Alembic.
revision = '2b3331ab4b9d'
down_revision = 'f1412ee78a9'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    ''' Apply: create the promoter table (self-referencing create_by FK). '''
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('promoter',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('phone', sa.String(length=20), nullable=True),
    sa.Column('passwd', sa.String(length=50), nullable=True),
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('create_by', sa.Integer(), nullable=False),
    sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),
    sa.ForeignKeyConstraint(['create_by'], ['promoter.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('phone')
    )
    ### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('promoter')
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,491
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/3d0882a6044_.py
|
"""empty message
Revision ID: 3d0882a6044
Revises: 31291b2ba259
Create Date: 2016-01-26 14:39:20.133527
"""
# revision identifiers, used by Alembic.
revision = '3d0882a6044'
down_revision = '31291b2ba259'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('room_design_apply',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('school_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('room_name', sa.String(length=30), nullable=True),
sa.Column('applyer_name', sa.String(length=30), nullable=True),
sa.Column('phone', sa.String(length=30), nullable=True),
sa.Column('addr', sa.String(length=30), nullable=True),
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['school_id'], ['school.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('room_name')
)
op.create_table('room_design_vote_privilege',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),
sa.Column('source', mysql.TINYINT(display_width=1), nullable=False),
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('room_design_detail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('school_id', sa.Integer(), nullable=True),
sa.Column('room_id', sa.Integer(), nullable=True),
sa.Column('pics', sa.String(length=500), nullable=True),
sa.Column('vote_count', sa.Integer(), nullable=True),
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['room_id'], ['room_design_apply.id'], ),
sa.ForeignKeyConstraint(['school_id'], ['school.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('room_design_vote_log',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('room_id', sa.Integer(), nullable=True),
sa.Column('vote_count', sa.Integer(), nullable=True),
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['room_id'], ['room_design_apply.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('room_design_vote_log')
op.drop_table('room_design_detail')
op.drop_table('room_design_vote_privilege')
op.drop_table('room_design_apply')
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,492
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/a123ae998bf_.py
|
"""empty message
Revision ID: a123ae998bf
Revises: 36d5b6be1479
Create Date: 2015-11-11 17:01:19.461450
"""
# revision identifiers, used by Alembic.
revision = 'a123ae998bf'
down_revision = '36d5b6be1479'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('item', sa.Column('image', sa.String(length=300), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('item', 'image')
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,493
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/569e3d7f70ab_.py
|
"""empty message
Revision ID: 569e3d7f70ab
Revises: 5784ac6510c3
Create Date: 2015-12-10 10:39:57.648906
"""
# revision identifiers, used by Alembic.
revision = '569e3d7f70ab'
down_revision = '5784ac6510c3'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('coupon', sa.Column('sub_cat_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'coupon', 'item_sub_cat', ['sub_cat_id'], ['id'])
op.add_column('user_coupon', sa.Column('sub_cat_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'user_coupon', 'item_sub_cat', ['sub_cat_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'user_coupon', type_='foreignkey')
op.drop_column('user_coupon', 'sub_cat_id')
op.drop_constraint(None, 'coupon', type_='foreignkey')
op.drop_column('coupon', 'sub_cat_id')
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,494
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/4a4cc4517bb_.py
|
"""empty message
Revision ID: 4a4cc4517bb
Revises: 4cf4f86adc0c
Create Date: 2015-11-11 14:04:48.035474
"""
# revision identifiers, used by Alembic.
revision = '4a4cc4517bb'
down_revision = '4cf4f86adc0c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('activity_item', sa.Column('sort_order', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('activity_item', 'sort_order')
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,495
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/29347d4f2522_.py
|
"""empty message
Revision ID: 29347d4f2522
Revises: 2ab4005efb6c
Create Date: 2016-01-27 17:27:42.642697
"""
# revision identifiers, used by Alembic.
revision = '29347d4f2522'
down_revision = '2ab4005efb6c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('room_design_detail', sa.Column('apply_no', sa.String(length=30), nullable=True))
op.create_unique_constraint(None, 'room_design_detail', ['apply_no'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'room_design_detail', type_='unique')
op.drop_column('room_design_detail', 'apply_no')
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,496
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/498586bf16c2_.py
|
"""empty message
Revision ID: 498586bf16c2
Revises: 3d1f1303d3e0
Create Date: 2016-03-03 10:54:43.656812
"""
# revision identifiers, used by Alembic.
revision = '498586bf16c2'
down_revision = '3d1f1303d3e0'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('qr_code_user', sa.Column('status', mysql.TINYINT(display_width=1), nullable=True))
op.create_index(op.f('ix_qr_code_user_status'), 'qr_code_user', ['status'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_qr_code_user_status'), table_name='qr_code_user')
op.drop_column('qr_code_user', 'status')
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,497
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/3d20dc8132b4_.py
|
"""empty message
Revision ID: 3d20dc8132b4
Revises: 4e224649d340
Create Date: 2015-12-09 16:02:14.572280
"""
# revision identifiers, used by Alembic.
revision = '3d20dc8132b4'
down_revision = '4e224649d340'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'trial_apply', ['user_id', 'trial_id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'trial_apply', type_='unique')
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,498
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/480dd7e7caac_.py
|
"""empty message
Revision ID: 480dd7e7caac
Revises: 59a610b5633d
Create Date: 2015-12-09 17:20:34.481073
"""
# revision identifiers, used by Alembic.
revision = '480dd7e7caac'
down_revision = '59a610b5633d'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('trial', 'sex')
op.add_column('trial_apply', sa.Column('sex', mysql.TINYINT(display_width=1), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('trial_apply', 'sex')
op.add_column('trial', sa.Column('sex', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False))
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,499
|
qsq-dm/mff
|
refs/heads/master
|
/ops/bulks.py
|
# -*- coding: utf-8 -*-
import time
import urllib
from collections import defaultdict
from models import User
from ops.user import UserService
from ops.item import ItemService
from ops.activity import ActivityService
from ops.credit import CreditService
from ops.order import OrderService
from ops.promote import PromoteService
from ops.trial import TrialService
from ops.coupon import CouponService
from ops.notification import NotificationService
from ops.redpack import RedpackService
from ops.data import DataService
from settings import ANONY_IMAGE
now = lambda :int(time.time())
def fetch_refs(items, id_, func=None, keep_id=False, **kw):
refs = defaultdict(dict)
dest_key = kw.pop('dest_key', None) or id_.replace('_id', '')
ref_key = kw.pop('ref_key', None) or 'id'
for item in items:
ref_id = item.get(id_)
item[dest_key] = refs[ref_id]
ref_list = func(refs.keys(), **kw)
for item in ref_list:
refs[item[ref_key]].update(item)
if not keep_id:
#重复的关联怎么优化处理 只保留一个引用
for item in items:
item.pop(id_, None)
print items
ANONYMOUS_USER = {
'name': '匿名用户',
'id': 0,
'avatar': ANONY_IMAGE
}
def fetch_user_refs(items, func=UserService.get_users_by_ids, **kw):
id_ = 'user_id'
fetch_refs(items, id_, func, **kw)
for item in items:
if item.get('is_anonymous'):
item['user'] = ANONYMOUS_USER
def fetch_item_refs(items, id_='item_id', func=ItemService.get_items_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_item_cat_refs(items, id_='cat_id', func=ItemService.get_cats_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_item_subcat_refs(items, id_='sub_cat_id', func=ItemService.get_subcats_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_credit_refs(items, id_='user_id', func=UserService.get_credit_applies_by_ids, **kw):
fetch_refs(items, id_, func, ref_key='user_id', **kw)
def fetch_activity_refs(items, id_='activity_id', func=ActivityService.get_activitys_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_hospital_refs(items, id_='hospital_id', func=ItemService.get_hospitals_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_servicecode_refrence(items, id_='order_id', func=OrderService.get_servicecodes_by_order_ids, **kw):
fetch_refs(items, id_, func, ref_key='order_id', **kw)
def fetch_order_refs(items, id_='order_id', func=OrderService.get_orders_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_wechatinfo_refs(items, id_='user_id', func=PromoteService.get_user_qrcodes_by_user_ids, **kw):
fetch_refs(items, id_, func, ref_key='user_id', **kw)
def fetch_apply_refs(items, id_='user_id', func=TrialService.get_trial_apply_by_user_ids, **kw):
fetch_refs(items, id_, func, ref_key='user_id', **kw)
def fetch_coupon_refs(items, id_='coupon_id', func=CouponService.get_coupon_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_article_refs(items, id_='article_id', func=NotificationService.get_articles_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_question_refs(items, id_='question_id', func=RedpackService.get_questions_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_qrcodeuser_refs(items, id_='qr_user_id', func=RedpackService.get_qr_user_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_school_refs(items, id_='school_id', func=DataService.get_schools_dict_by_ids, **kw):
fetch_refs(items, id_, func, **kw)
def fetch_min_period_info(items):
''' 商品列表 获取最低分期价格期数 '''
_, period_pay_choices = CreditService.get_paged_period_choices()
choice_map = {i['id']:i for i in period_pay_choices}
period_id_count_map = {i['id']:i['period_count'] for i in period_pay_choices}
min_choice_id_func = lambda choices: max(choices, key=lambda i:period_id_count_map[i])
for item in items:
choices = item.pop('support_choice_list')
min_choice = choice_map[min_choice_id_func(choices)] if choices else None
if min_choice:
period_count = min_choice['period_count']
period_fee = min_choice['period_fee']
price = item['price']
period_amount = price/period_count
item['period_count']= period_count
item['period_money']= int(period_amount*(1+period_fee))
else:
item['period_count']= 1
item['period_money']= item['price']
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,500
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/57366d94ca9a_.py
|
"""empty message
Revision ID: 57366d94ca9a
Revises: 2d7888ae13f9
Create Date: 2015-12-30 15:30:29.964102
"""
# revision identifiers, used by Alembic.
revision = '57366d94ca9a'
down_revision = '2d7888ae13f9'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('hospital', sa.Column('rate', sa.Float(), nullable=True))
op.add_column('hospital', sa.Column('sold_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('hospital', 'sold_count')
op.drop_column('hospital', 'rate')
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,501
|
qsq-dm/mff
|
refs/heads/master
|
/migrations/versions/32ca0414826f_.py
|
"""empty message
Revision ID: 32ca0414826f
Revises: 29bbb2cfc971
Create Date: 2016-01-28 11:49:27.884628
"""
# revision identifiers, used by Alembic.
revision = '32ca0414826f'
down_revision = '29bbb2cfc971'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('room_design_vote_log', 'vote_count')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('room_design_vote_log', sa.Column('vote_count', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
### end Alembic commands ###
|
{"/admin/urls.py": ["/admin/views.py"], "/ops/room_design.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/util/sign.py": ["/settings.py"], "/ops/hospital.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/utils.py": ["/util/utils.py", "/models.py"], "/ops/credit.py": ["/models.py", "/util/sqlerr.py", "/util/utils.py", "/ops/utils.py", "/settings.py", "/constants.py"], "/user/api_urls.py": ["/user/auth.py", "/user/trial.py"], "/migrations/versions/3621ae6c4339_.py": ["/models.py"], "/ops/comment.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/273db5f3044f_.py": ["/models.py"], "/ops/notification.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/ops/activity.py": ["/models.py", "/util/utils.py", "/ops/utils.py"], "/migrations/versions/18e20ed0da8d_.py": ["/models.py"], "/hospital/urls.py": ["/hospital/views.py"], "/ops/coupon.py": ["/util/utils.py", "/models.py", "/ops/utils.py"], "/ops/log.py": ["/models.py", "/util/utils.py"], "/migrations/versions/55f4c256c989_.py": ["/models.py"], "/ops/beauty_tutorial.py": ["/models.py", "/ops/utils.py", "/util/utils.py"], "/migrations/versions/42e923c1238_.py": ["/models.py"], "/user/urls.py": ["/user/views.py", "/user/auth.py", "/user/trial.py", "/user/room_design.py", "/user/redpack.py", "/user/draw_money.py"], "/ops/actions.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py"], "/migrations/versions/36d5b6be1479_.py": ["/models.py"], "/ops/redpack.py": ["/util/sqlerr.py", "/util/utils.py", "/models.py", "/ops/utils.py", "/ops/cache.py", "/settings.py"], "/ops/user.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/ops/item.py": ["/models.py", "/util/utils.py", "/util/sqlerr.py", "/ops/utils.py"], "/migrations/versions/18e507e87862_.py": ["/models.py"], "/user/draw_money.py": ["/models.py", "/util/utils.py", "/util/decorators.py", "/util/validators.py", "/util/sign.py", 
"/util/drawgift.py", "/ops/bulks.py", "/ops/item.py", "/ops/data.py", "/ops/user.py", "/ops/redpack.py", "/ops/promote.py", "/ops/cache.py", "/ops/room_design.py", "/constants.py", "/thirdparty/sms.py", "/thirdparty/wechat.py", "/settings.py"], "/thirdparty/alipay/config.py": ["/settings.py"], "/promote/urls.py": ["/promote/views.py"], "/ops/admin.py": ["/util/sqlerr.py", "/models.py", "/ops/utils.py"], "/udp_server.py": ["/settings.py"], "/migrations/versions/4eefa5b6eb51_.py": ["/models.py"], "/demo.py": ["/thirdparty/wechat.py"], "/user/common.py": ["/models.py", "/ops/order.py", "/ops/coupon.py", "/ops/credit.py", "/constants.py"], "/models.py": ["/util/utils.py", "/settings.py", "/constants.py"]}
|
19,538
|
bjmedina/PSTH
|
refs/heads/master
|
/nwb_plots_firing_rates.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 11:50:02 EDT 2019
@author: Bryan Medina
"""
###### Imports ########
from nwb_plots_functions import *
########################
###### UPDATE PATH #################################
DIRECTORY = '/Users/bjm/Documents/CMU/Research/data'
VAR_DIREC = '/Users/bjm/Documents/CMU/Research/data/plots/variations/'
MICE_ID = ['424448', '421338', '405751']
MOUSE_ID = '421338'
####################################################
# Get file from directory
spikes_nwb_file = os.path.join(DIRECTORY, 'mouse' + MOUSE_ID + '.spikes.nwb')
nwb = h5.File(spikes_nwb_file, 'r')
probe_names = nwb['processing']
# keeps track of max firing rate for each cell in
probe_fr = {}
colors = {'424448':'red',
'421338':'green',
'405751':'blue'}
# firing rate filename
filename = MOUSE_ID + '_probes_fr'
PLOT_ALL = True
rows = 2
cols = 2
# Ideally, you should do this for every mouse.
# We want to check to see if we have this data
try:
with open(filename+"_", 'rb') as f:
probe_fr = pickle.load(f)
except:
# only keep track of maximal firing rates...
probe_fr = {}
for probe_name in probe_names:
# Getting all data for a given cell
# File to get data from.
probe_filename = MOUSE_ID + "_" + probe_name
print(probe_filename)
try:
with open(probe_filename, 'rb') as f:
# Plotting all curves for every region for a given mouse.
probe = pickle.load(f)
except FileNotFoundError:
saveProbeData(MOUSE_ID, probe_name, nwb)
print("Run again nwb_plots with plotting off")
sys.exit(1)
probe_fr[probe_name] = []
for cell in probe.getCellList():
# Get max, add it here...
probe_fr[probe_name].append(probe.getCell(cell).max_frate)
# Plot everything
for probe_name in probe_names:
# Plot variability of every region
if(PLOT_ALL):
# Plotting how variable neuron can be
for probe_name in probe_names:
plt.title("Mouse: " + str(MOUSE_ID) + " / " + probe_name + " Variation")
plt.ylim(0, 14)
plt.xlabel("Maximal Firing Rate (Spikes/Sec)")
plt.ylabel("Number of Neurons")
plt.hist(probe_fr[probe_name], bins = 100, edgecolor='black')
plt.savefig(VAR_DIREC + MOUSE_ID + probe_name + "_variations.png")
plt.clf()
# Plotting multiple summary plots in one plot.
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8,8))
fig.suptitle("Variation in Maximal Firing Rates")
fig.text(0.5, 0.04, 'Maximal Firing Rate (Spikes/sec)', ha='center')
fig.text(0.04, 0.5, 'Number of Neurons', va='center', rotation='vertical')
variability = []
curves = {}
i = 0
# Plotting 4 plots in one figure.
for row in range(0, rows):
for col in range(0, cols):
if( not (row + 1 == rows and col + 1 == cols) ):
MOUSE = MICE_ID[i]
filename = MOUSE + '_probes_fr'
with open(filename, 'rb') as f:
probe_fr = pickle.load(f)
for probe_name in probe_names:
variability.extend(probe_fr[probe_name])
axes[row, col].set_ylim([0, 90])
axes[row, col].set_xlim([0, 100])
axes[row, col].set_title("Mouse %s" % (MOUSE))
ys, bins, c = axes[row, col].hist(variability, bins = 100,color=colors[MOUSE], edgecolor='black', alpha=0.7)
curves[MOUSE] = [LSQUnivariateSpline(bins[0:len(bins)-1], ys, [10, 30, 55, 70, 100]), bins[0:len(bins)-1]]
i = i+1
variability = []
else:
axes[row, col].set_ylim([0, 90])
axes[row, col].set_xlim([0, 100])
axes[row, col].set_title("All Variations")
for ID in MICE_ID:
axes[row, col].plot(curves[ID][1], curves[ID][0](curves[ID][1]), label=ID, color=colors[ID], alpha=0.7)
axes[row, col].legend()
plt.savefig(VAR_DIREC + "firing_rate_variations.png")
# Save the probe_fr file.
with open(filename, 'wb') as f:
pickle.dump(probe_fr, f)
|
{"/nwb_plots_firing_rates.py": ["/nwb_plots_functions.py"], "/nwb_plots_percentile.py": ["/nwb_plots_functions.py"], "/nwb_plots.py": ["/nwb_plots_functions.py"], "/nwb_trials.py": ["/nwb_plots_functions.py"]}
|
19,539
|
bjmedina/PSTH
|
refs/heads/master
|
/nwb_plots_percentile.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 11:50:02 EDT 2019
@author: Bryan Medina
"""
###### Imports ########
from nwb_plots_functions import *
from scipy.interpolate import LSQUnivariateSpline
import h5py as h5
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import sys
########################
###### UPDATE PATH #################################
DIRECTORY = '/Users/bjm/Documents/CMU/Research/data'
VAR_DIREC = '/Users/bjm/Documents/CMU/Research/data/plots/variations/'
PERC_PLOTS_DIRECTORY = '/Users/bjm/Documents/CMU/Research/data/plots/percentile/'
MOUSE_ID = '424448'
####################################################
# Get file from directory
spikes_nwb_file = os.path.join(DIRECTORY, 'mouse' + MOUSE_ID + '.spikes.nwb')
nwb = h5.File(spikes_nwb_file, 'r')
probes = nwb['processing']
probe_names = [name for name in probes.keys()]
# save all curves for all regions
mid = {}
top = {}
bot = {}
# Used for plotting
rows = 3
cols = 2
for probe_name in probe_names:
# Calculate median neuron, and also 90th and 10th percentile neuron
median_n = []
top_ten = []
bot_ten = []
probe_filename = MOUSE_ID + "_" + probe_name
with open(probe_filename, 'rb') as f:
probe = pickle.load(f)
for xval in xs:
rates = []
for cell in probe.getCellList():
rates.append(probe.getCell(cell).lsq(xval))
# Sort this list...
rates.sort()
median_n.append(np.median(rates))
top_ten.append(np.percentile(rates, 75))
bot_ten.append(np.percentile(rates, 25))
# save the curves
mid[probe_name] = LSQUnivariateSpline(xs, median_n, knots[1:-1])
top[probe_name] = LSQUnivariateSpline(xs, top_ten, knots[1:-1])
bot[probe_name] = LSQUnivariateSpline(xs, bot_ten, knots)
# Plotting median, 75th percentile, and 25th percentile neuron
# Do multiple plots on one figure
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(10, 10))
fig.tight_layout(pad=0.1, w_pad=0.1, h_pad=0.1)
fig.suptitle("Mouse %s Neural Activity" % (MOUSE_ID))
fig.text(0.5, 0.04, 'Bins (ms)', ha='center')
fig.text(0.04, 0.5, 'Firing Rate (Spike/sec)', va='center', rotation='vertical')
i = 0
for row in range(0, rows):
for col in range(0, cols):
probe_name = probe_names[i]
probe_filename = MOUSE_ID + "_" + probe_name
with open(probe_filename, 'rb') as f:
probe = pickle.load(f)
box = axes[row,col].get_position()
move = 0.08
move2 = 0.033
move3 = 0.053
if(row == 0):
if(col == 0):
axes[row,col].set_position([move+box.x0+box.x0/5, box.y0, box.width * 0.8 , box.height * 0.8])
else:
axes[row,col].set_position([move+box.x0-box.x0/7, box.y0, box.width * 0.8 , box.height * 0.8])
elif(row == 1):
if(col == 0):
axes[row,col].set_position([move+box.x0+box.x0/5, box.y0+move2, box.width * 0.8 , box.height * 0.8])
else:
axes[row,col].set_position([move+box.x0-box.x0/7, box.y0+move2, box.width * 0.8 , box.height * 0.8])
elif(row == 2):
if(col == 0):
axes[row,col].set_position([move+box.x0+box.x0/5, box.y0+move3, box.width * 0.8 , box.height * 0.8])
else:
axes[row,col].set_position([move+box.x0-box.x0/7, box.y0+move3, box.width * 0.8 , box.height * 0.8])
axes[row, col].set_ylim([0, 13])
axes[row, col].set_xlim([-20, 500])
axes[row, col].set_title(probe.name)
axes[row, col].plot(xs, top[probe_name](xs), label = "75th Percentile")
axes[row, col].plot(xs, mid[probe_name](xs), label = "Median Neuron")
axes[row, col].plot(xs, bot[probe_name](xs), label = "25th Percentile")
if(row == 0 and col == cols - 1):
axes[row, col].legend()
# Next probe
i = i+1
plt.savefig(PERC_PLOTS_DIRECTORY + str(MOUSE_ID) + "_percentile.png")
plt.clf()
|
{"/nwb_plots_firing_rates.py": ["/nwb_plots_functions.py"], "/nwb_plots_percentile.py": ["/nwb_plots_functions.py"], "/nwb_plots.py": ["/nwb_plots_functions.py"], "/nwb_trials.py": ["/nwb_plots_functions.py"]}
|
19,540
|
bjmedina/PSTH
|
refs/heads/master
|
/nwb_plots.py
|
"""
Created on Wed Jun 12 09:25:21 EDT 2019
@author: Bryan Medina
"""
from nwb_plots_functions import *
# READ ME ################################
# This file plots
# - (1) PSTHs for every cell (averaged across all trials) as well as a smoothed curve
# - (2) PSTHs for every probe (averaged across all trials and all cells) as well as a smoothed curve
# - (3) Smoothed curve for every probe
##########################################
## CHANGE ME #############################################################
# Data directory
DIRECTORY = '/home/bjm/Documents/CS/PSTH'
SUMMARY_PLOTS_DIRECTORY = '/home/bjm/Documents/CS/PSTH/plots/'
VAR_DIREC = '/home/bjm/Documents/CS/PSTH/plots/variations/'
MOUSE_ID = '421338'
##########################################################################
# Get file from directory
spikes_nwb_file = os.path.join(DIRECTORY, 'mouse' + MOUSE_ID + '.spikes.nwb')
nwb = h5.File(spikes_nwb_file, 'r')
probe_names = nwb['processing']
# Allows plotting (takes more time)
PLOTTING = True
# Print Descriptions
DESCRIPTIONS = True
# Turn this on if it's your first time running this code.
ALL_PLOTS = True
if(ALL_PLOTS):
for probe_name in probe_names:
# File to get data from.
probe_filename = MOUSE_ID + "_" + probe_name
print(probe_filename)
# plot directories
## CHANGE ME ####################################################################################
PROBE_PLOTS_DIRECTORY = '/home/bjm/Documents/CS/PSTH/plots/probes/'
CELL_PLOTS_DIRECTORY = '/home/bjm/Documents/CS/PSTH/plots/cells/' + probe_name + '/'
#################################################################################################
## Find probe to override
try:
with open(probe_filename, 'rb') as f:
probe = pickle.load(f)
## If probe file doesn't exist, then we'll have to make that file from scratch
except FileNotFoundError:
for probe_name in probe_names:
saveProbeData(MOUSE_ID, probe_name, nwb)
print("Run again")
sys.exit(1)
# Summary of all activity across all cells in a probe.
x = np.zeros((len(bins), 1))
# Plotting (1) #####################
# Getting all data for a given cell
for cell in probe.getCellList():
# current cell spiking data
curr_cell = np.zeros((len(bins), 1))
for freq in temp_freqs:
for angle in orientations:
config = str(freq) + "_" + str(angle)
curr_cell += probe.getCell(cell).getSpikes(config)
# Plot curr cell
x += probe.getCell(cell).getSpikes(config)
# Convert cell spiking data to a format 'plt.hist' will like
z = fromFreqList(curr_cell)
curr_cell,b,c = plt.hist(z, bins)
plt.clf()
# Normalize
curr_cell /= num_trials*0.001
# Get some information on the cell such as max firing rate, avg, std, and name
################# Finding peaks and valleys #######################
probe.getCell(cell).max_frate = max(curr_cell[0:500])
probe.getCell(cell).max_ftime = np.where(curr_cell[0:500] == probe.getCell(cell).max_frate)[0][0]
probe.getCell(cell).avg_frate = np.mean(curr_cell[0:500])
probe.getCell(cell).std = np.std(curr_cell[0:500])
probe.getCell(cell).name = cell
# Also get the associated firing rate curve for the cell
lsq = LSQUnivariateSpline(bins[0:len(bins)-1], curr_cell, knots)
probe.getCell(cell).lsq = lsq
cpm_result = cpm.detectChangePoint(FloatVector(lsq(curr_cell[0:probe.getCell(cell).max_ftime])), cpmType='Student', ARL0=1000)
cpm_result = robj_to_dict(cpm_result)
probe.getCell(cell).change_pt = lsq(cpm_result['changePoint'][0])
probe.getCell(cell).chg_time = cpm_result['changePoint'][0]
####################################################################
if(DESCRIPTIONS):
print("Cell " + str(cell) + " : " + str(probe.getCell(cell)))
# Plotting
if(PLOTTING):
# Plotting normalized cell activity
cell_filename = MOUSE_ID + "_cell" + str(cell)
plt.axvline(x=probe.getCell(cell).chg_time, alpha=0.5, linestyle='--', color='magenta')
plt.ylim(0, 75)
plt.xlim(-20, 520)
plt.ylabel('Spikes/second')
plt.xlabel('Bins')
plt.title("Mouse: " + str(MOUSE_ID) + " / " + probe_name + " in "+ probe.name + ". Cell: " + str(cell))
plt.plot(xs, lsq(xs), color = 'magenta', alpha=0.9)
plt.bar(b[0:len(b)-1], curr_cell)
plt.savefig(CELL_PLOTS_DIRECTORY + cell_filename + ".png")
plt.clf()
# End Plotting (1) ####################
# Plotting normalized probe activity
z = fromFreqList(x)
x,b,c = plt.hist(z, bins)
plt.clf()
###
### Normalization
# also divide by number of neurons in that particular region
x /= num_trials*(0.001)*len(probe.getCellList())
# Need to find the two maxes and two mins
################# Finding peaks and valleys #######################
# First we find the first peak and the time it occurs at.
probe.max_frate = max(x[0:500])
probe.max_ftime = np.where(x[0:500] == probe.max_frate)[0][0]
# Now first valley
probe.min_frate = min(x[0:probe.max_ftime])
probe.min_ftime = np.where(x[0:probe.max_ftime] == probe.min_frate)[0][0]
# Now second peak
probe.max_frate2 = max(x[200:300])
probe.max_ftime2 = np.where(x[200:300] == probe.max_frate2)[0][0] + 200
# Last valley
probe.min_frate2 = min(x[probe.max_ftime:probe.max_ftime2])
probe.min_ftime2 = np.where(x[probe.max_ftime:probe.max_ftime2] == probe.min_frate2)[0][0] + probe.max_ftime
# The value it converges towards the end.
probe.converge = min(x[probe.max_ftime2:500])
# Average firing rate + standard deviation
probe.avg_frate = np.mean(x[0:500])
probe.std = np.std(x[0:500])
# Smoothed Function
lsq = LSQUnivariateSpline(bins[0:len(bins)-1], x, knots)
probe.lsq = lsq
# Get the change point here
cpm_result = cpm.detectChangePoint(FloatVector(lsq(xs[probe.min_ftime-5:probe.max_ftime+1])), cpmType='Student', ARL0=1000)
cpm_result = robj_to_dict(cpm_result)
# Set chnage point and change point time
probe.change_pt = lsq(cpm_result['changePoint'][0]+probe.min_ftime-5)
probe.chg_time = cpm_result['changePoint'][0]+probe.min_ftime-5
###################################################################
if(DESCRIPTIONS):
print(repr(probe))
# Plotting (2) ###############################################
if(PLOTTING):
# Plotting
plt.axvline(x=probe.chg_time, color='red', linestyle='--', alpha=0.7)
plt.ylim(0, 12)
plt.xlim(-20, 500)
plt.ylabel('Spikes/second')
plt.xlabel('Bins')
plt.title("Mouse: " + str(MOUSE_ID) + " / " + probe_name + " in "+ probe.name)
plt.plot(xs, lsq(xs), color = 'red')
plt.bar(b[0:len(b)-1], x, alpha=0.8)
plt.savefig(PROBE_PLOTS_DIRECTORY + probe_filename + ".png")
plt.clf()
with open(probe_filename, 'wb') as f:
pickle.dump(probe, f)
# End Plotting (2) ###########################################
# Plotting (3) ###############################################
# Here, we'll plot all curves for every region for a given mouse.
probes = []
# First, lets order the probe in terms of the time in which the max firing rate occurs
for probe_name in probe_names:
probe_filename = MOUSE_ID + "_" + probe_name
with open(probe_filename, 'rb') as f:
# Plotting all curves for every region for a given mouse.
probe = pickle.load(f)
probes.append(probe)
probes.sort(key=lambda x: x.max_ftime)
# Finally, we can plot
for i in range(0, len(probes)):
probe = probes[i]
plt.ylabel('Firing Rate (Spikes/second)')
plt.xlabel('Bins (ms)')
plt.ylim(0, 12)
plt.xlim(-20, 500)
plt.title("Mouse: " + str(MOUSE_ID) + " | Average Firing Rates")
plt.plot(xs, probe.lsq(xs), label = probe.name, color=colors[i])
plt.legend()
plt.savefig(SUMMARY_PLOTS_DIRECTORY + str(MOUSE_ID) + ".png")
plt.clf()
# End Plotting (3) ###########################################
|
{"/nwb_plots_firing_rates.py": ["/nwb_plots_functions.py"], "/nwb_plots_percentile.py": ["/nwb_plots_functions.py"], "/nwb_plots.py": ["/nwb_plots_functions.py"], "/nwb_trials.py": ["/nwb_plots_functions.py"]}
|
19,541
|
bjmedina/PSTH
|
refs/heads/master
|
/nwb_trials.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 09:25:21 EDT 2019
@author: Bryan Medina
"""
from nwb_plots_functions import *
# READ ME ################################
'''
This file
- Gets the different values for t1, t2, ..., t5, beta1, beta2, ..., beta5 for each trial
- Compares then all against each other.
'''
##########################################
## CHANGE ME #############################################################
# Data directory
DIRECTORY = '/Users/bjm/Documents/CMU/Research/data/'
TRIAL_DATA = '/Users/bjm/Documents/CMU/Research/data/trial_data/'
TRIAL_PLOTS = '/Users/bjm/Documents/CMU/Research/data/plots/trials/'
TMAX_DIREC = '/Users/bjm/Documents/CMU/Research/data/tmax/'
MOUSE_ID = '421338'
##########################################################################
# Get file from directory
spikes_nwb_file = os.path.join(DIRECTORY, 'mouse' + MOUSE_ID + '.spikes.nwb')
nwb = h5.File(spikes_nwb_file, 'r')
probe_names = nwb['processing']
# Whether or not we want to calculate the confidence intervals
CONF_INTERVAL = True
BOOTSTRAPS = 500
# Accumulators: per-trial PSTHs and bootstrap peak-time samples.
trials = []
t_max1 = []
t_max2 = []
# Trial window bounds. NOTE(review): units comments said "seconds", but
# downstream code multiplies spike times by 1000 and bins in ms — these
# look like milliseconds; confirm.
start = 0 #in second
end = 2000 #in seconds
# time stamps ( this never changes )
# This is SPECIFICALLY for the 'drifting_gratings_2' stimulus
timestamps = nwb['stimulus']['presentation']['drifting_gratings_2']['timestamps'].value
stim_orient = nwb['stimulus']['presentation']['drifting_gratings_2']['data'].value
PLOTTING = False
## For every region: build (or load) a PSTH for each trial, then bootstrap
## the peak-time distributions for confidence intervals.
for probe_name in probe_names:
    print(probe_name)
    # Cached Probe object from a previous run of the per-cell binning.
    probe_filename = DIRECTORY + MOUSE_ID + "_" + probe_name
    try:
        with open(probe_filename, 'rb') as f:
            probe = pickle.load(f)
    except FileNotFoundError:
        # Cache miss: (re)generate the per-probe data for every probe.
        for probe_name in probe_names:
            saveProbeData(MOUSE_ID, probe_name, nwb)
    ## For EVERY trial,
    for trial_number in range(len(timestamps)):
        print("Trial number %d" % trial_number)
        # Use the cached per-trial PSTH when available.
        try:
            trial_file = TRIAL_DATA + "/" + MOUSE_ID + "/" + probe_name + "/tr_" + str(trial_number)
            with open(trial_file, 'rb') as t:
                tr = pickle.load(t)
                trials.append(tr)
        except FileNotFoundError:
            trial = timestamps[trial_number]
            freq = stim_orient[trial_number][1]
            angle = stim_orient[trial_number][3]
            # Skip blank sweeps whose stimulus parameters are NaN.
            # BUG FIX: this test used `or`, which let a row with exactly one
            # NaN through and crashed on int(nan) below.
            if not (str(freq) == "nan") and not (str(angle) == "nan"):
                freq = int(freq)
                angle = int(angle)
                config = str(freq) + "_" + str(angle)
                ## Build this trial's PSTH: for every cell in the region,
                ## bin its spikes that fall inside the trial's time frame.
                curr_trial = np.zeros((len(bins), 1))
                for cell in probe.getCellList():
                    spikes = nwb['processing'][probe_name]['UnitTimes'][str(cell)]['times'].value
                    stimulus_spikes = binarySearch(spikes, trial, 0, len(spikes)-1)
                    if not (type(stimulus_spikes) == type(-1)):
                        # Shift to trial onset and convert s -> ms.
                        stimulus_spikes = (stimulus_spikes - trial[0])
                        stimulus_spikes *= 1000
                        for stim_spike in stimulus_spikes:
                            curr_trial[insertToBin(stim_spike, end)] += 1
                ########################
                tr = Trial()
                tr.number = trial_number
                tr.config = config
                tr.spikes = curr_trial
                # tr.t
                # tr.beta
                # Re-histogram to a flat array, normalize to spikes/second,
                # then fit the smoothing spline.
                z = fromFreqList(curr_trial)
                curr_trial, b, c = plt.hist(z, bins)
                plt.clf()
                curr_trial /= 0.001*len(probe.getCellList())
                tr.spikes = curr_trial
                tr.lsq = LSQUnivariateSpline(bins[0:len(bins)-1], curr_trial, knots)
                trials.append(tr)
                #######################
                with open(trial_file, 'wb') as t:
                    pickle.dump(tr, t)
            if(PLOTTING):
                plt.xlim(-2, 500)
                plt.ylim(0, 50)
                plt.ylabel('Spikes/second')
                plt.xlabel('Bins')
                plt.title("Mouse: " + str(MOUSE_ID) + " | " + probe_name + " trial: " + str(tr.number) + " | " + tr.config)
                plt.bar(bins[0:len(bins)-1], tr.spikes, alpha=0.8, color='blue')
                plt.plot(xs, tr.lsq(xs), color='red', alpha=0.4)
                plt.show()
                #plt.savefig(TRIAL_PLOTS + MOUSE_ID + "/" + probe_name + "/" + "tr_"+str(trial_number))
                plt.clf()
    if(CONF_INTERVAL):
        # Bootstrap the distribution of the two peak times across trials.
        fname = TMAX_DIREC + MOUSE_ID + "/" + probe_name + "/" + MOUSE_ID + "_tmax_"
        try:
            with open(fname + "1", 'rb') as f:
                t_max1 = pickle.load(f)
            with open(fname + "2", 'rb') as f:
                t_max2 = pickle.load(f)
        except FileNotFoundError:
            # We're doing BOOTSTRAPS (500) resamples of the trial set.
            for i in range(0, BOOTSTRAPS):
                print("BOOTSTRAP %d" % i)
                # g is a random sample (with replacement) of all trials.
                g = choices(trials, k = len(trials))
                sample_spikes = np.zeros((len(g[0].spikes),))
                lsq = np.zeros((len(g[0].lsq(xs)), 1))  # NOTE(review): unused
                # Element-wise accumulate the sampled PSTHs...
                for sample in g:
                    for j in range(0, len(sample.spikes)):
                        sample_spikes[j] += sample.spikes[j]
                # ...then normalize by the number of sampled trials.
                sample_spikes /= len(g)
                # tmax_1: global peak in the first 500 ms; tmax_2: peak in 200-300 ms.
                peak = max(sample_spikes[0:500])
                tmax_1 = np.where(sample_spikes[0:500] == peak)[0][0]
                peak2 = max(sample_spikes[200:300])
                tmax_2 = np.where(sample_spikes[200:300] == peak2)[0][0] + 200
                if(PLOTTING):
                    print("Peak 1: %d @ %d" % (peak, tmax_1))
                    # BUG FIX: this debug line printed `peak` instead of `peak2`.
                    print("Peak 2: %d @ %d" % (peak2, tmax_2))
                    plt.ylim(0, 10)
                    plt.xlim(-2, 500)
                    plt.bar(bins[:-1], sample_spikes, alpha=0.8, color='blue')
                    plt.axvline(x=tmax_1,color='red', linestyle='--')
                    plt.axvline(x=tmax_2,color='red', linestyle='--')
                    plt.show()
                    plt.clf()
                # Save those two into two separate vectors
                t_max1.append(tmax_1)
                t_max2.append(tmax_2)
            # clear the slate for the next probe
            # NOTE(review): `trials` is only reset on the cache-miss path;
            # when the pickled tmax files exist it keeps accumulating — verify.
            trials = []
            with open(fname + "1", 'wb') as f:
                pickle.dump(t_max1, f)
            with open(fname + "2", 'wb') as f:
                pickle.dump(t_max2, f)
        t_max1 = []
        t_max2 = []
        fname = TMAX_DIREC + MOUSE_ID + "/" + probe_name + "/" + MOUSE_ID + "_tmax_"
|
{"/nwb_plots_firing_rates.py": ["/nwb_plots_functions.py"], "/nwb_plots_percentile.py": ["/nwb_plots_functions.py"], "/nwb_plots.py": ["/nwb_plots_functions.py"], "/nwb_trials.py": ["/nwb_plots_functions.py"]}
|
19,542
|
bjmedina/PSTH
|
refs/heads/master
|
/nwb_plots_functions.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 09:25:21 EDT 2019
@author: Bryan Medina
"""
###### Imports ########
from random import choices
from rpy2.robjects.vectors import StrVector
from rpy2.robjects.vectors import FloatVector
from scipy.interpolate import LSQUnivariateSpline
from scipy.interpolate import CubicSpline
from scipy.interpolate import UnivariateSpline
import h5py as h5
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import rpy2.robjects as robjects
import rpy2.robjects.packages as rpackages
import sys
########################
# Setting up packages for rpy2 use: make sure the R 'cpm' (change point
# model) package is installed, installing from CRAN on first use.
# BUG FIX: the original assigned the misspelled `have_pakcage = False`,
# so `if not have_package` raised NameError whenever the package was missing.
package_name = 'cpm'
have_package = rpackages.isinstalled(package_name)
if have_package:
    print("R package %s already installed" % package_name)
else:
    utils = rpackages.importr('utils')
    utils.chooseCRANmirror(ind=1)
    utils.install_packages(package_name)
    print("installed R package: %s" % package_name)
cpm = rpackages.importr(package_name)
##################################
### Module-level constants for the 'drifting gratings' stimulus analysis.
# All possible temporal frequencies for the stimulus
temp_freqs = [1, 2, 4, 8, 15]
# All possible orientations of stimulus (angles), 0..315 in 45-degree steps
orientations = [i*45 for i in range(8)]
# Knots for spline (selected by eye)
knots = [30, 50, 52, 55, 65, 70, 75, 80, 83, 100, 150, 200, 250, 300, 325, 375, 400]
tr_knots = [50, 110, 160, 200, 250, 300, 350, 400, 450]
# Number of times stimulus is presented
num_trials = 600
# Conversion from ms to s
msToSec = 1000 # 1000 ms in 1 sec
# x-axis sample points for plotting fitted splines
xs = np.linspace(0,600,3000)
# Start and end of trials
start = 0
end = 2000
# Bin width
width = 1
# Actual bins (for later use)
bins = np.linspace(start, end, int( (end - start)/width + 1 ))
# Probe name -> cortical region label
mapping = {'probeA': 'AM',
           'probeB': 'PM',
           'probeC': 'V1',
           'probeD': 'LM',
           'probeE': 'AL',
           'probeF': 'RL'}
# One plot color per probe/region (order matches sorted probe usage)
colors = ['k', '#9400D3', 'b', 'g', '#FF7F00', 'r']
###
###
class Probe:
    """One recording probe's visual-area cells plus PSTH summary statistics.

    The statistic fields below are class-level defaults that the analysis
    scripts overwrite per instance after the histograms are built.
    """
    # Max firing rate and the time (bin) at which it occurs
    max_frate = 0
    max_ftime = 0
    # Second highest firing rate
    max_frate2 = 0
    max_ftime2 = 0
    # Min firing rate and the time it occurs
    min_frate = 0
    min_ftime = 0
    # Second lowest
    min_frate2 = 0
    min_ftime2 = 0
    # Average firing rate that is converged to as t -> 500 ms
    converge = 0
    # Change point (before the first peak) and the time it occurs
    change_pt = 0
    chg_time = 0
    # Average firing rate
    avg_frate = 0
    # Standard deviation of the firing rates
    std = 0
    # Fitted spline (string placeholder until assigned by the scripts)
    lsq = " "
    def __init__(self, nwb, name):
        '''
        Constructor.

        Input(s)
        --------
        'nwb' : h5py._hl.files.File. 'spikes.nwb' dataset.
        'name': string. name of probe (e.g. 'probeA'); translated to a
                region label via the module-level `mapping`.

        Output(s)
        ---------
        New 'Probe' object holding this probe's visual-area cells.
        '''
        self.__cells = getProbeCells(nwb, name)
        self.name = mapping[name]
    def getCell(self, cell_number):
        '''
        Return the Cell object stored under unit id 'cell_number'.
        '''
        return self.__cells[cell_number]
    def getCellList(self):
        '''
        Return the unit ids (dictionary keys) of all cells on this probe.
        '''
        return self.__cells.keys()
    def __repr__(self):
        '''
        Debug-friendly one-line summary of all firing-rate statistics.
        (The original docstrings had __repr__/__str__ roles swapped;
        __str__ below emits a LaTeX table row.)
        '''
        return "%s\t Avg: %3.2f Std: %3.2f | Max: %3.2f @ %d | Max2: %3.2f @ %d | Min: %3.2f @ %d | Min2: %3.2f @ %d | Converges to %3.2f | Change: %3.2f @ %d" % (self.name, self.avg_frate, self.std, self.max_frate, self.max_ftime, self.max_frate2, self.max_ftime2, self.min_frate, self.min_ftime, self.min_frate2, self.min_ftime2, self.converge, self.change_pt, self.chg_time)
    def __str__(self):
        '''
        LaTeX-table-row rendering of the same statistics.
        '''
        return "%s & %3.2f & %3.2f & (%3.2f, %d) & (%3.2f, %d) & (%3.2f, %d) & (%3.2f, %d) & %3.2f & (%3.2f, %d)\\\\" % (self.name, self.avg_frate, self.std, self.max_frate, self.max_ftime, self.max_frate2, self.max_ftime2, self.min_frate, self.min_ftime, self.min_frate2, self.min_ftime2, self.converge, self.change_pt, self.chg_time)
def getProbeCells(nwb, probe):
    """Collect the units of `probe` located in visual areas.

    Input(s)
    --------
    'nwb'  : h5py._hl.files.File. 'spikes.nwb' dataset.
    'probe': string. name of probe.

    Output(s)
    ---------
    dict mapping unit id -> fresh Cell, for every unit whose CCF
    structure name starts with 'V' or 'v'.
    """
    unit_times = nwb['processing'][probe]['UnitTimes']
    v_cells = {}
    for unit in nwb['processing'][probe]['unit_list'].value:
        region = unit_times[str(unit)]['ccf_structure'].value.decode('utf-8')
        if region[0] == 'V' or region[0] == 'v':
            v_cells[unit] = Cell()
    return v_cells
class Cell:
    """Per-unit spike bookkeeping for one cell of a probe.

    Holds summary statistics plus a per-stimulus-configuration
    binned spike table created in the constructor.
    """
    # Peak firing rate and the bin at which it occurs.
    max_frate = 0
    max_ftime = 0
    # Mean and spread of the firing rates.
    avg_frate = 0
    std = 0
    # Placeholders assigned later by the analysis scripts.
    name = " "
    lsq = " "
    # Change point (before the first peak) and its time.
    change_pt = 0
    chg_time = 0

    def __init__(self):
        """Create a cell with an empty per-configuration spike table."""
        self.__table = makeTable()

    def getSpikes(self, config):
        """Return the binned spike counts recorded under `config`."""
        return self.__table[config]

    def addSpike(self, config, spike, end):
        """Increment the bin that `spike` falls into for `config`.

        Input(s)
        --------
        'config': string. key of the per-configuration table.
        'spike' : spike time (ms relative to trial onset).
        'end'   : trial end; caps the bin index for late spikes.
        """
        target_bin = insertToBin(spike, end)
        self.__table[config][target_bin] += 1

    def __str__(self):
        """One-line summary of max/avg/std firing rates."""
        return "Max: %3.2f\t Avg: %3.2f\t Std: %3.2f" % (self.max_frate, self.avg_frate, self.std)
class Trial:
    """Container for one stimulus-presentation trial's PSTH data."""
    # The trial number
    number = -1
    # The stimulus configuration key (e.g. "<freq>_<angle>")
    config = ""
    # Should be five values for each of these
    # (presumably change-point times and fitted coefficients — filled in
    # elsewhere; TODO confirm against the fitting code)
    t = [None]*5
    beta = [None]*5
    # Holds the PSTH, one slot per time bin from start to end.
    # NOTE(review): this is a CLASS-level array shared by all instances
    # until an instance reassigns `spikes`; the scripts in this repo do
    # reassign it per trial — verify before relying on the default.
    spikes = np.zeros((len(bins), 1))
    lsq = []
    def __add__(self, other_trial):
        '''
        Intended to override '+' so two Trial objects can be summed
        (adding their spiking histograms).

        NOTE(review): unimplemented stub — currently returns None.

        Input(s)
        --------
        'other_trial': Trial. Another trial object.
        '''
        pass
def makeTable():
    """Build an empty spike-count table for one cell.

    Output(s)
    ---------
    dict keyed by stimulus configuration "<temporal-frequency>_<angle>";
    each value is a zeroed column vector with one slot per time bin.
    """
    # Bin edges from the module-level trial window (start, end, width).
    n_bins = int((end - start) / width + 1)
    bin_edges = np.linspace(start, end, n_bins)
    table = {}
    for freq in temp_freqs:
        for angle in orientations:
            key = str(freq) + "_" + str(angle)
            table[key] = np.zeros((len(bin_edges), 1))
    return table
def binarySearch(spikes, interval, start, end):
    '''
    Find spikes inside `interval` via a modified binary search: locate one
    spike in the interval, then expand outward with 'spikesInInterval'.

    Input(s)
    --------
    'spikes'  : list. all spike times of a given neuron (assumed sorted).
    'interval': list. [start_time, end_time] of the stimulus window.
    'start'   : int. lower search index.
    'end'     : int. upper search index.

    Output(s)
    ---------
    np.array of spike times in the interval, or -1 if none found.
    NOTE(review): the `end >= 1` guard means a single-element array
    (end == 0) always returns -1 — verify this is intended.
    '''
    if end >= 1:
        mid_point = midpoint(start, end)
        # If our spike is inside the interval, collect all neighbors too.
        if inside(spikes[mid_point], interval):
            return spikesInInterval(spikes, interval, mid_point)
        # Spike above the interval: recurse into the lower half.
        elif spikes[mid_point] > interval[1]:
            next_midpoint = midpoint(start, mid_point-1)
            # The midpoint stopped moving: search space exhausted; bail out
            # instead of recursing forever.
            if mid_point == next_midpoint:
                return -1
            return binarySearch(spikes, interval, start, mid_point-1)
        # Spike below the interval: recurse into the upper half.
        elif spikes[mid_point] < interval[0]:
            next_midpoint = midpoint(mid_point+1, end)
            # Same stuck-midpoint guard as above.
            if mid_point == next_midpoint:
                return -1
            return binarySearch(spikes, interval, mid_point+1, end)
    else:
        return -1
def spikesInInterval(spikes, interval, known):
    '''
    Expand outward from one known in-interval spike and collect every
    spike inside the interval.

    Input(s)
    --------
    'spikes'  : list. all spike times of a given neuron (assumed sorted).
    'interval': list. [start_time, end_time] of the stimulus window.
    'known'   : int. index in 'spikes' of a spike known to be inside.

    Output(s)
    ---------
    np.array of the spike times found (built from a set, so unordered
    and de-duplicated).

    NOTE(review): if the downward walk reaches index -1 (every element
    from 0..known is inside), the loop exits before the upward phase
    ever runs, so spikes above `known` are missed in that edge case —
    verify whether this can occur with real data.
    '''
    # Index of known spike
    i = known
    # Phase flags: walk below the known index first, then above it.
    DOWN = True
    UP = False
    # A set: no duplicates, O(1) membership.
    spike_set = set()
    # Stay within the bounds of the spikes list.
    while i > -1 and i < len(spikes):
        if inside(spikes[i], interval) and DOWN:
            spike_set.add(spikes[i])
            i = i - 1
        elif not inside(spikes[i], interval) and DOWN:
            # Below-interval boundary hit: switch to the upward walk.
            i = known + 1
            UP = True
            DOWN = False
        elif inside(spikes[i], interval) and UP:
            spike_set.add(spikes[i])
            i = i + 1
        elif not inside(spikes[i], interval) and UP:
            break
    # Convert set to list, then return.
    return np.array(list(spike_set))
def inside(spike, interval):
    """Return True iff `spike` lies within the closed `interval`.

    Input(s)
    --------
    'spike'   : a spike time.
    'interval': two-element sequence [low, high].

    Output(s)
    ---------
    bool. True when low <= spike <= high.
    """
    low, high = interval[0], interval[1]
    return low <= spike <= high
def midpoint(start_rate, end_rate):
    """Return the midpoint index between two positions.

    Input(s)
    --------
    'start_rate': int. beginning.
    'end_rate'  : int. end.

    Output(s)
    ---------
    int. midpoint, truncated toward zero (matches int() semantics).
    """
    half_span = (end_rate - start_rate) / 2
    return int(start_rate + half_span)
def insertToBin(spiketime, end):
    """Map a spike time (ms) to its histogram bin index.

    Uses the module-level bin `width`. Indices past `end` are clamped to
    `end`, so out-of-window spikes land in the last bin.

    Input(s)
    --------
    'spiketime': spike time in ms.
    'end'      : int. end of trial (maximum allowed index).

    Output(s)
    ---------
    int. index of the bin the spike belongs to.
    """
    remainder = spiketime % width
    idx = int((spiketime - remainder) / width)
    if idx > end:
        #print("spiketime " + str(spiketime) + "\tidx " + str(idx))
        idx = end
    return idx
def saveProbeData(MOUSE_ID, probe_name, nwb):
    '''
    Build per-cell, per-configuration spike histograms for one probe and
    pickle the resulting Probe object to "<MOUSE_ID>_<probe_name>".

    Input(s)
    --------
    'MOUSE_ID'  : string. ID of mouse we'll be looking at.
    'probe_name': string. name of probe.
    'nwb'       : h5py._hl.files.File. Dataset.

    Output(s)
    --------
    None. Side effect: writes a pickle file in the working directory.
    '''
    # Trial window in ms (shadows the module-level values on purpose).
    start = 0
    end = 2000
    # Presentation windows and parameters for 'drifting_gratings_2' only.
    timestamps = nwb['stimulus']['presentation']['drifting_gratings_2']['timestamps'].value
    stim_orient = nwb['stimulus']['presentation']['drifting_gratings_2']['data'].value
    # Collect every visual-area cell on this probe.
    probe = Probe(nwb, probe_name)
    # Output pickle name.
    filename = MOUSE_ID + "_" + probe_name
    cells = probe.getCellList()
    for cell in cells:
        curr_cell = probe.getCell(cell)
        # This cell's full spike train.
        spikes = nwb['processing'][probe_name]['UnitTimes'][str(cell)]['times'].value
        # For every occurrence of each kind of stimulus...
        for i in range(len(timestamps)):
            trial = timestamps[i]
            freq = stim_orient[i][1]
            angle = stim_orient[i][3]
            # Skip blank sweeps whose parameters are NaN.
            # BUG FIX: the original used `or`, which let a row with exactly
            # one NaN through and crashed on int(nan) below.
            if not (str(freq) == "nan") and not (str(angle) == "nan"):
                freq = int(freq)
                angle = int(angle)
                # Configuration key used to index the per-cell tables.
                config = str(freq) + "_" + str(angle)
                # All spikes falling inside this trial's window.
                stimulus_spikes = binarySearch(spikes, trial, 0, len(spikes)-1)
                if not (type(stimulus_spikes) == type(-1)):
                    # Shift to trial onset and convert s -> ms.
                    stimulus_spikes = (stimulus_spikes - trial[0])
                    stimulus_spikes *= 1000
                    # Bin each spike found.
                    for stim_spike in stimulus_spikes:
                        curr_cell.addSpike(config, stim_spike, end)
    print("Saving to " + filename)
    with open(filename, 'wb') as f:
        pickle.dump(probe, f)
def fromFreqList(x):
    '''
    Expand a frequency (count) list into a list of repeated indices —
    useful for feeding histogram functions.

    Example
    -------
    fromFreqList([2,1,4,2]) => [0,0,1,2,2,2,2,3,3]

    Input(s)
    --------
    'x': list of counts (anything int() accepts).

    Output(s)
    --------
    list of ints: each index i repeated int(x[i]) times.
    '''
    return [idx for idx, count in enumerate(x) for _ in range(int(count))]
def robj_to_dict(robj):
    '''
    Convert a named R object (rpy2) to a python dictionary.

    Input(s)
    --------
    'robj': R object exposing `.names` and iteration over its components.

    Output(s)
    --------
    dict mapping each name to its component converted to a list.

    Source
    ------
    https://medium.com/bigdatarepublic/contextual-changepoint-detection-with-python-and-r-using-rpy2-fa7d86259ba9
    '''
    return {name: list(values) for name, values in zip(robj.names, robj)}
|
{"/nwb_plots_firing_rates.py": ["/nwb_plots_functions.py"], "/nwb_plots_percentile.py": ["/nwb_plots_functions.py"], "/nwb_plots.py": ["/nwb_plots_functions.py"], "/nwb_trials.py": ["/nwb_plots_functions.py"]}
|
19,543
|
andreaalf97/whatsapp_analysis
|
refs/heads/master
|
/src/file_handler.py
|
import pandas as pd
import os
from os import path
import datetime as dt
from src.dataframe_analysis import df_setup
from src.misc import print_separator_line
def file_to_csv_format(file_path: str, is_apple: bool) -> str:
    """Convert an exported WhatsApp chat .txt into a '|'-separated CSV.

    Writes a sibling ".tmp" file with header "datetime|author|message" and
    returns its path. Special characters in messages are escaped with
    placeholder tokens (__x__, __a__, __vv__, __v__, __n__) that df_setup
    later reverses; multi-line messages are folded into one row.

    iOS exports ("[date, time] author: msg") and Android exports
    ("date, time - author: msg") are parsed by separate branches.
    """
    out_file_path = file_path.replace(".txt", ".tmp")
    with open(file_path, "r") as in_file:
        with open(out_file_path, "w") as out_file:
            # Look-ahead pair: a line only becomes a row once we know the
            # following line starts a new message.
            this_line = in_file.readline()
            next_line = in_file.readline()
            out_file.write("datetime|author|message\n")
            if is_apple:
                while next_line:
                    # NOTE(review): this membership test appears to contain
                    # an invisible character (e.g. U+200E LRM) used by iOS
                    # exports; as a literal empty string it would always be
                    # True — verify against the original file bytes.
                    if "" in this_line:
                        this_line = next_line
                        next_line = in_file.readline()
                        continue
                    # A new message starts with "[date, time] " (two ':' in
                    # the timestamp part).
                    valid_next_line: bool = (
                        next_line.count("[") == 1 and
                        next_line.count("]") == 1 and
                        next_line.split("] ", 1)[0].count(":") == 2
                    )
                    if not valid_next_line:
                        # Continuation line: fold it into the current message.
                        this_line = this_line.replace("\n", "__n__") + next_line.replace("\n", "__n__") + "\n"
                        next_line = in_file.readline()
                        continue
                    # Escape characters that would break the CSV or pandas.
                    this_line = this_line.replace("|", "__x__")
                    this_line = this_line.replace("*", "__a__")
                    this_line = this_line.replace('"', "__vv__")
                    this_line = this_line.replace("'", "__v__")
                    this_line = this_line.replace("“", "__vv__")
                    # Convert 12-hour PM timestamps to 24-hour.
                    if "PM" in this_line.split("] ", 1)[0]:
                        hour_str = this_line.split(", ", 1)[1].split(":", 1)[0]
                        hour = int(hour_str)
                        if hour != 12:
                            hour += 12
                        this_line = this_line.split(", ", 1)[0] + ", " + str(hour) + ":" + this_line.split(":", 1)[1]
                        # Normalize marker so the " AM] " split below matches.
                        this_line = this_line.replace("PM", "AM", 1)
                    # "[date, time] author: msg" -> "date time|author|msg"
                    this_line = this_line.replace("[", "", 1) \
                        .replace(", ", " ", 1)\
                        .replace(" AM] ", "|", 1)\
                        .replace(": ", "|", 1)
                    out_file.write(this_line)
                    this_line = next_line
                    next_line = in_file.readline()
            else:
                while next_line:
                    # Skip system/notice lines (see the invisible-character
                    # NOTE above) and lines without "time - author:" parts.
                    if "" in this_line or this_line.count(":") < 2 or "Hai cambiato l'oggetto da “" in this_line:
                        this_line = next_line
                        next_line = in_file.readline()
                        continue
                    # A new message starts with a "dd/mm/yy," date.
                    valid_next_line: bool = (
                        next_line.split(",", 1)[0].count("/") == 2
                    )
                    if not valid_next_line:
                        # Continuation line: fold it into the current message.
                        this_line = this_line.replace("\n", "__n__") + next_line.replace("\n", "__n__") + "\n"
                        next_line = in_file.readline()
                        continue
                    # Escape characters that would break the CSV or pandas.
                    this_line = this_line.replace("|", "__x__")
                    this_line = this_line.replace("*", "__a__")
                    this_line = this_line.replace('"', "__vv__")
                    this_line = this_line.replace("“", "__vv__")
                    this_line = this_line.replace("'", "__v__")
                    # "date, time - author: msg" -> "date time:00|author|msg"
                    this_line = this_line.replace(", ", " ", 1) \
                        .replace(" - ", ":00|", 1) \
                        .replace(": ", "|", 1)
                    out_file.write(this_line)
                    this_line = next_line
                    next_line = in_file.readline()
    return out_file_path
def load_data_frame(file_path: str, is_apple: bool) -> pd.DataFrame:
    """Load a chat as a DataFrame, using a pickled cache when available.

    On a cache hit, unpickles "../chats/.frames/<name>.zip". Otherwise,
    converts the .txt export to CSV, parses and post-processes it
    (df_setup), pickles the result for next time, and returns it.
    Timing info is printed for each step.
    """
    # If the backup .frames folder does not exist, create one.
    if not path.isdir("../chats/.frames"):
        os.mkdir("../chats/.frames")
    # The backup file has the same name as the original but is a .zip file
    # saved in the .frames folder.
    dataframe_file_path = file_path.replace(".txt", "") + ".zip"
    dataframe_file_path = dataframe_file_path.replace("chats/", "chats/.frames/")
    if path.isfile(dataframe_file_path):  # cache hit: unpickle it
        print("LOADING BACKUP..")
        beginning = dt.datetime.now()
        df = pd.read_pickle(dataframe_file_path)
        print("It took", (dt.datetime.now() - beginning).microseconds / 1000, "ms to load the pickled dataset")
        beginning = dt.datetime.now()
        # NOTE(review): this timing print reports a step that no longer
        # exists here (no df_info dictionary is built) — verify.
        print("It took", (dt.datetime.now() - beginning).microseconds / 1000, "ms to create the df_info dictionary")
        print("BACKUP LOADED")
    else:  # cache miss: build the dataframe and store it as a pickle file
        print("CREATING CSV FORMATTED FILE")
        beginning = dt.datetime.now()
        temp_file_path = file_to_csv_format(file_path, is_apple)  # .txt -> csv
        print("It took", (dt.datetime.now() - beginning).microseconds / 1000, "ms to create the CSV file")
        print("LOADING DATAFRAME FROM CSV")
        beginning = dt.datetime.now()
        df = pd.read_csv(temp_file_path, sep="|")  # csv -> DataFrame
        print("It took", (dt.datetime.now() - beginning).microseconds / 1000, "ms to create the CSV file")
        df = df_setup(df)
        os.remove(temp_file_path)  # the intermediate csv is no longer needed
        beginning = dt.datetime.now()
        df.to_pickle(dataframe_file_path)  # save the pickled cache
        print("It took", (dt.datetime.now() - beginning).microseconds /1000, "ms to pickle the dataframe")
        print("BACKUP SAVED AT", dataframe_file_path)
    print("FRAME LOADED")
    print_separator_line()
    print_separator_line()
    return df
def print_example(file_path: str, n: int):
    """Print the first `n` lines of the file at `file_path`.

    Each line is echoed with `print`, which keeps the line's own newline,
    so the output appears double-spaced (matching the original behavior).
    """
    print("An example of the dataframe")
    with open(file_path, "r") as file:
        # The dead `i = 0` counter was removed; the index itself is unused.
        for _ in range(n):
            print(file.readline())
|
{"/src/file_handler.py": ["/src/dataframe_analysis.py", "/src/misc.py"], "/src/main.py": ["/src/dataframe_analysis.py", "/src/file_handler.py"], "/src/dataframe_analysis.py": ["/src/misc.py"]}
|
19,544
|
andreaalf97/whatsapp_analysis
|
refs/heads/master
|
/src/main.py
|
import src.dataframe_analysis as analysis
from src.file_handler import print_example, load_data_frame
import pandas as pd
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Allow long messages to display fully when printing frames.
    pd.set_option('display.max_colwidth', 300)
    # Interactive selection of chat file and export type, disabled in favor
    # of the hard-coded values below.
    # file_path: str = input("Insert the name of the chat you want to analyze:")
    # file_path = "../chats/" + file_path + ".txt"
    # while not path.isfile(file_path):
    #     print("NOT AN EXISTING PATH")
    #     file_path: str = input("Insert the name of the chat you want to analyze:")
    #     file_path = "../chats/" + file_path + ".txt"
    #
    # # Reading if the file is a iOS file from the user input
    # is_apple_input: str = input("Is the chat file generated from an iOS device?")
    # is_apple: bool = (is_apple_input == "y" or is_apple_input == "Y" or is_apple_input == "1")
    # Hard-coded chat selection (Android-format export).
    file_path = "../chats/Sara_Gotti.txt"
    is_apple = False
    df = load_data_frame(file_path, is_apple)
    # Currently enabled analyses: summary stats and messages-per-year plot.
    # filtered = analysis.filter(df, words_or=["hu", "Hu", "HU"])
    # print(filtered[["author", "message"]])
    analysis.df_general_info(df)
    # analysis.df_length_info(df)
    # analysis.df_plot_month_year(df, start="03-2015", end="12-2015")
    # analysis.df_plot_month_year(df, auto=True)
    analysis.df_plot_year(df)
    # analysis.df_plot_days(df, auto=True)
    # analysis.df_emojis(df)
    # analysis.df_words(df)
    # analysis.df_month_analysis(df, month="5", year="2020")
|
{"/src/file_handler.py": ["/src/dataframe_analysis.py", "/src/misc.py"], "/src/main.py": ["/src/dataframe_analysis.py", "/src/file_handler.py"], "/src/dataframe_analysis.py": ["/src/misc.py"]}
|
19,545
|
andreaalf97/whatsapp_analysis
|
refs/heads/master
|
/src/dataframe_analysis.py
|
import pandas as pd
from src import misc
from src.misc import print_separator_line
import datetime as dt
import matplotlib.pyplot as plt
import wordcloud
from stop_words import get_stop_words
import emojis
from operator import add
def df_general_info(df: pd.DataFrame):
    """Print a summary of the chat: message counts per author, the overall
    date range, and the text/media split."""
    per_author = df.groupby("author").size()
    print("There are", len(per_author), "different authors in this chat")
    for author, n_messages in per_author.items():
        print(author, "has written", n_messages, "messages")
    print_separator_line()
    print("You have exchanged", str(len(df)), " messages between ", str(df.iloc[0].datetime), "and", str(df.iloc[-1].datetime))
    print_separator_line()
    print(len(df[df.isMedia == False]), "text objects")
    print(len(df[df.isMedia == True]), "media objects")
def df_length_info(df: pd.DataFrame):
    """Print the shortest and the longest message in the chat.

    NOTE(review): this mixes label-based `.index` values with positional
    `.iloc` lookups; it is only correct when the frame has the default
    RangeIndex — verify for filtered frames.
    """
    index_longest = df.length.sort_values().index[-1]
    index_shortest = df.length.sort_values().index[0]
    print("Shortest message is #" + str(index_shortest) + " with a length of " + str(
        len(df.iloc[index_shortest].message)) + ":")
    print(df.iloc[index_shortest].message)
    print_separator_line()
    print("Longest message is #" + str(index_longest) + " with a length of " + str(
        len(df.iloc[index_longest].message)) + ":")
    print(df.iloc[index_longest].message)
def bar(x: list, y: list, xlabel, ylabel, color='b', rotation='vertical'):
    """Draw and show a labeled bar chart.

    If `y` is a list of lists, each inner list is drawn as its own series
    (matplotlib cycles colors; `color` is not applied there, matching the
    original behavior).

    BUG FIX: the `rotation` argument was accepted but ignored — the x-tick
    rotation was hard-coded to 'vertical', so callers passing rotation=''
    (e.g. df_emojis) had no effect. It is now forwarded to plt.xticks.
    """
    if isinstance(y[0], list):
        for series in y:
            plt.bar(x, series, align='center')
    else:
        plt.bar(x, y, align='center', color=color)
    plt.xticks(rotation=rotation)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.show()
def df_plot_month_year(df: pd.DataFrame, start="01-2000", end="12-2050", auto=False):
    """Bar-plot the number of messages per (year, month) between `start`
    and `end` ("MM-YYYY" strings).

    With auto=True, the window is centered (±6 months) on the busiest year.
    """
    if auto:
        max_size = 0
        for year, frame in df.groupby(df["datetime"].dt.year):
            if len(frame) > max_size:
                max_size = len(frame)
                max_year = int(year)
        # NOTE(review): if df is empty, max_year is never bound and the
        # lines below raise NameError — verify callers guard against this.
        start = "06-" + str(max_year-1)
        end = "06-" + str(max_year+1)
        print("Max year is", max_year)
    start = dt.datetime.strptime(start, "%m-%Y")
    end = dt.datetime.strptime(end, "%m-%Y")
    dates = []
    counts = []
    for frame in df.groupby([df["datetime"].dt.year, df["datetime"].dt.month]):
        # Keep only groups inside the requested window.
        if frame[1].iloc[0]["datetime"] < start or frame[1].iloc[0]["datetime"] > end:
            continue
        # frame[0] contains (year, month)
        # frame[1] contains the full dataframe with those years and months only
        dates.append(str(frame[0][0]) + "-" + str(frame[0][1]))
        counts.append(len(frame[1]))
    bar(dates, counts, "Date", "Total number of messages", color='r')
def df_plot_year(df: pd.DataFrame):
    """Stacked bar chart of messages per year, one layer per author."""
    dates = []
    counts_per_author = {}
    for author in df_get_author_list(df):
        counts_per_author[author] = []
    for year, year_frame in df.groupby(df["datetime"].dt.year):
        dates.append(str(year))
        for author, frame in year_frame.groupby(year_frame["author"]):
            counts_per_author[author].append(len(frame))
    # Stack the bars: each author's counts are added on top of the running
    # totals so the bars layer instead of overlapping.
    # NOTE(review): if an author has no messages in some year, their list
    # is shorter than `dates` and map(add, ...) silently truncates — the
    # stacking would then misalign; verify with sparse chats.
    tots = [0 for x in dates]
    for author in counts_per_author:
        counts_per_author[author] = list(map(add, counts_per_author[author], tots))
        tots = counts_per_author[author]
        plt.bar(dates, counts_per_author[author], label=author)
    plt.xlabel("Year")
    plt.ylabel("Total number of messages")
    plt.legend()
    plt.show()
def df_emojis(df: pd.DataFrame, n=5):
    """Print the top-`n` emojis overall and per author, and bar-plot the
    top 2n overall."""
    print("EMOJI ANALYSIS")
    # Per-author and global emoji frequency tables.
    author_counters = {}
    all_emojis = {}
    for author in df_get_author_list(df):
        author_counters[author] = {}
    for row in df.iterrows():
        emoji_list = row[1]["emojis"]
        author = row[1]["author"]
        if emoji_list:
            for emoji in emoji_list:
                if emoji in author_counters[author]:
                    author_counters[author][emoji] += 1
                else:
                    author_counters[author][emoji] = 1
                if emoji in all_emojis:
                    all_emojis[emoji] += 1
                else:
                    all_emojis[emoji] = 1
    # Sort by descending frequency.
    all_emojis = {k: v for k, v in sorted(all_emojis.items(), reverse=True, key=lambda item: item[1])}
    print("OVERALL:")
    i = 1
    for emoji in all_emojis:
        if i > n:
            break
        print(emoji, "--", all_emojis[emoji])
        i += 1
    # Plot the 2n most frequent emojis (decoded to their :names:).
    bar(
        [emojis.decode(k) for k in list(all_emojis.keys())[:(n*2)]],
        [all_emojis[k] for k in list(all_emojis.keys())[:(n*2)]],
        "Emojis",
        "Number of times used",
        rotation=''
    )
    for author in author_counters:
        author_counters[author] = {k: v for k, v in sorted(author_counters[author].items(), reverse=True, key=lambda item: item[1])}
        print(author)
        i = 1
        for emoji in author_counters[author]:
            if i > n:
                break
            print(emoji, "--", author_counters[author][emoji])
            i += 1
def df_words(df: pd.DataFrame, title=""):
    """Show word clouds of the chat: one overall and one per author.

    Italian stop words are removed; "<Media omessi>" placeholder rows
    are excluded from the text.
    """
    # Concatenate all non-media messages into one lowercase string.
    full_string = " ".join([str(row[1]["message"]).replace("\n", " ").lower() for row in df.iterrows() if row[1]["message"]!="<Media omessi>"])
    authors = df_get_author_list(df)
    full_string_authors = {}
    for author in authors:
        full_string_authors[author] = " ".join([str(row[1]["message"]).replace("<Media omessi>", "").replace("\n", " ").lower() for row in df.iterrows() if row[1]["author"] == author])
    stopwords = get_stop_words("it")
    wc = wordcloud.WordCloud(
        stopwords=stopwords,
        # width=1000,
        # height=500,
        background_color="white"
    )
    wc.generate(full_string)
    plt.axis("off")
    plt.imshow(wc, interpolation="bilinear")
    plt.title(title + " | " + "OVERALL")
    plt.show()
    # One cloud per author, reusing the same WordCloud object.
    for author in full_string_authors:
        wc.generate(full_string_authors[author])
        plt.axis("off")
        plt.imshow(wc, interpolation="bilinear")
        plt.title(title + " | " + author)
        plt.show()
def df_setup(df: pd.DataFrame) -> pd.DataFrame:
    """Normalize a freshly parsed chat DataFrame and return it.

    Parses 'datetime', unescapes parser placeholders in 'message', and adds
    the derived 'isMedia', 'emojis' and 'length' columns.
    """
    # Creates the 'isMedia' column
    df["message"] = df["message"].astype(str)
    beginning = dt.datetime.now()
    df["isMedia"] = df.apply(lambda row: row["message"].find("<Media omessi>") != -1, axis=1)
    # BUGFIX: total_seconds() reports the full elapsed time; the previous
    # '.microseconds' attribute only holds the sub-second remainder and
    # under-reported any step that took longer than one second.
    print((dt.datetime.now() - beginning).total_seconds() * 1000, "ms to create the isMedia column")
    # 14/06/15 12:52:00
    beginning = dt.datetime.now()
    df["datetime"] = pd.to_datetime(df["datetime"], format="%d/%m/%y %H:%M:%S")
    print((dt.datetime.now() - beginning).total_seconds() * 1000, "ms to convert 'datetime' from string")
    beginning = dt.datetime.now()
    df["isMedia"] = df["isMedia"].astype(bool)
    df["author"] = df["author"].astype(str)
    print((dt.datetime.now() - beginning).total_seconds() * 1000, "ms to convert column types")
    beginning = dt.datetime.now()
    # Undo the placeholder escaping introduced by the chat-export parser.
    df["message"] = df.apply(lambda row:
                             row["message"].replace("__x__", "|")
                             .replace("__a__", "*")
                             .replace("__vv__", '"')
                             .replace("__v__", "'"), axis=1
                             )
    print((dt.datetime.now() - beginning).total_seconds() * 1000, "ms to reformat the 'message' column")
    beginning = dt.datetime.now()
    df["emojis"] = df.apply(lambda row: emojis.get(row["message"]), axis=1)
    print((dt.datetime.now() - beginning).total_seconds() * 1000, "ms to create the 'emojis' column")
    beginning = dt.datetime.now()
    df["length"] = df.apply(lambda row: len(row["message"]), axis=1)
    print((dt.datetime.now() - beginning).total_seconds() * 1000, "ms to create the 'length' column")
    return df
def df_month_analysis(df, month="0", year="0"):
    """Word-cloud analysis of a single month of chat.

    With the default sentinel values ("0"), the month with the most messages
    is detected automatically; otherwise the given month/year is analysed.
    NOTE: on an empty DataFrame the auto branch leaves 'frame' unbound.
    """
    if month == '0' and year == '0':
        max_size = 0
        for date_i, frame_i in df.groupby([df["datetime"].dt.year, df["datetime"].dt.month]):
            if len(frame_i) > max_size:
                max_size = len(frame_i)
                month = date_i[1]
                year = date_i[0]
                frame = frame_i
        print("The month you talked the most is " + str(month) + "-" + str(year))
    else:
        frame = df[
            (df["datetime"].dt.year == int(year)) &
            (df["datetime"].dt.month == int(month))
        ]
        # str() so integer month/year arguments no longer raise TypeError
        # on the concatenation below (the old code assumed strings).
        print("There have been", len(frame), "messages in " + str(month) + "-" + str(year))
    df_words(frame, title="What you talked about on " + str(month) + "-" + str(year))
def df_filter(df: pd.DataFrame,
              words=(),
              words_or=(),
              authors=(),
              start_date="30/03/2000 18:00",
              end_date="30/03/2050 18:00") -> pd.DataFrame:
    """Filter chat rows by date window, words and authors.

    Args:
        words: every entry must appear in the message (AND).
        words_or: at least one entry must appear in the message (OR).
        authors: the author must match at least one entry (OR).
        start_date / end_date: exclusive bounds, "%d/%m/%Y %H:%M" format.

    Returns:
        The matching subset of df (original index preserved).
    """
    # Immutable tuple defaults replace the mutable-default-list pitfall;
    # callers passing lists are unaffected.
    condition = ((df["datetime"] > dt.datetime.strptime(start_date, "%d/%m/%Y %H:%M")) &
                 (df["datetime"] < dt.datetime.strptime(end_date, "%d/%m/%Y %H:%M")))
    for word in words:
        condition = condition & df["message"].str.contains(word)
    if words_or:
        any_word = False
        for word in words_or:
            any_word = any_word | df["message"].str.contains(word)
        condition = condition & any_word
    if authors:
        any_author = False
        for author in authors:
            any_author = any_author | df["author"].str.contains(author)
        condition = condition & any_author
    return df[condition]
def df_plot_days(df, start="01/03/2020", end="01/04/2020", auto=False):
    """Bar-plot the number of messages per day between start and end.

    With auto=True the busiest month is detected and used as the range.
    Dates use the "%d/%m/%Y" format.
    NOTE: on an empty DataFrame the auto branch leaves max_month unbound.
    """
    if auto:
        max_len = 0
        for (year, month), frame in df.groupby([df["datetime"].dt.year, df["datetime"].dt.month]):
            if len(frame) > max_len:
                max_len = len(frame)
                max_year = year
                max_month = month
        print("Max month is " + str(max_month) + "-" + str(max_year))
        # NOTE(review): get_last_day_of_month ignores leap years (Feb -> 28).
        last_day = misc.get_last_day_of_month(max_month)
        start = "01/" + str(max_month) + "/" + str(max_year)
        end = str(last_day) + "/" + str(max_month) + "/" + str(max_year)
    # 23/03/2020
    start = dt.datetime.strptime(start, "%d/%m/%Y")
    end = dt.datetime.strptime(end, "%d/%m/%Y")
    filtered_df = df_filter(
        df,
        start_date=start.strftime("%d/%m/%Y %H:%M"),
        end_date=end.strftime("%d/%m/%Y %H:%M")
    )
    dates = []
    counts = []
    # BUGFIX: group on the filtered frame's own datetime column. The old code
    # grouped on columns of the unfiltered df, which only worked through
    # index alignment and silently breaks if the index is ever reset.
    when = filtered_df["datetime"]
    for date, frame in filtered_df.groupby([when.dt.year, when.dt.month, when.dt.day]):
        dates.append(str(date[2]) + "-" + str(date[1]))
        counts.append(len(frame))
    bar(dates, counts, "Day", "Total number of messages")
def df_get_author_list(df: pd.DataFrame) -> list:
    """Return the chat's authors, ordered from most to least messages."""
    return df["author"].value_counts().index.tolist()
|
{"/src/file_handler.py": ["/src/dataframe_analysis.py", "/src/misc.py"], "/src/main.py": ["/src/dataframe_analysis.py", "/src/file_handler.py"], "/src/dataframe_analysis.py": ["/src/misc.py"]}
|
19,546
|
andreaalf97/whatsapp_analysis
|
refs/heads/master
|
/src/misc.py
|
def print_separator_line():
    """Print a fixed-width horizontal rule to stdout."""
    rule = "==============================="
    print(rule)
def get_last_day_of_month(month: int) -> int:
    """Return the number of days in *month* (1-12).

    February is always reported as 28: this table ignores leap years.

    Raises:
        ValueError: if month is outside 1..12. (ValueError is a subclass of
        the bare Exception raised before, so existing handlers still work.)
    """
    days_in_month = {
        1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
        7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31,
    }
    if month not in days_in_month:
        raise ValueError("Month must be between 1 and 12")
    return days_in_month[month]
|
{"/src/file_handler.py": ["/src/dataframe_analysis.py", "/src/misc.py"], "/src/main.py": ["/src/dataframe_analysis.py", "/src/file_handler.py"], "/src/dataframe_analysis.py": ["/src/misc.py"]}
|
19,580
|
Jiahuan-Pei/multiwoz-mdrg
|
refs/heads/master
|
/test.py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import division, print_function, unicode_literals
import argparse
import json
import os
import shutil
import time
import numpy as np
import torch
from utils import util, multiwoz_dataloader
from models.evaluator import *
from models.model import Model
from utils.util import detected_device, pp_mkdir
from multiwoz.Evaluators import *
# pp added: print out env
util.get_env_info()
# Command-line interface for the evaluation script.
parser = argparse.ArgumentParser(description='multiwoz1-bsl-te')
# 1. Data & Dir
data_arg = parser.add_argument_group('Data')
data_arg.add_argument('--data_dir', type=str, default='data/multi-woz', help='the root directory of data')
data_arg.add_argument('--result_dir', type=str, default='results/bsl/')
data_arg.add_argument('--model_name', type=str, default='translate.ckpt')
# 2. MISC
misc_arg = parser.add_argument_group('Misc')
misc_arg.add_argument('--dropout', type=float, default=0.0)
misc_arg.add_argument('--use_emb', type=str, default='False')
misc_arg.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
misc_arg.add_argument('--no_models', type=int, default=20, help='how many models to evaluate')
misc_arg.add_argument('--beam_width', type=int, default=10, help='Beam width used in beamsearch')
misc_arg.add_argument('--write_n_best', type=util.str2bool, nargs='?', const=True, default=False, help='Write n-best list (n=beam_width)')
# 3. Here add new args
new_arg = parser.add_argument_group('New')
new_arg.add_argument('--intent_type', type=str, default=None, help='separate experts by intents: None, domain, sysact or domain_act') # pp added
new_arg.add_argument('--lambda_expert', type=float, default=0.5) # use xx percent of training data
new_arg.add_argument('--mu_expert', type=float, default=0.5) # use xx percent of training data
new_arg.add_argument('--gamma_expert', type=float, default=0.5) # use xx percent of training data
new_arg.add_argument('--debug', type=util.str2bool, nargs='?', const=True, default=False, help='if True use small data for debugging')
args = parser.parse_args()
args.device = "cuda" if torch.cuda.is_available() else "cpu"
print('args.device={}'.format(args.device))
# construct dirs
# NOTE(review): these paths end with '/' while train.py builds the same
# paths without the trailing slash — confirm the two scripts stay in sync.
args.model_dir = '%s/model/' % args.result_dir
args.train_output = '%s/data/train_dials/' % args.result_dir
args.valid_output = '%s/data/valid_dials/' % args.result_dir
args.decode_output = '%s/data/test_dials/' % args.result_dir
print(args)
# pp added: init seed
util.init_seed(args.seed)
def load_config(args):
    """Merge the saved model config (JSON) with the current CLI arguments.

    Loads '<model_dir><model_name>.json' and then overlays every attribute
    of *args*, so command-line values take precedence.
    """
    # 'with' guarantees the file handle is closed (the old open() leaked it).
    with open('{}{}.json'.format(args.model_dir, args.model_name), 'rb') as f:
        config = util.unicode_to_utf8(json.load(f))
    # BUGFIX: a Namespace has no '__args' attribute — the old
    # 'args.__args.items()' raised AttributeError. vars(args) is the
    # standard way to get the Namespace's attribute dict.
    for key, value in vars(args).items():
        config[key] = value
    return config
def loadModelAndData(num):
    """Load vocabularies, build the model, optionally restore checkpoint
    *num*, and read the validation/test dialogue files."""
    # Vocabulary dictionaries (word <-> index, both directions).
    input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index = util.loadDictionaries(mdir=args.data_dir)
    # pp added: load intents
    if args.intent_type:
        intent2index, index2intent = util.loadIntentDictionaries(intent_type=args.intent_type, intent_file='{}/intents.json'.format(args.data_dir))
    else:
        intent2index, index2intent = None, None
    # Rebuild the model and move it to the detected device.
    model = Model(args, input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index, intent2index)
    model = model.to(detected_device)
    if args.load_param:
        model.loadModel(iter=num)
    # Validation dialogues.
    with open('{}/val_dials.json'.format(args.data_dir)) as fh:
        val_dials = json.load(fh)
    # Test dialogues.
    with open('{}/test_dials.json'.format(args.data_dir)) as fh:
        test_dials = json.load(fh)
    return model, val_dials, test_dials, input_lang_word2index, output_lang_word2index, intent2index, index2intent
def decode(num=1, beam_search=False):
    """Evaluate checkpoint *num* on the validation and test dialogue sets.

    Returns (Valid_Score, val_dials_gen, exp(valid_loss), Test_Score,
    test_dials_gen, exp(test_loss)); the exp() values are perplexities.
    """
    model, val_dials, test_dials, input_lang_word2index, output_lang_word2index, intent2index, index2intent = loadModelAndData(num)
    delex_path = '%s/delex.json' % args.data_dir
    start_time = time.time()
    model.beam_search = beam_search
    # step=0 makes the [-step:] slices below take the FULL dialogue list;
    # in debug mode only the last 2 dialogues are evaluated.
    step = 0 if not args.debug else 2 # small sample for debug
    # VALIDATION
    val_dials_gen = {}
    valid_loss = 0
    for name, val_file in list(val_dials.items())[-step:]:
        loader = multiwoz_dataloader.get_loader_by_dialogue(val_file, name,
                                                            input_lang_word2index, output_lang_word2index,
                                                            args.intent_type, intent2index)
        # NOTE(review): py2-style iterator call — relies on the loader's
        # iterator exposing .next(); confirm with the installed torch version.
        data = iter(loader).next()
        # Transfer to GPU
        if torch.cuda.is_available():
            data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]
        input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data
        output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,
                                                    db_tensor, bs_tensor, mask_tensor)
        valid_loss += loss_sentence
        val_dials_gen[name] = output_words
    # NOTE(review): valid_loss is a running SUM while test_loss below is
    # averaged per dialogue — the two printed losses are not comparable.
    print('Current VALID LOSS:', valid_loss)
    # Valid_Score = evaluateModel(val_dials_gen, val_dials, delex_path, mode='Valid')
    Valid_Score = evaluator.summarize_report(val_dials_gen, mode='Valid')
    # evaluteNLG(val_dials_gen, val_dials)
    # TESTING
    test_dials_gen = {}
    test_loss = 0
    for name, test_file in list(test_dials.items())[-step:]:
        loader = multiwoz_dataloader.get_loader_by_dialogue(test_file, name,
                                                            input_lang_word2index, output_lang_word2index,
                                                            args.intent_type, intent2index)
        data = iter(loader).next()
        # Transfer to GPU
        if torch.cuda.is_available():
            data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]
        input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data
        output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,
                                                    db_tensor, bs_tensor, mask_tensor)
        test_loss += loss_sentence
        test_dials_gen[name] = output_words
    test_loss /= len(test_dials)
    print('Current TEST LOSS:', test_loss)
    # Test_Score = evaluateModel(test_dials_gen, test_dials, delex_path, mode='Test')
    Test_Score = evaluator.summarize_report(test_dials_gen, mode='Test')
    # evaluteNLG(test_dials_gen, test_dials)
    print('TIME:', time.time() - start_time)
    return Valid_Score, val_dials_gen, np.exp(valid_loss), Test_Score, test_dials_gen, np.exp(test_loss)
def decodeWrapper(beam_search=False):
    """Evaluate all saved checkpoints, track the best one by validation
    score, print a summary and dump its generated dialogues to JSON."""
    # Load config file
    # with open(args.model_path + '.config') as f:
    with open('{}{}.config'.format(args.model_dir, args.model_name)) as f:
        add_args = json.load(f)
        for k, v in add_args.items():
            if k=='data_dir': # ignore this arg
                continue
            setattr(args, k, v)
        # Force evaluation-time settings regardless of the saved config.
        args.mode = 'test'
        args.load_param = True
        args.dropout = 0.0
        assert args.dropout == 0.0
    # Start going through models
    # args.original = args.model_path
    Best_Valid_Score = None
    Best_Test_Score = None
    Best_PPL = None
    Best_model_id = 0
    Best_val_dials_gen = {}
    Best_test_dials_gen = {}
    for ii in range(1, args.no_models + 1):
        print(30 * '-' + 'EVALUATING EPOCH %s' % ii)
        # args.model_path = args.model_path + '-' + str(ii)
        with torch.no_grad():
            Valid_Score, val_dials_gen, val_ppl, Test_Score, test_dials_gen, test_ppl = decode(ii, beam_search)
            # NOTE(review): selection uses element [-2] of the score tuple
            # (R in a (BLEU, MATCHES, SUCCESS, SCORE, P, R, F1) layout) —
            # confirm this is the intended selection metric.
            if Best_Valid_Score is None or Best_Valid_Score[-2] < Valid_Score[-2]:
                Best_Valid_Score = Valid_Score
                Best_Test_Score = Test_Score
                Best_PPL = test_ppl
                Best_val_dials_gen = val_dials_gen
                Best_test_dials_gen = test_dials_gen
                Best_model_id = ii
        # try:
        #     decode(ii, intent2index)
        # except:
        #     print('cannot decode')
    # save best generated output to json
    print('Summary'+'~'*50)
    print('Best model: %s'%(Best_model_id))
    BLEU, MATCHES, SUCCESS, SCORE, P, R, F1 = Best_Test_Score
    mode = 'Test'
    print('%s PPL: %.2f' % (mode, Best_PPL))
    print('%s BLEU: %.4f' % (mode, BLEU))
    print('%s Matches: %2.2f%%' % (mode, MATCHES))
    print('%s Success: %2.2f%%' % (mode, SUCCESS))
    print('%s Score: %.4f' % (mode, SCORE))
    print('%s Precision: %.2f%%' % (mode, P))
    print('%s Recall: %.2f%%' % (mode, R))
    print('%s F1: %.2f%%' % (mode, F1))
    # 'bm' = beam search, 'gd' = greedy decoding output files.
    suffix = 'bm' if beam_search else 'gd'
    try:
        with open(args.valid_output + 'val_dials_gen_%s.json' % suffix, 'w') as outfile:
            json.dump(Best_val_dials_gen, outfile, indent=4)
    except:
        print('json.dump.err.valid')
    try:
        with open(args.decode_output + 'test_dials_gen_%s.json' % suffix, 'w') as outfile:
            json.dump(Best_test_dials_gen, outfile, indent=4)
    except:
        print('json.dump.err.test')
if __name__ == '__main__':
    # create dir for generated outputs of valid and test set
    pp_mkdir(args.valid_output)
    pp_mkdir(args.decode_output)
    # Module-level evaluator: decode() reads this global.
    evaluator = MultiWozEvaluator('MultiWozEvaluator')
    # Evaluate every checkpoint twice: greedy decoding, then beam search.
    print('\n\nGreedy Search'+'='*50)
    decodeWrapper(beam_search=False)
    print('\n\nBeam Search' + '=' * 50)
    decodeWrapper(beam_search=True)
    # evaluteNLGFile(gen_dials_fpath='results/bsl_20190510161309/data/test_dials/test_dials_gen.json',
    #                ref_dialogues_fpath='data/test_dials.json')
    # evaluteNLGFiles(gen_dials_fpaths=['results/bsl_20190510161309/data/test_dials/test_dials_gen.json',
    #                                   'results/moe1_20190510165545/data/test_dials/test_dials_gen.json'],
    #                ref_dialogues_fpath='data/test_dials.json')
    # from nlgeval import compute_metrics
    # metrics_dict = compute_metrics(hypothesis='/Users/pp/Code/nlg-eval/examples/hyp.txt',
    #                                references=['/Users/pp/Code/nlg-eval/examples/ref1.txt'])
|
{"/test.py": ["/models/evaluator.py", "/models/model.py", "/utils/util.py"], "/train.py": ["/models/model.py", "/utils/util.py"], "/utils/multiwoz_dataloader.py": ["/utils/util.py"], "/models/model.py": ["/utils/util.py"]}
|
19,581
|
Jiahuan-Pei/multiwoz-mdrg
|
refs/heads/master
|
/train.py
|
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import argparse
import json
import random
import datetime
from io import open
import os
import shutil
import numpy as np
import torch
from torch.optim import Adam
import torch.nn as nn
from utils import util, multiwoz_dataloader
from models.model import Model
from utils.util import detected_device, PAD_token, pp_mkdir
from multiwoz.Evaluators import *
# from tqdm import tqdm
# SOS_token = 0
# EOS_token = 1
# UNK_token = 2
# PAD_token = 3
# pp added: print out env
util.get_env_info()
all_start_time = datetime.datetime.now()
print('Start time={}'.format(all_start_time.strftime("%Y-%m-%d %H:%M:%S")))
# Command-line interface for the training script.
parser = argparse.ArgumentParser(description='multiwoz1-bsl-tr')
# Group args
# 1. Data & Dirs
data_arg = parser.add_argument_group(title='Data')
data_arg.add_argument('--data_dir', type=str, default='data/multi-woz', help='the root directory of data')
data_arg.add_argument('--log_dir', type=str, default='logs')
data_arg.add_argument('--result_dir', type=str, default='results/bsl')
data_arg.add_argument('--pre_model_dir', type=str, default='results/moe4_gru-27062/model')
data_arg.add_argument('--model_name', type=str, default='translate.ckpt')
# 2.Network
net_arg = parser.add_argument_group(title='Network')
net_arg.add_argument('--cell_type', type=str, default='lstm')
net_arg.add_argument('--attention_type', type=str, default='bahdanau')
net_arg.add_argument('--depth', type=int, default=1, help='depth of rnn')
net_arg.add_argument('--emb_size', type=int, default=50)
net_arg.add_argument('--hid_size_enc', type=int, default=150)
net_arg.add_argument('--hid_size_dec', type=int, default=150)
net_arg.add_argument('--hid_size_pol', type=int, default=150)
net_arg.add_argument('--max_len', type=int, default=50)
net_arg.add_argument('--vocab_size', type=int, default=400, metavar='V')
net_arg.add_argument('--use_attn', type=util.str2bool, nargs='?', const=True, default=True) # F
net_arg.add_argument('--use_emb', type=util.str2bool, nargs='?', const=True, default=False)
# 3.Train
train_arg = parser.add_argument_group(title='Train')
train_arg.add_argument('--mode', type=str, default='train', help='training or testing: test, train, RL')
train_arg.add_argument('--optim', type=str, default='adam')
train_arg.add_argument('--max_epochs', type=int, default=20) # 15
train_arg.add_argument('--lr_rate', type=float, default=0.005)
train_arg.add_argument('--lr_decay', type=float, default=0.0)
train_arg.add_argument('--l2_norm', type=float, default=0.00001)
train_arg.add_argument('--clip', type=float, default=5.0, help='clip the gradient by norm')
train_arg.add_argument('--teacher_ratio', type=float, default=1.0, help='probability of using targets for learning')
train_arg.add_argument('--dropout', type=float, default=0.0)
train_arg.add_argument('--early_stop_count', type=int, default=2)
train_arg.add_argument('--epoch_load', type=int, default=0)
train_arg.add_argument('--load_param', type=util.str2bool, nargs='?', const=True, default=False)
train_arg.add_argument('--start_epoch', type=int, default=0) # when to use SentMoE
# 4. MISC
misc_arg = parser.add_argument_group('MISC')
misc_arg.add_argument('--seed', type=int, default=0, metavar='S', help='random seed (default: 1)')
misc_arg.add_argument('--batch_size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')
misc_arg.add_argument('--db_size', type=int, default=30)
misc_arg.add_argument('--bs_size', type=int, default=94)
misc_arg.add_argument('--beam_width', type=int, default=10, help='Beam width used in beamsearch')
#
# 5. Here add new args
new_arg = parser.add_argument_group('New')
new_arg.add_argument('--intent_type', type=str, default=None, help='separate experts by intents: None, domain, sysact or domain_act') # pp added
# different implementation of moe
# 1. only weight loss & hyper weights
#    --use_moe_loss=True --learn_loss_weight=False --use_moe_model=False
# 2. only weight loss & learn weights
#    --use_moe_loss=True --learn_loss_weight=True --use_moe_model=False
# 3. only split models
#    --use_moe_loss=False --learn_loss_weight=False --use_moe_model=True
# 4. both & hyper weights
#    --use_moe_loss=True --learn_loss_weight=False --use_moe_model=True
# 5. both & learn weights
#    --use_moe_loss=True --learn_loss_weight=True --use_moe_model=True
new_arg.add_argument('--use_moe_loss', type=util.str2bool, nargs='?', const=True, default=False, help='inner models weighting loss')
new_arg.add_argument('--learn_loss_weight', type=util.str2bool, nargs='?', const=True, default=False, help='learn weight of moe loss')
new_arg.add_argument('--use_moe_model', type=util.str2bool, nargs='?', const=True, default=False, help='inner models structure partition')
new_arg.add_argument('--debug', type=util.str2bool, nargs='?', const=True, default=False, help='if True use small data for debugging')
new_arg.add_argument('--train_valid', type=util.str2bool, nargs='?', const=True, default=False, help='if True add valid data for training')
new_arg.add_argument('--train_ratio', type=float, default=1.0) # use xx percent of training data
new_arg.add_argument('--lambda_expert', type=float, default=0.5) # use xx percent of training data
new_arg.add_argument('--mu_expert', type=float, default=0.5) # use xx percent of training data
new_arg.add_argument('--gamma_expert', type=float, default=0.5) # use xx percent of training data
new_arg.add_argument('--SentMoE', type=util.str2bool, nargs='?', const=True, default=False, help='if True use sentence info')
new_arg.add_argument('--if_detach', type=util.str2bool, nargs='?', const=True, default=False) # if detach expert parts
new_arg.add_argument('--rp_share_rnn', type=util.str2bool, nargs='?', const=True, default=True) # if detach expert parts
new_arg.add_argument('--future_info', type=str, default='proba') # use hidd or proba
args = parser.parse_args()
args.device = detected_device.type
print('args.device={}'.format(args.device))
print('args.intent_type={}'.format(args.intent_type))
# construct dirs
args.model_dir = '%s/model' % args.result_dir
args.train_output = '%s/data/train_dials' % args.result_dir
args.valid_output = '%s/data/valid_dials' % args.result_dir
args.decode_output = '%s/data/test_dials' % args.result_dir
args.delex_path = '%s/delex.json' % args.data_dir
print(args)
# pp added: init seed
util.init_seed(args.seed)
def trainOne(print_loss_total,print_act_total, print_grad_total, input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor=None, name=None):
    """Run one training step on a batch and accumulate the running totals.

    Mutates the module-level `model` (global step, sup_loss) and returns the
    updated (loss, act-loss, grad) accumulators.
    """
    loss, loss_acts, grad = model.model_train(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor, mask_tensor, name)
    # pp added: experts' loss
    # print('@'*20, '\n', target_tensor)
    # The triple-quoted block below is disabled experimental code for
    # per-expert loss weighting; kept verbatim for reference.
    '''
    if args.use_moe_loss and False: # data separate by intents
        gen_loss_list = []
        if mask_tensor is not None: # data separate by intents
            # print(mask_tensor)
            for mask in mask_tensor: # each intent has a mask [Batch, 1]
                target_tensor_i = target_tensor.clone()
                target_tensor_i = target_tensor_i.masked_fill_(mask, value=PAD_token)
                # print(mask)
                # print(target_tensor_i)
                # print('*'*50)
                loss_i, loss_acts_i, grad_i = model.model_train(input_tensor, input_lengths, target_tensor_i, target_lengths, db_tensor, bs_tensor, mask_tensor, name)
                gen_loss_list.append(loss_i)
            # print('loss', loss, '; mean_experts_loss', torch.mean(torch.tensor(gen_loss_list)), '\ngen_loss_list', ['%.4f' % s if s!=0 else '0' for s in gen_loss_list])
            # mu_expert = 0.5
            mu_expert = args.mu_expert
            loss = (1 - mu_expert) * loss + mu_expert * torch.mean(torch.tensor(gen_loss_list))
    '''
    #print(loss, loss_acts)
    print_loss_total += loss
    print_act_total += loss_acts
    print_grad_total += grad
    model.global_step += 1
    model.sup_loss = torch.zeros(1)
    return print_loss_total, print_act_total, print_grad_total
def trainIters(model, intent2index, n_epochs=10, args=args):
    """Train for n_epochs, evaluating on the validation and test sets after
    every epoch, and return a one-row DataFrame describing the best epoch
    (selected by validation Score).

    Side effects: saves a checkpoint per epoch, writes per-epoch and best
    generated dialogues to JSON, and prints a metric summary.
    """
    # NOTE(review): prev_min_loss / early_stop_count are initialised but
    # never used below — early stopping is effectively disabled.
    prev_min_loss, early_stop_count = 1 << 30, args.early_stop_count
    start = datetime.datetime.now()
    # Valid_Scores, Test_Scores = [], []
    Scores = []
    val_dials_gens, test_dials_gens = [], []
    origin = args.SentMoE # original flag
    for epoch in range(1, n_epochs + 1):
        # pp added: only enable SentMoE after args.start_epoch epochs.
        if origin:
            if epoch > args.start_epoch:
                args.SentMoE = True
                print('BeginSentMOE', '-'*50)
            else:
                args.SentMoE = False
        print('%s\nEpoch=%s (%s %%)' % ('~'*50, epoch, epoch / n_epochs * 100))
        print_loss_total = 0; print_grad_total = 0; print_act_total = 0  # Reset every print_every
        start_time = datetime.datetime.now()
        # watch out where do you put it
        # NOTE(review): the optimizers are re-created every epoch, which
        # resets Adam's moment estimates — confirm this is intentional.
        model.optimizer = Adam(lr=args.lr_rate, params=filter(lambda x: x.requires_grad, model.parameters()), weight_decay=args.l2_norm)
        model.optimizer_policy = Adam(lr=args.lr_rate, params=filter(lambda x: x.requires_grad, model.policy.parameters()), weight_decay=args.l2_norm)
        # Training
        model.train()
        step = 0
        for data in train_loader: # each element of data tuple has [batch_size] samples
            step += 1
            model.optimizer.zero_grad()
            model.optimizer_policy.zero_grad()
            # Transfer to GPU
            if torch.cuda.is_available():
                data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]
            input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data
            print_loss_total, print_act_total, print_grad_total = trainOne(print_loss_total, print_act_total, print_grad_total, input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor)
            if step > 1 and args.debug:
                break # for debug
            if args.train_ratio!=1.0 and step > args.train_ratio * len(train_loader):
                break # only train of
        train_len = len(train_loader) # 886 data # len(train_loader.dataset.datasets) # 8423 dialogues
        print_loss_avg = print_loss_total / train_len
        print_act_total_avg = print_act_total / train_len
        print_grad_avg = print_grad_total / train_len
        print('Train Time:%.4f' % (datetime.datetime.now() - start_time).seconds)
        print('Train Loss: %.6f\nTrain Grad: %.6f' % (print_loss_avg, print_grad_avg))
        # step=0 makes the [-step:] slices below take the FULL dialogue list;
        # in debug mode only the last few dialogues are evaluated.
        if not args.debug:
            step = 0
        # VALIDATION
        if args.train_valid: # if add valid data for training
            model.train()
            valid_loss = 0
            for name, val_file in list(val_dials.items())[-step:]:
                loader = multiwoz_dataloader.get_loader_by_dialogue(val_file, name,
                                                                    input_lang_word2index, output_lang_word2index,
                                                                    args.intent_type, intent2index)
                data = iter(loader).next()
                # Transfer to GPU
                if torch.cuda.is_available():
                    data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]
                input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data
                proba, _, _ = model.forward(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor,
                                            bs_tensor, mask_tensor) # pp added: mask_tensor
                proba = proba.view(-1, model.vocab_size) # flatten all predictions
                loss = model.gen_criterion(proba, target_tensor.view(-1))
                valid_loss += loss.item()
            valid_len = len(val_dials) # 1000
            valid_loss /= valid_len
            # pp added: evaluate valid
            print('Train Valid Loss: %.6f' % valid_loss)
        # pp added: per-epoch evaluation (no gradients needed).
        with torch.no_grad():
            model.eval()
            val_dials_gen = {}
            valid_loss = 0
            for name, val_file in list(val_dials.items())[-step:]: # for py3
                loader = multiwoz_dataloader.get_loader_by_dialogue(val_file, name,
                                                                    input_lang_word2index, output_lang_word2index,
                                                                    args.intent_type, intent2index)
                data = iter(loader).next()
                # Transfer to GPU
                if torch.cuda.is_available():
                    data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]
                input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data
                proba, _, _ = model.forward(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor,
                                            bs_tensor, mask_tensor) # pp added: mask_tensor
                proba = proba.view(-1, model.vocab_size) # flatten all predictions
                loss = model.gen_criterion(proba, target_tensor.view(-1))
                valid_loss += loss.item()
                # pp added: evaluation - Plan A
                # models.eval()
                output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,
                                                            db_tensor, bs_tensor, mask_tensor)
                # models.train()
                val_dials_gen[name] = output_words
            valid_len = len(val_dials) # 1000
            valid_loss /= valid_len
            # pp added: evaluate valid
            print('Valid Loss: %.6f' % valid_loss)
            # BLEU, MATCHES, SUCCESS, SCORE, P, R, F1
            Valid_Score = evaluator.summarize_report(val_dials_gen, mode='Valid')
            # Valid_Score = evaluateModel(val_dials_gen, val_dials, delex_path, mode='Valid')
            val_dials_gens.append(val_dials_gen) # save generated output for each epoch
            # Testing
            # pp added
            model.eval()
            test_dials_gen ={}
            test_loss = 0
            for name, test_file in list(test_dials.items())[-step:]:
                loader = multiwoz_dataloader.get_loader_by_dialogue(test_file, name,
                                                                    input_lang_word2index, output_lang_word2index,
                                                                    args.intent_type, intent2index)
                data = iter(loader).next()
                # Transfer to GPU
                if torch.cuda.is_available():
                    data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]
                input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data
                proba, _, _ = model.forward(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor,
                                            bs_tensor, mask_tensor) # pp added: mask_tensor
                proba = proba.view(-1, model.vocab_size) # flatten all predictions
                loss = model.gen_criterion(proba, target_tensor.view(-1))
                test_loss += loss.item()
                output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,
                                                            db_tensor, bs_tensor, mask_tensor)
                test_dials_gen[name] = output_words
            # pp added: evaluate test
            test_len = len(test_dials) # 1000
            test_loss /= test_len
            # BUGFIX: this line previously printed valid_loss under the
            # 'Test Loss' label.
            print('Test Loss: %.6f' % test_loss)
            Test_Score = evaluator.summarize_report(test_dials_gen, mode='Test')
            # Test_Score = evaluateModel(test_dials_gen, test_dials, delex_path, mode='Test')
            test_dials_gens.append(test_dials_gen)
            try:
                with open(args.decode_output + '/test_dials_gen_%s.json' % epoch, 'w') as outfile:
                    json.dump(test_dials_gen, outfile, indent=4)
            except:
                print('json.dump.err.test')
            model.train()
        # pp added: evaluation - Plan B
        # print(50 * '=' + 'Evaluating start...')
        # # eval_with_train(models)
        # eval_with_train3(models, val_dials, mode='valid')
        # eval_with_train3(models, test_dials, mode='test')
        # print(50 * '=' + 'Evaluating end...')
        model.saveModel(epoch)
        # BLEU, MATCHES, SUCCESS, SCORE, TOTAL
        Scores.append(tuple([epoch]) + Valid_Score + tuple(['%.2f'%np.exp(valid_loss)]) + Test_Score + tuple(['%.2f'%np.exp(test_loss)])) # combine the tuples; 11 elements
    # summary of evaluation metrics
    import pandas as pd
    # BLEU, MATCHES, SUCCESS, SCORE, P, R, F1
    fields = ['Epoch',
              'Valid BLEU', 'Valid Matches', 'Valid Success', 'Valid Score', 'Valid P', 'Valid R', 'Valid F1', 'Valid PPL',
              'Test BLEU', 'Test Matches', 'Test Success', 'Test Score', 'Test P', 'Test R', 'Test F1', 'Test PPL']
    df = pd.DataFrame(Scores, columns=fields)
    sdf = df.sort_values(by=['Valid Score'], ascending=False)
    print('Top3:', '=' * 60)
    print(sdf.head(3).transpose())
    print('Best:', '=' * 60) # selected by valid score
    best_df = sdf.head(1)[['Epoch', 'Test PPL', 'Test BLEU', 'Test Matches', 'Test Success', 'Test Score', 'Test P', 'Test R', 'Test F1']]
    print(best_df.transpose())
    # save best prediction to json, evaluated on valid set
    # FIX: np.int was removed in NumPy >= 1.24; extract the scalar
    # explicitly with .iloc[0] and use the builtin int.
    best_model_id = int(best_df['Epoch'].iloc[0]) - 1 # epoch start with 1
    try:
        with open(args.valid_output + '/val_dials_gen.json', 'w') as outfile:
            json.dump(val_dials_gens[best_model_id], outfile, indent=4)
    except:
        print('json.dump.err.valid')
    try:
        with open(args.decode_output + '/test_dials_gen.json', 'w') as outfile:
            json.dump(test_dials_gens[best_model_id], outfile, indent=4)
    except:
        print('json.dump.err.test')
    return best_df
if __name__ == '__main__':
    # Vocabulary dictionaries (word <-> index, both directions).
    input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index = util.loadDictionaries(mdir=args.data_dir)
    # pp added: load intents
    intent2index, index2intent = util.loadIntentDictionaries(intent_type=args.intent_type, intent_file='{}/intents.json'.format(args.data_dir)) if args.intent_type else (None, None)
    # pp added: data loaders
    train_loader = multiwoz_dataloader.get_loader('{}/train_dials.json'.format(args.data_dir), input_lang_word2index, output_lang_word2index, args.intent_type, intent2index, batch_size=args.batch_size)
    # valid_loader_list = multiwoz_dataloader.get_loader_by_full_dialogue('{}/val_dials.json'.format(args.data_dir), input_lang_word2index, output_lang_word2index, args.intent_type, intent2index)
    # test_loader_list = multiwoz_dataloader.get_loader_by_full_dialogue('{}/test_dials.json'.format(args.data_dir), input_lang_word2index, output_lang_word2index, args.intent_type, intent2index)
    # Load validation file list:
    with open('{}/val_dials.json'.format(args.data_dir)) as outfile:
        val_dials = json.load(outfile)
    # Load test file list:
    with open('{}/test_dials.json'.format(args.data_dir)) as outfile:
        test_dials = json.load(outfile)
    # delex_path = '%s/delex.json' % args.data_dir
    # create dir for generated outputs of valid and test set
    pp_mkdir(args.valid_output)
    pp_mkdir(args.decode_output)
    model = Model(args, input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index, intent2index, index2intent)
    # models = nn.DataParallel(models, device_ids=[0,1]) # latter for parallel
    model = model.to(detected_device)
    if args.load_param:
        model.loadModel(args.epoch_load)
    # Module-level evaluator: trainIters() reads this global.
    evaluator = MultiWozEvaluator('MultiWozEvaluator', delex_path=args.delex_path)
    # Test_Score = evaluator.summarize_report(test_dials_gen, mode='Test')
    trainIters(model, intent2index, n_epochs=args.max_epochs, args=args)
    all_end_time = datetime.datetime.now()
    print('End time={}'.format(all_end_time.strftime("%Y-%m-%d %H:%M:%S")))
    print('Use time={} seconds'.format((all_end_time-all_start_time).seconds))
|
{"/test.py": ["/models/evaluator.py", "/models/model.py", "/utils/util.py"], "/train.py": ["/models/model.py", "/utils/util.py"], "/utils/multiwoz_dataloader.py": ["/utils/util.py"], "/models/model.py": ["/utils/util.py"]}
|
19,582
|
Jiahuan-Pei/multiwoz-mdrg
|
refs/heads/master
|
/utils/multiwoz_dataloader.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Function :
@License : Copyright(C), ILPS group, Univeristy of Amsterdam
@Author : Jiahuan Pei
@Contact : j.pei@uva.nl
@Data: 2019-03-28
"""
import torch
import nltk, sys
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from utils.util import *
import json
from utils.util import default_device
class MultiwozSingleDataset(Dataset):
    """Turn-level Dataset over ONE MultiWOZ dialogue JSON entry.

    Each item is one (user turn, system turn) pair of the dialogue, together
    with its belief-state vector, DB-pointer vector and (optionally) an
    intent mask used for mixture-of-experts style decoding.
    """
    def __init__(self, val_file, name, src_word2id, trg_word2id, intent_type=None, intent2index=None):
        """Reads source and target sequences from txt files."""
        # val_file: dict with parallel lists 'usr', 'sys', 'bs', 'db', 'acts'
        # (one element per turn) — see SingleDialogueJSON2Tensors below.
        self.val_file = val_file
        self.name = name # the name of json dialogue
        self.src_word2id = src_word2id   # vocab for user (source) side
        self.trg_word2id = trg_word2id   # vocab for system (target) side
        self.intent2index = intent2index # intent -> row index in the mask, or None
        self.intent_type = intent_type   # one of 'domain' | 'sysact' | 'domain_act' | None
        self.device = default_device # torch.device('cpu')
        # Pre-tensorise the whole dialogue once; __getitem__ only indexes.
        self.input_tensor, self.target_tensor, self.bs_tensor, self.db_tensor, self.mask_tensor = self.SingleDialogueJSON2Tensors()
        self.datalen = self.__len__()
    def __getitem__(self, index): # data for one dialogue file
        """Returns one data pair (source and target)."""
        input_tensor, target_tensor, bs_tensor, db_tensor = \
            self.input_tensor[index], self.target_tensor[index], self.bs_tensor[index], self.db_tensor[index]
        # mask_tensor is an empty list when no intent dictionary was given,
        # so the truthiness test yields None in that case.
        mask_tensor = self.mask_tensor[index] if self.mask_tensor else None
        return input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor
    def __len__(self):
        # Number of turns in this dialogue.
        return len(self.input_tensor)
    def input_word2index(self, index):
        # Map a source-side token to its id, falling back to UNK_token.
        if index in self.src_word2id:
            return self.src_word2id[index]
        else:
            return UNK_token
    def out_word2index(self, index):
        # Map a target-side token to its id, falling back to UNK_token.
        if index in self.trg_word2id:
            return self.trg_word2id[index]
        else:
            return UNK_token
    def SingleDialogueJSON2Tensors(self):
        """Convert the dialogue JSON entry into per-turn tensor lists.

        Returns five parallel lists (one element per turn): input token ids,
        target token ids, belief-state floats, DB-pointer floats, and the
        intent mask tensors (empty list when intent2index is None).
        """
        val_file = self.val_file
        input_tensor = []; target_tensor = []; bs_tensor = []; db_tensor = []; mask_tensor = []
        # NOTE(review): the loop variable `sys` shadows the imported sys
        # module inside this method (harmless here, but worth renaming).
        for idx, (usr, sys, bs, db, acts) in enumerate(
                zip(val_file['usr'], val_file['sys'], val_file['bs'], val_file['db'], val_file['acts'])):
            # Tokens are whitespace-separated; append EOS to every sequence.
            tensor = [self.input_word2index(word) for word in usr.strip(' ').split(' ')] + [EOS_token] # models.input_word2index(word)
            input_tensor.append(torch.as_tensor(tensor, dtype=torch.long, device=self.device)) # .view(-1, 1))
            tensor = [self.out_word2index(word) for word in sys.strip(' ').split(' ')] + [EOS_token]
            target_tensor.append(torch.as_tensor(tensor, dtype=torch.long, device=self.device)) # .view(-1, 1)
            # target_tensor.append(torch.LongTensor(tensor)) # .view(-1, 1)
            bs_tensor.append([float(belief) for belief in bs])
            db_tensor.append([float(pointer) for pointer in db])
            # pp added: mask_i=0 if i_th it contains i_th intent
            if self.intent2index:
                # Mask starts as all ones; rows for intents present in this
                # turn's acts are zeroed below.
                tensor = torch.ones(len(self.intent2index), 1)
                # change acts & find index
                intent_type = self.intent_type
                if intent_type == 'domain':
                    inds = [self.intent2index[act.split('-')[0]] for act in acts]
                elif intent_type == 'sysact':
                    inds = [self.intent2index[act.split('-')[1]] for act in acts]
                elif intent_type == 'domain_act':
                    inds = [self.intent2index[act] for act in acts] # the index of the chosen intents
                # NOTE(review): if intent_type is none of the three values
                # above, `inds` is unbound and the next line raises NameError.
                # Also `tensor[:][inds]` is equivalent to `tensor[inds]`
                # (the [:] slice is a redundant view).
                tensor[:][inds] = 0
                mask_tensor.append(torch.as_tensor(tensor, dtype=torch.uint8, device=self.device))
        return input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor # each one is a list of tensor
def collate_fn(data, device=default_device):
    """Assemble a mini-batch from a list of (input, target, bs, db, mask) tuples.

    Returns padded input/target id batches with their original lengths,
    float belief-state and DB-pointer tensors, and a stacked mask tensor
    (or None when the samples carry no intent mask).
    """
    # All samples of a batch either carry a mask or none do, so probing the
    # first sample is sufficient.
    masks_present = data[0][-1] is not None
    inputs, targets, beliefs, pointers, masks = zip(*data)
    padded_inputs, input_lengths = padSequence(inputs)
    padded_targets, target_lengths = padSequence(targets)
    belief_batch = torch.as_tensor(beliefs, dtype=torch.float, device=device)
    pointer_batch = torch.as_tensor(pointers, dtype=torch.float, device=device)
    if masks_present:
        # [B, I, 1] -> [I, B, 1]: one mask row per intent, batch second.
        mask_batch = torch.stack(masks).permute((1, 0, 2))
    else:
        mask_batch = None
    return padded_inputs, input_lengths, padded_targets, target_lengths, belief_batch, pointer_batch, mask_batch
def get_loader(file_path, src_word2id, trg_word2id, intent_type=None, intent2index=None, batch_size=1):
    """Returns a shuffled turn-level DataLoader over all dialogues in a JSON file.

    Args:
        file_path: path to a dialogues JSON (filename -> dialogue dict).
        src_word2id / trg_word2id: vocabularies for user / system turns.
        intent_type, intent2index: optional intent-mask configuration,
            forwarded to MultiwozSingleDataset.
        batch_size: mini-batch size for the returned loader.
    """
    # Use a context manager so the file handle is closed (the original
    # `json.load(open(file_path))` leaked it).
    with open(file_path) as f:
        dials = json.load(f)
    # One dataset per dialogue, concatenated into a single turn-level dataset.
    dataset_list = [
        MultiwozSingleDataset(dials[name], name, src_word2id, trg_word2id, intent_type, intent2index)
        for name in dials.keys()
    ]
    datasets = ConcatDataset(dataset_list)
    # data loader for custom dataset
    data_loader = DataLoader(dataset=datasets,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0,
                             collate_fn=collate_fn)
    return data_loader
def get_loader_by_dialogue(val_file, name, src_word2id, trg_word2id, intent_type=None, intent2index=None):
    '''Return a dataloader for a full dialogue, the batch size is the len of the dialogue'''
    dataset = MultiwozSingleDataset(val_file, name, src_word2id, trg_word2id, intent_type, intent2index)
    # One batch == the whole dialogue; keep turns in their original order.
    return DataLoader(dataset=dataset,
                      batch_size=len(dataset),
                      shuffle=False,  # donnot change the order
                      num_workers=0,
                      collate_fn=collate_fn)
def get_loader_by_full_dialogue(file_path, src_word2id, trg_word2id, intent_type=None, intent2index=None):
    '''Return a list of dataloader, each one load a full dialogue data.

    One DataLoader per dialogue in the JSON file; each yields the whole
    dialogue as a single ordered batch (see get_loader_by_dialogue).
    '''
    # Close the file handle deterministically (the original leaked it).
    with open(file_path) as f:
        dials = json.load(f)
    data_loader_list = []
    for name in dials.keys():
        val_file = dials[name]
        data_loader = get_loader_by_dialogue(val_file, name, src_word2id, trg_word2id, intent_type, intent2index)
        data_loader_list.append(data_loader)
    return data_loader_list
if __name__ == "__main__":
    # Smoke test: build a turn-level loader from the sibling repo's data dir
    # and print every batch.
    data_dir = '../multiwoz1-moe/data'
    # intent_type = 'domain'
    intent_type = None
    # loadDictionaries / loadIntentDictionaries come from utils.util (star import).
    input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index = loadDictionaries(mdir=data_dir)
    intent2index, index2intent = loadIntentDictionaries(intent_type=intent_type, intent_file='{}/intents.json'.format(data_dir)) if intent_type else (None, None)
    file_path = '{}/train_dials.json'.format(data_dir)
    train_loader = get_loader(file_path, input_lang_word2index, output_lang_word2index, intent_type, intent2index)
    for data in train_loader:
        print(data)
|
{"/test.py": ["/models/evaluator.py", "/models/model.py", "/utils/util.py"], "/train.py": ["/models/model.py", "/utils/util.py"], "/utils/multiwoz_dataloader.py": ["/utils/util.py"], "/models/model.py": ["/utils/util.py"]}
|
19,583
|
Jiahuan-Pei/multiwoz-mdrg
|
refs/heads/master
|
/models/evaluator.py
|
import random
import sys
sys.path.append('..')
random.seed(111)
from utils.dbPointer import queryResultVenues
from utils.delexicalize import *
from utils.nlp import *
domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital', 'police']
requestables = ['phone', 'address', 'postcode', 'reference', 'id']
def parseGoal(goal, d, domain):
    """Parse one domain of a user goal into {informable, requestable, booking}.

    Args:
        goal: dict being built up; goal[domain] is (re)created here.
        d: raw dialogue dict; d['goal'][domain] holds 'info'/'reqt'/'book'.
        domain: domain name, e.g. 'restaurant' or 'train'.

    Returns:
        The same `goal` dict, mutated in place, for chaining.
    """
    # (Removed a dead duplicate assignment `goal[domain] = {}` that was
    # immediately overwritten by the line below.)
    goal[domain] = {'informable': [], 'requestable': [], 'booking': []}
    if 'info' in d['goal'][domain]:
        if domain == 'train':
            # we consider dialogues only where train had to be booked!
            if 'book' in d['goal'][domain]:
                goal[domain]['requestable'].append('reference')
            if 'reqt' in d['goal'][domain]:
                if 'trainID' in d['goal'][domain]['reqt']:
                    goal[domain]['requestable'].append('id')
        else:
            if 'reqt' in d['goal'][domain]:
                for s in d['goal'][domain]['reqt']: # addtional requests:
                    if s in ['phone', 'address', 'postcode', 'reference', 'id']:
                        # ones that can be easily delexicalized
                        goal[domain]['requestable'].append(s)
            if 'book' in d['goal'][domain]:
                goal[domain]['requestable'].append("reference")
        goal[domain]["informable"] = d['goal'][domain]['info']
        if 'book' in d['goal'][domain]:
            goal[domain]["booking"] = d['goal'][domain]['book']
    return goal
# dialouges is a dict of list, each list consists of generated responses
def evaluateModel(dialogues, val_dials, delex_path, mode='Valid'):
    """Gather corpus-level Inform (match), Success and BLEU statistics.

    Args:
        dialogues: dict of filename -> list of generated system turns.
        val_dials: reference dialogues (same keys), each with a 'sys' list.
        delex_path: path to the delexicalised corpus JSON used for the
            goal/requestable ground truth.
        mode: label used in the printed report ('Valid' / 'Test').

    Returns:
        (BLEU, MATCHES, SUCCESS, SCORE, total) on success, or None when the
        final scoring step fails (e.g. empty corpus) — preserving the
        original best-effort behaviour.
    """
    # Close the delex file deterministically (the original leaked the handle).
    with open(delex_path, 'r') as fin1:
        delex_dialogues = json.load(fin1)
    successes, matches = 0, 0
    total = 0
    gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0, 0], 'taxi': [0, 0, 0],
                 'hospital': [0, 0, 0], 'police': [0, 0, 0]}
    sng_gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0, 0],
                     'taxi': [0, 0, 0],
                     'hospital': [0, 0, 0], 'police': [0, 0, 0]}
    for filename, dial in dialogues.items():
        data = delex_dialogues[filename]
        # Ground-truth goal and requestable slots from the corpus dialogue.
        goal, _, _, requestables, _ = evaluateRealDialogue(data, filename)
        success, match, stats = evaluateGeneratedDialogue(dial, goal, data, requestables)
        successes += success
        matches += match
        total += 1
        for domain in gen_stats.keys():
            gen_stats[domain][0] += stats[domain][0]
            gen_stats[domain][1] += stats[domain][1]
            gen_stats[domain][2] += stats[domain][2]
        if 'SNG' in filename:  # single-domain dialogues are also tracked separately
            for domain in gen_stats.keys():
                sng_gen_stats[domain][0] += stats[domain][0]
                sng_gen_stats[domain][1] += stats[domain][1]
                sng_gen_stats[domain][2] += stats[domain][2]
    # BLEU: pair each generated turn with its reference turn; dialogues whose
    # generated turn count differs from the reference are skipped.
    corpus = []
    model_corpus = []
    bscorer = BLEUScorer()
    count_wrong_len = 0
    for dialogue in dialogues:
        data = val_dials[dialogue]
        corpus_turns = [[turn] for turn in data['sys']]
        model_turns = [[turn] for turn in dialogues[dialogue]]
        if len(model_turns) == len(corpus_turns):
            corpus.extend(corpus_turns)
            model_corpus.extend(model_turns)
        else:
            count_wrong_len += 1
            print('wrong length!!!')
    if count_wrong_len:
        print('count_wrong_len_ratio={}/{}'.format(count_wrong_len, len(dialogues)))
    # Print results; scoring may legitimately fail (e.g. total == 0), in
    # which case we report the error and return None as before.
    try:
        BLEU = bscorer.score(model_corpus, corpus)
        MATCHES = (matches / float(total) * 100)
        SUCCESS = (successes / float(total) * 100)
        SCORE = 0.5 * MATCHES + 0.5 * SUCCESS + 100 * BLEU
        print('%s BLEU: %.4f' % (mode, BLEU))
        print('%s Matches: %2.2f%%' % (mode, MATCHES))
        print('%s Success: %2.2f%%' % (mode, SUCCESS))
        print('%s Score: %.4f' % (mode, SCORE))
        print('%s Dialogues: %s' % (mode, total))
        return BLEU, MATCHES, SUCCESS, SCORE, total
    except Exception:  # narrowed from a bare `except:` so KeyboardInterrupt etc. propagate
        print('SCORE ERROR')
def evaluateModelOnIntent(dialogues, val_dials, delex_path, intent, mode='Valid'):
    """Gathers statistics for the whole sets.

    Like evaluateModel, but restricts goal/requestables and BLEU turns to a
    single intent (compared against the domain part of each act).
    """
    try:
        fin1 = open(delex_path, 'r')
    except:
        # NOTE(review): if open() fails, `fin1` is unbound and the json.load
        # below raises NameError; the bare except also swallows
        # KeyboardInterrupt. Consider `with open(...)` + a narrow except.
        print('cannot find the delex file!=', delex_path)
    delex_dialogues = json.load(fin1)
    successes, matches = 0, 0
    total = 0
    total_turns = 0   # turns that carry the requested intent
    total_dials = 0   # dialogues containing at least one such turn
    gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0,0], 'taxi': [0, 0, 0],
                 'hospital': [0, 0, 0], 'police': [0, 0, 0]}
    sng_gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0, 0],
                     'taxi': [0, 0, 0],
                     'hospital': [0, 0, 0], 'police': [0, 0, 0]}
    for filename, dial in dialogues.items():
        data = delex_dialogues[filename]
        goal, _, _, requestables, _ = evaluateRealDialogue(data, filename)
        # filter goal & requestbles using domain
        # (substring match: keep only entries whose key contains the intent)
        new_goal = {}; new_req = {}
        for g in goal:
            if intent.lower() in g:
                new_goal[g] = goal[g]
        for r in requestables:
            if intent.lower() in r:
                new_req[r]=requestables[r]
        success, match, stats = evaluateGeneratedDialogue(dial, new_goal, data, new_req)
        successes += success
        matches += match
        total += 1
        for domain in gen_stats.keys():
            gen_stats[domain][0] += stats[domain][0]
            gen_stats[domain][1] += stats[domain][1]
            gen_stats[domain][2] += stats[domain][2]
        if 'SNG' in filename:
            for domain in gen_stats.keys():
                sng_gen_stats[domain][0] += stats[domain][0]
                sng_gen_stats[domain][1] += stats[domain][1]
                sng_gen_stats[domain][2] += stats[domain][2]
    # BLUE SCORE
    corpus = []
    model_corpus = []
    bscorer = BLEUScorer()
    count_wrong_len = 0
    for dialogue in dialogues:
        data = val_dials[dialogue]
        model_turns, corpus_turns = [], []
        flag = False
        if len(data['sys']) == len(dialogues[dialogue]):
            for idx, turn in enumerate(data['sys']):
                act = data['acts'][idx] # for different intents
                # Acts look like 'Domain-Act'; keep the domain part only.
                holding_intents = [a.split('-')[0] for a in act]
                model_turn = dialogues[dialogue][idx]
                if intent in holding_intents:
                    corpus_turns.append([turn])
                    model_turns.append([model_turn])
                    total_turns += 1
                    flag = True
            corpus.extend(corpus_turns)
            model_corpus.extend(model_turns)
        else:
            count_wrong_len += 1
            print('wrong length!!!')
        if flag:
            total_dials +=1
    if count_wrong_len:
        print('count_wrong_len_ratio={}/{}'.format(count_wrong_len, len(dialogues)))
    # Print results
    try:
        BLEU = bscorer.score(model_corpus, corpus)
        MATCHES = (matches / float(total) * 100)
        SUCCESS = (successes / float(total) * 100)
        SCORE = 0.5 * MATCHES + 0.5 * SUCCESS + 100 * BLEU
        print('%s BLEU: %.4f' % (mode, BLEU))
        print('%s Matches: %2.2f%%' % (mode, MATCHES))
        print('%s Success: %2.2f%%' % (mode, SUCCESS))
        print('%s Score: %.4f' % (mode, SCORE))
        print('%s Dialogues: %s' % (mode, total_dials))
        print('%s Turns: %s' % (mode, total_turns))
        return BLEU, MATCHES, SUCCESS, SCORE, total
    except:
        # NOTE(review): bare except; returns None implicitly on failure.
        print('SCORE ERROR')
def evaluateGeneratedDialogue(dialog, goal, realDialogue, real_requestables):
    """Evaluates the dialogue created by the models.
    First we load the user goal of the dialogue, then for each turn
    generated by the system we look for key-words.
    For the Inform rate we look whether the entity was proposed.
    For the Success rate we look for requestables slots

    Args:
        dialog: list of generated (delexicalised) system turns.
        goal: parsed user goal (see parseGoal), one entry per domain.
        realDialogue: the corpus dialogue dict ('log', 'goal', ...).
        real_requestables: dict domain -> list of requested slot names.

    Returns:
        (success, match, stats): success/match are 0/1 for the dialogue;
        stats maps each domain to [match, success, in_goal] counters.
    """
    # for computing corpus success
    requestables = ['phone', 'address', 'postcode', 'reference', 'id']
    # CHECK IF MATCH HAPPENED
    provided_requestables = {}
    venue_offered = {}
    domains_in_goal = []
    for domain in goal.keys():
        venue_offered[domain] = []
        provided_requestables[domain] = []
        domains_in_goal.append(domain)
    for t, sent_t in enumerate(dialog):
        for domain in goal.keys():
            # for computing success
            if '[' + domain + '_name]' in sent_t or '_id' in sent_t:
                if domain in ['restaurant', 'hotel', 'attraction', 'train']:
                    # HERE YOU CAN PUT YOUR BELIEF STATE ESTIMATION
                    # t*2 + 1 is the system side of turn t in the interleaved log.
                    venues = queryResultVenues(domain, realDialogue['log'][t*2 + 1])
                    # if venue has changed
                    if len(venue_offered[domain]) == 0 and venues:
                        venue_offered[domain] = random.sample(venues, 1)
                    else:
                        flag = False
                        for ven in venues:
                            if venue_offered[domain][0] == ven:
                                flag = True
                                break
                        if not flag and venues:  # sometimes there are no results so sample won't work
                            # print venues
                            venue_offered[domain] = random.sample(venues, 1)
                else:  # not limited so we can provide one
                    venue_offered[domain] = '[' + domain + '_name]'
            # ATTENTION: assumption here - we didn't provide phone or address twice! etc
            for requestable in requestables:
                if requestable == 'reference':
                    if domain + '_reference' in sent_t:
                        # Only count a reference if the DB pointer allowed booking
                        # for that domain at this turn.
                        if 'restaurant_reference' in sent_t:
                            if realDialogue['log'][t * 2]['db_pointer'][-5] == 1:  # if pointer was allowing for that?
                                provided_requestables[domain].append('reference')
                        elif 'hotel_reference' in sent_t:
                            if realDialogue['log'][t * 2]['db_pointer'][-3] == 1:  # if pointer was allowing for that?
                                provided_requestables[domain].append('reference')
                        elif 'train_reference' in sent_t:
                            if realDialogue['log'][t * 2]['db_pointer'][-1] == 1:  # if pointer was allowing for that?
                                provided_requestables[domain].append('reference')
                        else:
                            provided_requestables[domain].append('reference')
                else:
                    if domain + '_' + requestable + ']' in sent_t:
                        provided_requestables[domain].append(requestable)
    # if name was given in the task
    for domain in goal.keys():
        # if name was provided for the user, the match is being done automatically
        if 'info' in realDialogue['goal'][domain]:
            if 'name' in realDialogue['goal'][domain]['info']:
                venue_offered[domain] = '[' + domain + '_name]'
        # special domains - entity does not need to be provided
        if domain in ['taxi', 'police', 'hospital']:
            venue_offered[domain] = '[' + domain + '_name]'
        if domain == 'train':
            if not venue_offered[domain]:
                if 'reqt' in realDialogue['goal'][domain] and 'id' not in realDialogue['goal'][domain]['reqt']:
                    venue_offered[domain] = '[' + domain + '_name]'
    """
    Given all inform and requestable slots
    we go through each domain from the user goal
    and check whether right entity was provided and
    all requestable slots were given to the user.
    The dialogue is successful if that's the case for all domains.
    """
    # HARD EVAL
    stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0,0], 'taxi': [0, 0, 0],
             'hospital': [0, 0, 0], 'police': [0, 0, 0]}
    match = 0
    success = 0
    # MATCH
    for domain in goal.keys():
        match_stat = 0
        if domain in ['restaurant', 'hotel', 'attraction', 'train']:
            goal_venues = queryResultVenues(domain, goal[domain]['informable'], real_belief=True)
            if type(venue_offered[domain]) is str and '_name' in venue_offered[domain]:
                match += 1
                match_stat = 1
            elif len(venue_offered[domain]) > 0 and venue_offered[domain][0] in goal_venues:
                match += 1
                match_stat = 1
        else:
            if domain + '_name]' in venue_offered[domain]:
                match += 1
                match_stat = 1
        stats[domain][0] = match_stat
        stats[domain][2] = 1
    # Match is all-or-nothing over the goal's domains.
    if match == len(goal.keys()):
        match = 1
    else:
        match = 0
    # SUCCESS
    if match:
        for domain in domains_in_goal:
            success_stat = 0
            domain_success = 0
            if len(real_requestables[domain]) == 0:
                success += 1
                success_stat = 1
                stats[domain][1] = success_stat
                continue
            # if values in sentences are super set of requestables
            for request in set(provided_requestables[domain]):
                if request in real_requestables[domain]:
                    domain_success += 1
            if domain_success >= len(real_requestables[domain]):
                success += 1
                success_stat = 1
            stats[domain][1] = success_stat
        # final eval
        if success >= len(real_requestables):
            success = 1
        else:
            success = 0
    #rint requests, 'DIFF', requests_real, 'SUCC', success
    return success, match, stats
def evaluateRealDialogue(dialog, filename):
    """Evaluation of the real dialogue.
    First we loads the user goal and then go through the dialogue history.
    Similar to evaluateGeneratedDialogue above.

    Args:
        dialog: corpus dialogue dict ('goal', 'log', ...).
        filename: dialogue filename (unused here; kept for the caller's API).

    Returns:
        (goal, success, match, real_requestables, stats) — the parsed goal,
        0/1 success and match for the reference dialogue, the per-domain
        requestable slots, and per-domain [match, success, in_goal] counters.
    """
    domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital', 'police']
    requestables = ['phone', 'address', 'postcode', 'reference', 'id']
    # get the list of domains in the goal
    domains_in_goal = []
    goal = {}
    for domain in domains:
        if dialog['goal'][domain]:
            goal = parseGoal(goal, dialog, domain)
            domains_in_goal.append(domain)
    # compute corpus success
    real_requestables = {}
    provided_requestables = {}
    venue_offered = {}
    for domain in goal.keys():
        provided_requestables[domain] = []
        venue_offered[domain] = []
        real_requestables[domain] = goal[domain]['requestable']
    # iterate each turn
    # system turns are the odd-indexed entries of the interleaved log
    m_targetutt = [turn['text'] for idx, turn in enumerate(dialog['log']) if idx % 2 == 1]
    for t in range(len(m_targetutt)):
        for domain in domains_in_goal:
            sent_t = m_targetutt[t]
            # for computing match - where there are limited entities
            if domain + '_name' in sent_t or '_id' in sent_t:
                if domain in ['restaurant', 'hotel', 'attraction', 'train']:
                    # HERE YOU CAN PUT YOUR BELIEF STATE ESTIMATION
                    venues = queryResultVenues(domain, dialog['log'][t * 2 + 1])
                    # if venue has changed
                    if len(venue_offered[domain]) == 0 and venues:
                        venue_offered[domain] = random.sample(venues, 1)
                    else:
                        flag = False
                        for ven in venues:
                            if venue_offered[domain][0] == ven:
                                flag = True
                                break
                        if not flag and venues:  # sometimes there are no results so sample won't work
                            #print venues
                            venue_offered[domain] = random.sample(venues, 1)
                else:  # not limited so we can provide one
                    venue_offered[domain] = '[' + domain + '_name]'
            for requestable in requestables:
                # check if reference could be issued
                if requestable == 'reference':
                    if domain + '_reference' in sent_t:
                        if 'restaurant_reference' in sent_t:
                            if dialog['log'][t * 2]['db_pointer'][-5] == 1:  # if pointer was allowing for that?
                                provided_requestables[domain].append('reference')
                        elif 'hotel_reference' in sent_t:
                            if dialog['log'][t * 2]['db_pointer'][-3] == 1:  # if pointer was allowing for that?
                                provided_requestables[domain].append('reference')
                            #return goal, 0, match, real_requestables
                        elif 'train_reference' in sent_t:
                            if dialog['log'][t * 2]['db_pointer'][-1] == 1:  # if pointer was allowing for that?
                                provided_requestables[domain].append('reference')
                        else:
                            provided_requestables[domain].append('reference')
                else:
                    if domain + '_' + requestable in sent_t:
                        provided_requestables[domain].append(requestable)
    # offer was made?
    for domain in domains_in_goal:
        # if name was provided for the user, the match is being done automatically
        if 'info' in dialog['goal'][domain]:
            if 'name' in dialog['goal'][domain]['info']:
                venue_offered[domain] = '[' + domain + '_name]'
        # special domains - entity does not need to be provided
        if domain in ['taxi', 'police', 'hospital']:
            venue_offered[domain] = '[' + domain + '_name]'
        # if id was not requested but train was found we dont want to override it to check if we booked the right train
        if domain == 'train' and (not venue_offered[domain] and 'id' not in goal['train']['requestable']):
            venue_offered[domain] = '[' + domain + '_name]'
    # HARD (0-1) EVAL
    stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0,0], 'taxi': [0, 0, 0],
             'hospital': [0, 0, 0], 'police': [0, 0, 0]}
    match, success = 0, 0
    # MATCH
    for domain in goal.keys():
        match_stat = 0
        if domain in ['restaurant', 'hotel', 'attraction', 'train']:
            goal_venues = queryResultVenues(domain, dialog['goal'][domain]['info'], real_belief=True)
            #print(goal_venues)
            if type(venue_offered[domain]) is str and '_name' in venue_offered[domain]:
                match += 1
                match_stat = 1
            elif len(venue_offered[domain]) > 0 and venue_offered[domain][0] in goal_venues:
                match += 1
                match_stat = 1
        else:
            if domain + '_name' in venue_offered[domain]:
                match += 1
                match_stat = 1
        stats[domain][0] = match_stat
        stats[domain][2] = 1
    # Match is all-or-nothing over the goal's domains.
    if match == len(goal.keys()):
        match = 1
    else:
        match = 0
    # SUCCESS
    if match:
        for domain in domains_in_goal:
            domain_success = 0
            success_stat = 0
            if len(real_requestables[domain]) == 0:
                # check that
                success += 1
                success_stat = 1
                stats[domain][1] = success_stat
                continue
            # if values in sentences are super set of requestables
            for request in set(provided_requestables[domain]):
                if request in real_requestables[domain]:
                    domain_success += 1
            if domain_success >= len(real_requestables[domain]):
                success +=1
                success_stat = 1
            stats[domain][1] = success_stat
        # final eval
        if success >= len(real_requestables):
            success = 1
        else:
            success = 0
    return goal, success, match, real_requestables, stats
def evaluateModelGivenFile(gen_path, ref_path, delex_path='data/multi-woz/delex.json'):
    """Evaluate a generated-dialogues JSON file against a reference file.

    Args:
        gen_path: JSON of filename -> {'sys': [generated turns, ...], ...}.
        ref_path: reference dialogues JSON.
        delex_path: delexicalised corpus JSON; parameterised (was hard-coded)
            with the original value as default, so existing callers are
            unaffected.
    """
    with open(ref_path, 'r') as ref, open(gen_path, 'r') as gen:
        ref_dialogues = json.load(ref)
        # Keep only the generated system turns per dialogue.
        gen_dialogues = {}
        for k, v in json.load(gen).items():
            gen_dialogues[k] = v['sys']
    evaluateModel(gen_dialogues, ref_dialogues, delex_path, mode='Test')
    return
# use the open source evaluation for nlg-eval https://github.com/Maluuba/nlg-eval
def evaluateNLG(gen_dials, ref_dialogues):
hyp_list, ref_list = [], []
for fname in gen_dials:
hyp_list.extend(gen_dials[fname]) # list of sentence string
ref_list.extend([s.strip() for s in ref_dialogues[fname]['sys']]) # list of ref_list, each ref_list is a list of sentence string
ref_lists = [ref_list] # only put 1 reference
from nlgeval import NLGEval
nlgeval = NLGEval() # loads the models
metrics_dict = nlgeval.compute_metrics(ref_list=ref_lists, hyp_list=hyp_list)
print(metrics_dict)
return metrics_dict
def evaluateNLGFile(gen_dials_fpath, ref_dialogues_fpath):
    """File-path wrapper around evaluateNLG.

    Loads the generated and reference dialogue JSON files and delegates the
    metric computation (the original duplicated evaluateNLG's body verbatim;
    delegating removes the duplication without changing behaviour).
    Returns the metrics dict produced by nlgeval (also printed).
    """
    with open(gen_dials_fpath, 'r') as gen, open(ref_dialogues_fpath, 'r') as ref:
        gen_dials = json.load(gen)
        ref_dialogues = json.load(ref)
    return evaluateNLG(gen_dials, ref_dialogues)
def evaluateNLGFiles(gen_dials_fpaths, ref_dialogues_fpath):
    """Score several generated-dialogue files against one reference file.

    Loads the nlg-eval models once, then prints each file path followed by
    its metrics dict. Returns None.
    """
    from nlgeval import NLGEval
    nlgeval = NLGEval()  # loads the models
    with open(ref_dialogues_fpath, 'r') as ref:
        ref_dialogues = json.load(ref)
    for path in gen_dials_fpaths:
        with open(path, 'r') as gen:
            gen_dials = json.load(gen)
        hyp_list, ref_list = [], []
        for fname in gen_dials:
            hyp_list.extend(gen_dials[fname])  # list of sentence string
            ref_list.extend(s.strip() for s in ref_dialogues[fname]['sys'])
        ref_lists = [ref_list]  # only put 1 reference
        metrics_dict = nlgeval.compute_metrics(ref_list=ref_lists, hyp_list=hyp_list)
        print(path)
        print(metrics_dict)
if __name__ == '__main__':
    # No standalone behaviour; uncomment below to run a file-based evaluation.
    pass
    # evaluteNLGFiles(gen_dials_fpath='results/bsl_20190510161309/data/test_dials/test_dials_gen.json', ref_dialogues_fpath='data/test_dials.json')
|
{"/test.py": ["/models/evaluator.py", "/models/model.py", "/utils/util.py"], "/train.py": ["/models/model.py", "/utils/util.py"], "/utils/multiwoz_dataloader.py": ["/utils/util.py"], "/models/model.py": ["/utils/util.py"]}
|
19,584
|
Jiahuan-Pei/multiwoz-mdrg
|
refs/heads/master
|
/multiwoz/Test.py
|
from multiwoz.Evaluators import *
# Driver script: reports Inform/Success/BLEU for the corpus references and
# for two saved generation files, using MultiWozEvaluator.summarize_report.
random.seed(1)
# diag={}
# for filename, dialogues in json.load(open('data/test_dials.json')).items():
#     diag[filename] = dialogues['sys']
# evaluateModel(diag, json.load(open('data/test_dials.json')), mode='test')
evaluator=MultiWozEvaluator('MultiWozEvaluator')
diag={}
# for filename, dialogues in evaluator.delex_dialogues.items():
#     one_diag=[]
#     for t, sent_t in enumerate(dialogues['log']):
#         if t%2==1:
#             one_diag.append(sent_t['text'])
#     diag[filename]=one_diag
# print(evaluator.evaluate_match_success(evaluator.delex_dialogues, mode='rollout'))
# random.seed(1)
# Upper-bound check: score the reference system turns against themselves.
for filename, dialogues in json.load(open('data/multi-woz/test_dials.json')).items():
    diag[filename] = dialogues['sys']
evaluator.summarize_report(diag)
# Score two saved model outputs (baseline vs mixture-of-experts run).
path_bsl = 'results/test_dials_gen(bsl_m2_20190510161318).json'
path_moe = 'results/test_dials_gen(moe1_20190510165545).json'
with open(path_bsl) as fr:
    print(path_bsl)
    evaluator.summarize_report(json.load(fr))
with open(path_moe) as fr:
    print(path_moe)
    evaluator.summarize_report(json.load(fr))
|
{"/test.py": ["/models/evaluator.py", "/models/model.py", "/utils/util.py"], "/train.py": ["/models/model.py", "/utils/util.py"], "/utils/multiwoz_dataloader.py": ["/utils/util.py"], "/models/model.py": ["/utils/util.py"]}
|
19,585
|
Jiahuan-Pei/multiwoz-mdrg
|
refs/heads/master
|
/models/model.py
|
from __future__ import division, print_function, unicode_literals
import json
import math
import operator
import os
import random
from io import open
from queue import PriorityQueue # for py3
from functools import reduce # for py3
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import models.policy as policy
# pp added: used for PriorityQueue python3, add an extra para in .put() method
from itertools import count
unique = count()
from utils.util import SOS_token, EOS_token, PAD_token, detected_device
PAD_model = 0 # used for set 0 elements in tensor
default_device = detected_device
# SOS_token = 0
# EOS_token = 1
# UNK_token = 2
# PAD_token = 3
# use_moe_loss = True # inner models weighting loss
# learn_loss_weight = True
# use_moe_model = True # inner models structure partition
#
# pp added
# @total_ordering
# class PriorityElem:
# def __init__(self, elem_to_wrap):
# self.wrapped_elem = elem_to_wrap
#
# def __lt__(self, other):
# return self.wrapped_elem.priority < other.wrapped_elem.priority
# Shawn beam search decoding
class BeamSearchNode(object):
    """One node of the beam-search tree.

    Stores the decoder hidden state, a back-pointer to the previous node,
    the word id emitted at this step, the accumulated log-probability and
    the sequence length so far.
    """
    def __init__(self, h, prevNode, wordid, logp, leng):
        self.h = h                  # decoder hidden state at this node
        self.prevNode = prevNode    # back-pointer for path reconstruction
        self.wordid = wordid        # token id emitted at this step
        self.logp = logp            # accumulated log-probability
        self.leng = leng            # number of tokens so far

    def eval(self, repeatPenalty, tokenReward, scoreTable, alpha=1.0):
        """Length-normalised score of the partial hypothesis.

        The repeatPenalty/tokenReward/scoreTable arguments and the alpha
        keyword are currently unused: reward is fixed at 0 and alpha is
        overridden to 1.0, so the score reduces to logp / (leng - 1).
        """
        reward = 0
        alpha = 1.0
        length_norm = float(self.leng - 1 + 1e-6)  # epsilon avoids div-by-zero at leng == 1
        return self.logp / length_norm + alpha * reward
def init_lstm(cell, gain=1):
    """Initialise an LSTM cell: orthogonal recurrent weights (via init_gru)
    plus a positive forget-gate bias of 1.0 on both bias vectors."""
    init_gru(cell, gain)
    # positive forget gate bias (Jozefowicz et al., 2015)
    # PyTorch bias layout is [input | forget | cell | output] gates, each of
    # length hidden_size, so the forget-gate slice is the second quarter.
    for _, _, ih_b, hh_b in cell.all_weights:
        l = len(ih_b)
        ih_b[l // 4:l // 2].data.fill_(1.0)
        hh_b[l // 4:l // 2].data.fill_(1.0)
def init_gru(gru, gain=1):
gru.reset_parameters()
for _, hh, _, _ in gru.all_weights:
for i in range(0, hh.size(0), gru.hidden_size):
torch.nn.init.orthogonal_(hh[i:i + gru.hidden_size], gain=gain)
def whatCellType(input_size, hidden_size, cell_type, dropout_rate):
    """Factory for a recurrent cell by name.

    Supported names: 'rnn', 'gru', 'lstm', 'bigru', 'bilstm'. The cell is
    built with batch_first=False and initialised (orthogonal recurrent
    weights; LSTMs additionally get a positive forget-gate bias). Unknown
    names return None, matching the original implicit fall-through.
    """
    builders = {
        'rnn': (nn.RNN, False, init_gru),
        'gru': (nn.GRU, False, init_gru),
        'lstm': (nn.LSTM, False, init_lstm),
        'bigru': (nn.GRU, True, init_gru),
        'bilstm': (nn.LSTM, True, init_lstm),
    }
    if cell_type not in builders:
        return None
    cell_cls, bidirectional, initialiser = builders[cell_type]
    if bidirectional:
        cell = cell_cls(input_size, hidden_size, bidirectional=True,
                        dropout=dropout_rate, batch_first=False)
    else:
        cell = cell_cls(input_size, hidden_size,
                        dropout=dropout_rate, batch_first=False)
    initialiser(cell)
    return cell
class EncoderRNN(nn.Module):
    """Sequence encoder: embedding + (optionally bidirectional) RNN/GRU/LSTM.

    Handles unsorted, padded batches internally by sorting for packing and
    restoring the original order afterwards.
    """
    def __init__(self, input_size, embedding_size, hidden_size, cell_type, depth, dropout, device=default_device):
        super(EncoderRNN, self).__init__()
        self.input_size = input_size    # vocabulary size
        self.hidden_size = hidden_size
        self.embed_size = embedding_size
        self.n_layers = depth
        self.dropout = dropout
        self.bidirectional = False
        if 'bi' in cell_type:
            self.bidirectional = True
        # Hard-coded pad id; matches PAD_token = 3 used elsewhere in the
        # project — TODO confirm against utils.util.
        padding_idx = 3
        self.embedding = nn.Embedding(input_size, embedding_size, padding_idx=padding_idx)
        # self.embedding = nn.Embedding(400, embedding_size, padding_idx=padding_idx)
        self.rnn = whatCellType(embedding_size, hidden_size,
                                cell_type, dropout_rate=self.dropout)
        self.device = device
    def forward(self, input_seqs, input_lens, hidden=None):
        """
        forward procedure. **No need for inputs to be sorted**
        :param input_seqs: Variable of [T,B]
        :param hidden:
        :param input_lens: *numpy array* of len for each input sequence
        :return: (outputs [T,B,H], hidden) in the ORIGINAL batch order
        """
        input_lens = np.asarray(input_lens)
        input_seqs = input_seqs.transpose(0, 1)
        # batch_size = input_seqs.size(1)
        embedded = self.embedding(input_seqs)
        embedded = embedded.transpose(0, 1)  # [B,T,E]
        # Sort by decreasing length (required by pack_padded_sequence), and
        # remember the inverse permutation to restore the batch order later.
        sort_idx = np.argsort(-input_lens)
        # pp added
        unsort_idx = np.argsort(sort_idx)
        # unsort_idx = torch.LongTensor(np.argsort(sort_idx))
        input_lens = input_lens[sort_idx]
        # sort_idx = torch.LongTensor(sort_idx)
        embedded = embedded[sort_idx].transpose(0, 1)  # [T,B,E]
        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens)
        outputs, hidden = self.rnn(packed, hidden)
        outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
        if self.bidirectional:
            # Sum forward and backward directions instead of concatenating.
            outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
        # Undo the length sort on outputs and on the hidden state
        # (LSTM hidden is an (h, c) tuple; GRU/RNN is a single tensor).
        outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
        if isinstance(hidden, tuple):
            hidden = list(hidden)
            hidden[0] = hidden[0].transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
            hidden[1] = hidden[1].transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
            hidden = tuple(hidden)
        else:
            hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
        return outputs, hidden
class Attn(nn.Module):
    """Concat (Bahdanau-style) attention over encoder outputs.

    NOTE(review): the `method` argument is stored but never consulted — the
    scoring is always the concat form v^T tanh(W [h; enc]).
    """
    def __init__(self, method, hidden_size, device=default_device):
        super(Attn, self).__init__()
        self.method = method
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        # Scoring vector v, initialised ~ N(0, 1/sqrt(H)).
        self.v = nn.Parameter(torch.rand(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv)
        self.device = device
    def forward(self, hidden, encoder_outputs):
        '''
        :param hidden:
            previous hidden state of the decoder, in shape (layers*directions,B,H)
        :param encoder_outputs:
            encoder outputs from Encoder, in shape (T,B,H)
        :return
            attention energies in shape (B,1,T), softmax-normalised over T
        '''
        max_len = encoder_outputs.size(0)
        # Broadcast the decoder state across all T encoder positions.
        H = hidden.repeat(max_len, 1, 1).transpose(0, 1)
        encoder_outputs = encoder_outputs.transpose(0, 1)  # [T,B,H] -> [B,T,H]
        attn_energies = self.score(H, encoder_outputs)  # compute attention score
        return F.softmax(attn_energies, dim=1).unsqueeze(1)  # normalize with softmax
    def score(self, hidden, encoder_outputs):
        # v^T tanh(W [hidden; encoder_outputs]) computed via batched matmul.
        cat = torch.cat([hidden, encoder_outputs], 2)
        energy = torch.tanh(self.attn(cat))  # [B*T*2H]->[B*T*H]
        energy = energy.transpose(2, 1)  # [B*H*T]
        v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1)  # [B*1*H]
        energy = torch.bmm(v, energy)  # [B*1*T]
        return energy.squeeze(1)  # [B*T]
class SeqAttnDecoderRNN(nn.Module):
    """Bahdanau-style attention decoder: one vocabulary distribution per call.

    Each step attends over the encoder outputs with concat scoring, feeds
    [embedding ; context] through the recurrent cell and emits log-probs.
    """

    def __init__(self, embedding_size, hidden_size, output_size, cell_type, dropout_p=0.1, max_length=30,
                 device=default_device):
        super(SeqAttnDecoderRNN, self).__init__()
        # Define parameters
        self.hidden_size = hidden_size
        self.embed_size = embedding_size
        self.output_size = output_size
        self.n_layers = 1
        self.dropout_p = dropout_p
        self.device = device
        # Define layers
        self.embedding = nn.Embedding(output_size, embedding_size)
        self.dropout = nn.Dropout(dropout_p)
        if 'bi' in cell_type:  # decoding never needs a bidirectional cell
            cell_type = cell_type.strip('bi')
        self.rnn = whatCellType(embedding_size + hidden_size, hidden_size, cell_type, dropout_rate=self.dropout_p)
        self.out = nn.Linear(hidden_size, output_size)
        self.score = nn.Linear(self.hidden_size + self.hidden_size, self.hidden_size)
        self.attn_combine = nn.Linear(embedding_size + hidden_size, embedding_size)
        # attention parameters (concat scoring)
        self.method = 'concat'
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))
        # re-initialise v ~ N(0, 1/sqrt(H))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv)

    def forward(self, input, hidden, encoder_outputs, mask_tensor=None):
        """One decoding step with attention over `encoder_outputs`.

        :param input: current token ids, (B, 1)
        :param hidden: previous decoder state (tensor, or (h, c) tuple for LSTM)
        :param encoder_outputs: encoder states, (T, B, H)
        :param mask_tensor: unused; kept for a uniform decoder call signature
        :return: (log-probabilities over the vocabulary (B, V), new hidden state)
        """
        # attention queries use the h-part of an LSTM state
        query = hidden[0] if isinstance(hidden, tuple) else hidden
        enc = encoder_outputs.transpose(0, 1)   # (T,B,H) -> (B,T,H)
        embedded = self.embedding(input)        # (B,1,E)
        # concat scoring: v^T tanh(W [h ; enc]) against every encoder step
        steps = enc.size(1)
        query = query.transpose(0, 1).repeat(1, steps, 1)           # (1,B,H) -> (B,T,H)
        scores = torch.tanh(self.attn(torch.cat((query, enc), 2)))  # (B,T,2H) -> (B,T,H)
        scores = scores.transpose(2, 1)                             # (B,H,T)
        batch_v = self.v.repeat(enc.size(0), 1).unsqueeze(1)        # (B,1,H)
        scores = torch.bmm(batch_v, scores)                         # (B,1,T)
        attn_weights = F.softmax(scores, dim=2)                     # (B,1,T)
        # attention-weighted context vector
        context = torch.bmm(attn_weights, enc)                      # (B,1,H)
        # feed [embedding ; context] through the recurrent cell
        step_input = torch.cat((embedded, context), 2).transpose(0, 1)  # (1,B,E+H)
        output, hidden = self.rnn(step_input, hidden)
        output = F.log_softmax(self.out(output.squeeze(0)), dim=1)      # (1,B,H)->(B,V)
        return output, hidden
class MoESeqAttnDecoderRNN(nn.Module):
    """Mixture-of-Experts attention decoder.

    A "chair" decoder runs on the unmodified inputs while one expert per intent
    runs on intent-masked copies of the same inputs; `moe_layer` then mixes the
    expert distributions and hidden states with learned weights, interpolated
    back towards the chair by `args.gamma_expert`.
    """
    def __init__(self, embedding_size, hidden_size, output_size, cell_type, k=1, dropout_p=0.1, max_length=30,
                 args=None, device=default_device):
        super(MoESeqAttnDecoderRNN, self).__init__()
        # Define parameters
        self.hidden_size = hidden_size
        self.embed_size = embedding_size
        self.output_size = output_size
        self.n_layers = 1
        self.dropout_p = dropout_p
        self.k = k  # number of experts (one per intent)
        self.device = device
        self.args = args
        # pp added: future info size
        self.future_size = self.output_size
        # Define layers
        self.embedding = nn.Embedding(output_size, embedding_size)
        self.dropout = nn.Dropout(dropout_p)
        if 'bi' in cell_type:  # we dont need bidirectionality in decoding
            cell_type = cell_type.strip('bi')
        self.rnn = whatCellType(embedding_size + hidden_size, hidden_size, cell_type, dropout_rate=self.dropout_p)
        self.rnn_f = whatCellType(embedding_size + hidden_size, hidden_size, cell_type, dropout_rate=self.dropout_p)  # pp added for future context
        # self.rnn_fp = whatCellType(embedding_size + hidden_size + output_size, hidden_size, cell_type, dropout_rate=self.dropout_p) # pp added for future context
        self.moe_rnn = whatCellType(hidden_size * (self.k + 1), hidden_size * (self.k + 1), cell_type,
                                    dropout_rate=self.dropout_p)
        self.moe_hidden = nn.Linear(hidden_size * (self.k + 1), hidden_size)
        # self.moe_fc = nn.Linear((output_size+hidden_size)*(self.k+1), (self.k+1))
        # mixture gate: concatenated chair+expert distributions -> k+1 weights
        self.moe_fc = nn.Linear(output_size * (self.k + 1), (self.k + 1))
        # self.moe_fc_hid = nn.Linear(hidden_size*(self.k+1), (self.k+1))
        self.out = nn.Linear(hidden_size, output_size)
        self.score = nn.Linear(self.hidden_size + self.hidden_size, self.hidden_size)
        self.attn_combine = nn.Linear(embedding_size + hidden_size, embedding_size)
        # attention
        self.method = 'concat'
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        # self.attn_fp = nn.Linear(self.hidden_size * 2 + self.output_size, hidden_size)
        self.attn_f = nn.Linear(self.hidden_size * 2 + self.future_size, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv)
        # self.attn_dec_hid = Attn(self.method, hidden_size, self.device)
    def expert_forward(self, input, hidden, encoder_outputs):
        """One attention-decoder step for a single expert (or the chair).

        :param input: token ids, (B, 1)
        :param hidden: previous state; tensor or (h, c) tuple for an LSTM cell
        :param encoder_outputs: encoder states, (T, B, H)
        :return: (log-probs over vocab (B, V), new hidden state, transposed embedding)
        """
        if isinstance(hidden, tuple):
            h_t = hidden[0]
        else:
            h_t = hidden
        encoder_outputs = encoder_outputs.transpose(0, 1)
        embedded = self.embedding(input)  # .view(1, 1, -1)
        # embedded = F.dropout(embedded, self.dropout_p)
        # SCORE 3
        max_len = encoder_outputs.size(1)
        h_t_reshaped = h_t.unsqueeze(0) if len(h_t.size()) == 2 else h_t  # pp added: make sure h_t is [1,B,D]
        h_t = h_t_reshaped.transpose(0, 1)  # [1,B,D] -> [B,1,D]
        h_t = h_t.repeat(1, max_len, 1)  # [B,1,D] -> [B,T,D]
        energy = self.attn(torch.cat((h_t, encoder_outputs), 2))  # [B,T,2D] -> [B,T,D]
        energy = torch.tanh(energy)
        energy = energy.transpose(2, 1)  # [B,H,T]
        v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1)  # [B,1,H]
        energy = torch.bmm(v, energy)  # [B,1,T]
        attn_weights = F.softmax(energy, dim=2)  # [B,1,T]
        # getting context
        context = torch.bmm(attn_weights, encoder_outputs)  # [B,1,H]
        # Combine embedded input word and attended context, run through RNN
        rnn_input = torch.cat((embedded, context), 2)
        rnn_input = rnn_input.transpose(0, 1)
        # pp added
        new_hid = h_t_reshaped
        if isinstance(hidden, tuple):
            if len(hidden) == 2:
                new_hid = (h_t_reshaped, hidden[1])
            # elif len(hidden)==1:
            #     new_hid = (h_t_reshaped)
        output, hidden = self.rnn(rnn_input, new_hid)  # hidden to h_t_reshaped
        output = output.squeeze(0)  # (1,B,H)->(Batu,H)
        output = F.log_softmax(self.out(output), dim=1)  # self.out(output)[batch, out_vocab]
        return output, hidden, embedded.transpose(0, 1)  # , attn_weights
    def moe_layer(self, decoder_output_list, decoder_hidden_list, embedded_list, gamma_expert):
        """Mix chair + expert outputs and hidden states.

        Learns per-expert weights from the concatenated distributions, averages
        the stacked expert outputs with them, then interpolates towards the
        chair decoder with weight (1 - gamma_expert).
        """
        # output
        chair_dec_out = decoder_output_list[0]  # chair
        expert_dec_out_list = decoder_output_list[1:]  # experts
        chair_dec_hid = decoder_hidden_list[0]  # chair
        expert_dec_hid_list = decoder_hidden_list[1:]  # experts
        # 1. only use decoder_output compute weights
        cat_dec_out = torch.cat(decoder_output_list, -1)  # (B, (k+1)*V) # Experts
        # 2. use both decoder_output & decoder_hidden
        # cat_dec_list = [torch.cat((o, x.squeeze(0)), 1) for o, (x, y) in zip(decoder_output_list, decoder_hidden_list)]
        # cat_dec_out = torch.cat(cat_dec_list, -1)
        # MOE weights computation + normalization ------ Start
        moe_weights = self.moe_fc(cat_dec_out)  # [Batch, Intent]
        moe_weights = F.log_softmax(moe_weights, dim=1)
        # moe_weights = F.softmax(moe_weights, dim=1)
        # available_m = torch.zeros(moe_weights.size(), device=self.device)
        # i = 0
        # for k in enumerate(decoder_output_list):
        #     available_m[:,i] = mask_tensor[k]
        #     i += 1
        # moe_weights = available_m * moe_weights
        # NOTE(review): the weights come from log_softmax (all negative); dividing by
        # their sum renormalises them to sum to 1 — verify this weighting is intended
        # rather than plain softmax.
        norm_weights = torch.sum(moe_weights, dim=1)
        norm_weights = norm_weights.unsqueeze(1)
        moe_weights = torch.div(moe_weights, norm_weights)  # [B, I]
        moe_weights = moe_weights.permute(1, 0).unsqueeze(-1)  # [I, B, 1]; debug:[8,2,1]
        # MOE weights computation + normalization ------ End
        # output
        moe_weights_output = moe_weights.expand(-1, -1, decoder_output_list[0].size(-1))  # [I, B, V]; [8,2,400]
        decoder_output_tensor = torch.stack(decoder_output_list)  # [I, B, V]
        output = decoder_output_tensor.mul(moe_weights_output).sum(0)  # [B, V]; [2, 400]
        # weighting
        output = gamma_expert * output + (1 - gamma_expert) * chair_dec_out  # [2, 400]
        # hidden
        moe_weights_hidden = moe_weights.expand(-1, -1, decoder_hidden_list[0][0].size(-1))  # [I, B, H]; [8,2,5]
        if isinstance(decoder_hidden_list[0], tuple):  # for lstm
            stack_dec_hid = torch.stack([a.squeeze(0) for a, b in decoder_hidden_list]), torch.stack(
                [b.squeeze(0) for a, b in decoder_hidden_list])  # [I, B, H]
            hidden = stack_dec_hid[0].mul(moe_weights_hidden).sum(0).unsqueeze(0), stack_dec_hid[1].mul(
                moe_weights_hidden).sum(0).unsqueeze(0)  # [B, H]
            hidden = gamma_expert * hidden[0] + (1 - gamma_expert) * chair_dec_hid[0], gamma_expert * hidden[1] + (
                    1 - gamma_expert) * chair_dec_hid[1]
        else:  # for gru
            # NOTE(review): unlike the LSTM branch this indexes stack_dec_hid[0] (first
            # stacked state only) before weighting — compare with the tuple branch and
            # confirm this asymmetry is intended.
            stack_dec_hid = torch.stack([a.squeeze(0) for a in decoder_hidden_list])
            hidden = stack_dec_hid[0].mul(moe_weights_hidden).sum(0).unsqueeze(0)
            hidden = gamma_expert * hidden[0] + (1 - gamma_expert) * chair_dec_hid[0]
            hidden = hidden.unsqueeze(0)
            # print('hidden=', hidden.size())
        return output, hidden  # output[B, V] -- [2, 400] ; hidden[1, B, H] -- [1, 2, 5]
    def tokenMoE(self, decoder_input, decoder_hidden, encoder_outputs, mask_tensor):
        """Token-level MoE step: chair on raw inputs, one expert per intent on
        intent-masked copies, mixed by `moe_layer`."""
        # decoder_input[batch, 1]; decoder_hidden: tuple element is a tensor[1, batch, hidden], encoder_outputs[maxlen_target, batch, hidden]
        # n = len(self.intent_list) # how many intents do we have
        output_c, hidden_c, embedded_c = self.expert_forward(input=decoder_input, hidden=decoder_hidden,
                                                             encoder_outputs=encoder_outputs)
        decoder_output_list, decoder_hidden_list, embedded_list = [output_c], [hidden_c], [embedded_c]
        # decoder_output_list, decoder_hidden_list, embedded_list = [], [], []
        # count = 0
        for mask in mask_tensor:  # each intent has a mask [Batch, 1]
            # clone() so the in-place masked_fill_ never touches the shared originals
            decoder_input_k = decoder_input.clone().masked_fill_(mask,
                                                                 value=PAD_model)  # if assigned PAD_token it will count loss
            if isinstance(decoder_hidden, tuple):
                decoder_hidden_k = tuple(map(lambda x: x.clone().masked_fill_(mask, value=PAD_model), decoder_hidden))
            else:
                decoder_hidden_k = decoder_hidden.clone().masked_fill_(mask, value=PAD_model)
            encoder_outputs_k = encoder_outputs.clone().masked_fill_(mask, value=PAD_model)
            # test if there's someone not all PADDED
            # if torch.min(decoder_input_k)!=PAD_token or torch.min(decoder_hidden_k[0])!=PAD_token or torch.min(decoder_hidden_k[1])!=PAD_token or torch.min(encoder_outputs_k)!=PAD_token:
            #     print(decoder_input_k, '\n', decoder_hidden_k,'\n', encoder_outputs_k)
            #     count += 1
            output_k, hidden_k, embedded_k = self.expert_forward(input=decoder_input_k, hidden=decoder_hidden_k,
                                                                 encoder_outputs=encoder_outputs_k)
            decoder_output_list.append(output_k)
            decoder_hidden_list.append(hidden_k)
            embedded_list.append(embedded_k)
        # print('count=', count) # 10/31 will count for loss
        gamma_expert = self.args.gamma_expert
        decoder_output, decoder_hidden = self.moe_layer(decoder_output_list, decoder_hidden_list, embedded_list,
                                                        gamma_expert)
        # decoder_output = gamma_expert * decoder_output + (1 - gamma_expert) * output_c
        # decoder_hidden = gamma_expert * decoder_hidden + (1 - gamma_expert) * hidden_c
        # output = output.squeeze(0)  # (1,B,H)->(B,H)
        # output = F.log_softmax(self.out(output), dim=1)  # self.out(output)[batch, out_vocab]
        return decoder_output, decoder_hidden
    def pros_expert_forward(self, input, hidden, encoder_outputs, dec_hidd_with_future):
        """Like `expert_forward` but the attention scorer also sees future
        decoder information (`dec_hidd_with_future`) via `self.attn_f`."""
        if isinstance(hidden, tuple):
            h_t = hidden[0]
        else:
            h_t = hidden
        encoder_outputs = encoder_outputs.transpose(0, 1)
        embedded = self.embedding(input)  # .view(1, 1, -1)
        # embedded = F.dropout(embedded, self.dropout_p)
        # SCORE 3
        max_len = encoder_outputs.size(1)
        h_t0 = h_t.transpose(0, 1)  # [1,B,D] -> [B,1,D]
        h_t = h_t0.repeat(1, max_len, 1)  # [B,1,D] -> [B,T,D]
        # pp added: new attn
        energy = self.attn_f(torch.cat((h_t, encoder_outputs, dec_hidd_with_future[:max_len].transpose(0, 1)), 2))  # [B,T,2D] -> [B,T,D]
        energy = torch.tanh(energy)
        energy = energy.transpose(2, 1)  # [B,H,T]
        v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1)  # [B,1,H]
        energy = torch.bmm(v, energy)  # [B,1,T]
        attn_weights = F.softmax(energy, dim=2)  # [B,1,T]
        # getting context
        context = torch.bmm(attn_weights, encoder_outputs)  # [B,1,H]
        # Combine embedded input word and attended context, run through RNN
        rnn_input = torch.cat((embedded, context), 2)
        rnn_input = rnn_input.transpose(0, 1)
        output, hidden = self.rnn(rnn_input, hidden)  # if self.args.rp_share_rnn else self.rnn_f(rnn_input, hidden)
        output = output.squeeze(0)  # (1,B,H)->(B,V)
        output = F.log_softmax(self.out(output), dim=1)  # self.out(output)[batch, out_vocab]
        return output, hidden, embedded.transpose(0, 1)  # , attn_weights
    def prospectiveMoE(self, decoder_input, decoder_hidden, encoder_outputs, mask_tensor, dec_hidd_with_future):
        """MoE step that additionally conditions attention on future decoder
        information; mirrors `tokenMoE` but uses `pros_expert_forward`."""
        # count = 1
        # print('count=', count)
        output_c, hidden_c, embedded_c = self.pros_expert_forward(decoder_input, decoder_hidden, encoder_outputs,
                                                                  dec_hidd_with_future)
        decoder_output_list, decoder_hidden_list, embedded_list = [output_c], [hidden_c], [embedded_c]
        for mask in mask_tensor:  # each intent has a mask [Batch, 1]
            # count += 1
            # print('count=', count)
            decoder_input_k = decoder_input.clone().masked_fill_(mask,
                                                                 value=PAD_model)  # if assigned PAD_token it will count loss
            if isinstance(decoder_hidden, tuple):
                decoder_hidden_k = tuple(map(lambda x: x.clone().masked_fill_(mask, value=PAD_model), decoder_hidden))
            else:
                decoder_hidden_k = decoder_hidden.clone().masked_fill_(mask, value=PAD_model)
            encoder_outputs_k = encoder_outputs.clone().masked_fill_(mask, value=PAD_model)
            dec_hidd_with_future_k = dec_hidd_with_future.clone().masked_fill_(mask, value=PAD_model)
            output_k, hidden_k, embedded_k = self.pros_expert_forward(decoder_input_k, decoder_hidden_k,
                                                                      encoder_outputs_k, dec_hidd_with_future_k)
            decoder_output_list.append(output_k)
            decoder_hidden_list.append(hidden_k)
            embedded_list.append(embedded_k)
        gamma_expert = self.args.gamma_expert
        decoder_output, decoder_hidden = self.moe_layer(decoder_output_list, decoder_hidden_list, embedded_list,
                                                        gamma_expert)
        return decoder_output, decoder_hidden
    def forward(self, input, hidden, encoder_outputs, mask_tensor, dec_hidd_with_future=None):
        """Dispatch a single decoding step.

        With a mask tensor: token-level MoE (optionally prospective, if future
        information is supplied). Without one: plain single-expert decoding.
        """
        if mask_tensor is not None:
            if dec_hidd_with_future is None:  # do not use future prediction
                output, hidden = self.tokenMoE(input, hidden, encoder_outputs, mask_tensor)
            else:
                output, hidden = self.prospectiveMoE(input, hidden, encoder_outputs, mask_tensor, dec_hidd_with_future)
        else:
            # NOTE(review): the bare `pass` below is dead code left from an edit
            pass
            output, hidden, _ = self.expert_forward(input, hidden, encoder_outputs)
        return output, hidden  # , mask_tensor # , attn_weights
class DecoderRNN(nn.Module):
    """Plain (attention-free) decoder: embed -> dropout -> RNN -> log-softmax."""

    def __init__(self, embedding_size, hidden_size, output_size, cell_type, dropout=0.1, device=default_device):
        super(DecoderRNN, self).__init__()
        self.device = device
        self.hidden_size = hidden_size
        self.cell_type = cell_type
        # index 3 is the padding token in this project; its embedding stays zero
        padding_idx = 3
        self.embedding = nn.Embedding(num_embeddings=output_size,
                                      embedding_dim=embedding_size,
                                      padding_idx=padding_idx
                                      )
        if 'bi' in cell_type:  # decoding never needs a bidirectional cell
            cell_type = cell_type.strip('bi')
        self.rnn = whatCellType(embedding_size, hidden_size, cell_type, dropout_rate=dropout)
        self.dropout_rate = dropout
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden, not_used, mask_tensor=None):
        """One decoding step; `not_used`/`mask_tensor` keep the shared decoder
        call signature used by the attention variants.

        :param input: token ids, (B, 1)
        :param hidden: previous recurrent state
        :return: (log-probabilities over vocabulary (B, V), new hidden state)
        """
        step = self.embedding(input).transpose(0, 1)  # (B,1) -> (1,B,E)
        step = F.dropout(step, self.dropout_rate)
        step, hidden = self.rnn(step, hidden)
        logits = self.out(step.squeeze(0))            # (1,B,H) -> (B,V)
        return F.log_softmax(logits, dim=1), hidden
class Model(nn.Module):
    """End-to-end response-generation model: EncoderRNN -> policy -> decoder.

    Holds the vocabulary/intent mappings, builds sub-modules from `args`, and
    provides training (`model_train`), inference (`predict`), greedy and
    beam-search decoding, plus checkpoint save/load helpers.
    """
    def __init__(self, args, input_lang_index2word, output_lang_index2word, input_lang_word2index,
                 output_lang_word2index, intent2index=None, index2intent=None, device=default_device):
        super(Model, self).__init__()
        self.args = args
        self.max_len = args.max_len
        self.output_lang_index2word = output_lang_index2word
        self.input_lang_index2word = input_lang_index2word
        self.output_lang_word2index = output_lang_word2index
        self.input_lang_word2index = input_lang_word2index
        # pp added
        self.intent2index, self.index2intent = intent2index, index2intent
        # number of experts = number of intents (falls back to 1 without an intent inventory)
        self.k = len(self.intent2index) if self.intent2index else 1
        self.hid_size_enc = args.hid_size_enc
        self.hid_size_dec = args.hid_size_dec
        self.hid_size_pol = args.hid_size_pol
        self.emb_size = args.emb_size
        self.db_size = args.db_size
        self.bs_size = args.bs_size
        self.cell_type = args.cell_type
        if 'bi' in self.cell_type:
            self.num_directions = 2
        else:
            self.num_directions = 1
        self.depth = args.depth
        self.use_attn = args.use_attn
        self.attn_type = args.attention_type
        self.dropout = args.dropout
        self.device = device
        self.model_dir = args.model_dir
        self.pre_model_dir = args.pre_model_dir
        self.model_name = args.model_name
        self.teacher_forcing_ratio = args.teacher_ratio
        self.vocab_size = args.vocab_size
        self.epsln = 10E-5
        torch.manual_seed(args.seed)
        self.build_model()
        self.getCount()
        # beam search is on iff args.beam_width compares as a positive number;
        # any exception (missing/None attribute) silently disables it
        try:
            assert self.args.beam_width > 0
            self.beam_search = True
        except:
            self.beam_search = False
        self.global_step = 0
    def cuda_(self, var):
        """Move `var` to GPU when args.cuda is set; otherwise return it unchanged."""
        return var.cuda() if self.args.cuda else var
    def build_model(self):
        """Instantiate encoder, policy, decoder (and training criterion/optimizer) from `args`."""
        self.encoder = EncoderRNN(len(self.input_lang_index2word), self.emb_size, self.hid_size_enc,
                                  self.cell_type, self.depth, self.dropout)
        self.policy = policy.DefaultPolicy(self.hid_size_pol, self.hid_size_enc, self.db_size, self.bs_size)
        # pp added: intent_type branch
        if self.args.intent_type and self.args.use_moe_model:
            self.decoder = MoESeqAttnDecoderRNN(self.emb_size, self.hid_size_dec, len(self.output_lang_index2word),
                                                self.cell_type, self.k, self.dropout, self.max_len, self.args)
        elif self.use_attn:
            # NOTE(review): when use_attn is set but attention_type != 'bahdanau',
            # no decoder is assigned at all — looks like a missing else branch; confirm.
            if self.attn_type == 'bahdanau':
                self.decoder = SeqAttnDecoderRNN(self.emb_size, self.hid_size_dec, len(self.output_lang_index2word),
                                                 self.cell_type, self.dropout, self.max_len)
        else:
            self.decoder = DecoderRNN(self.emb_size, self.hid_size_dec, len(self.output_lang_index2word),
                                      self.cell_type, self.dropout)
        if self.args.mode == 'train':
            self.gen_criterion = nn.NLLLoss(ignore_index=PAD_token,
                                            reduction='mean')  # logsoftmax is done in decoder part
            self.setOptimizers()
        # pp added
        # learned mixing of chair + per-expert losses (see model_train)
        self.moe_loss_layer = nn.Linear(1 * (self.k + 1), 1)
    def model_train(self, input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor,
                    mask_tensor=None, dial_name=None):
        """One optimisation step: forward, NLL loss (optionally MoE-weighted), backward, clip, update.

        :return: (loss value, 0, gradient norm)
        """
        proba, _, decoded_sent = self.forward(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor,
                                              bs_tensor, mask_tensor)  # pp added: acts_list
        proba = proba.view(-1, self.vocab_size)
        self.gen_loss = self.gen_criterion(proba, target_tensor.view(-1))
        if self.args.use_moe_loss and mask_tensor is not None:  # data separate by intents:
            gen_loss_list = []
            for mask in mask_tensor:  # each intent has a mask [Batch, 1]
                # mask out other intents' targets with PAD so NLLLoss ignores them
                target_tensor_i = target_tensor.clone()
                target_tensor_i = target_tensor_i.masked_fill_(mask, value=PAD_token)
                loss_i = self.gen_criterion(proba, target_tensor_i.view(-1))
                gen_loss_list.append(loss_i)
            if self.args.learn_loss_weight:
                gen_loss_list.append(self.gen_loss)
                gen_loss_tensor = torch.as_tensor(torch.stack(gen_loss_list), device=self.device)
                self.gen_loss = self.moe_loss_layer(gen_loss_tensor)
            else:  # hyper weights
                # lambda_expert = 0.5
                lambda_expert = self.args.lambda_expert
                # NOTE(review): torch.tensor(gen_loss_list) copies the loss values and
                # detaches them from the autograd graph, so the expert losses may not
                # backpropagate; torch.stack(gen_loss_list) would keep gradients. Confirm.
                self.gen_loss = (1 - lambda_expert) * self.gen_loss + \
                                lambda_expert * torch.mean(torch.tensor(gen_loss_list))
        self.loss = self.gen_loss
        self.loss.backward()
        grad = self.clipGradients()
        self.optimizer.step()
        self.optimizer.zero_grad()
        # self.printGrad()
        return self.loss.item(), 0, grad
    def setOptimizers(self):
        """Create the optimizer selected by args.optim ('sgd' / 'adadelta' / 'adam')."""
        self.optimizer_policy = None
        if self.args.optim == 'sgd':
            self.optimizer = optim.SGD(lr=self.args.lr_rate,
                                       params=filter(lambda x: x.requires_grad, self.parameters()),
                                       weight_decay=self.args.l2_norm)
        elif self.args.optim == 'adadelta':
            self.optimizer = optim.Adadelta(lr=self.args.lr_rate,
                                            params=filter(lambda x: x.requires_grad, self.parameters()),
                                            weight_decay=self.args.l2_norm)
        elif self.args.optim == 'adam':
            self.optimizer = optim.Adam(lr=self.args.lr_rate,
                                        params=filter(lambda x: x.requires_grad, self.parameters()),
                                        weight_decay=self.args.l2_norm)
    def retro_forward(self, input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor,
                      mask_tensor=None, if_detach=False):  # pp added: acts_list
        """Given the user sentence, user belief state and database pointer,
        encode the sentence, decide what policy vector construct and
        feed it as the first hiddent state to the decoder.
        input_tensor: tensor(batch, maxlen_input)
        target_tensor: tensor(batch, maxlen_target)

        Returns (proba, hidd, decoded_sent): per-step vocab distributions,
        per-step decoder hidden states, and None (generation is disabled here).
        When target_tensor is None, decodes with its own predictions for
        args.max_len steps; otherwise teacher-forces.
        """
        target_length = target_tensor.size(1) if target_tensor is not None else self.args.max_len
        # for fixed encoding this is zero so it does not contribute
        batch_size, seq_len = input_tensor.size()
        # ENCODER
        encoder_outputs, encoder_hidden = self.encoder(input_tensor,
                                                       input_lengths)  # encoder_outputs: tensor(maxlen_input, batch, 150); encoder_hidden: tuple, each element is a tensor: [1, batch, 150]
        # pp added: extract forward output of encoder if use SentMoE and 2 directions
        if self.num_directions == 2 and self.args.SentMoE:
            if isinstance(encoder_hidden, tuple):
                # pp added: forward or backward
                encoder_hidden = encoder_hidden[0][0].unsqueeze(0), encoder_hidden[1][0].unsqueeze(0)
                # encoder_hidden = encoder_hidden[0][1].unsqueeze(0), encoder_hidden[1][1].unsqueeze(0)
            else:
                encoder_hidden = encoder_hidden[0].unsqueeze(0)
        # POLICY
        decoder_hidden = self.policy(encoder_hidden, db_tensor, bs_tensor,
                                     self.num_directions)  # decoder_hidden: tuple, each element is a tensor: [1, batch, 150]
        # print('decoder_hidden', decoder_hidden.size())
        # GENERATOR
        # Teacher forcing: Feed the target as the next input
        # _, target_len = target_tensor.size()
        decoder_input = torch.as_tensor([[SOS_token] for _ in range(batch_size)], dtype=torch.long,
                                        device=self.device)  # tensor[batch, 1]
        # decoder_input = torch.LongTensor([[SOS_token] for _ in range(batch_size)], device=self.device)
        # pp added: calculate new batch size
        proba = torch.zeros(batch_size, target_length, self.vocab_size,
                            device=self.device)  # tensor[Batch, maxlen_target, V]
        hidd = torch.zeros(batch_size, target_length, self.hid_size_dec, device=self.device)
        # generate target sequence step by step !!!
        for t in range(target_length):
            # pp added: moe chair
            decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs,
                                                          mask_tensor)  # decoder_output; decoder_hidden
            # use_teacher_forcing = True if random.random() < self.args.teacher_ratio else False # pp added: self.args.SentMoE is False
            # use_teacher_forcing = True if random.random() < self.args.teacher_ratio and self.args.SentMoE is False else False # pp added: self.args.SentMoE is False
            if target_tensor is not None:  # if use SentMoE, we should stop teacher forcing for experts
                decoder_input = target_tensor[:, t].view(-1, 1)  # [B,1] Teacher forcing
            else:
                # Without teacher forcing: use its own predictions as the next input
                topv, topi = decoder_output.topk(1)
                # decoder_input = topi.squeeze().detach()  # detach from history as input
                decoder_input = topi.detach()  # detach from history as input
            proba[:, t,
            :] = decoder_output  # decoder_output[Batch, TargetVocab] # proba[Batch, Target_MaxLen, Target_Vocab]
            # pp added
            # also record the per-step hidden state (h-part for LSTM)
            if isinstance(decoder_hidden, tuple):
                hidd0 = decoder_hidden[0]
            else:
                hidd0 = decoder_hidden
            hidd[:, t, :] = hidd0
        decoded_sent = None
        # pp added: GENERATION
        # decoded_sent = self.decode(target_tensor, decoder_hidden, encoder_outputs, mask_tensor)
        if if_detach:
            proba, hidd = proba.detach(), hidd.detach()
        return proba, hidd, decoded_sent
    def forward(self, input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor,
                mask_tensor=None):  # pp added: acts_list
        """Full forward pass.

        With args.SentMoE: first a retrospective pass (no teacher forcing) to
        collect future information, then a prospective teacher-forced pass
        conditioned on it. Otherwise a single retro_forward pass.
        :return: (proba, None, decoded_sent)
        """
        # if we consider sentence info
        if self.args.SentMoE:
            proba_r, hidd, decoded_sent = self.retro_forward(input_tensor, input_lengths, None, None, db_tensor,
                                                             bs_tensor, mask_tensor, if_detach=self.args.if_detach)
            target_length = target_tensor.size(1)
            # for fixed encoding this is zero so it does not contribute
            batch_size, seq_len = input_tensor.size()
            # ENCODER
            encoder_outputs, encoder_hidden = self.encoder(input_tensor,
                                                           input_lengths)  # encoder_outputs: tensor(maxlen_input, batch, 150); encoder_hidden: tuple, each element is a tensor: [1, batch, 150]
            # pp added: extract backward output of encoder
            if self.num_directions == 2:
                if isinstance(encoder_hidden, tuple):
                    # pp added: forward or backward
                    encoder_hidden = encoder_hidden[0][1].unsqueeze(0), encoder_hidden[1][1].unsqueeze(0)
                    # encoder_hidden = encoder_hidden[0][0].unsqueeze(0), encoder_hidden[1][0].unsqueeze(0)
                else:
                    encoder_hidden = encoder_hidden[1].unsqueeze(0)
            # POLICY
            decoder_hidden = self.policy(encoder_hidden, db_tensor, bs_tensor,
                                         self.num_directions)  # decoder_hidden: tuple, each element is a tensor: [1, batch, 150]
            # print('decoder_hidden', decoder_hidden.size())
            # GENERATOR
            # Teacher forcing: Feed the target as the next input
            _, target_len = target_tensor.size()
            decoder_input = torch.as_tensor([[SOS_token] for _ in range(batch_size)], dtype=torch.long,
                                            device=self.device)  # tensor[batch, 1]
            proba_p = torch.zeros(batch_size, target_length, self.vocab_size,
                                  device=self.device)  # tensor[Batch, maxlen_target, V]
            # pp added
            # NOTE(review): future_info is computed but the decoder call below passes
            # proba_r directly, ignoring args.future_info — confirm which is intended.
            future_info = proba_r if self.args.future_info == 'proba' else hidd
            # generate target sequence step by step !!!
            for t in range(target_len):
                # pp added: moe chair
                # decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, mask_tensor, dec_hidd_with_future=future_info.transpose(0, 1)) # decoder_output; decoder_hidden
                decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, mask_tensor, dec_hidd_with_future=proba_r.transpose(0, 1))  # decoder_output; decoder_hidden
                decoder_input = target_tensor[:, t].view(-1, 1)  # [B,1] Teacher forcing
                # use_teacher_forcing = True if random.random() < self.args.teacher_ratio else False
                # if use_teacher_forcing:
                #     decoder_input = target_tensor[:, t].view(-1, 1)  # [B,1] Teacher forcing
                # else:
                #     # Without teacher forcing: use its own predictions as the next input
                #     topv, topi = decoder_output.topk(1)
                #     # decoder_input = topi.squeeze().detach()  # detach from history as input
                #     decoder_input = topi.detach()  # detach from history as input
                proba_p[:, t, :] = decoder_output  # decoder_output[Batch, TargetVocab]
            return proba_p, None, decoded_sent
        else:
            # print('pretrain')
            proba_r, hidd, decoded_sent = self.retro_forward(input_tensor, input_lengths, target_tensor, target_lengths,
                                                             db_tensor, bs_tensor, mask_tensor,
                                                             if_detach=self.args.if_detach)
            return proba_r, None, decoded_sent
    def predict(self, input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor,
                mask_tensor=None):
        """Inference without gradients: encode, run policy, then decode to sentences."""
        # pp added
        with torch.no_grad():
            # ENCODER
            encoder_outputs, encoder_hidden = self.encoder(input_tensor, input_lengths)
            # POLICY
            decoder_hidden = self.policy(encoder_hidden, db_tensor, bs_tensor, self.num_directions)
            # GENERATION
            decoded_words = self.decode(target_tensor, decoder_hidden, encoder_outputs, mask_tensor)
        return decoded_words, 0
    def decode(self, target_tensor, decoder_hidden, encoder_outputs, mask_tensor=None):
        """Decode a batch into sentences: beam search per example when
        self.beam_search is set, otherwise batched greedy decoding."""
        decoder_hiddens = decoder_hidden
        if self.beam_search:  # wenqiang style - sequicity
            decoded_sentences = []
            for idx in range(target_tensor.size(0)):  # idx is the batch index
                # slice this example's hidden state / encoder outputs out of the batch
                if isinstance(decoder_hiddens, tuple):  # LSTM case
                    decoder_hidden = (
                        decoder_hiddens[0][:, idx, :].unsqueeze(0), decoder_hiddens[1][:, idx, :].unsqueeze(0))
                else:
                    decoder_hidden = decoder_hiddens[:, idx, :].unsqueeze(0)
                encoder_output = encoder_outputs[:, idx, :].unsqueeze(1)
                # Beam start
                self.topk = 1
                endnodes = []  # stored end nodes
                number_required = min((self.topk + 1), self.topk - len(endnodes))
                decoder_input = torch.as_tensor([[SOS_token]], dtype=torch.long, device=self.device)
                # decoder_input = torch.LongTensor([[SOS_token]], device=self.device)
                # starting node hidden vector, prevNode, wordid, logp, leng,
                node = BeamSearchNode(decoder_hidden, None, decoder_input, 0, 1)
                nodes = PriorityQueue()  # start the queue
                # `next(unique)` breaks score ties so nodes are never compared directly
                nodes.put((-node.eval(None, None, None, None),
                           next(unique),
                           node))
                # start beam search
                qsize = 1
                while True:
                    # give up when decoding takes too long
                    if qsize > 2000: break
                    # fetch the best node
                    score, _, n = nodes.get()  # pp added: _
                    decoder_input = n.wordid
                    decoder_hidden = n.h
                    if n.wordid.item() == EOS_token and n.prevNode != None:  # its not empty
                        endnodes.append((score, n))
                        # if reach maximum # of sentences required
                        if len(endnodes) >= number_required:
                            break
                        else:
                            continue
                    # decode for one step using decoder
                    # import pdb
                    # pdb.set_trace()
                    mask_tensor_idx = mask_tensor[:, idx, :].unsqueeze(1) if mask_tensor is not None else None
                    decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_output,
                                                                  mask_tensor_idx)
                    log_prob, indexes = torch.topk(decoder_output, self.args.beam_width)
                    nextnodes = []
                    for new_k in range(self.args.beam_width):
                        decoded_t = indexes[0][new_k].view(1, -1)
                        log_p = log_prob[0][new_k].item()
                        node = BeamSearchNode(decoder_hidden, n, decoded_t, n.logp + log_p, n.leng + 1)
                        score = -node.eval(None, None, None, None)
                        nextnodes.append((score, node))
                    # put them into queue
                    for i in range(len(nextnodes)):
                        score, nn = nextnodes[i]
                        nodes.put((score,
                                   next(unique),
                                   nn))
                        # increase qsize
                    qsize += len(nextnodes)
                # choose nbest paths, back trace them
                if len(endnodes) == 0:
                    # NOTE(review): nodes.get() is called twice per tuple, popping TWO
                    # different queue entries — the score and node come from different
                    # beam nodes. Likely should pop once and reuse the entry. Confirm.
                    endnodes = [(nodes.get()[0], nodes.get()[-1]) for n in range(self.topk)]
                utterances = []
                for score, n in sorted(endnodes, key=operator.itemgetter(0)):
                    utterance = []
                    utterance.append(n.wordid)
                    # back trace
                    while n.prevNode != None:
                        n = n.prevNode
                        utterance.append(n.wordid)
                    utterance = utterance[::-1]
                    utterances.append(utterance)
                decoded_words = utterances[0]
                decoded_sentence = [self.output_index2word(str(ind.item())) for ind in decoded_words]
                # print(decoded_sentence)
                decoded_sentences.append(' '.join(decoded_sentence[1:-1]))
            return decoded_sentences
        else:  # GREEDY DECODING
            # decoded_sentences = []
            decoded_sentences = self.greedy_decode(decoder_hidden, encoder_outputs, target_tensor, mask_tensor)
            return decoded_sentences
    def greedy_decode(self, decoder_hidden, encoder_outputs, target_tensor, mask_tensor=None):
        """Batched greedy decoding for up to self.max_len steps; each sentence
        is cut at the first EOS token."""
        decoded_sentences = []
        batch_size, seq_len = target_tensor.size()
        # pp added
        decoder_input = torch.as_tensor([[SOS_token] for _ in range(batch_size)], dtype=torch.long, device=self.device)
        # decoder_input = torch.LongTensor([[SOS_token] for _ in range(batch_size)], device=self.device)
        decoded_words = torch.zeros((batch_size, self.max_len), device=self.device)
        for t in range(self.max_len):
            decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, mask_tensor)
            topv, topi = decoder_output.data.topk(1)  # get candidates
            topi = topi.view(-1)
            decoded_words[:, t] = topi
            decoder_input = topi.detach().view(-1, 1)
        for sentence in decoded_words:
            sent = []
            for ind in sentence:
                if self.output_index2word(str(int(ind.item()))) == self.output_index2word(str(EOS_token)):
                    break
                sent.append(self.output_index2word(str(int(ind.item()))))
            decoded_sentences.append(' '.join(sent))
        return decoded_sentences
    def clipGradients(self):
        """Clip gradient norm to args.clip; returns the pre-clip total norm."""
        grad = torch.nn.utils.clip_grad_norm_(self.parameters(), self.args.clip)
        return grad
    def saveModel(self, iter):
        """Save encoder/policy/decoder state dicts and the args config to model_dir."""
        print('Saving parameters..')
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        torch.save(self.encoder.state_dict(), self.model_dir + '/' + self.model_name + '-' + str(iter) + '.enc')
        torch.save(self.policy.state_dict(), self.model_dir + '/' + self.model_name + '-' + str(iter) + '.pol')
        torch.save(self.decoder.state_dict(), self.model_dir + '/' + self.model_name + '-' + str(iter) + '.dec')
        with open(self.model_dir + '/' + self.model_name + '.config', 'w') as f:
            json.dump(vars(self.args), f, ensure_ascii=False, indent=4)
    def loadModel(self, iter=0):
        """Load encoder/policy/decoder state dicts for the given iteration from pre_model_dir."""
        print('Loading parameters of iter %s ' % iter)
        self.encoder.load_state_dict(torch.load(self.pre_model_dir + '/' + self.model_name + '-' + str(iter) + '.enc'))
        self.policy.load_state_dict(torch.load(self.pre_model_dir + '/' + self.model_name + '-' + str(iter) + '.pol'))
        self.decoder.load_state_dict(torch.load(self.pre_model_dir + '/' + self.model_name + '-' + str(iter) + '.dec'))
    def input_index2word(self, index):
        """Map an input-vocabulary index (string key) to its word; raises on unknown index."""
        if index in self.input_lang_index2word:
            return self.input_lang_index2word[index]
        else:
            raise UserWarning('We are using UNK')
    def output_index2word(self, index):
        """Map an output-vocabulary index (string key) to its word; raises on unknown index."""
        if index in self.output_lang_index2word:
            return self.output_lang_index2word[index]
        else:
            raise UserWarning('We are using UNK')
    def input_word2index(self, index):
        """Map an input word to its index; unknown words fall back to 2 (the UNK index)."""
        if index in self.input_lang_word2index:
            return self.input_lang_word2index[index]
        else:
            return 2
    def output_word2index(self, index):
        """Map an output word to its index; unknown words fall back to 2 (the UNK index)."""
        if index in self.output_lang_word2index:
            return self.output_lang_word2index[index]
        else:
            return 2
    # pp added:
    def input_intent2index(self, intent):
        """Map an intent name to its index; unknown intents fall back to 0."""
        if intent in self.intent2index:
            return self.intent2index[intent]
        else:
            return 0
    def input_index2intent(self, index):
        """Map an intent index back to its name; raises on unknown index."""
        if index in self.index2intent:
            return self.index2intent[index]
        else:
            raise UserWarning('We are using UNK intent')
    def getCount(self):
        """Print the number of trainable parameters in the model."""
        learnable_parameters = filter(lambda p: p.requires_grad, self.parameters())
        param_cnt = sum([reduce((lambda x, y: x * y), param.shape) for param in learnable_parameters])
        print('Model has', param_cnt, ' parameters.')
    def printGrad(self):
        """Debug helper: print the gradient and shape of every trainable parameter."""
        learnable_parameters = filter(lambda p: p.requires_grad, self.parameters())
        for idx, param in enumerate(learnable_parameters):
            print(param.grad, param.shape)
|
{"/test.py": ["/models/evaluator.py", "/models/model.py", "/utils/util.py"], "/train.py": ["/models/model.py", "/utils/util.py"], "/utils/multiwoz_dataloader.py": ["/utils/util.py"], "/models/model.py": ["/utils/util.py"]}
|
19,586
|
Jiahuan-Pei/multiwoz-mdrg
|
refs/heads/master
|
/utils/util.py
|
'''
Utility functions
'''
import argparse
import pickle as pkl
import json
import sys
import math
import time
import numpy as np
import torch
import random
import os
import shutil
# DEFINE special tokens
SOS_token = 0  # start-of-sentence
EOS_token = 1  # end-of-sentence
UNK_token = 2  # out-of-vocabulary fallback
PAD_token = 3  # right-padding for batched sequences
# detected_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# detected_device prefers the GPU when present; default_device stays on CPU
# (used for data preparation so host-side tensors do not occupy GPU memory).
detected_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
default_device = torch.device("cpu")
def padSequence(tensor, device=default_device):
    """Right-pad a list of 1-D index sequences to a common length with PAD_token.

    Returns the padded LongTensor of shape (batch, longest) and the list of
    original sequence lengths.
    """
    lengths = [len(seq) for seq in tensor]
    longest = max(lengths)
    # Start from an all-PAD matrix, then overwrite each row's prefix.
    padded_tensor = torch.ones((len(tensor), longest), dtype=torch.int64, device=device) * PAD_token
    for row, seq_len in enumerate(lengths):
        padded_tensor[row, 0:seq_len] = tensor[row][:seq_len]
    # as_tensor is effectively a no-op here (dtype/device already match) but kept for parity.
    padded_tensor = torch.as_tensor(padded_tensor, dtype=torch.long, device=device)
    return padded_tensor, lengths
def loadDialogue(model, val_file, input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor=None, intent2index=None, device=default_device):
    """Append one dialogue's turns to the running batch lists.

    Each user/system utterance is converted to an EOS-terminated index tensor;
    belief-state and DB-pointer vectors are collected as float lists. When
    ``intent2index`` is given, a per-turn intent mask is built where entry i is
    0 iff intent i is active for that turn.
    NOTE(review): callers must pass ``mask_tensor`` whenever ``intent2index``
    is given, otherwise the append below fails on None.
    """
    # Iterate over dialogue
    for idx, (usr, sys, bs, db, acts) in enumerate(
            zip(val_file['usr'], val_file['sys'], val_file['bs'], val_file['db'], val_file['acts'])):
        tensor = [model.input_word2index(word) for word in usr.strip(' ').split(' ')] + [EOS_token]  # models.input_word2index(word)
        input_tensor.append(torch.as_tensor(tensor, dtype=torch.long, device=device))  # .view(-1, 1))
        # input_tensor.append(torch.LongTensor(tensor)) # .view(-1, 1))
        tensor = [model.output_word2index(word) for word in sys.strip(' ').split(' ')] + [EOS_token]
        target_tensor.append(torch.as_tensor(tensor, dtype=torch.long, device=device))  # .view(-1, 1)
        # target_tensor.append(torch.LongTensor(tensor)) # .view(-1, 1)
        bs_tensor.append([float(belief) for belief in bs])
        db_tensor.append([float(pointer) for pointer in db])
        # pp added: mask_i=0 if i_th it contains i_th intent
        if intent2index:
            tensor = torch.ones(len(intent2index), 1)
            # change acts & find index
            intent_type = model.args.intent_type
            # NOTE(review): `inds` is unbound if intent_type is none of the three
            # values below — presumably the config guarantees one of them; confirm.
            if intent_type == 'domain':
                inds = [model.input_intent2index(act.split('-')[0]) for act in acts]
            elif intent_type == 'sysact':
                inds = [model.input_intent2index(act.split('-')[1]) for act in acts]
            elif intent_type == 'domain_act':
                inds = [model.input_intent2index(act) for act in acts]  # the index of the chosen intents
            # tensor[:] is a view sharing storage, so this zeroes the rows of
            # `tensor` at the active-intent indices (same effect as tensor[inds] = 0).
            tensor[:][inds] = 0
            mask_tensor.append(torch.as_tensor(tensor, dtype=torch.uint8, device=device))
    return input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor  # mask_tensor is a list of [Intent, 1]
# json loads strings as unicode; we currently still work with Python 2 strings, and need conversion
def unicode_to_utf8(d):
    """Return a copy of dict `d` with every key UTF-8 encoded to bytes."""
    return {key.encode("UTF-8"): value for (key, value) in d.items()}
def load_dict(filename):
    """Load a dictionary from `filename`: try JSON (keys re-encoded to bytes
    via unicode_to_utf8), falling back to pickle on any failure.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to Exception so real interrupts propagate. File handles are
    closed via context managers as before.
    """
    try:
        with open(filename, 'rb') as f:
            return unicode_to_utf8(json.load(f))
    except Exception:
        with open(filename, 'rb') as f:
            return pkl.load(f)
def load_config(basename):
    """Load run configuration `basename`.json, falling back to `basename`.pkl.

    Exits the process with status 1 (after writing to stderr) when neither
    file is usable.
    Fix: bare ``except:`` clauses narrowed to Exception so that
    KeyboardInterrupt/SystemExit are not swallowed.
    """
    try:
        with open('%s.json' % basename, 'rb') as f:
            return json.load(f)
    except Exception:
        try:
            with open('%s.pkl' % basename, 'rb') as f:
                return pkl.load(f)
        except Exception:
            sys.stderr.write('Error: config file {0}.json is missing\n'.format(basename))
            sys.exit(1)
def str2bool(v):
    """argparse-friendly boolean parser: maps yes/no-style strings to bool.

    Raises argparse.ArgumentTypeError for anything unrecognised.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def asMinutes(s):
    """Format a duration of `s` seconds as 'Xm Ys'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
def timeSince(since, percent):
    """Return the elapsed time since `since` (a time.time() value) as 'Xm Ys '.

    NOTE: `percent` is unused; it is kept for call-site compatibility
    (estimate-remaining helpers conventionally take a progress fraction).
    """
    elapsed = time.time() - since
    return '%s ' % (asMinutes(elapsed),)
# pp added -- Start
def get_env_info():
    """Print Python/PyTorch/CUDA environment details; GPU details only when CUDA is available."""
    import sys
    print('Python version={}'.format(sys.version))
    print('PyTorch version={}'.format(torch.__version__))
    flag = torch.cuda.is_available()
    print('torch.cuda.is_available()={}'.format(flag))
    if flag:
        from torch.backends import cudnn
        cudnn.enabled = True
        # Trade cudnn autotuning speed for reproducibility.
        cudnn.benchmark = False  # False efficiency decrease; but fix random;
        cudnn.deterministic = True  # if True, the result would keep same; if False, efficiency would be high but results would change slightly
        # os.environ["CUDA_VISIBLE_DEVICES"] = '1' # choose which device to use
        # torch.set_default_tensor_type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor) # be careful if use
        print('torch.cuda.current_device()={}'.format(torch.cuda.current_device()))
        print('torch.cuda.device_count()={}'.format(torch.cuda.device_count()))
        print('torch.cuda.get_device_name(0)={}'.format(torch.cuda.get_device_name(0)))
        print('torch.backends.cudnn.version()={}'.format(cudnn.version()))
        print('torch.version.cuda={}'.format(torch.version.cuda))
        print('Memory Usage:')
        print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
        # NOTE(review): torch.cuda.memory_cached was renamed memory_reserved in
        # newer PyTorch releases — confirm against the pinned torch version.
        print('Cached:   ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB')
def get_ms():
    """Current wall-clock time in milliseconds (float)."""
    return 1000 * time.time()
def init_seed(seed=None):
    """Seed numpy, torch and the stdlib random module together.

    When `seed` is None, a seed is derived from the current wall-clock second.
    """
    if seed is None:
        seed = int(get_ms() // 1000)
    # Same seeding order as before: numpy, then torch, then random.
    for seeder in (np.random.seed, torch.manual_seed, random.seed):
        seeder(seed)
def loadDictionaries(mdir):
    """Load the four vocabulary JSON files from directory `mdir`.

    Returns (input_index2word, output_index2word, input_word2index,
    output_word2index) — note the return order differs from the load order.
    """
    def _read(stem):
        # Each vocab file is '<mdir>/<stem>.json'.
        with open('{}/{}.json'.format(mdir, stem)) as f:
            return json.load(f)

    input_lang_index2word = _read('input_lang.index2word')
    input_lang_word2index = _read('input_lang.word2index')
    output_lang_index2word = _read('output_lang.index2word')
    output_lang_word2index = _read('output_lang.word2index')
    return input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index
# pp added: give an intent_type, return (intent2index, index2intent) lookup tables
def loadIntentDictionaries(intent_type='domain', intent_file='../data/intents.json'):
    """Build intent<->index mappings from the intents JSON file.

    intent_type:
        'domain'     -- unique domain names ('Hotel' from 'Hotel-Inform')
        'sysact'     -- unique system-act names ('Inform' from 'Hotel-Inform')
        'domain_act' -- the full 'Domain-Act' strings as-is
    Index 0 is always the UNK entry (derived from intents[0] == 'UNK-UNK').
    Returns (None, None) for an unrecognised intent_type.
    Fix: the file handle was previously opened and never closed; it is now
    managed with a context manager.
    """
    with open(intent_file, 'r') as fin:
        intents = json.load(fin)  # index 0 is UNK-UNK
    intent_list = []
    if intent_type == 'domain':
        intent_list = [intents[0].split('-')[0]] + sorted(list(set([s.split('-')[0] for s in intents[1:]])))
    elif intent_type == 'sysact':
        intent_list = [intents[0].split('-')[1]] + sorted(list(set([s.split('-')[1] for s in intents[1:]])))
    elif intent_type == 'domain_act':
        intent_list = intents
    if intent_list:
        print('intent_list=', intent_list)
        intent2index = {intent: index for index, intent in enumerate(intent_list)}  # the first intent should be 'UNK-UNK'
        index2intent = dict(zip(intent2index.values(), intent2index.keys()))
        return intent2index, index2intent
    else:
        return None, None
# TODO: initialise paras of a models in the same way
def init_params(model):
    """Apply Xavier-uniform initialisation to every multi-dimensional parameter of `model`."""
    from torch.nn.init import xavier_uniform_
    for pname, weight in model.named_parameters():
        print(pname, weight.size())
        # Only matrices (dim > 1) get Xavier; bias vectors keep their defaults.
        if weight.data.dim() > 1:
            xavier_uniform_(weight.data)
# creat a new dir if it do not exist
def pp_mkdir(path):
    """Create a fresh, empty directory at `path`, wiping any existing tree first."""
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
# pp added -- End
|
{"/test.py": ["/models/evaluator.py", "/models/model.py", "/utils/util.py"], "/train.py": ["/models/model.py", "/utils/util.py"], "/utils/multiwoz_dataloader.py": ["/utils/util.py"], "/models/model.py": ["/utils/util.py"]}
|
19,600
|
stonek4/anti-monopoly
|
refs/heads/master
|
/classes/player.py
|
class PLAYER:
    """State holder for one anti-monopoly player.

    Tracks identity, play style ('m' monopolist / 'c' competitor), owned
    properties, buy/sell priority orders, cash tolerance, budget, and
    jail/bankruptcy status.
    """

    def __init__(self, name, style, b_priorities, s_priorities, tolerance, budget):
        self.name = name
        self.style = style
        self.owned = []
        self.s_priorities = s_priorities
        self.b_priorities = b_priorities
        self.tolerance = tolerance
        self.budget = budget
        self.out = False
        self.jailed = False
        self.jail_timer = 0

    # --- identity / configuration accessors -------------------------------
    def get_name(self):
        return self.name

    def get_style(self):
        return self.style

    def get_s_priorities(self):
        return self.s_priorities

    def get_b_priorities(self):
        return self.b_priorities

    def get_tolerance(self):
        return self.tolerance

    # --- holdings ---------------------------------------------------------
    def get_owned(self):
        return self.owned

    def add_prop(self, prop):
        self.owned.append(prop)
        return True

    # --- money ------------------------------------------------------------
    def get_budget(self):
        return self.budget

    def set_budget(self, amount):
        self.budget = amount
        return True

    # --- bankruptcy / jail state ------------------------------------------
    def set_out(self):
        # Leaving the game forfeits every property.
        self.owned = []
        self.out = True
        return True

    def check_out(self):
        return self.out

    def set_jailed(self, option):
        self.jailed = option
        self.jail_timer = 2
        return True

    def check_jailed(self):
        return self.jailed

    def get_jail_timer(self):
        return self.jail_timer

    def dec_jail_timer(self):
        self.jail_timer -= 1
        return True
|
{"/main.py": ["/classes/game.py"]}
|
19,601
|
stonek4/anti-monopoly
|
refs/heads/master
|
/classes/game.py
|
import time
import math
import random
from functions import roll
from functions import num_owned
from functions import get_owner
from board import BOARD
from player import PLAYER
class GAME:
    """Drives a full anti-monopoly simulation (Python 2).

    Owns the board, the players, per-player board positions and the turn
    counter; the constructor runs the whole game loop (up to ~1000 turns).
    """
    # Debug-gated logging: only prints when constructed with debugging=True.
    def print_out(self, text):
        if self.debugging == True:
            print text
    # Charge `val` to the bank; liquidate via check_bal first. Returns True
    # when the player went bankrupt (turn already advanced), False otherwise.
    def check_t_bankruptcy(self, player, val):
        outcome = self.check_bal(player, val)
        if outcome == False:
            self.pay(player, player.get_budget())
            player.set_out()
            self.inc_turn()
            # NOTE(review): set_out() empties the owned list, so this loop
            # never runs — the un_mortgage pass looks dead; confirm intent.
            for prop in player.get_owned():
                prop.un_mortgage()
            self.print_out("Player "+str(player.get_name())+" has gone bankrupt to the bank, all properties are freed.")
            return True
        self.pay(player, val)
        return False
    # Charge `val` owed to another player; on bankruptcy the debtor's whole
    # portfolio transfers to `owner`. Returns True when bankrupted.
    def check_p_bankruptcy(self, player, val, owner):
        outcome = self.check_bal(player, val)
        if outcome == False:
            self.collect(owner, player.get_budget())
            self.pay(player, player.get_budget())
            for pprop in player.get_owned():
                owner.add_prop(pprop)
            player.set_out()
            self.inc_turn()
            self.print_out("Player "+str(player.get_name())+" has gone bankrupt to Player "+str(owner.get_name())+", all properties were turned over.")
            return True
        self.pay(player, val)
        self.collect(owner, val)
        return False
    # True (and announces the winner) when at most one player remains.
    def check_win(self):
        left = 0
        winner = ""
        for player in self.players:
            if player.check_out() == False:
                left += 1
                winner = player.get_name()
        if left >= 2:
            return False
        self.print_out("Player "+str(winner)+" wins!!!")
        return True
    # Roll two dice and advance the current player, paying $100 salary on
    # passing the start of the board.
    def move(self, player):
        value = roll(2)
        self.print_out("Player "+str(self.turn)+" rolled " + str(value))
        self.locations[self.turn] += value
        if (self.locations[self.turn] >= len(self.board)-1):
            self.locations[self.turn] -= len(self.board)-1
            self.collect(player, 100)
        self.print_out("Player "+str(self.turn)+" moved to "+self.board[self.locations[self.turn]].get_name())
    # Walk the player forward square by square until reaching `prop`,
    # collecting salary when wrapping past the board end.
    def move_to(self, player, prop):
        while True:
            if(self.board[self.locations[self.turn]].get_name() == prop):
                self.print_out("Player "+str(self.turn)+" moved to "+self.board[self.locations[self.turn]].get_name())
                return
            else:
                self.locations[self.turn] += 1
                if (self.locations[self.turn] >= len(self.board)-1):
                    self.locations[self.turn] -= len(self.board)-1
                    self.collect(player, 100)
    # Like move_to but jails the player on arrival (and when wrapping) and
    # pays no salary — the "go directly" variant.
    def straight(self, player, prop):
        while True:
            if(self.board[self.locations[self.turn]].get_name() == prop):
                player.set_jailed(True)
                self.print_out("Player "+str(self.turn)+" moved straight to "+self.board[self.locations[self.turn]].get_name())
                return
            else:
                self.locations[self.turn] += 1
                if (self.locations[self.turn] >= len(self.board)-1):
                    self.locations[self.turn] -= len(self.board)-1
                    player.set_jailed(True)
    # Credit `value` to the player's budget.
    def collect(self, player, value):
        player.set_budget(player.get_budget()+value)
        self.print_out("Player "+str(player.get_name())+" collected $"+str(value) + " | ($" + str(player.get_budget()) + ")")
    # Debit `value` from the player's budget (may go negative; callers guard).
    def pay(self, player, value):
        player.set_budget(player.get_budget()-value)
        self.print_out("Player "+str(player.get_name())+" paid $"+str(value) + " | ($" + str(player.get_budget()) + ")")
        return True
    # Purchase an unowned property when affordable.
    def buy_prop(self, player, prop):
        # NOTE(review): get_budget is not called here (missing parens), so in
        # Python 2 this comparison is between a bound method and an int and is
        # effectively always True — likely a latent bug; confirm and fix.
        if player.get_budget >= prop.get_cost():
            self.pay(player, prop.get_cost())
            player.add_prop(prop)
            self.print_out("Player "+str(player.get_name())+" purchased "+ prop.get_name())
    # Liquidate houses at half price until `value` can be covered.
    def sell_houses(self, player, value):
        for pprop in player.get_owned():
            if pprop.get_style() == "property":
                while pprop.get_houses() > 0:
                    pprop.rem_house()
                    self.print_out("Player "+str(player.get_name())+" sold a house on "+ pprop.get_name())
                    self.collect(player, pprop.get_h_val()/2)
                    # NOTE(review): same missing-parens issue as buy_prop —
                    # get_budget is compared uncalled; confirm.
                    if player.get_budget >= value:
                        return True
        return False
    # Buy one house on the first eligible property; True when one was bought.
    def buy_houses(self, player):
        for pprop in player.get_owned():
            if pprop.get_style() == "property":
                # Competitors ("c") may build a 5th house (hotel-equivalent).
                if pprop.get_houses() < 4 or (pprop.get_houses() == 4 and player.get_style() == "c"):
                    if pprop.check_mortgage() == False and player.get_budget() >= pprop.get_h_val():
                        if num_owned(player, pprop) > 1:
                            self.pay(player, pprop.get_h_val())
                            pprop.add_house()
                            self.print_out("Player "+str(player.get_name())+" bought a house for "+ pprop.get_name() + " | ($" + str(player.get_budget()) + " rem)")
                            return True
        return False
    # Mortgage unmortgaged, house-free properties until `value` is covered.
    def mortgage(self, player, value):
        for pprop in player.get_owned():
            if pprop.get_style() == "property" and pprop.check_mortgage() == False:
                if pprop.get_houses() == 0:
                    pprop.mortgage()
                    self.print_out("Player "+str(player.get_name())+" mortgaged "+ pprop.get_name())
                    self.collect(player, pprop.get_m_val())
                # NOTE(review): check_mortgage is not called here, so this
                # branch compares a bound method to False and never runs —
                # non-"property" holdings are never mortgaged; confirm.
                elif pprop.check_mortgage == False:
                    pprop.mortgage()
                    self.print_out("Player "+str(player.get_name())+" mortgaged "+ pprop.get_name())
                    self.collect(player, pprop.get_m_val())
                if player.get_budget() >= value:
                    return True
        return False
    # Unmortgage the first affordable mortgaged holding; True when done.
    def unmortgage(self, player):
        for pprop in player.get_owned():
            if pprop.check_mortgage() == True and pprop.get_um_val() < player.get_budget():
                self.pay(player, pprop.get_um_val())
                pprop.un_mortgage()
                self.print_out("Player "+str(player.get_name())+" unmortgaged "+ pprop.get_name())
                return True
        return False
    # Try to raise `val` by selling/mortgaging per the player's sell
    # priorities; False means the player cannot cover the debt.
    def check_bal(self, player, val):
        # NOTE(review): `outcome` is unbound if s_priorities contains neither
        # "h" nor "m" on the first pass; presumably priorities always do.
        while val > player.get_budget():
            for p in player.get_s_priorities():
                if p == "h":
                    outcome = self.sell_houses(player, val)
                if p == "m":
                    outcome = self.mortgage(player, val)
            if outcome != True:
                outcome = self.mortgage(player, val)
                if outcome != True:
                    return False
        return True
    # Advance the turn index, wrapping to player 0.
    def inc_turn(self):
        if (self.turn == len(self.players)-1):
            self.turn = 0
        else:
            self.turn += 1
    # Play out one full turn for the current player: jail handling, movement,
    # landing effects (taxes, cards, rent), then discretionary buying.
    def take_turn(self):
        player = self.players[self.turn]
        if (player.check_out() == True):
            self.inc_turn()
            return
        if player.check_jailed() == True:
            # Monopolists buy their way out when they can afford the $50 fee.
            if player.budget >= 50 and player.get_style() == "m":
                self.pay(player, 50)
                player.set_jailed(False)
                self.print_out ("Player " + str(self.turn) + " left jail")
            else:
                if player.get_jail_timer() > 0:
                    # Escape early only by rolling doubles.
                    first = roll(1)
                    second = roll(1)
                    self.print_out("Player "+str(self.turn)+" is in jail and rolled " + str(first)+ " " + str(second))
                    if first != second:
                        player.dec_jail_timer()
                        self.inc_turn()
                        return
                player.set_jailed(False)
                self.print_out ("Player " + str(self.turn) + " left jail")
        self.move(player)
        prop = self.board[self.locations[self.turn]]
        if (prop.get_style() == "property tax"):
            outcome = self.check_t_bankruptcy(player, prop.get_value())
            if outcome == True:
                return
        if (prop.get_style() == "income tax"):
            # 10% of unmortgaged asset value plus 10% of cash, capped by the square.
            total = 0
            for pprop in player.get_owned():
                if pprop.check_mortgage() == False:
                    total += pprop.get_cost()
                    if pprop.get_style() == "property":
                        total += pprop.get_houses() * pprop.get_h_val()
            total *= .10
            total += player.get_budget() * .10
            total = int(math.floor(total))
            outcome = self.check_t_bankruptcy(player, prop.get_value(total))
            if outcome == True:
                return
        if (prop.get_style() == "go to"):
            self.straight(player, "sightseeing tour")
        if (prop.get_style() == "cm" or prop.get_style() == "anti-monopoly foundation"):
            # Card squares return an [action, argument] pair keyed on play style.
            chance = prop.get_value(player.get_style())
            self.print_out ("Player " + str(self.turn) + " must " + chance[0] + " " + str(chance[1]))
            if (chance[0] == "move"):
                self.move_to(player, chance[1])
            elif(chance[0] == "collect"):
                self.collect(player, chance[1])
            elif(chance[0] == "pay"):
                outcome = self.check_t_bankruptcy(player, chance[1])
                if outcome == True:
                    return
            elif(chance[0] == "straight"):
                self.straight(player, chance[1])
            elif(chance[0] == "collect_c"):
                # Pay every active competitor.
                for opp in self.players:
                    if opp.get_style() == "c" and opp.check_out() == False:
                        outcome = self.check_p_bankruptcy(player, chance[1], opp)
                        if outcome == True:
                            return
            elif(chance[0] == "collect_m"):
                # Pay every active monopolist.
                for opp in self.players:
                    if opp.get_style() == "m" and opp.check_out() == False:
                        outcome = self.check_p_bankruptcy(player, chance[1], opp)
                        if outcome == True:
                            return
        can_buy = False
        prop = self.board[self.locations[self.turn]]
        if (prop.get_style() == "property" or prop.get_style() == "utility" or prop.get_style() == "transport"):
            owner_name = get_owner(prop.get_name(), self.players)
            if owner_name >= 0:
                owner = self.players[owner_name]
                # Rent is owed only when the owner is neither self, mortgaged out, nor jailed.
                if owner_name != self.turn and owner_name >= 0 and prop.check_mortgage() == False and owner.check_jailed() == False:
                    val = prop.get_value(owner.get_style(), num_owned(owner, prop))
                    outcome = self.check_p_bankruptcy(player, val, owner)
                    if outcome == True:
                        return
            if owner_name < 0:
                can_buy = True
        # Discretionary spending, in the player's buy-priority order, while
        # cash stays above their tolerance threshold.
        if player.get_tolerance() <= player.get_budget():
            for p in player.get_b_priorities():
                if p == "p":
                    if can_buy == True:
                        if player.get_budget() > prop.get_cost():
                            self.buy_prop(player, prop)
                if p == "h":
                    buying = True
                    while buying == True and player.get_tolerance() <= player.get_budget():
                        buying = self.buy_houses(player)
                if p == "u":
                    unmort = True
                    while unmort == True and player.get_tolerance() <= player.get_budget():
                        unmort = self.unmortgage(player)
        self.inc_turn()
    # Print a final summary:每 player's cash, role, and holdings.
    def get_stats(self):
        print " "
        print "~~~~STATISTICS~~~~"
        for player in self.players:
            print "Player " + str(player.get_name()) + " ~ $" + str(player.get_budget()),
            if player.get_style() == "m":
                print " is a monopolist"
            else:
                print " is a competitor"
            for prop in player.get_owned():
                if prop.get_style() == "property":
                    print prop.get_name() + " " + str(prop.get_houses()) + " houses",
                else:
                    print prop.get_name(),
                if prop.check_mortgage() == True:
                    print " is mortgaged"
                else:
                    print ""
    # Build the board and players (even indices are monopolists, odd are
    # competitors), then run the game loop until a winner or 1000 turns.
    def __init__(self, num_players, debugging, slow):
        i = 0
        min_tol = 50
        max_tol = 400
        self.debugging = debugging
        self.turn = 0
        self.locations = []
        self.new_board = BOARD()
        self.board = self.new_board.get_board()
        self.players = []
        while (i < num_players):
            if ((i % 2) == 0):
                self.players.append(PLAYER(i, "m", ["h","p","u"], ["h","m"], random.randint(min_tol,max_tol),1500))
            else:
                self.players.append(PLAYER(i, "c", ["h","p","u"], ["m","h"], random.randint(min_tol,max_tol),1500))
            self.locations.append(0)
            i += 1
        i = 0
        while i <= 1000:
            # Skip any bankrupted players before taking a turn.
            alive = False
            while alive == False:
                if self.players[self.turn].check_out() == False:
                    alive = True
                else:
                    self.inc_turn()
            print "TURN " + str(i+1)
            self.take_turn()
            print ""
            if slow == True:
                time.sleep(5)
            winner = self.check_win()
            if winner == True:
                break
            i += 1
        self.get_stats()
|
{"/main.py": ["/classes/game.py"]}
|
19,602
|
stonek4/anti-monopoly
|
refs/heads/master
|
/classes/functions.py
|
import random
def roll(num):
    """Roll `num` six-sided dice and return their sum (0 for num <= 0)."""
    return sum(random.randint(1, 6) for _ in range(num))
def get_owner(prop, players):
    """Return the name of the first player owning a property called `prop`, or -1 if unowned."""
    for candidate in players:
        owned_names = [held.get_name() for held in candidate.get_owned()]
        if prop in owned_names:
            return candidate.get_name()
    return -1
def num_owned(player, prop):
    """Count how many of `player`'s holdings group with `prop`.

    Two "property" squares group by city; everything else groups by style
    (utility with utility, transport with transport).
    """
    count = 0
    for held in player.get_owned():
        if prop.get_style() == "property" and held.get_style() == "property":
            same_group = held.get_city() == prop.get_city()
        else:
            same_group = held.get_style() == prop.get_style()
        if same_group:
            count += 1
    return count
def find_mult_own(props):
    """Return the subset of `props` whose city appears more than once in `props`.

    Fix: the original tested ``prop.get_city in cities`` — the bound method
    object, never the city string — so no city was ever recognised as a
    duplicate and the function always returned an empty list. The method is
    now actually called.
    """
    cities = []
    mults = []
    mult_props = []
    for prop in props:
        # First sighting registers the city; a repeat sighting marks it duplicated.
        if (prop.get_city() in cities):
            mults.append(prop.get_city())
        else:
            cities.append(prop.get_city())
    for prop in props:
        if (prop.get_city() in mults):
            mult_props.append(prop)
    return mult_props
|
{"/main.py": ["/classes/game.py"]}
|
19,603
|
stonek4/anti-monopoly
|
refs/heads/master
|
/classes/board.py
|
from square import SQUARE
from square import PROPERTY
from square import CM
from square import INCOME_TAX
from square import UTILITY
from square import TRANSPORT
from square import AMF
from square import GOTO
from square import PROPERTY_TAX
class BOARD:
    """Builds and exposes the fixed anti-monopoly board layout (a flat list of squares)."""
    def get_board(self):
        return self.board
    def __init__(self):
        # PROPERTY arguments: name, city, cost, house cost, competitor rent,
        # monopolist rent, competitor rise per house, monopolist rise per house.
        # Squares are appended in board-walk order starting from "start".
        self.board = []
        self.board.append(SQUARE("start","start"))
        self.board.append(PROPERTY("basin st.","new orleans",60,50,6,6,5,10))
        self.board.append(CM("competitor or monopolist"))
        self.board.append(PROPERTY("french quarter","new orleans",60,50,6,6,5,10))
        self.board.append(INCOME_TAX("income tax"))
        self.board.append(TRANSPORT("u.s. railroad"))
        self.board.append(PROPERTY("sunset blvd.","los angeles",100,50,10,10,5,10))
        self.board.append(CM("competitor or monopolist"))
        self.board.append(PROPERTY("wilshire blvd.","los angeles",100,50,10,10,5,10))
        self.board.append(PROPERTY("hollywood blvd.","los angeles",120,66,12,12,5,10))
        self.board.append(SQUARE("sightseeing tour","sightseeing tour"))
        self.board.append(PROPERTY("rush st.","chicago",140,100,14,14,10,20))
        self.board.append(UTILITY("u.s. electric company"))
        self.board.append(PROPERTY("state st.","chicago",140,100,14,14,10,20))
        self.board.append(PROPERTY("michigan ave.","chicago",160,100,16,16,10,20))
        self.board.append(TRANSPORT("u.s. bus company"))
        self.board.append(PROPERTY("locust st.","philadelphia",180,100,18,18,10,20))
        self.board.append(CM("competitor or monopolist"))
        self.board.append(PROPERTY("chesnut st.","philadelphia",180,100,18,18,10,20))
        self.board.append(PROPERTY("walnut st.","philadelphia",200,100,20,20,10,20))
        self.board.append(AMF("anti-monopoly foundation"))
        self.board.append(PROPERTY("brattle st.","boston",220,150,22,22,15,30))
        self.board.append(CM("competitor or monopolist"))
        self.board.append(PROPERTY("harvard square","boston",220,150,22,22,15,30))
        self.board.append(PROPERTY("beacon st.","boston",240,150,24,24,15,30))
        self.board.append(TRANSPORT("u.s. air line"))
        self.board.append(PROPERTY("georgetown","washington",260,150,26,26,15,30))
        self.board.append(PROPERTY("constitution ave.","washington",260,150,26,26,15,30))
        self.board.append(UTILITY("u.s. gas company"))
        self.board.append(PROPERTY("pennsylvania ave.","washington",280,150,28,28,15,30))
        self.board.append(GOTO("go to"))
        self.board.append(PROPERTY("fisherman's wharf","san francisco",300,200,30,30,20,40))
        self.board.append(PROPERTY("union square","san francisco",300,200,30,30,20,40))
        self.board.append(CM("competitor or monopolist"))
        self.board.append(PROPERTY("nob hill","san francisco",320,200,32,32,20,40))
        self.board.append(TRANSPORT("u.s. trucking company"))
        self.board.append(CM("competitor or monopolist"))
        self.board.append(PROPERTY("fifth ave.","new york",350,200,35,35,20,40))
        self.board.append(PROPERTY_TAX("property tax"))
        self.board.append(PROPERTY("wall st.","new york",400,200,40,40,20,40))
|
{"/main.py": ["/classes/game.py"]}
|
19,604
|
stonek4/anti-monopoly
|
refs/heads/master
|
/main.py
|
from classes.game import GAME
def main():
    """Run one simulated game: 4 players, debug printing on, no turn delay."""
    GAME(4, True, False)


# Idiomatic entry-point guard: importing this module no longer auto-runs a game.
if __name__ == "__main__":
    main()
|
{"/main.py": ["/classes/game.py"]}
|
19,605
|
stonek4/anti-monopoly
|
refs/heads/master
|
/classes/square.py
|
from functions import roll
class SQUARE:
    """Base board square: a display name plus a style tag used for dispatch."""

    def __init__(self, name, style):
        self.name = name
        self.style = style

    def get_name(self):
        return self.name

    def get_style(self):
        return self.style
class GOTO(SQUARE):
    """'Go to' square: sends players toward the sightseeing tour."""

    def __init__(self, name):
        SQUARE.__init__(self, name, "go to")

    def get_value(self, player):
        # Monopolists are marched straight there; competitors travel normally.
        # Unknown styles yield None, exactly as before.
        outcomes = {
            "m": ["straight", "sightseeing tour"],
            "c": ["move", "sightseeing tour"],
        }
        return outcomes.get(player)
class PROPERTY_TAX(SQUARE):
    """Square charging a flat $75 property tax on landing."""

    def __init__(self, name):
        SQUARE.__init__(self, name, "property tax")

    def get_value(self):
        return 75
class INCOME_TAX(SQUARE):
    """Income-tax square: charges the assessed amount, capped at $200."""

    def __init__(self, name):
        SQUARE.__init__(self, name, "income tax")

    def get_value(self, amount):
        return min(amount, 200)
class AMF(SQUARE):
    """Anti-monopoly foundation: fines monopolists, occasionally pays competitors."""

    def __init__(self, name):
        SQUARE.__init__(self, name, "anti-monopoly foundation")

    def get_value(self, player):
        if player == "m":
            # Monopolists always pay a fixed fine.
            return ["pay", 160]
        if player == "c":
            # Competitors roll one die: 1 pays $25, 2 pays $50, otherwise nothing.
            payout = {1: 25, 2: 50}.get(roll(1), 0)
            return ["collect", payout]
class PROPERTY(SQUARE):
    """A buyable street property: supports houses, mortgaging and rent lookup."""

    def __init__(self, name, city, cost, c_house, c_rent, m_rent, c_rise, m_rise):
        SQUARE.__init__(self, name, "property")
        self.city = city
        self.cost = cost
        self.houses = 0
        # Mortgaging pays out half the cost; unmortgaging costs that plus 10%.
        self.v_mort = int(cost * .5)
        self.v_umort = int(cost * .55)
        self.c_house = c_house
        self.c_rent = c_rent
        self.m_rent = m_rent
        self.c_rise = c_rise
        self.m_rise = m_rise
        self.is_mortgaged = False

    # --- static data ------------------------------------------------------
    def get_cost(self):
        return self.cost

    def get_city(self):
        return self.city

    def get_m_val(self):
        return self.v_mort

    def get_um_val(self):
        return self.v_umort

    def get_h_val(self):
        return self.c_house

    # --- houses -----------------------------------------------------------
    def get_houses(self):
        return self.houses

    def add_house(self):
        self.houses += 1
        return True

    def rem_house(self):
        self.houses -= 1

    # --- mortgage state ---------------------------------------------------
    def mortgage(self):
        self.is_mortgaged = True
        return True

    def un_mortgage(self):
        self.is_mortgaged = False
        return True

    def check_mortgage(self):
        return self.is_mortgaged

    # --- rent -------------------------------------------------------------
    def get_value(self, owner, number):
        # Monopolists owning 2+ in this city charge double base rent; everyone
        # else charges competitor rent. Houses raise rent linearly either way.
        if owner == "m" and number > 1:
            return (self.m_rent * 2) + (self.m_rise * self.houses)
        return self.c_rent + (self.c_rise * self.houses)
class CM(SQUARE):
    """Chance-style 'competitor or monopolist' card square.

    A two-dice roll (always 2..12) selects an [action, argument] card from a
    table keyed on the player's style.
    """

    def __init__(self, name):
        SQUARE.__init__(self, name, "cm")

    def get_value(self, player):
        number = roll(2)
        if player == "m":
            outcomes = {
                2: ["move", "start"],
                3: ["collect", 75],
                4: ["move", "beacon st."],
                5: ["pay", 75],
                6: ["move", "u.s. electric company"],
                7: ["collect", 50],
                8: ["move", "u.s. air line"],
                9: ["pay", 50],
                10: ["collect_c", 25],
                11: ["straight", "sightseeing tour"],
                12: ["pay", 25],
            }
            return outcomes[number]
        elif player == "c":
            outcomes = {
                2: ["move", "u.s. air line"],
                3: ["pay", 75],
                4: ["collect_m", 25],
                5: ["move", "u.s. electric company"],
                6: ["pay", 25],
                7: ["move", "beacon st."],
                8: ["collect", 75],
                9: ["move", "start"],
                10: ["pay", 50],
                11: ["collect", 50],
                12: ["move", "sightseeing tour"],
            }
            return outcomes[number]
class UTILITY(SQUARE):
    """A utility square (electric/gas): rent is a multiple of a fresh dice roll."""

    def __init__(self, name):
        SQUARE.__init__(self, name, "utility")
        self.is_mortgaged = False

    def get_cost(self):
        return 150

    def get_m_val(self):
        return 100

    def get_um_val(self):
        return 110

    def mortgage(self):
        self.is_mortgaged = True
        return True

    def un_mortgage(self):
        self.is_mortgaged = False
        return True

    def check_mortgage(self):
        return self.is_mortgaged

    def get_value(self, owner, owned):
        # 4x a two-dice roll normally; 10x when a monopolist owns both utilities.
        number = roll(2)
        if owned == 1:
            return number * 4
        if owned == 2:
            if owner == "c":
                return number * 4
            if owner == "m":
                return number * 10
class TRANSPORT(SQUARE):
    """A transport line: flat rent for competitors, per-line scaling for monopolists."""

    def __init__(self, name):
        SQUARE.__init__(self, name, "transport")
        self.is_mortgaged = False

    def get_cost(self):
        return 200

    def get_m_val(self):
        return 75

    def get_um_val(self):
        return 83

    def mortgage(self):
        self.is_mortgaged = True
        return True

    def un_mortgage(self):
        self.is_mortgaged = False
        return True

    def check_mortgage(self):
        return self.is_mortgaged

    def get_value(self, owner, owned):
        if owner == "c":
            return 20
        if owner == "m":
            # $80 per transport line the monopolist owns.
            return 40 * (owned * 2)
|
{"/main.py": ["/classes/game.py"]}
|
19,625
|
kwoolter/Kingdom2
|
refs/heads/master
|
/kingdom2/model/utils.py
|
import collections
class Event():
    """A named game notification with a free-text description and a category."""

    # Event type categories
    DEFAULT = "default"
    STATE = "state"
    GAME = "game"

    def __init__(self, name: str, description: str = None, type: str = DEFAULT):
        self.name = name
        self.description = description
        self.type = type

    def __str__(self):
        # e.g. "tick:world moved on (game)"
        return f"{self.name}:{self.description} ({self.type})"
class EventQueue():
def __init__(self):
self.events = collections.deque()
def add_event(self, new_event: Event):
self.events.append(new_event)
def pop_event(self):
return self.events.pop()
def size(self):
return len(self.events)
def print(self):
for event in self.events:
print(event)
def is_numeric(s):
    """Parse `s` as int, then float; return the parsed number or None.

    Fix: the bare ``except:`` clauses also swallowed KeyboardInterrupt and
    SystemExit; narrowed to the conversion errors int()/float() actually
    raise (ValueError for bad strings, TypeError for non-convertible types).
    """
    try:
        x = int(s)
    except (ValueError, TypeError):
        try:
            x = float(s)
        except (ValueError, TypeError):
            x = None
    return x
|
{"/kingdom2/controller/__init__.py": ["/kingdom2/controller/cli.py"], "/kingdom2/view/__init__.py": ["/kingdom2/view/text_view.py"], "/kingdom2/controller/cli.py": ["/kingdom2/model/__init__.py", "/kingdom2/view/__init__.py"], "/kingdom2/model/building_blocks.py": ["/kingdom2/model/utils.py"], "/kingdom2/model/model.py": ["/kingdom2/model/utils.py", "/kingdom2/model/building_blocks.py"], "/kingdom2/view/text_view.py": ["/kingdom2/model/__init__.py"], "/kingdom2/model/__init__.py": ["/kingdom2/model/model.py", "/kingdom2/model/utils.py"]}
|
19,626
|
kwoolter/Kingdom2
|
refs/heads/master
|
/kingdom2/controller/__init__.py
|
from .cli import GameCLI
|
{"/kingdom2/controller/__init__.py": ["/kingdom2/controller/cli.py"], "/kingdom2/view/__init__.py": ["/kingdom2/view/text_view.py"], "/kingdom2/controller/cli.py": ["/kingdom2/model/__init__.py", "/kingdom2/view/__init__.py"], "/kingdom2/model/building_blocks.py": ["/kingdom2/model/utils.py"], "/kingdom2/model/model.py": ["/kingdom2/model/utils.py", "/kingdom2/model/building_blocks.py"], "/kingdom2/view/text_view.py": ["/kingdom2/model/__init__.py"], "/kingdom2/model/__init__.py": ["/kingdom2/model/model.py", "/kingdom2/model/utils.py"]}
|
19,627
|
kwoolter/Kingdom2
|
refs/heads/master
|
/kingdom2/view/__init__.py
|
from .text_view import TextView
from .text_view import InventoryTextView
from .text_view import CreationsTextView
from .text_view import WorldMapTextView
from .text_view import WorldTopoModelTextView
|
{"/kingdom2/controller/__init__.py": ["/kingdom2/controller/cli.py"], "/kingdom2/view/__init__.py": ["/kingdom2/view/text_view.py"], "/kingdom2/controller/cli.py": ["/kingdom2/model/__init__.py", "/kingdom2/view/__init__.py"], "/kingdom2/model/building_blocks.py": ["/kingdom2/model/utils.py"], "/kingdom2/model/model.py": ["/kingdom2/model/utils.py", "/kingdom2/model/building_blocks.py"], "/kingdom2/view/text_view.py": ["/kingdom2/model/__init__.py"], "/kingdom2/model/__init__.py": ["/kingdom2/model/model.py", "/kingdom2/model/utils.py"]}
|
19,628
|
kwoolter/Kingdom2
|
refs/heads/master
|
/kingdom2/controller/cli.py
|
import cmd
from .utils import *
import logging
import os
import random
import kingdom2.model as model
import kingdom2.view as view
class GameCLI(cmd.Cmd):
    """Interactive command-line controller wiring the Game model to the text views.

    Each ``do_<name>`` method implements the CLI command ``<name>`` (cmd.Cmd
    convention). NOTE(review): ``confirm`` and ``is_numeric`` come from the
    star import of ``.utils`` at the top of the file — confirm that module
    exists alongside this one.
    """
    intro = "Welcome to The Kingdom 2.\nType 'start' to get going!\nType 'help' for a list of commands."
    prompt = "What next?"
    def __init__(self):
        super(GameCLI, self).__init__()
        self.model = model.Game("Kingdom 2")
        self.view = view.TextView(self.model)
    def run(self):
        # Blocks in cmd.Cmd's read-eval loop until the process ends.
        self.cmdloop()
    def emptyline(self):
        # Override cmd.Cmd default (re-run last command) to do nothing.
        pass
    def do_quit(self, arg):
        """Quit the game"""
        try:
            if confirm("Are you sure you want to quit?") is True:
                print("\nThanks for playing.")
                self.model.do_game_over()
                self.print_events()
                print(str(self.model))
                print("\nBye bye.")
        except Exception as err:
            print(str(err))
    def do_start(self, arg):
        # Start the game model and show any events it raised.
        self.model.start()
        self.print_events()
    def do_tick(self, arg : str = "1"):
        # Advance the model (and view) `arg` ticks; non-numeric input is ignored.
        i = is_numeric(arg)
        if i is not None:
            # NOTE(review): the loop variable shadows the tick count `i`;
            # works because range() is evaluated first, but worth renaming.
            for i in range (0, i):
                self.model.tick()
                self.view.tick()
        self.print_events()
    def do_print(self, arg):
        self.view.draw()
    def do_inv(self, arg):
        # Render the inventory with a dedicated view.
        inv_view = view.InventoryTextView(self.model.inventory)
        inv_view.draw()
    def do_map(self, arg):
        map_view = view.WorldMapTextView(self.model.map)
        map_view.draw()
        #map_view.draw((5,5,10,10))
    def do_topo(self, arg):
        map_view = view.WorldTopoModelTextView(self.model.map)
        map_view.draw()
        #map_view.draw((5,5,10,10))
    def do_test(self, arg):
        # Smoke-test command: stock the inventory with random resources and
        # try to build every known creatable.
        resource_types = model.ResourceFactory.get_resource_types()
        for type in resource_types:
            new_resource = model.ResourceFactory.get_resource(type)
            self.model.inventory.add_resource(new_resource, random.randint(20,60))
        self.model.inventory.print()
        for creatable_name in self.model.creatables.names:
            creatable = self.model.creatables.get_creatable_copy(creatable_name)
            ok = self.model.inventory.is_creatable(creatable)
            print("{0}: creatable = {1}".format(creatable.name, ok))
            self.model.add_creation(creatable)
    def print_events(self):
        # Print any events that got raised
        event = self.model.get_next_event()
        if event is not None:
            print("Game event(s)...")
        while event is not None:
            print(" * " + str(event))
            event = self.model.get_next_event()
|
{"/kingdom2/controller/__init__.py": ["/kingdom2/controller/cli.py"], "/kingdom2/view/__init__.py": ["/kingdom2/view/text_view.py"], "/kingdom2/controller/cli.py": ["/kingdom2/model/__init__.py", "/kingdom2/view/__init__.py"], "/kingdom2/model/building_blocks.py": ["/kingdom2/model/utils.py"], "/kingdom2/model/model.py": ["/kingdom2/model/utils.py", "/kingdom2/model/building_blocks.py"], "/kingdom2/view/text_view.py": ["/kingdom2/model/__init__.py"], "/kingdom2/model/__init__.py": ["/kingdom2/model/model.py", "/kingdom2/model/utils.py"]}
|
19,629
|
kwoolter/Kingdom2
|
refs/heads/master
|
/kingdom2/model/building_blocks.py
|
import copy
import csv
import logging
import random
from xml.dom.minidom import *
import numpy
from .utils import is_numeric
class Resource:
CATEGORY_DEFAULT = "default"
def __init__(self, name: str, description: str, category: str = CATEGORY_DEFAULT, graphic: str = None):
self.name = name
self.description = description
self.category = category
self.graphic = graphic
def __str__(self):
_str = "{0} ({3}): {1} ({2})".format(self.name, self.description, self.category, self.graphic)
return _str
class Creatable():
def __init__(self, name: str, description: str, ticks_required: int = 10):
self.name = name
self.description = description
self.pre_requisites = {}
self.ticks_done = 0
self.ticks_required = ticks_required
self.output = {}
def __str__(self):
_str = "{0} ({1}) {2}% complete".format(self.name, self.description, self.percent_complete)
if len(self.pre_requisites.keys()) > 0:
_str += "\n\tPre-requisites:"
for k, v in self.pre_requisites.items():
_str += "\n\t\t- {0}:{1}".format(k, v)
if len(self.output.keys()) > 0:
_str += "\n\tOutputs:"
for k, v in self.output.items():
_str += "\n\t\t- {0}:{1}".format(k, v)
return _str
@property
def is_complete(self):
return self.ticks_done >= self.ticks_required
@property
def percent_complete(self):
try:
percent_complete = int(min(100, self.ticks_done * 100 / self.ticks_required))
except Exception as err:
print("{0}/{1}".format(self.ticks_done, self.ticks_required))
print(str(err))
percent_complete = 0
return percent_complete
def add_pre_requisite(self, new_resource_name: str, item_count: int = 1):
if new_resource_name not in self.pre_requisites.keys():
self.pre_requisites[new_resource_name] = 0
self.pre_requisites[new_resource_name] += item_count
def add_output(self, new_resource_name: str, item_count: int = 1):
if new_resource_name not in self.output.keys():
self.output[new_resource_name] = 0
self.output[new_resource_name] += item_count
def tick(self):
if self.is_complete is False:
self.ticks_done += 1
if self.is_complete is True:
self.do_complete()
def do_complete(self):
print("Construction complete for {0}!".format(self.name))
class Inventory():
def __init__(self):
self.resources = {}
@property
def resource_type_count(self):
return len(self.resources.keys())
def add_resource(self, new_resource: Resource, item_count: int = 1):
if new_resource not in self.resources.keys():
self.resources[new_resource] = 0
self.resources[new_resource] += item_count
def is_creatable(self, new_creatable: Creatable):
is_creatable = True
for pre_req_name, count in new_creatable.pre_requisites.items():
pre_req = ResourceFactory.get_resource(pre_req_name)
if pre_req not in self.resources.keys():
is_creatable = False
break
else:
inv_count = self.resources[pre_req]
if count > inv_count:
is_creatable = False
break
return is_creatable
def print(self):
if len(self.resources.keys()) > 0:
_str = "Inventory ({0} resource types)".format(self.resource_type_count)
for k, v in self.resources.items():
_str += "\n\t{0} ({1}) : {2}".format(k.name, k.description, v)
else:
_str = "No resources in your inventory!"
print(_str)
class ResourceFactory:
resources = {}
def __init__(self, file_name: str):
self.file_name = file_name
@staticmethod
def get_resource(name: str):
resource = None
if name in ResourceFactory.resources.keys():
resource = ResourceFactory.resources[name]
return resource
@staticmethod
def get_resource_copy(name: str):
resource = None
if name in ResourceFactory.resources.keys():
resource = copy.deepcopy(ResourceFactory.resources[name])
return resource
@staticmethod
def get_resource_types():
return list(ResourceFactory.resources.keys())
def load(self):
print("\nLoading resources...")
# Attempt to open the file
with open(self.file_name, 'r') as object_file:
# Load all rows in as a dictionary
reader = csv.DictReader(object_file)
# For each row in the file....
for row in reader:
name = row.get("Name")
description = row.get("Description")
category = row.get("Category")
graphic = row.get("Graphic")
if graphic == "":
graphic = None
new_resource = Resource(name, description, category, graphic)
ResourceFactory.resources[new_resource.name] = new_resource
print(str(new_resource))
# Close the file
object_file.close()
print("\n{0} resources loaded.".format(len(self.resources.keys())))
class CreatableFactoryXML(object):
'''
Load some creatables from an XML file and store them in a dictionary
'''
def __init__(self, file_name: str):
self.file_name = file_name
self._dom = None
self._creatables = {}
@property
def count(self):
return len(self._creatables)
@property
def names(self):
return list(self._creatables.keys())
# Load in the quest contained in the quest file
def load(self):
self._dom = parse(self.file_name)
assert self._dom.documentElement.tagName == "creatables"
logging.info("%s.load(): Loading in %s", __class__, self.file_name)
# Get a list of all quests
creatables = self._dom.getElementsByTagName("creatable")
# for each quest...
for creatable in creatables:
# Get the main tags that describe the quest
name = self.xml_get_node_text(creatable, "name")
desc = self.xml_get_node_text(creatable, "description")
ticks_required = self.xml_get_node_value(creatable, "ticks_required")
# ...and create a basic creatable object
new_creatable = Creatable(name=name, description=desc, ticks_required=ticks_required)
logging.info("%s.load(): Loading Creatable '%s'...", __class__, new_creatable.name)
# Next get a list of all of the pre-requisites
pre_requisites = creatable.getElementsByTagName("pre_requisites")[0]
resources = pre_requisites.getElementsByTagName("resource")
# For each pre-requisite resource...
for resource in resources:
# Get the basic details of the resource
name = self.xml_get_node_text(resource, "name")
count = self.xml_get_node_value(resource, "count")
new_creatable.add_pre_requisite(name, count)
logging.info("{0}.load(): adding pre-req {1} ({2})".format(__class__, name, count))
# Next get a list of all of the outputs
pre_requisites = creatable.getElementsByTagName("outputs")[0]
resources = pre_requisites.getElementsByTagName("resource")
# For each output resource...
for resource in resources:
# Get the basic details of the resource
name = self.xml_get_node_text(resource, "name")
count = self.xml_get_node_value(resource, "count")
action = self.xml_get_node_text(resource, "action")
if action is not None:
action = "replace"
else:
action = "inventory"
new_creatable.add_output(name, count)
logging.info("{0}.load(): adding output {1} ({2})".format(__class__, name, count))
logging.info("{0}.load(): Creatable '{1}' loaded".format(__class__, new_creatable.name))
print(str(new_creatable))
# Add the new creatable to the dictionary
self._creatables[new_creatable.name] = new_creatable
self._dom.unlink()
# From a specified node get the data value
def xml_get_node_text(self, node, tag_name: str):
tag = node.getElementsByTagName(tag_name)
# If the tag exists then get the data value
if len(tag) > 0:
value = tag[0].firstChild.data
# Else use None
else:
value = None
return value
def xml_get_node_value(self, node, tag_name: str):
value = self.xml_get_node_text(node, tag_name)
return is_numeric(value)
def print(self):
for creatable in self._creatables.values():
print(creatable)
def get_creatable(self, name: str):
return self._creatables[name]
def get_creatable_copy(self, name: str):
return copy.deepcopy(self._creatables[name])
class WorldMap:
TILE_GRASS = "Grass"
TILE_SEA = "Sea"
def __init__(self, name: str, width: int = 50, height: int = 50):
self.name = name
self._width = width
self._height = height
self.map = []
self.topo_model_pass2 = []
def initialise(self):
# Generate a topology model for the map
self.generate_topology()
# Clear the map squares
self.map = [[None for y in range(0, self._height)] for x in range(0, self._width)]
grass = ResourceFactory.get_resource_copy(WorldMap.TILE_GRASS)
self.add_objects(grass.graphic, 40)
grass = ResourceFactory.get_resource_copy(WorldMap.TILE_SEA)
self.add_objects(grass.graphic, 40)
def generate_topology(self):
# Topo controls
MAX_ALTITUDE = 10.0
MIN_ALTITUDE_CLIP_FACTOR = -0.5
ALTITUDE_OFFSET = 0.0
MIN_ALTITUDE = 0.0
MAX_SLOPE = MAX_ALTITUDE * 0.15
MIN_SLOPE = MAX_SLOPE * -1.0
MAX_SLOPE_DELTA = MAX_SLOPE * 2.0
# Clear the topo model
topo_model_pass1 = [[None for y in range(0, self._height)] for x in range(0, self._width)]
self.topo_model_pass2 = [[None for y in range(0, self._height)] for x in range(0, self._width)]
# Create an initial topography using altitudes and random slope changes
print("Pass 1: altitudes and slopes...")
# Set the first square to be a random altitude with slopes in range
topo_model_pass1[0][0] = (random.uniform(MIN_ALTITUDE, MAX_ALTITUDE),
random.uniform(MIN_SLOPE, MAX_SLOPE),
random.uniform(MIN_SLOPE, MAX_SLOPE))
for y in range(0, self._height):
for x in range(0, self._width):
if y == 0:
north_slope = random.uniform(MIN_SLOPE, MAX_SLOPE)
north_altitude = random.uniform(MIN_ALTITUDE, MAX_ALTITUDE)
# north_altitude = 0
else:
north_altitude, tmp, north_slope = topo_model_pass1[x][y - 1]
if x == 0:
west_slope = random.uniform(MIN_SLOPE, MAX_SLOPE)
west_altitude = random.uniform(MIN_ALTITUDE, MAX_ALTITUDE)
# west_altitude = 0
else:
west_altitude, west_slope, tmp = topo_model_pass1[x - 1][y]
clip = lambda n, minn, maxn: max(min(maxn, n), minn)
altitude = ((north_altitude + north_slope) + (west_altitude + west_slope)) / 2
altitude = clip(altitude, MIN_ALTITUDE, MAX_ALTITUDE)
east_slope = west_slope + ((random.random() * MAX_SLOPE_DELTA) - MAX_SLOPE_DELTA / 2)
east_slope = clip(east_slope, MIN_SLOPE, MAX_SLOPE)
south_slope = north_slope + ((random.random() * MAX_SLOPE_DELTA) - MAX_SLOPE_DELTA / 2)
south_slope = clip(south_slope, MIN_SLOPE, MAX_SLOPE)
topo_model_pass1[x][y] = (altitude, east_slope, south_slope)
print("Pass 2: averaging out using neighbouring points...")
# Perform second pass averaging based on adjacent altitudes to smooth out topography
# Define which neighboring points we are going to look at
vectors = ((1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (-1, -1), (1, -1), (-1, 1))
# Iterate through each point in the map
for y in range(0, self._height):
for x in range(0, self._width):
# Get the height of the current point
local_altitude_total, es, ss = topo_model_pass1[x][y]
local_altitude_points = 1
# Get the heights of the surrounding points
for dx, dy in vectors:
if x + dx < 0 or x + dx >= self._width or y + dy < 0 or y + dy >= self._height:
pass
else:
local_altitude, es, ss = topo_model_pass1[x + dx][y + dy]
local_altitude_total += local_altitude
local_altitude_points += 1
average_altitude = (local_altitude_total / local_altitude_points)
# Record the average altitude in a new array
self.topo_model_pass2[x][y] = average_altitude
# Perform 3rd pass clipping to create floors in the topology
a = numpy.array(self.topo_model_pass2)
avg = numpy.mean(a)
std = numpy.std(a)
threshold = avg - (std * MIN_ALTITUDE_CLIP_FACTOR)
a[a < threshold] = threshold
self.topo_model_pass2 = a.tolist()
print("Pass 3: applying altitude floor of {0:.3}...".format(threshold))
@property
def width(self):
return len(self.map)
@property
def height(self):
return len(self.map[0])
# Are the specified coordinates within the area of the map?
def is_valid_xy(self, x: int, y: int):
result = False
if x >= 0 and x < self.width and y >= 0 and y < self.height:
result = True
return result
# Get a map square at the specified co-ordinates
def get(self, x: int, y: int):
if self.is_valid_xy(x, y) is False:
raise Exception("Trying to get tile at ({0},{1}) which is outside of the world!".format(x, y))
return self.map[x][y]
def get_range(self, x: int, y: int, width: int, height: int):
a = numpy.array(self.topo_model_pass2, order="F")
b = a[x:x + width, y:y + height]
return b.tolist()
# Set a map square at the specified co-ordinates with the specified object
def set(self, x: int, y: int, c):
if self.is_valid_xy(x, y) is False:
raise Exception("Trying to set tile at ({0},{1}) which is outside of the world!".format(x, y))
self.map[x][y] = c
def get_altitude(self, x: int, y: int):
return self.topo_model_pass2[x][y]
# Add objects to random tiles
def add_objects(self, object_type, count: int = 20):
for i in range(0, count):
x = random.randint(0, self.width - 1)
y = random.randint(0, self.height - 1)
if self.get(x, y) is None:
self.set(x, y, object_type)
class MapSquare:
def __init__(self, content: str, altitude: float = 0.0):
self.content = content
self.altitude = altitude
|
{"/kingdom2/controller/__init__.py": ["/kingdom2/controller/cli.py"], "/kingdom2/view/__init__.py": ["/kingdom2/view/text_view.py"], "/kingdom2/controller/cli.py": ["/kingdom2/model/__init__.py", "/kingdom2/view/__init__.py"], "/kingdom2/model/building_blocks.py": ["/kingdom2/model/utils.py"], "/kingdom2/model/model.py": ["/kingdom2/model/utils.py", "/kingdom2/model/building_blocks.py"], "/kingdom2/view/text_view.py": ["/kingdom2/model/__init__.py"], "/kingdom2/model/__init__.py": ["/kingdom2/model/model.py", "/kingdom2/model/utils.py"]}
|
19,630
|
kwoolter/Kingdom2
|
refs/heads/master
|
/kingdom2/model/model.py
|
import logging
import os
from .utils import Event
from .utils import EventQueue
from .building_blocks import Resource
from .building_blocks import Inventory
from .building_blocks import Creatable
from .building_blocks import ResourceFactory
from .building_blocks import CreatableFactoryXML
from .building_blocks import WorldMap
class Game:
# States
STATE_LOADED = "loaded"
STATE_PLAYING = "playing"
STATE_GAME_OVER = "game over"
# Events
EVENT_TICK = "tick"
EVENT_STATE = "state"
GAME_DATA_DIR = os.path.dirname(__file__) + "\\data\\"
def __init__(self, name : str):
self.name = name
self.events = EventQueue()
self._state = Game.STATE_LOADED
self._tick_count = 0
self.inventory = None
self.resources = None
self.creatables = None
self.creations = None
self.map = None
@property
def state(self):
return self._state
@state.setter
def state(self, new_state):
self._old_state = self.state
self._state = new_state
self.events.add_event(Event(self._state,
"Game state change from {0} to {1}".format(self._old_state, self._state),
Game.EVENT_STATE))
def __str__(self):
return self.name
def start(self):
self.state = Game.STATE_PLAYING
self.inventory = Inventory()
self.resources = ResourceFactory(Game.GAME_DATA_DIR + "resources.csv")
self.resources.load()
self.creatables = CreatableFactoryXML(Game.GAME_DATA_DIR + "creatables.xml")
self.creatables.load()
self.map = WorldMap("Kingdom 2", 50, 50)
self.map.initialise()
self.creations = []
def add_creation(self, new_creation : Creatable):
self.creations.append(new_creation)
def tick(self):
self._tick_count += 1
self.events.add_event(Event(Game.EVENT_TICK,
"Game ticked to {0}".format(self._tick_count),
Game.EVENT_TICK))
for creation in self.creations:
if self.inventory.is_creatable(creation):
creation.tick()
def do_game_over(self):
self.state = Game.STATE_GAME_OVER
def get_next_event(self):
next_event = None
if self.events.size() > 0:
next_event = self.events.pop_event()
return next_event
|
{"/kingdom2/controller/__init__.py": ["/kingdom2/controller/cli.py"], "/kingdom2/view/__init__.py": ["/kingdom2/view/text_view.py"], "/kingdom2/controller/cli.py": ["/kingdom2/model/__init__.py", "/kingdom2/view/__init__.py"], "/kingdom2/model/building_blocks.py": ["/kingdom2/model/utils.py"], "/kingdom2/model/model.py": ["/kingdom2/model/utils.py", "/kingdom2/model/building_blocks.py"], "/kingdom2/view/text_view.py": ["/kingdom2/model/__init__.py"], "/kingdom2/model/__init__.py": ["/kingdom2/model/model.py", "/kingdom2/model/utils.py"]}
|
19,631
|
kwoolter/Kingdom2
|
refs/heads/master
|
/kingdom2/view/text_view.py
|
import logging
import sys
import colorama
import kingdom2.model as model
class View():
def __init__(self):
self.tick_count = 0
def initialise(self):
pass
def tick(self):
self.tick_count += 1
def process_event(self, new_event: model.Event):
logging.info("Default View Class event process:{0}".format(new_event))
def draw(self):
pass
class TextView(View):
def __init__(self, model: model.Game):
super(TextView, self).__init__()
self.model = model
def draw(self):
print("Text View of {0}".format(self.model))
inv_view = InventoryTextView(self.model.inventory)
inv_view.draw()
creations_view = CreationsTextView(self.model.creations)
creations_view.draw()
class InventoryTextView(View):
def __init__(self, model: model.Inventory):
super(InventoryTextView, self).__init__()
self.model = model
def draw(self):
if self.model is not None:
self.model.print()
else:
print("No inventory to print!")
class CreationsTextView(View):
def __init__(self, model: list):
super(CreationsTextView, self).__init__()
self.model = model
def draw(self):
if self.model is not None:
print("{0} creations:".format(len(self.model)))
for creation in self.model:
print(str(creation))
else:
print("No creations to print!")
class WorldMapTextView(View):
COLOURS_DEFAULT = colorama.Fore.RESET + colorama.Back.RESET
COLOURS_TITLE = colorama.Fore.BLACK + colorama.Back.YELLOW
COLOURS_EMPTY_TILE = colorama.Fore.GREEN + colorama.Back.GREEN
COLOURS_NON_EMPTY_TILE = colorama.Fore.BLACK + colorama.Back.GREEN
def __init__(self, model: model.WorldMap):
self.model = model
if sys.stdout.isatty() is False:
colorama.init(convert=False, strip=False)
else:
colorama.init(convert=True)
def draw(self, rect: list = None):
if rect is not None:
ox, oy, width, height = rect
else:
ox = 0
oy = 0
width = self.model.width
height = self.model.height
print(WorldMapTextView.COLOURS_TITLE, end="")
print("+" + "-" * width + "+" + WorldMapTextView.COLOURS_DEFAULT)
title = "{0:^" + str(width) + "}"
print(WorldMapTextView.COLOURS_TITLE, end="")
print("|" + title.format(self.model.name) + "|" + WorldMapTextView.COLOURS_DEFAULT)
print(WorldMapTextView.COLOURS_TITLE, end="")
print("+" + "-" * width + "+" + WorldMapTextView.COLOURS_DEFAULT)
for y in range(oy, oy + height):
print(WorldMapTextView.COLOURS_TITLE + "|" + WorldMapTextView.COLOURS_DEFAULT, end="")
row = ""
for x in range(ox, ox + width):
c = self.model.get(x, y)
if c is not None:
row += WorldMapTextView.COLOURS_NON_EMPTY_TILE + c + WorldMapTextView.COLOURS_DEFAULT
else:
row += WorldMapTextView.COLOURS_EMPTY_TILE + " " + WorldMapTextView.COLOURS_DEFAULT
print(row + WorldMapTextView.COLOURS_TITLE + "|" + WorldMapTextView.COLOURS_DEFAULT)
print(WorldMapTextView.COLOURS_TITLE, end="")
print("+" + "-" * width + "+" + WorldMapTextView.COLOURS_DEFAULT)
class WorldTopoModelTextView(View):
def __init__(self, model: model.WorldMap):
self.model = model
def draw(self, rect: list = None):
if rect is not None:
ox, oy, width, height = rect
else:
ox = 0
oy = 0
width = self.model.width
height = self.model.height
for x in range(0, width):
print(",{0}".format(x), end="")
print("")
for y in range(0, height):
row = "{0},".format(y)
for x in range(0, width):
a = self.model.topo_model_pass2[x][y]
row += "{0:.4},".format(a)
print(row)
|
{"/kingdom2/controller/__init__.py": ["/kingdom2/controller/cli.py"], "/kingdom2/view/__init__.py": ["/kingdom2/view/text_view.py"], "/kingdom2/controller/cli.py": ["/kingdom2/model/__init__.py", "/kingdom2/view/__init__.py"], "/kingdom2/model/building_blocks.py": ["/kingdom2/model/utils.py"], "/kingdom2/model/model.py": ["/kingdom2/model/utils.py", "/kingdom2/model/building_blocks.py"], "/kingdom2/view/text_view.py": ["/kingdom2/model/__init__.py"], "/kingdom2/model/__init__.py": ["/kingdom2/model/model.py", "/kingdom2/model/utils.py"]}
|
19,632
|
kwoolter/Kingdom2
|
refs/heads/master
|
/kingdom2/model/__init__.py
|
from .model import Game
from .model import Inventory
from .model import WorldMap
from .utils import EventQueue
from .utils import Event
|
{"/kingdom2/controller/__init__.py": ["/kingdom2/controller/cli.py"], "/kingdom2/view/__init__.py": ["/kingdom2/view/text_view.py"], "/kingdom2/controller/cli.py": ["/kingdom2/model/__init__.py", "/kingdom2/view/__init__.py"], "/kingdom2/model/building_blocks.py": ["/kingdom2/model/utils.py"], "/kingdom2/model/model.py": ["/kingdom2/model/utils.py", "/kingdom2/model/building_blocks.py"], "/kingdom2/view/text_view.py": ["/kingdom2/model/__init__.py"], "/kingdom2/model/__init__.py": ["/kingdom2/model/model.py", "/kingdom2/model/utils.py"]}
|
19,644
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-08 22:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('rating', models.FloatField(default=0, editable=False)),
('numOfRatings', models.IntegerField(default=0, editable=False)),
],
),
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('image', models.URLField()),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'artists',
},
),
migrations.CreateModel(
name='Audio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('title', models.CharField(max_length=40)),
('audioDownload', models.URLField()),
('audioPlay', models.URLField()),
('playCount', models.IntegerField(default=0, editable=False)),
('downloadsCount', models.IntegerField(default=0, editable=False)),
('rating', models.FloatField(default=0, editable=False)),
('numOfRatings', models.IntegerField(default=0, editable=False)),
('uploadDate', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
('albums', models.ManyToManyField(related_name='audios', to='sonidosLibresApp.Album')),
('artists', models.ManyToManyField(related_name='audios', to='sonidosLibresApp.Artist')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('image', models.URLField()),
],
options={
'verbose_name_plural': 'categories',
},
),
migrations.CreateModel(
name='Commentary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('commentary', models.TextField()),
('date', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
('audio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sonidosLibresApp.Audio')),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'commentaries',
},
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='audio',
name='categories',
field=models.ManyToManyField(related_name='audios', to='sonidosLibresApp.Category'),
),
migrations.AddField(
model_name='album',
name='artists',
field=models.ManyToManyField(related_name='albums', to='sonidosLibresApp.Artist'),
),
migrations.AddField(
model_name='album',
name='categories',
field=models.ManyToManyField(related_name='albums', to='sonidosLibresApp.Category'),
),
]
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,645
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/migrations/0003_album_image.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-12 13:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sonidosLibresApp', '0002_auto_20161008_2224'),
]
operations = [
migrations.AddField(
model_name='album',
name='image',
field=models.URLField(default='https://github.com/slinan/sonidosLibresG2/blob/master/docs/assets/img/albums/1.jpg?raw=true'),
preserve_default=False,
),
]
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,646
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/models.py
|
from datetime import datetime
import django
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
class Category(models.Model):
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "categories"
name = models.CharField(max_length=40)
image = models.URLField()
description = models.TextField()
relatedCategories = models.ManyToManyField('self')
class Artist(models.Model):
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "artists"
name = models.CharField(max_length=40)
user = models.OneToOneField(User, null=True, blank=True)
image = models.URLField()
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Artist.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.artist.save()
class Album (models.Model):
def __str__(self):
return self.title
title = models.CharField(max_length=100)
rating = models.FloatField(editable=False, default = 0)
numOfRatings = models.IntegerField(editable=False, default = 0)
categories = models.ManyToManyField(Category,related_name="albums", blank=True)
artists = models.ManyToManyField(Artist, related_name="albums", blank=True)
image = models.URLField()
class Audio(models.Model):
def __str__(self):
return self.title + " "+str(self.id)
name = models.CharField(max_length=40)
title = models.CharField(max_length=40)
audioDownload = models.URLField()
audioPlay = models.URLField()
playCount = models.IntegerField(editable=False, default = 0)
downloadsCount = models.IntegerField(editable=False, default = 0)
rating = models.FloatField(editable=False, default = 0)
numOfRatings = models.IntegerField(editable=False, default = 0)
categories = models.ManyToManyField(Category,related_name="audios")
uploadDate = models.DateTimeField(editable=False, default = django.utils.timezone.now)
albums = models.ManyToManyField(Album, related_name="audios")
artists = models.ManyToManyField(Artist, related_name="audios")
class Commentary (models.Model):
def __str__(self):
return self.commentary
class Meta:
verbose_name_plural = "commentaries"
commentary = models.TextField()
date = models.DateTimeField(editable=False, default = django.utils.timezone.now)
audio = models.ForeignKey(Audio,on_delete=models.CASCADE)
user = models.OneToOneField(User, null=True, blank=True)
# python manage.py makemigrations sonidosLibresApp
# python manage.py sqlmigrate sonidosLibresApp 0001
# python manage.py migrate
# python manage.py createsuperuser
# $ heroku run python manage.py migrate --app sonidoslibres
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,647
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/migrations/0004_auto_20161012_1128.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-12 16:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sonidosLibresApp', '0003_album_image'),
]
operations = [
migrations.AlterField(
model_name='album',
name='artists',
field=models.ManyToManyField(blank=True, null=True, related_name='albums', to='sonidosLibresApp.Artist'),
),
migrations.AlterField(
model_name='album',
name='categories',
field=models.ManyToManyField(blank=True, null=True, related_name='albums', to='sonidosLibresApp.Category'),
),
]
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,648
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/customFilters.py
|
import django_filters
from sonidosLibresApp.models import Audio
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,649
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/urls.py
|
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from sonidosLibresApp import views
router = DefaultRouter()
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^login/?$', views.CustomObtainAuthToken.as_view()),
url(r'^signUp/?$', views.CreateUserView.as_view()),
url(r'^audios/?$', views.AudioList.as_view()),
url(r'^audios/(?P<pk>[0-9]+)/?$', views.AudioDetail.as_view()),
url(r'^artists/?$', views.ArtistList.as_view()),
url(r'^artists/(?P<pk>[0-9]+)/?$', views.ArtistDetail.as_view()),
url(r'^categories/?$', views.CategoryList.as_view()),
url(r'^categories/(?P<pk>[0-9]+)/?$', views.CategoryDetail.as_view()),
url(r'^albums/?$', views.AlbumList.as_view()),
url(r'^albums/(?P<pk>[0-9]+)/?$', views.AlbumDetail.as_view()),
url(r'^commentaries/?$', views.CommentaryList.as_view()),
url(r'^commentaries/(?P<pk>[0-9]+)/?$', views.CommentaryDetail.as_view()),
url(r'^albumAudio/(?P<idAudio>[0-9]+)/(?P<idAlbum>[0-9]+)/?$', views.AudioAlbumAssociation.as_view()),
url(r'^rateAudio/(?P<idAudio>[0-9]+)/(?P<rating>[0-5])/?$', views.RateAudio.as_view()),
url(r'^rateAlbum/(?P<idAlbum>[0-9]+)/(?P<rating>[0-5])/?$', views.RateAlbum.as_view()),
url(r'^play/(?P<idAudio>[0-9]+)/?$', views.PlayAudio.as_view()),
url(r'^download/(?P<idAudio>[0-9]+)/?$', views.DownloadAudio.as_view()),
url(r'^categoriesTopRating/(?P<size>[0-9]+)/?$', views.CategoriesTopRating.as_view()),
]
#urlpatterns =format_suffix_patterns(urlpatterns)
urlpatterns += router.urls
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,650
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/apps.py
|
from django.apps import AppConfig
class SonidosLibresAppConfig(AppConfig):
name = 'sonidosLibresApp'
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,651
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/serializers.py
|
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Audio, Category, Album, Commentary, Artist
class AudioCreate(serializers.ModelSerializer):
class Meta:
model = Audio
class AudioSerializer(serializers.ModelSerializer):
class Meta:
model = Audio
class AlbumSerializer(serializers.ModelSerializer):
class Meta:
model = Album
class ArtistSerializer(serializers.ModelSerializer):
class Meta:
model=Artist
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model=Category
class CategoryWithAudiosSerializer(serializers.ModelSerializer):
class Meta:
model=Category
fields = ['id','name', 'image','audios']
class CommentarySerializer(serializers.ModelSerializer):
class Meta:
model=Commentary
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'password', 'email', 'first_name', 'last_name','is_superuser', 'is_staff','is_active', 'groups')
write_only_fields = ('password',)
read_only_fields = ('id',)
def create(self, validated_data):
user = User.objects.create(
username=validated_data['username'],
email=validated_data['email'],
first_name=validated_data['first_name'],
last_name=validated_data['last_name']
)
user.set_password(validated_data['password'])
user.save()
return user
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,652
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/views.py
|
from tokenize import Token
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.http import JsonResponse
from rest_framework import permissions
from rest_framework.authentication import BasicAuthentication, TokenAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.authtoken.models import Token
from django.shortcuts import render
from rest_framework import generics
from rest_framework import mixins
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import filters
from sonidosLibresApp.customPagination import StandardResultsSetPagination
from sonidosLibresApp.serializers import AudioSerializer, CategorySerializer, AlbumSerializer, CommentarySerializer, \
ArtistSerializer, UserSerializer
from .models import Audio, Category, Album, Commentary, Artist
from rest_framework.response import Response
def index(request):
return render(request, 'index.html')
class CustomObtainAuthToken(ObtainAuthToken):
def post(self, request, *args, **kwargs):
response = super(CustomObtainAuthToken, self).post(request, *args, **kwargs)
token = Token.objects.get(key=response.data['token'])
user = User.objects.get(id = token.user_id)
serializer = UserSerializer(user)
return Response({'token': token.key, 'id': token.user_id, 'user': serializer.data})
class CreateUserView(CreateAPIView):
model = get_user_model()
permission_classes = [
permissions.AllowAny # Or anon users can't register
]
serializer_class = UserSerializer
class AudioList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
queryset = Audio.objects.all()
serializer_class = AudioSerializer
filter_backends = (filters.DjangoFilterBackend,filters.OrderingFilter,)
pagination_class = StandardResultsSetPagination
filter_fields = ('title', 'rating', 'playCount', 'downloadsCount','uploadDate','numOfRatings', 'categories','albums')
ordering_fields = ('title', 'rating', 'playCount', 'downloadsCount','uploadDate','numOfRatings')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class AudioDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
queryset = Audio.objects.all()
serializer_class = AudioSerializer
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class ArtistList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
pagination_class = StandardResultsSetPagination
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class ArtistDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class CategoryList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
queryset = Category.objects.all()
serializer_class = CategorySerializer
pagination_class = StandardResultsSetPagination
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class CategoryDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
queryset = Category.objects.all()
serializer_class = CategorySerializer
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class AlbumList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
queryset = Album.objects.all()
serializer_class = AlbumSerializer
filter_backends = (filters.DjangoFilterBackend,filters.OrderingFilter,)
pagination_class = StandardResultsSetPagination
filter_fields = ('title', 'rating', 'categories','numOfRatings','artists','id')
ordering_fields = ('title', 'rating', 'categories','numOfRatings','artists','id')
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class AlbumDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
queryset = Album.objects.all()
serializer_class = AlbumSerializer
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class CommentaryList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
queryset = Commentary.objects.all()
serializer_class = CommentarySerializer
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class CommentaryDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
queryset = Commentary.objects.all()
serializer_class = CommentarySerializer
pagination_class = StandardResultsSetPagination
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class AudioAlbumAssociation(APIView):
def get(self,request,idAudio, idAlbum,format=None):
audio = Audio.objects.get(id=idAudio)
album = Album.objects.get(id=idAlbum)
album.audios.add(audio)
serializer = AudioSerializer(audio)
return Response(serializer.data)
def delete(self, request, idAudio, idAlbum, format=None):
audio = Audio.objects.get(id=idAudio)
album = Album.objects.get(id=idAlbum)
album.audios.remove(audio)
serializer = AudioSerializer(audio)
return Response(serializer.data)
class RateAudio(APIView):
def get(self,request,idAudio, rating,format=None):
audio = Audio.objects.get(id=idAudio)
newRate = ((audio.rating * audio.numOfRatings) + int(rating))/(audio.numOfRatings + 1)
audio.rating=newRate
audio.numOfRatings += 1
audio.save()
serializer = AudioSerializer(audio)
return Response(serializer.data)
class RateAlbum(APIView):
def get(self,request,idAlbum, rating,format=None):
album = Album.objects.get(id=idAlbum)
newRate = ((album.rating * album.numOfRatings) + int(rating))/(album.numOfRatings + 1)
album.rating=newRate
album.numOfRatings += 1
album.save()
serializer = AlbumSerializer(album)
return Response(serializer.data)
class PlayAudio(APIView):
def get(self,request,idAudio,format=None):
audio = Audio.objects.get(id=idAudio)
audio.playCount += 1
audio.save()
serializer = AudioSerializer(audio)
return Response(serializer.data)
class DownloadAudio(APIView):
def get(self,request,idAudio,format=None):
audio = Audio.objects.get(id=idAudio)
audio.downloadsCount += 1
audio.save()
serializer = AudioSerializer(audio)
return Response(serializer.data)
class CategoriesTopRating(APIView):
def get(self,request,size,format=None):
resp = []
categories = Category.objects.all()
for c in categories:
cat = {}
serializer = CategorySerializer(c)
cat['id']=c.pk
cat['name']=c.name
cat['image'] = c.image
audios = Audio.objects.filter(categories__in=[c.pk]).order_by('-rating')
audList = []
var = 0
for a in audios:
aud = {}
aud['id'] = a.pk
aud['name'] = a.name
aud['title'] = a.title
aud['audioDownload'] = a.audioDownload
aud['audioPlay'] = a.audioPlay
aud['playCount'] = a.playCount
aud['downloadsCount'] = a.downloadsCount
aud['rating'] = a.rating
aud['uploadDate'] = a.uploadDate
artists = Artist.objects.filter(audios__in=[a.pk]).order_by('name')
artList = []
for t in artists:
art = {}
art['id'] = t.pk
art['name'] = t.name
art['image'] = t.image
artList.append(art)
aud['artists'] = artList
audList.append(aud)
if var == int(size)-1:
break
cat['audios']=audList
resp.append(cat)
return JsonResponse(resp, safe=False)
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,653
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/admin.py
|
from django.contrib import admin
from .models import Category, Audio, Commentary, Album, Artist
admin.site.register(Category)
admin.site.register(Audio)
admin.site.register(Commentary)
admin.site.register(Album)
admin.site.register(Artist)
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,654
|
osorkon5513/turing201620
|
refs/heads/master
|
/sonidosLibresApp/migrations/0002_auto_20161008_2224.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-09 03:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sonidosLibresApp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='category',
name='description',
field=models.TextField(default='This is a generic description'),
preserve_default=False,
),
migrations.AddField(
model_name='category',
name='relatedCategories',
field=models.ManyToManyField(related_name='_category_relatedCategories_+', to='sonidosLibresApp.Category'),
),
]
|
{"/sonidosLibresApp/customFilters.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/serializers.py": ["/sonidosLibresApp/models.py"], "/sonidosLibresApp/views.py": ["/sonidosLibresApp/serializers.py", "/sonidosLibresApp/models.py"], "/sonidosLibresApp/admin.py": ["/sonidosLibresApp/models.py"]}
|
19,660
|
shughes-uk/python-twitchchat
|
refs/heads/master
|
/setup.py
|
from setuptools import setup
setup(name="twitchchat", version="0.1", packages=['twitchchat'],)
|
{"/twitchchat/__init__.py": ["/twitchchat/chat.py"]}
|
19,661
|
shughes-uk/python-twitchchat
|
refs/heads/master
|
/twitchchat/chat.py
|
import asynchat
import asyncore
import json
import logging
import re
import socket
import sys
import time
from datetime import datetime, timedelta
from threading import Thread
PY3 = sys.version_info[0] == 3
if PY3:
from urllib.request import urlopen, Request
from queue import Queue
else:
from urllib2 import urlopen, Request
from Queue import Queue
logger = logging.getLogger(name="tmi")
class twitch_chat(object):
def __init__(self, user, oauth, channels, client_id):
self.logger = logging.getLogger(name="twitch_chat")
self.chat_subscribers = []
self.usernotice_subscribers = []
self.channels = channels
self.user = user
self.oauth = oauth
self.channel_servers = {'irc.chat.twitch.tv:6667': {'channel_set': channels}}
self.irc_handlers = []
for server in self.channel_servers:
handler = tmi_client(server, self.handle_message, self.handle_connect)
self.channel_servers[server]['client'] = handler
self.irc_handlers.append(handler)
def start(self):
for handler in self.irc_handlers:
handler.start()
def join(self):
for handler in self.irc_handlers:
handler.asynloop_thread.join()
def stop(self):
for handler in self.irc_handlers:
handler.stop()
def subscribeChatMessage(self, callback):
"Subscribe to a callback for incoming chat messages"
self.chat_subscribers.append(callback)
def subscribeUsernotice(self, callback):
"Subscribe to a callback for new subscribers and resubs"
self.usernotice_subscribers.append(callback)
def check_error(self, ircMessage, client):
"Check for a login error notification and terminate if found"
if re.search(r":tmi.twitch.tv NOTICE \* :Error logging i.*", ircMessage):
self.logger.critical(
"Error logging in to twitch irc, check your oauth and username are set correctly in config.txt!")
self.stop()
return True
def check_join(self, ircMessage, client):
"Watch for successful channel join messages"
match = re.search(r":{0}!{0}@{0}\.tmi\.twitch\.tv JOIN #(.*)".format(self.user), ircMessage)
if match:
if match.group(1) in self.channels:
self.logger.info("Joined channel {0} successfully".format(match.group(1)))
return True
def check_usernotice(self, ircMessage, client):
"Parse out new twitch subscriber messages and then call... python subscribers"
if ircMessage[0] == '@':
arg_regx = r"([^=;]*)=([^ ;]*)"
arg_regx = re.compile(arg_regx, re.UNICODE)
args = dict(re.findall(arg_regx, ircMessage[1:]))
regex = (
r'^@[^ ]* :tmi.twitch.tv'
r' USERNOTICE #(?P<channel>[^ ]*)' # channel
r'((?: :)?(?P<message>.*))?') # message
regex = re.compile(regex, re.UNICODE)
match = re.search(regex, ircMessage)
if match:
args['channel'] = match.group(1)
args['message'] = match.group(2)
for subscriber in self.usernotice_subscribers:
try:
subscriber(args)
except Exception:
msg = "Exception during callback to {0}".format(subscriber)
self.logger.exception(msg)
return True
def check_ping(self, ircMessage, client):
"Respond to ping messages or twitch boots us off"
if re.search(r"PING :tmi\.twitch\.tv", ircMessage):
self.logger.info("Responding to a ping from twitch... pong!")
client.send_message("PING :pong\r\n")
return True
def check_message(self, ircMessage, client):
"Watch for chat messages and notifiy subsribers"
if ircMessage[0] == "@":
arg_regx = r"([^=;]*)=([^ ;]*)"
arg_regx = re.compile(arg_regx, re.UNICODE)
args = dict(re.findall(arg_regx, ircMessage[1:]))
regex = (r'^@[^ ]* :([^!]*)![^!]*@[^.]*.tmi.twitch.tv' # username
r' PRIVMSG #([^ ]*)' # channel
r' :(.*)') # message
regex = re.compile(regex, re.UNICODE)
match = re.search(regex, ircMessage)
if match:
args['username'] = match.group(1)
args['channel'] = match.group(2)
args['message'] = match.group(3)
for subscriber in self.chat_subscribers:
try:
subscriber(args)
except Exception:
msg = "Exception during callback to {0}".format(subscriber)
self.logger.exception(msg)
return True
def handle_connect(self, client):
self.logger.info('Connected..authenticating as {0}'.format(self.user))
client.send_message('Pass ' + self.oauth + '\r\n')
client.send_message('NICK ' + self.user + '\r\n'.lower())
client.send_message('CAP REQ :twitch.tv/tags\r\n')
client.send_message('CAP REQ :twitch.tv/membership\r\n')
client.send_message('CAP REQ :twitch.tv/commands\r\n')
for server in self.channel_servers:
if server == client.serverstring:
self.logger.info('Joining channels {0}'.format(self.channel_servers[server]))
for chan in self.channel_servers[server]['channel_set']:
client.send_message('JOIN ' + '#' + chan.lower() + '\r\n')
def handle_message(self, ircMessage, client):
"Handle incoming IRC messages"
self.logger.debug(ircMessage)
if self.check_message(ircMessage, client):
return
elif self.check_join(ircMessage, client):
return
elif self.check_usernotice(ircMessage, client):
return
elif self.check_ping(ircMessage, client):
return
elif self.check_error(ircMessage, client):
return
def send_message(self, channel, message):
for server in self.channel_servers:
if channel in self.channel_servers[server]['channel_set']:
client = self.channel_servers[server]['client']
client.send_message(u'PRIVMSG #{0} :{1}\n'.format(channel, message))
break
MAX_SEND_RATE = 20
SEND_RATE_WITHIN_SECONDS = 30
class tmi_client(asynchat.async_chat, object):
def __init__(self, server, message_callback, connect_callback):
self.logger = logging.getLogger(name="tmi_client[{0}]".format(server))
self.logger.info('TMI initializing')
self.map = {}
asynchat.async_chat.__init__(self, map=self.map)
self.received_data = bytearray()
servernport = server.split(":")
self.serverstring = server
self.server = servernport[0]
self.port = int(servernport[1])
self.set_terminator(b'\n')
self.asynloop_thread = Thread(target=self.run)
self.running = False
self.message_callback = message_callback
self.connect_callback = connect_callback
self.message_queue = Queue()
self.messages_sent = []
self.logger.info('TMI initialized')
return
def send_message(self, msg):
self.message_queue.put(msg.encode("UTF-8"))
def handle_connect(self):
"Socket connected successfully"
self.connect_callback(self)
def handle_error(self):
if self.socket:
self.close()
raise
def collect_incoming_data(self, data):
"Dump recieved data into a buffer"
self.received_data += data
def found_terminator(self):
"Processes each line of text received from the IRC server."
txt = self.received_data.rstrip(b'\r') # accept RFC-compliant and non-RFC-compliant lines.
del self.received_data[:]
self.message_callback(txt.decode("utf-8"), self)
def start(self):
"Connect start message watching thread"
if not self.asynloop_thread.is_alive():
self.running = True
self.asynloop_thread = Thread(target=self.run)
self.asynloop_thread.daemon = True
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((self.server, self.port))
self.asynloop_thread.start()
self.send_thread = Thread(target=self.send_loop)
self.send_thread.daemon = True
self.send_thread.start()
else:
self.logger.critical("Already running can't run twice")
def stop(self):
"Terminate the message watching thread by killing the socket"
self.running = False
if self.asynloop_thread.is_alive():
if self.socket:
self.close()
try:
self.asynloop_thread.join()
self.send_thread.join()
except RuntimeError as e:
if e.message == "cannot join current thread":
# this is thrown when joining the current thread and is ok.. for now"
pass
else:
raise e
def send_loop(self):
while self.running:
time.sleep(1)
if len(self.messages_sent) < MAX_SEND_RATE:
if not self.message_queue.empty():
to_send = self.message_queue.get()
self.logger.debug("Sending")
self.logger.debug(to_send)
self.push(to_send)
self.messages_sent.append(datetime.now())
else:
time_cutoff = datetime.now() - timedelta(seconds=SEND_RATE_WITHIN_SECONDS)
self.messages_sent = [dt for dt in self.messages_sent if dt < time_cutoff]
def run(self):
"Loop!"
try:
asyncore.loop(map=self.map)
finally:
self.running = False
|
{"/twitchchat/__init__.py": ["/twitchchat/chat.py"]}
|
19,662
|
shughes-uk/python-twitchchat
|
refs/heads/master
|
/twitchchat/__init__.py
|
from .chat import twitch_chat
|
{"/twitchchat/__init__.py": ["/twitchchat/chat.py"]}
|
19,667
|
SHANK885/realtime_face_recognition
|
refs/heads/master
|
/enroll_face.py
|
from keras import backend as K
from fr_utils import *
from inception_blocks_v2 import *
from triplet_loss import triplet_loss
import numpy as np
import json
import cv2
import sys
import os
import argparse
K.set_image_data_format('channels_first')
def main(args):
image_path = "./database/images/"
embedding_path = "./database/embeddings/embeddings.json"
face_detector_path = "./classifiers/haarcascade_frontalface_default.xml"
image_path = os.path.join(image_path, args.name + ".png")
video_capture = cv2.VideoCapture(0)
face_detector = cv2.CascadeClassifier(face_detector_path)
print("*********Initializing Face Enrollment*************\n")
while True:
while True:
if video_capture.isOpened():
ret, frame = video_capture.read()
raw_frame = frame.copy()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray,
scaleFactor=1.5,
minNeighbors=5,
minSize=(30, 30))
print("length of faces: ", len(faces))
print("faces:\n", faces)
if len(faces) == 0:
continue
else:
areas = [w*h for x, y, w, h in faces]
i_biggest = np.argmax(areas)
bb = faces[i_biggest]
cv2.rectangle(frame,
(bb[0], bb[1]),
(bb[0]+bb[2], bb[1]+bb[3]),
(0, 255, 0),
2)
cropped = raw_frame[bb[1]:bb[1]+bb[3], bb[0]:bb[0]+bb[2]]
image = cv2.resize(cropped,
(96, 96),
interpolation=cv2.INTER_LINEAR)
cv2.imshow("Video", frame)
if cv2.waitKey(1) & 0xFF == ord('s'):
print("Face Captured for: {}".format(args.name))
break
print("Press 'C' to confirm this image")
print("Press 'R' to retake the picture")
response = input("\nEnter Your Response: ")
if response == "C" or response == "c":
print("\nImage finalized\n")
video_capture.release()
cv2.destroyAllWindows()
break
if response == "R" or response == "r":
cv2.destroyAllWindows()
continue
if os.path.exists(image_path):
print("Member with name: {} already exists!!".format(args.name))
print("Press 'C' to overwrite or 'R' to return")
val = input("Enter response:")
if val == 'r' or val == 'R':
return
elif val == 'c' or val == 'C':
cv2.imwrite(image_path, image)
print("image saved")
else:
cv2.imwrite(image_path, image)
print("image saved _")
FRmodel = faceRecoModel(input_shape=(3, 96, 96))
print("Total Params:", FRmodel.count_params())
# load trained model
FRmodel.compile(optimizer='adam', loss=triplet_loss, metrics=['accuracy'])
load_weights_from_FaceNet(FRmodel)
embedding = img_to_encoding(image_path, FRmodel)[0].tolist()
print(type(embedding))
print(embedding)
print(len(embedding))
print("embedding created")
try:
with open(embedding_path, 'r') as rf:
base_emb = json.load(rf)
except IOError:
print("Embeddibg file empty!! Creating a new embedding file")
with open(embedding_path, 'w+') as rf:
base_emb = {}
with open(embedding_path, 'w') as wf:
base_emb[args.name] = embedding
json.dump(base_emb, wf)
print("embedding written")
print("face enrolled with name => {}".format(args.name))
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('name',
type=str,
help='Add the name of member to be added.')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
{"/enroll_face.py": ["/triplet_loss.py"], "/realtime_recognition.py": ["/triplet_loss.py"]}
|
19,668
|
SHANK885/realtime_face_recognition
|
refs/heads/master
|
/realtime_recognition.py
|
from keras.models import Sequential
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.layers.merge import Concatenate
from keras.layers.core import Lambda, Flatten, Dense
from keras.initializers import glorot_uniform
from keras.engine.topology import Layer
from keras import backend as K
K.set_image_data_format('channels_first')
import cv2
import json
import os
import numpy as np
from numpy import genfromtxt
import pandas as pd
import tensorflow as tf
from fr_utils import *
from triplet_loss import triplet_loss
from inception_blocks_v2 import *
def create_encoding(image, model):
img = image[...,::-1]
img = np.around(np.transpose(img, (2,0,1))/255.0, decimals=12)
x_train = np.array([img])
embedding = model.predict_on_batch(x_train)
return embedding
def who_is_it(image_path, database, model):
"""
Arguments:
image_path -- path to an image
database -- database containing image encodings along with the name of the person on the image
model -- your Inception model instance in Keras
Returns:
min_dist -- the minimum distance between image_path encoding and the encodings from the database
identity -- string, the name prediction for the person on image_path
"""
### START CODE HERE ###
## Step 1: Compute the target "encoding" for the image. Use img_to_encoding() see example above. ## (≈ 1 line)
encoding = create_encoding(image_path, model)
## Step 2: Find the closest encoding ##
# Initialize "min_dist" to a large value, say 100 (≈1 line)
min_dist = 100
# Loop over the database dictionary's names and encodings.
for (name, db_enc) in database.items():
# Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line)
dist = np.linalg.norm(encoding-db_enc)
# If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
if dist < min_dist:
min_dist = dist
identity = name
### END CODE HERE ###
if min_dist > 0.85:
print("Not in the database.")
print("distance", min_dist)
identity = "Unknown"
else:
print ("it's " + str(identity) + ", the distance is " + str(min_dist))
return min_dist, identity
def main():
embedding_path = "./database/embeddings/embeddings.json"
face_detector_path = "./classifiers/haarcascade_frontalface_default.xml"
FRmodel = faceRecoModel(input_shape=(3, 96, 96))
print("Total Params:", FRmodel.count_params())
# load trained model
FRmodel.compile(optimizer='adam', loss=triplet_loss, metrics=['accuracy'])
load_weights_from_FaceNet(FRmodel)
with open(embedding_path, 'r') as infile:
database = json.load(infile)
#who_is_it("images/camera_0.jpg", database, FRmodel)
video_capture = cv2.VideoCapture(0)
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
face_detector = cv2.CascadeClassifier(face_detector_path)
print("above while")
while True:
# capture frame
if video_capture.isOpened():
ret, frame = video_capture.read()
raw_frame = frame.copy()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray,
scaleFactor=1.5,
minNeighbors=5,
minSize=(30, 30))
if len(faces) > 0:
for (x, y, w, h) in faces:
cropped = raw_frame[y:y+h, x:x+w]
image = cv2.resize(cropped,
(96, 96),
interpolation=cv2.INTER_LINEAR)
min_dist, identity = who_is_it(image, database, FRmodel)
if identity == 'Unknown':
box_color = (0, 0, 255)
text_color = (0, 0, 255)
else:
box_color = (0, 255, 0)
text_color = (255, 0, 0)
cv2.rectangle(frame,
(x, y),
(x+w, y+h),
box_color,
2)
cv2.putText(frame,
identity,
(x, y),
cv2.FONT_HERSHEY_SIMPLEX,
0.75,
text_color,
thickness=2,
lineType=2)
cv2.imshow('Realtime Recognition', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
{"/enroll_face.py": ["/triplet_loss.py"], "/realtime_recognition.py": ["/triplet_loss.py"]}
|
19,669
|
SHANK885/realtime_face_recognition
|
refs/heads/master
|
/triplet_loss.py
|
# triplet loss
import tensorflow as tf
def triplet_loss(y_true, y_pred, alpha=0.2):
'''
Arguments:
y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
y_pred -- python list containing three objects:
anchor -- the encodings for the anchor images, of shape (None, 128)
positive -- the encodings for the positive images, of shape (None, 128)
negative -- the encodings for the negative images, of shape (None, 128)
Returns:
loss -- real number, value of the loss
'''
anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
# compute the encoding distance between the anchor and the positive,
# need to sum over the axis -1
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)))
# compute the encoding distance between the anchor and the negative
# need to sum over the axis -1
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)))
basic_loss = pos_dist - neg_dist + alpha
# take the maximum of bsic loss and 0.0 sum over the training examples
loss = tf.reduce_sum(tf.maximum(basic_loss, 0))
return loss
with tf.Session() as test:
tf.set_random_seed(1)
y_true = (None, None, None)
y_pred = (tf.random_normal([3, 128], mean=6, stddev=0.1, seed=1),
tf.random_normal([3, 128], mean=1, stddev=1, seed=1),
tf.random_normal([3, 128], mean=3, stddev=4, seed=1))
loss = triplet_loss(y_true, y_pred)
print("loss = ", str(loss.eval()))
|
{"/enroll_face.py": ["/triplet_loss.py"], "/realtime_recognition.py": ["/triplet_loss.py"]}
|
19,671
|
mjschaub/portfolio-site
|
refs/heads/master
|
/parser/xml_parser.py
|
'''
Created on Mar 6, 2017
@author: mjschaub
'''
import xml.etree.ElementTree as ET
import projects.log_entry as le
import projects.Project as proj
import json
from pymongo import MongoClient
client = MongoClient()
'''
Parses the log to retrieve each commit and all the required data from it.
@param file: the log file to input
@return: the array of log entries
'''
def parse_log(file):
    """Parse an SVN XML log into one record per <logentry>.

    @param file: path or file object of the log xml
    @return: list of entries; each is
             [attrib, author, date, path_texts, path_attribs, msg]
    """
    root = ET.parse(file).getroot()
    entries = []
    for node in root.iter('logentry'):
        record = [node.attrib]
        print(node.attrib)
        for author in node.iter('author'):
            print(author.text)
            record.append(author.text)
        for date in node.iter('date'):
            print(date.text)
            record.append(date.text)
        # First pass over <path>: the touched paths' text, echoed one by one.
        path_texts = []
        for path in node.iter('path'):
            print(path.text)
            path_texts.append(path.text)
        record.append(path_texts)
        # Second pass gathers each path's attributes (kind/action).
        path_attribs = []
        for path in node.iter('path'):
            print(path.attrib)
            path_attribs.append(path.attrib)
        record.append(path_attribs)
        for msg in node.iter('msg'):
            print(msg.text)
            record.append(msg.text)
        entries.append(record)
    return entries
'''
Parses the list xml file; in my implementation I only fetch the size of each file from the list, as the log has all the other information.
@param file: the list file
@param path_name: the path of the file to get the size of
@return: the size of the file
'''
def parse_list(file, path_name):
    """Look up the <size> recorded for *path_name* in an SVN XML list.

    @param file: path or file object of the list xml
    @param path_name: entry name to match
    @return: the size text of the matching entry, or 0 when absent
    """
    root = ET.parse(file).getroot()
    size_found = 0
    for entry_node in root.iter('entry'):
        entry_name = ''
        # The last <name> under the entry wins, as in the original.
        for name_node in entry_node.iter('name'):
            entry_name = name_node.text
        if entry_name == path_name:
            for size_node in entry_node.iter('size'):
                size_found = size_node.text
    return size_found
if __name__ == '__main__':
    # Import the SVN log/list fixtures into the 'portfolio' MongoDB database.
    list_file = 'svn_list.xml'
    log_file = 'svn_log.xml'
    entries = parse_log(log_file)
    db = client['portfolio']
    files = db['files']
    logs = db['logs']
    entry_objs = []
    curr_id = 0
    print(db['files'].count())
    print(db['logs'].count())
    # Wipe both collections so the import starts from a clean slate.
    db['files'].remove({})
    db['logs'].remove({})
    print(db['files'].count())
    for i in range(len(entries)):
        # x layout (see parse_log): [attrib, author, date, paths, path_attribs, msg]
        x = entries[i]
        kinds = [my_dict['kind'] for my_dict in x[4]]
        actions = [my_dict['action'] for my_dict in x[4]]
        projects = []
        # NOTE(review): the inner loop reuses the name `i`, shadowing the outer
        # index; harmless because the outer range iterator resumes afterwards,
        # but renaming would be clearer.
        for i in range(len(x[3])):
            curr_path = x[3][i]
            size_to_add = 0
            # Only files have a size entry in the list xml.
            if kinds[i] == 'file':
                size_to_add = parse_list(list_file,curr_path.replace('/mjschau2/',''))
            svn_link = str('https://subversion.ews.illinois.edu/svn/sp17-cs242'+curr_path+'/?p='+x[0]['revision'])
            temp_proj = proj.Project(curr_path,size_to_add,actions[i],kinds[i], text=svn_link,file_id=curr_id)
            result = files.insert_one(temp_proj.__dict__)
            #print(result)
            curr_id+=1
            projects.append(temp_proj.__dict__)
        temp_obj = le.log_entry(int(x[0]['revision']),x[1],x[2],x[5],projects)
        entry_objs.append(temp_obj.__dict__)
    project_data = entry_objs
    #now put up on mongodb database
    result = logs.insert_many(project_data)
    print(result.inserted_ids)
|
{"/parser/xml_parser.py": ["/projects/log_entry.py", "/projects/Project.py"], "/tests/testParser.py": ["/parser/xml_parser.py"]}
|
19,672
|
mjschaub/portfolio-site
|
refs/heads/master
|
/web/server.py
|
'''
Created on Mar 7, 2017
@author: mjschaub
'''
from flask import Flask,render_template, abort, request
from pymongo import MongoClient
from bson.objectid import ObjectId
client = MongoClient()
app = Flask(__name__)
'''
routes to the homepage
@return the render html template for the index page
'''
@app.route('/')
def home_page():
    """Render the portfolio home page."""
    return render_template('index.html')
'''
routes the project view page where you see all of the commits made
@return the html template for the page
'''
@app.route('/projects')
def assignment_page():
    """Render the commit-list page from the module-level `portfolio` cursor.

    NOTE(review): `portfolio` is a cursor created once in the __main__ block;
    pymongo cursors are exhausted after one iteration — confirm repeated
    requests still render the data.
    """
    return render_template('log_view.html',portfolio=portfolio)
'''
routes the individual commit page for the given revision number
@return the html template for this page
'''
@app.route('/projects/<revision_num>')
def project_page(revision_num=0):
    """Show the files touched by the commit with the given revision number.

    Relies on the module-level `db` handle created in the __main__ block.
    When no revision matches, `files` is never bound and the view raises
    (-> HTTP 500); the test suite in tests/testFlask.py depends on that 500.
    """
    logs = db['logs'].find()
    for i in logs:
        if int(i['revision']) == int(revision_num):
            files = i['files']
    return render_template('revision_view.html',files=files,revision_num=revision_num)
'''
routes the app for the individual file page where you do the commenting
@param the route of the site
@return the html template to use
'''
@app.route('/projects/<revision_num>/<file_id>', methods=['POST','GET'])
def file_page(file_id=0,revision_num=0):
    """Show a single file with its comments; POST adds a comment or a reply."""
    db = client['portfolio']
    comments = db['comments']
    if request.method == 'POST':
        user = request.form['username']
        comment = request.form['comment']
        # Run the submitted text through the bad-word filter first.
        comment = cleanup_comment(comment)
        print({'status':'OK','user':user,'comment':comment})
        if request.form['type-of-comment'] == "normalComment":
            # Top-level comment: reply_id of -1 marks "not a reply".
            result = comments.insert_one({'user':user,'comment':comment,'file_id':file_id,'reply_id':-1,'replies':[]})
            print(result)
        else:
            # Reply: the form field carries the parent comment's ObjectId.
            reply_id = request.form['type-of-comment']
            print('reply id: ',reply_id)
            result = comments.insert_one({'user':user,'comment':comment,'file_id':file_id,'reply_id':reply_id,'replies':[]})
            new_comment = comments.find({'user':user,'comment':comment})
            reply_comment = comments.find({'_id': ObjectId(reply_id)})
            # Embed the new comment inside the parent's replies array.
            for i in reply_comment:
                comments.update({'_id' : ObjectId(reply_id)}, { '$push': {'replies' : new_comment[0]}})
    file_given = None
    path = None
    # Look up the file's path and display text by its numeric id.
    files = db['files'].find()
    for i in files:
        if int(i['file_id']) == int(file_id):
            path = i['path']
            file_given = i['text']
    if file_given == None:
        return abort(500)
    # NOTE(review): `comments` is rebound here from the collection to a cursor.
    comments = comments.find()
    page_comments = []
    for i in comments:
        if i['file_id'] == file_id:
            page_comments.append(i)
            print(i)
    return render_template('project.html',path=path,file=file_given,file_id=file_id,page_comments=page_comments,revision_num=revision_num)
'''
method to check that a comment does not contain filtered text and, if it does, replace it with the good words
@param comment_text: the comment to filter
@return the new comment
'''
def cleanup_comment(comment_text):
    """Replace filtered bad words in a comment with their substitutes.

    @param comment_text: the raw comment text
    @return: the filtered comment text
    """
    db = client['portfolio']
    for filter_doc in db['filter'].find():
        bad = filter_doc['bad_words']
        good = filter_doc['good_words']
        # bad_words and good_words are parallel lists: same index, same pair.
        for idx in range(len(bad)):
            print(bad[idx])
            if bad[idx] in comment_text:
                print("old text: ", comment_text)
                comment_text = comment_text.replace(bad[idx], good[idx])
                print("new_text: ", comment_text)
    return comment_text
'''
sets up the database to have the filtered words in it, is run once to create the data
'''
def setup_bad_words():
    """Seed the 'filter' collection with the bad-word -> good-word mapping.

    Run once to create the data; prints the stored documents for verification.
    """
    bad_words = ['moist','patriots','ugly','justin bieber','bing']
    good_words = ['wet','worst team ever', 'beautiful','he who shall not be named','google']
    word_filter = client['portfolio']['filter']
    word_filter.insert_one({'bad_words':bad_words,'good_words':good_words })
    for stored_doc in client['portfolio']['filter'].find():
        print(stored_doc)
if __name__ == "__main__":
    # Module-level handles/cursors read by the view functions above.
    db = client['portfolio']
    portfolio = db['logs'].find()
    files = db['files'].find()
    comments = db['comments'].find()
    #setup_bad_words()
    # NOTE(review): hard-coded secret key — move to config/env for production.
    app.secret_key = 'super secret key'
    app.config['SESSION_TYPE'] = 'mongodb'
    app.run()
|
{"/parser/xml_parser.py": ["/projects/log_entry.py", "/projects/Project.py"], "/tests/testParser.py": ["/parser/xml_parser.py"]}
|
19,673
|
mjschaub/portfolio-site
|
refs/heads/master
|
/projects/Project.py
|
'''
Created on Mar 8, 2017
@author: mjschaub
'''
class Project(object):
    """A single file or directory touched by an SVN commit."""

    def __init__(self,path='',size=0, action='',kind='', text='',file_id=0):
        """Initialize a project record.

        @param path: repository path of the item
        @param size: size of the item (0 for directories)
        @param action: SVN action code (e.g. 'A', 'M', 'D')
        @param kind: 'file' or 'dir'
        @param text: link or display text associated with the item
        @param file_id: unique identifier for this file record
        """
        self.path = path
        self.size = size
        self.action = action
        self.kind = kind
        self.text = text
        self.file_id = file_id
|
{"/parser/xml_parser.py": ["/projects/log_entry.py", "/projects/Project.py"], "/tests/testParser.py": ["/parser/xml_parser.py"]}
|
19,674
|
mjschaub/portfolio-site
|
refs/heads/master
|
/tests/testFlask.py
|
'''
Created on Mar 12, 2017
@author: mjschaub
'''
import unittest,requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
'''
The api testing class
'''
class Test(unittest.TestCase):
    """End-to-end checks against a locally running portfolio server."""

    def setUp(self):
        """Point every request at the local dev server."""
        self.baseURL = 'http://localhost:5000'

    def test_gets(self):
        """Main pages respond 200; an unknown revision yields a 500."""
        expectations = [
            ('/', 200),
            ('/projects', 200),
            ('/projects/12', 200),
            ('/projects/984028', 500),
        ]
        for route, expected_status in expectations:
            response = requests.get(self.baseURL + route)
            self.assertEqual(response.status_code, expected_status)
if __name__ == "__main__":
    # Run the suite when this file is executed directly.
    unittest.main()
|
{"/parser/xml_parser.py": ["/projects/log_entry.py", "/projects/Project.py"], "/tests/testParser.py": ["/parser/xml_parser.py"]}
|
19,675
|
mjschaub/portfolio-site
|
refs/heads/master
|
/tests/testParser.py
|
'''
Created on Mar 12, 2017
@author: mjschaub
'''
import unittest
import parser.xml_parser as parse
class Test(unittest.TestCase):
    """Unit tests for parser.xml_parser against the xml fixture files."""

    def testParseLog(self):
        """parse_log returns one fully populated entry for the test log.

        (Removed the redundant trailing `pass` — dead statement after the asserts.)
        """
        entries = parse.parse_log('test_log.xml')
        self.assertEqual(len(entries),1)
        self.assertEqual(entries[0][0]['revision'],u'6401')
        self.assertEqual(entries[0][1],u'mjschau2')
        self.assertEqual(entries[0][2],u'2017-03-06T16:59:20.880790Z')
        self.assertEqual(entries[0][3],['/mjschau2/Assignment2.1', '/mjschau2/Assignment2.1/Actor.py', '/mjschau2/Assignment2.1/CreateGraph.py', '/mjschau2/Assignment2.1/Graph.py', '/mjschau2/Assignment2.1/GraphVis.py', '/mjschau2/Assignment2.1/Graph_API.py', '/mjschau2/Assignment2.1/Movie.py', '/mjschau2/Assignment2.1/Test_Api.py', '/mjschau2/Assignment2.1/Test_Graph.py', '/mjschau2/Assignment2.1/Testing Plan Assignment #2.docx', '/mjschau2/Assignment2.1/graph_data.json', '/mjschau2/Assignment2.1/graph_setup.log', '/mjschau2/Assignment2.1/graphics.py'])
        self.assertEqual(entries[0][4],[{'action': 'A', 'kind': 'dir'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}, {'action': 'A', 'kind': 'file'}])
        self.assertEqual(entries[0][5],u'importing assignment 2.1')

    def testParseList(self):
        """parse_list returns 0 for a directory and the size text for a file."""
        entry = parse.parse_log('test_log.xml')
        # First path is a directory -> no size entry -> 0.
        curr_path = entry[0][3][0]
        size1 = parse.parse_list('test_list.xml',curr_path.replace('/mjschau2/',''))
        self.assertEqual(size1, 0)
        # Second path is a file -> size text from the list xml.
        curr_path = entry[0][3][1]
        size1 = parse.parse_list('test_list.xml',curr_path.replace('/mjschau2/',''))
        self.assertEqual(size1, u'1623')
if __name__ == "__main__":
unittest.main()
|
{"/parser/xml_parser.py": ["/projects/log_entry.py", "/projects/Project.py"], "/tests/testParser.py": ["/parser/xml_parser.py"]}
|
19,676
|
mjschaub/portfolio-site
|
refs/heads/master
|
/projects/log_entry.py
|
'''
Created on Mar 7, 2017
@author: mjschaub
'''
class log_entry(object):
'''
log_entry class for each commit
'''
def __init__(self,revision = 0, author='',date='',msg='',files = []):
'''
Constructor
'''
self.author = author
self.date = date
self.revision = revision
self.msg = msg
self.files = files
'''
sets the size of the file or directory
@param path: path of the file to change
@param size: the size of the file
'''
def set_size(self,path_idx,size):
self.size[path_idx] = size
|
{"/parser/xml_parser.py": ["/projects/log_entry.py", "/projects/Project.py"], "/tests/testParser.py": ["/parser/xml_parser.py"]}
|
19,678
|
Mr-big-c/github
|
refs/heads/master
|
/练习代码/Flask/flask_web/models/man.py
|
# -*- coding: utf-8 -*-
# @File  : man.py
# @Author: 一稚杨
# @Date  : 2018/6/10/010
# @Desc  : A `man` model class used to interact with the database.
from sqlalchemy import Column, String, Integer
from flask_sqlalchemy import SQLAlchemy
# Instantiating flask_sqlalchemy's SQLAlchemy wires up the database engine;
# model classes subclass db.Model to be mapped to tables automatically.
db = SQLAlchemy()
class man(db.Model):
    # Auto-incrementing primary key.
    id = Column(Integer, primary_key=True)
    # Required name, at most 10 characters.
    name = Column(String(10), nullable=False)
    # Age, defaults to 18.
    age = Column(Integer, default=18)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,679
|
Mr-big-c/github
|
refs/heads/master
|
/快捷办公/csv文件操作/写csv文件.py
|
"""
写csv文件
"""
import csv
def writecsv(path, data):
    """Write rows of *data* to a CSV file at *path*.

    @param path: destination file path (created or overwritten)
    @param data: iterable of rows, each row an iterable of cell values
    """
    # newline="" is required by the csv module; without it each row is
    # followed by an extra blank line on Windows.
    with open(path, "w", newline="") as f:
        writer = csv.writer(f)
        # writerows replaces the manual per-row loop.
        writer.writerows(data)
# Demo: write a 3x3 grid of numbers.
# NOTE(review): hard-coded Windows-only absolute path from the author's machine.
path = r"D:\Python数据\csv数据写入.csv"
data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
writecsv(path, data)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,680
|
Mr-big-c/github
|
refs/heads/master
|
/Flask/Flask_Mac/web_app.py
|
"""
flask创建web app
"""
from settings.create_app import create_app
app = create_app()
if __name__ == "__main__":
app.run(debug=app.config["DEBUG"], host='0.0.0.0', port=8000)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,681
|
Mr-big-c/github
|
refs/heads/master
|
/练习代码/Flask/6.7/重定向.py
|
# -*- coding: utf-8 -*-
# @File  : 重定向.py
# @Author: 一稚杨
# @Date  : 2018/6/7/007
# @Desc  : Redirects and a custom 404 page.
# redirect() performs the HTTP redirect.
from flask import Flask, redirect, render_template, flash
app = Flask(__name__)
app.secret_key = '123456'
@app.route("/index1")
def index1():
    # Flash two categorized messages, then bounce the client to /index2/.
    flash("登录成功", category="login")
    flash("hello",category="hello")
    return redirect("/index2/")
@app.route("/index2/")
def index2():
    # Renders the page that displays the flashed messages.
    return render_template("flash.html")
@app.errorhandler(404)
def error(error):
    # Custom 404 page; the second tuple element sets the status code.
    return render_template("404.html"),404
# When a form's action is empty, the browser submits back to the page that
# served the form (answering the question in the original comment).
@app.route("/action_none", methods=["GET", "POST"])
def action_none():
    return render_template("action.html")
app.run(debug=True)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,682
|
Mr-big-c/github
|
refs/heads/master
|
/练习代码/spider5.15/模拟浏览器.py
|
import urllib.request
url = r"http://www.huangwenyang.cn/"
# Request header carrying browser details (version, engine, ...) so the
# server treats us like a regular browser.
header = {
    "User-Agent": "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50"
}
# Build a Request object with the header to impersonate a browser.
req = urllib.request.Request(url, headers=header)
# Issue the request.
response = urllib.request.urlopen(req)
data = response.read().decode("utf-8")
print(data)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,683
|
Mr-big-c/github
|
refs/heads/master
|
/练习代码/Flask/flask_web/forms/data_forms.py
|
# -*- coding: utf-8 -*-
# @File  : data_forms.py
# @Author: 一稚杨
# @Date  : 2018/6/9/009
# @Desc  : Parameter validation with wtforms.
from wtforms import Form, StringField, IntegerField
from wtforms.validators import Length, NumberRange, DataRequired
# Validation schema; subclasses wtforms' Form.
class data_forms(Form):
    # Required, 1-20 characters.
    name = StringField(validators=[DataRequired() ,Length(min=1, max=20)])
    # 1-100 inclusive (user-facing message kept in Chinese); defaults to 18.
    age = IntegerField(validators=[NumberRange(min=1, max=100, message="不在正常年龄范围")], default=18)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,684
|
Mr-big-c/github
|
refs/heads/master
|
/代码中转站/blog/myapp/migrations/0004_auto_20180619_1102.py
|
# Generated by Django 2.0.6 on 2018-06-19 11:02
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Applies after the previous myapp migration.
    dependencies = [
        ('myapp', '0003_auto_20180619_1030'),
    ]
    # Switch Books.content to a CKEditor rich-text upload field.
    operations = [
        migrations.AlterField(
            model_name='books',
            name='content',
            field=ckeditor_uploader.fields.RichTextUploadingField(),
        ),
    ]
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,685
|
Mr-big-c/github
|
refs/heads/master
|
/练习代码/spider5.15/糗事百科.py
|
import urllib.request
import re
def spider(url):
    """Fetch *url* pretending to be Firefox and return the decoded body."""
    # Request header used to mimic a browser.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0"
    }
    # Build the request object.
    req = urllib.request.Request(url, headers=headers)
    # Issue the request.
    response = urllib.request.urlopen(req)
    data = response.read().decode("utf-8")
    return data
url = r"https://www.qiushibaike.com/text/page/2/"
re_txt1 = ''
re_txt2 = ''
# Non-greedy pattern for the text inside each content <span>.
re_txt = r'<div class="content">\n<span>([\S\s]*?)</span>'
print(type(re_txt))
result = spider(url)
# txt = re.compile(re_txt)
# with open(r"C:\Users\Administrator\Desktop\qiu.txt", "w", encoding="utf-8") as f:
#     f.write(result)
# print(result)
result = re.findall(re_txt, result)
print(result)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,686
|
Mr-big-c/github
|
refs/heads/master
|
/练习代码/spider5.15/动态网页.py
|
import urllib.request
import json
import ssl
def spider(url):
    """GET *url* as Firefox, skip TLS verification, and return parsed JSON."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0"
    }
    # Build the request object.
    req = urllib.request.Request(url, headers=headers)
    # SECURITY NOTE(review): the unverified SSL context disables certificate
    # checking; acceptable only for this scraping exercise.
    context = ssl._create_unverified_context()
    # Issue the request.
    reponse = urllib.request.urlopen(req, context=context)
    data = reponse.read().decode("utf-8")
    data = json.loads(data)
    return data
# url = "https://movie.douban.com/j/chart/top_list?type=11&interval_id=100%3A90&action=&start=0&limit=1"
# result = spider(url)
# print(result)
# print(len(result))
j = 1
# Fetch 10 pages of 20 movies each; append the numbered titles to a file.
for i in range(0, 10):
    url = "https://movie.douban.com/j/chart/top_list?type=11&interval_id=100%3A90&action=&start=" + str(i * 20) + "&limit=20"
    result = spider(url)
    for info in result:
        with open(r"C:\Users\Administrator\Desktop\dou.txt", "a", encoding="utf-8") as f:
            f.write(str(j) + info["title"] + "\n")
        j = j + 1
    print(len(result))
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,687
|
Mr-big-c/github
|
refs/heads/master
|
/tkinter/组合按键事件.py
|
"""
组合按键事件
"""
import tkinter
win = tkinter.Tk()
win.title("hwy")
win.geometry("400x400")
label = tkinter.Label(win, text="python")
label.focus_set()
label.pack()
def showinfo(event):
# 显示对应按键的字符
print(event.char)
# 显示对应按键的ascii码
print(event.keycode)
label.bind("<Shift-Up>", showinfo)
win.mainloop()
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,688
|
Mr-big-c/github
|
refs/heads/master
|
/tkinter/combobox.py
|
"""
下拉控件
"""
import tkinter
from tkinter import ttk
win = tkinter.Tk()
win.title("hwy")
win.geometry('400x400')
# 创建下拉菜单
com = ttk.Combobox(win)
# 设置下拉值
com["value"] = ("python", "C++", "java")
# 设置初始值
com.current(0)
com.pack()
def showinfo(event):
print(com.get())
# 绑定事件,该事件在下拉之发生变化时触发
com.bind("<<ComboboxSelected>>", showinfo)
win.mainloop()
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,689
|
Mr-big-c/github
|
refs/heads/master
|
/Blog Relevant/blog8-19/myapp/migrations/0003_auto_20180727_1929.py
|
# Generated by Django 2.0 on 2018-07-27 11:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Applies after the previous myapp migration.
    dependencies = [
        ('myapp', '0002_auto_20180721_1709'),
    ]
    # Make the article's upload fields optional (blank=True).
    operations = [
        migrations.AlterField(
            model_name='article',
            name='file_upload',
            field=models.FileField(blank=True, upload_to='file'),
        ),
        migrations.AlterField(
            model_name='article',
            name='img',
            field=models.ImageField(blank=True, upload_to='image'),
        ),
    ]
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,690
|
Mr-big-c/github
|
refs/heads/master
|
/tkinter/鼠标点击事件.py
|
'''
Mouse-click event demo.
'''
import tkinter
win = tkinter.Tk()
win.title("hwy")
win.geometry("400x400")
button = tkinter.Button(win, text="button")
def showinfo(event):
    # Pointer coordinates within the widget.
    print(event.x, event.y)
# <Button-2> is the middle mouse button.
button.bind("<Button-2>", showinfo)
button.pack()
win.mainloop()
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,691
|
Mr-big-c/github
|
refs/heads/master
|
/tkinter/表格布局.py
|
"""
表格布局
"""
import tkinter
win = tkinter.Tk()
win.title("hwy")
win.geometry("400x400")
# 创建三个标签
label1 = tkinter.Label(win, text="python", bg="blue")
label2 = tkinter.Label(win, text="java", bg="red")
label3 = tkinter.Label(win, text="C++", bg="pink")
# 指定控件所在的行和列
label1.grid(row=0, column=0)
label2.grid(row=0, column=1)
label3.grid(row=1, column=1)
win.mainloop()
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,692
|
Mr-big-c/github
|
refs/heads/master
|
/练习代码/spider5.15/POST.py
|
import urllib.request
import urllib.parse
url = "http://127.0.0.1:8000/form/"
# Form fields for the POST request.
re_data = {
    "username": "hwy",
    "passwd": "123",
}
# URL-encode the fields and encode to bytes, as urlopen requires.
post_data = urllib.parse.urlencode(re_data).encode("utf-8")
# Supplying a data payload makes this a POST request.
req = urllib.request.Request(url, post_data)
# Issue the request.
response = urllib.request.urlopen(req)
data = response.read().decode("utf-8")
print(data)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,693
|
Mr-big-c/github
|
refs/heads/master
|
/Blog Relevant/blog8-22/Attitude/models.py
|
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
# Create your models here.
# Records a single user's reaction to an article (generic relation).
class AttitudeRecord(models.Model):
    # Generic foreign key to the object being reacted to.
    content_type = models.ForeignKey(ContentType, on_delete=models.DO_NOTHING)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    # Type of the reaction; defaults to 'applause'.
    attitude_type = models.TextField(default='applause')
    # The user who posted the reaction.
    attitude_user = models.ForeignKey(User, on_delete=models.DO_NOTHING)
    # Timestamp set once at creation.
    attitude_time = models.DateTimeField(auto_now_add=True)
# Per-object tally of each reaction type (generic relation).
class AttitudeCount(models.Model):
    content_type = models.ForeignKey(ContentType, on_delete=models.DO_NOTHING)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    # Number of 'flower' reactions.
    attitude_flower_num = models.IntegerField(default=0)
    # Number of 'handshake' reactions.
    attitude_handshake_num = models.IntegerField(default=0)
    # Number of 'pass-by' reactions.
    attitude_pass_num = models.IntegerField(default=0)
    # Number of 'shocking' reactions.
    attitude_shocking_num = models.IntegerField(default=0)
    # Number of 'egg' reactions.
    attitude_egg_num = models.IntegerField(default=0)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,694
|
Mr-big-c/github
|
refs/heads/master
|
/Blog Relevant/blog8-17/Run/models.py
|
from django.db import models
# Create your models here.
class Run(models.Model):
    # Uploaded image stored under ./img.
    img = models.ImageField(upload_to='./img')
    # Creation timestamp, set once.
    time = models.DateTimeField(auto_now_add=True)
    # Soft-delete flag.
    is_delete = models.BooleanField(default=False)
    class Meta:
        # Newest first.
        ordering = ['-time']
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,695
|
Mr-big-c/github
|
refs/heads/master
|
/tkinter/button.py
|
'''
Button widget demo.
'''
import tkinter
def button():
    # Callback for the first button.
    print('hwy is a good man')
win = tkinter.Tk()
win.title('黄文杨')
win.geometry('400x400+400+200')
# NOTE(review): this assignment shadows the function `button`; it works
# because command=button is evaluated first, but renaming would be clearer.
button = tkinter.Button(win, text="按钮", command=button, width=5, height=5)
button2 = tkinter.Button(win, text="quit", command=win.quit,)
button.pack()
button2.pack()
win.mainloop()
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,696
|
Mr-big-c/github
|
refs/heads/master
|
/练习代码/spider5.15/5.16.py
|
import urllib.request
import json
url = r'http://127.0.0.1:8000/index/'
response = urllib.request.urlopen(url)
data = response.read().decode("utf-8")
print(data)
print(type(data))
# Convert the JSON payload into Python objects.
jsondata = json.loads(data)
print(jsondata["name"])
print(type(jsondata))
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,697
|
Mr-big-c/github
|
refs/heads/master
|
/Blog Relevant/blog7-15/myapp/models.py
|
from django.db import models
from django.contrib.contenttypes.models import ContentType
from ReadNumber.models import *
# Create your models here.
# 创建一个文章的模型
class Article(models.Model):
    """A short article whose read count lives in a generic ReadNum row."""
    title = models.CharField(max_length=20)
    author = models.CharField(max_length=10)
    text = models.CharField(max_length=200)

    def get_read_num(self):
        """Return this article's read count, or 0 when none is recorded.

        Looks up the ReadNum row keyed by (content type of Article, self.pk);
        `records[0]` raises IndexError when no row exists yet.
        """
        try:
            ct = ContentType.objects.get_for_model(Article)
            records = ReadNum.objects.filter(content_type=ct, object_id=self.pk)
            return records[0].read_num
        # FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; `except Exception` keeps the same best-effort
        # "default to 0" behavior without trapping interpreter-exit signals.
        except Exception:
            return 0
class Diary(models.Model):
    """A diary entry whose read count lives in a generic ReadNum row."""
    title = models.CharField(max_length=20)
    author = models.CharField(max_length=10)
    text = models.CharField(max_length=200)

    def get_read_num(self):
        """Return this diary entry's read count, or 0 when none is recorded.

        Looks up the ReadNum row keyed by (content type of Diary, self.pk);
        `records[0]` raises IndexError when no row exists yet.
        """
        try:
            ct = ContentType.objects.get_for_model(Diary)
            records = ReadNum.objects.filter(content_type=ct, object_id=self.pk)
            return records[0].read_num
        # FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; `except Exception` keeps the same best-effort
        # "default to 0" behavior without trapping interpreter-exit signals.
        except Exception:
            return 0
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,698
|
Mr-big-c/github
|
refs/heads/master
|
/Blog Relevant/blog8-15/Like/templatetags/__init__.py
|
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author: 一稚杨
# @Date : 2018/8/14/014
# @Desc :
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,699
|
Mr-big-c/github
|
refs/heads/master
|
/Blog Relevant/blog8-20/Like/migrations/0002_auto_20180813_2011.py
|
# Generated by Django 2.0 on 2018-08-13 12:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: replaces the old LikeAr model with the
    # LikeCount / LikeRecord pair. Do not hand-edit the operations below;
    # generate a follow-up migration for any further schema change.
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Like', '0001_initial'),
    ]
    operations = [
        # Per-object like counter, keyed generically by (content_type, object_id).
        migrations.CreateModel(
            name='LikeCount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.PositiveIntegerField()),
                ('like_num', models.IntegerField(default=0)),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
            ],
        ),
        # One row per individual "like" event, tied to the liking user.
        migrations.CreateModel(
            name='LikeRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.PositiveIntegerField()),
                ('like_time', models.DateTimeField(auto_now_add=True)),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
                ('like_user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Drop the superseded LikeAr model: FKs first, then the model itself.
        migrations.RemoveField(
            model_name='likear',
            name='content_type',
        ),
        migrations.RemoveField(
            model_name='likear',
            name='user',
        ),
        migrations.DeleteModel(
            name='LikeAr',
        ),
    ]
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,700
|
Mr-big-c/github
|
refs/heads/master
|
/练习代码/spider5.15/json数据类型.py
|
import json

# Serialize a Python dict into a JSON-formatted string.
record = {"name": "hwy", "age": "20"}
serialized = json.dumps(record)
print(serialized)
print(type(serialized))

# Local JSON file on the desktop.
path = r"C:\Users\Administrator\Desktop\hwy.json"
# The file was originally produced with:
#     with open(path, "w") as f:
#         json.dump(record, f)

# Read the raw JSON text back from disk ...
with open(path, "r") as f:
    raw_text = f.read()
print(raw_text)
print("--------")
print(type(raw_text))

# ... and parse it back into a Python object.
parsed = json.loads(raw_text)
print(parsed)
print(type(parsed))
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
19,701
|
Mr-big-c/github
|
refs/heads/master
|
/快捷办公/设置桌面壁纸.py
|
"""
设置桌面壁纸
"""
import win32api
import win32con
import win32gui
def setwallpaper(path):
# 打开注册表
win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, "Control Panel\\Desktop", 0, win32con.KEY_SET_VALUE)
# 设置壁纸路径
win32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, path, win32con.SPIF_SENDWININICHANGE)
path = r"C:\Users\Administrator\Desktop\个人博客\壁纸.jpg"
setwallpaper(path)
|
{"/Blog Relevant/files_system/basics/views.py": ["/Blog Relevant/files_system/basics/file_op.py"], "/Blog Relevant/blog7-17/myapp/admin.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/Blog Relevant/blog8-31/myapp/views.py": ["/Blog Relevant/blog8-31/myapp/forms.py"], "/Blog Relevant/blog7-21/myapp/views.py": ["/Blog Relevant/blog7-21/myapp/models.py"], "/Blog Relevant/blog7-17/myapp/views.py": ["/Blog Relevant/blog7-17/myapp/models.py"], "/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/views1.py": ["/\u7ec3\u4e60\u4ee3\u7801/Flask/flask_web/blueprint1/views/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.