index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
4,543
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/personal/__init__.py
|
# Blueprint package for the "personal" (own-profile) pages.
from flask import Blueprint

# Create the blueprint first so the modules imported below can attach
# routes and error handlers to it.
personal = Blueprint('personal',__name__)

# Imported last on purpose: these modules import `personal` from this
# package, so importing them earlier would cause a circular import.
from . import views
from ..main import errors
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,544
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/flasky.py
|
import os
import click
from flask_migrate import Migrate
from app import create_app, db
from app.models import Teacher,Student,Course,Course_Teach_Stu,Admin
from flask_script import Manager,Shell
# Build the application from the FLASK_CONFIG environment variable,
# falling back to the 'default' configuration.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# NOTE(review): flask_script's Manager is deprecated in favour of the
# built-in Flask CLI, which is also configured below — both shell entry
# points coexist here; confirm which one is actually used before removing.
manager = Manager(app)
migrate = Migrate(app, db)

@app.shell_context_processor
def make_shell_context():
    # Expose the db handle and the main models in `flask shell`.
    return dict(db=db,stu=Student,teach=Teacher,admin=Admin,course=Course,stc=Course_Teach_Stu)

@app.cli.command()
@click.argument('test_names', nargs=-1)
def test(test_names):
    """Run the unit tests."""
    import unittest
    # With explicit dotted names, load exactly those tests; otherwise
    # discover everything under the tests/ directory.
    if test_names:
        tests = unittest.TestLoader().loadTestsFromNames(test_names)
    else:
        tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)

# Legacy flask_script shell command (duplicates the
# shell_context_processor registered above).
manager.add_command("shell",Shell(make_context=make_shell_context))

if __name__=='__main__':
    manager.run()
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,545
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/admin/__init__.py
|
# Blueprint package for the admin-management pages.
from flask import Blueprint

# Create the blueprint first so the modules imported below can attach
# routes and error handlers to it.
admin = Blueprint('admin',__name__)

# Imported last on purpose: these modules import `admin` from this
# package, so importing them earlier would cause a circular import.
from . import views
from ..main import errors
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,546
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/models.py
|
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from app import login_manager
from app import db
from config import RolePermission
from sqlalchemy import and_
from app.decorators import permission_required
from config import Permission
class User(UserMixin):
    """Base class for every account type (Student / Teacher / Admin).

    Holds the columns shared by all user tables plus permission and query
    helpers.  ``type_id`` is a two-element list ``[type, id]`` where type is
    0=student, 1=teacher, 2=admin; its string form doubles as the
    flask-login session token (see get_id / query_user).
    """
    type_id = []
    type = -1
    __tablename__ = 'User'
    _id = db.Column(db.Integer, primary_key=True)
    passwd_hash = db.Column(db.String(128),nullable=False)
    name = db.Column(db.String(64),nullable=False)
    id = db.Column(db.Integer,unique=True,nullable=False)
    permission = db.Column(db.Integer,default=0,nullable=False)
    sex = db.Column(db.Boolean)

    @staticmethod
    def query_user(type_id):
        """Resolve a ``[type, id]`` pair (or its string form taken from the
        session cookie) to a concrete user object, or None on any failure."""
        try:
            if isinstance(type_id, str):
                # The session stores str(type_id).  Parse it with
                # literal_eval instead of eval(): the cookie value is
                # untrusted input and must never reach eval().
                import ast
                type_id = ast.literal_eval(type_id)
            if not isinstance(type_id, list) or len(type_id) != 2:
                return None
            result = None
            if int(type_id[0]) == 0:
                result = Student.query.filter_by(id=int(type_id[1])).first()
            if int(type_id[0]) == 1:
                result = Teacher.query.filter_by(id=int(type_id[1])).first()
            if int(type_id[0]) == 2:
                result = Admin.query.filter_by(id=int(type_id[1])).first()
            if result is not None:
                # Stamp the resolved user with its session identity.
                result.type_id = type_id
                result.type = type_id[0]
        except Exception as e:
            print(e)
            return None
        return result

    def get_id(self):
        """flask-login hook: the session token is the stringified type_id."""
        return str(self.type_id)

    @property
    def passwd(self):
        raise AttributeError('password is not a readable attribute')

    @passwd.setter
    def passwd(self, passwd):
        # Enforce a minimum length before hashing.  (The original had an
        # unreachable `return 0` after this raise; removed.)
        if len(passwd) < 6:
            raise Exception('密码修改失败')
        self.passwd_hash = generate_password_hash(passwd)

    def verify_passwd(self, passwd):
        """Check a plaintext password against the stored hash."""
        return check_password_hash(self.passwd_hash, passwd)

    def can(self, permission):
        """True when every bit of `permission` is set in self.permission."""
        return (self.permission & permission) == permission

    def __repr__(self):
        return '<{} : {}>'.format(self.__tablename__, self.name)

    @permission_required(Permission.PERSONAL_INFO)
    def modifyBaseInfo(self, passwd=None):
        """Update the user's own password when one is given.

        Returns db.session.commit()'s result (None on success) — callers
        test the return value against None.
        """
        if passwd:
            self.passwd = passwd
        db.session.add(self)
        return db.session.commit()

    @permission_required(RolePermission.ADMIN)
    def getAllCourse(self):
        """Query over every course (admin only)."""
        return db.session.query(Course)

    @permission_required(RolePermission.ADMIN)
    def getAllStudent(self):
        """Query joining every student with its class row (admin only)."""
        return db.session.query(Student, _class).filter(Student._class == _class._id)

    @permission_required(RolePermission.ADMIN)
    def getAllTeacher(self):
        """Query over every teacher (admin only)."""
        return db.session.query(Teacher)

    @permission_required(RolePermission.ADMIN)
    def getAllClass(self):
        """Query over every class (admin only)."""
        return db.session.query(_class)

    @permission_required(RolePermission.ROOT)
    def getAllAdmin(self):
        """Query over every admin account (root only)."""
        return db.session.query(Admin)

    def getCoursesInfo(self):
        """Full enrollment join yielding
        (Student, Teacher, Course, Course_Teach_Stu, _class) tuples."""
        return db.session.query(Student, Teacher, Course, Course_Teach_Stu, _class).filter(and_(
            Student.id == Course_Teach_Stu.stu,
            Teacher.id == Course_Teach_Stu.teach,
            Course.id == Course_Teach_Stu.course,
            _class._id == Student._class))
class Student(User, db.Model):
    """Student account: adds class membership and course enrollments."""
    __tablename__ = 'student'
    permission = db.Column(db.Integer,default=RolePermission.STUDENT,nullable=False)
    _class = db.Column(db.Integer,db.ForeignKey('_class._id'),default=0,nullable=False)
    courses = db.relationship("Course_Teach_Stu",backref='student')

    @permission_required(RolePermission.STUDENT)
    def modifyBaseInfo(self, passwd=None):
        """Change the student's own password; None return means the
        commit succeeded."""
        if passwd:
            self.passwd = passwd
        db.session.add(self)
        return db.session.commit()

    @permission_required(RolePermission.STUDENT)
    def getCoursesInfo(self):
        """Restrict the full enrollment join to this student's own rows."""
        return super().getCoursesInfo().filter(Student.id == self.id)
class Teacher(User, db.Model):
    """Teacher account: adds the courses taught."""
    __tablename__ = 'teacher'
    permission = db.Column(db.Integer,default=RolePermission.TEACHER,nullable=False)
    courses = db.relationship("Course_Teach_Stu",backref='teacher')

    @permission_required(RolePermission.TEACHER)
    def modifyBaseInfo(self, passwd=None):
        """Change the teacher's own password; None return means the
        commit succeeded."""
        if passwd:
            self.passwd = passwd
        db.session.add(self)
        return db.session.commit()

    @permission_required(RolePermission.TEACHER)
    def getCoursesInfo(self):
        """Restrict the full enrollment join to this teacher's own rows."""
        return super().getCoursesInfo().filter(Teacher.id == self.id)
class Admin(User, db.Model):
    """Administrator account: sees the unrestricted enrollment join."""
    permission = db.Column(db.Integer,default=RolePermission.ADMIN,nullable=False)
    __tablename__ = 'admin'

    @permission_required(RolePermission.ADMIN)
    def modifyBaseInfo(self, passwd=None):
        """Change the admin's own password; None return means the
        commit succeeded."""
        if passwd:
            self.passwd = passwd
        db.session.add(self)
        return db.session.commit()

    @permission_required(RolePermission.ADMIN)
    def getCoursesInfo(self):
        """Admins get the enrollment join without any filtering."""
        return super().getCoursesInfo()
class Course(db.Model):
    # Course catalogue entry.  `id` is the human-facing course code
    # (string), distinct from the surrogate key `_id`.
    __tablename__ = 'course'
    _id = db.Column(db.Integer, primary_key=True)
    id = db.Column(db.String(64),unique=True,nullable=False)
    name = db.Column(db.String(64),nullable=False)
    college = db.Column(db.String(64),nullable=False)
    # Enrollment rows for this course; each row exposes this as `.cour`.
    courses = db.relationship("Course_Teach_Stu",backref='cour')
class _class(db.Model):
    # A class (student group); students reference it via Student._class
    # and can reach it back through the `aclass` backref.
    __tablename__ = '_class'
    _id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64),nullable=False,unique=True)
    students = db.relationship("Student",backref='aclass')
class Course_Teach_Stu(db.Model):
    # Enrollment/score row linking a student, a teacher and a course for
    # a given semester.  `source` holds the score (nullable until graded).
    __tablename__ = 'course_teach_stu'
    _id = db.Column(db.Integer, primary_key=True)
    stu = db.Column(db.Integer,db.ForeignKey('student.id'),nullable=False)
    teach = db.Column(db.Integer,db.ForeignKey('teacher.id'),nullable=False)
    course = db.Column(db.String(64),db.ForeignKey('course.id'),nullable=False)
    source = db.Column(db.Integer,nullable=True)
    semester = db.Column(db.String(64),nullable=False)
@login_manager.user_loader
def load_user(type_id):
    # flask-login callback: `type_id` is the string User.get_id() produced
    # when the session was created.
    return User.query_user(type_id)
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,547
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/personal/forms.py
|
from flask_wtf import FlaskForm
from wtforms import IntegerField,StringField,PasswordField,SubmitField,SelectField,BooleanField,widgets
from wtforms.validators import Required,Length,EqualTo
class StuForm(FlaskForm):
    """Profile form for students: read-only identity fields plus a
    password-change pair."""
    stype = StringField("角色",render_kw={'readonly':'readonly'})
    id = StringField("学号",render_kw={'readonly':'readonly'})
    aclass = StringField("班级",render_kw={'readonly':'readonly'})
    sex = StringField("性别",render_kw={'readonly':'readonly'})
    passwd = PasswordField("Password")
    passwd2 = PasswordField("Confirm Password",validators=[EqualTo('passwd',message='密码不一致')])
    submit = SubmitField("修改信息")

    def __init__(self, stu):
        super().__init__()
        # Pre-fill the read-only display fields from the student record.
        self.stype.data = "学生"
        self.id.data = stu.id
        self.aclass.data = stu.aclass.name
        if stu.sex == 0:
            self.sex.data = '男'
        elif stu.sex:
            self.sex.data = '女'
class TeachForm(FlaskForm):
    """Profile form for teachers: read-only identity fields plus a
    password-change pair."""
    stype = StringField("角色",render_kw={'readonly':'readonly'})
    id = StringField("工号",render_kw={'readonly':'readonly'},)
    sex = StringField("性别",render_kw={'readonly':'readonly'})
    passwd = PasswordField("Password")
    passwd2 = PasswordField("Confirm Password",validators=[EqualTo('passwd',message='密码不一致')])
    submit = SubmitField("修改信息")

    def __init__(self, user):
        super().__init__()
        # Pre-fill the read-only display fields from the teacher record.
        self.stype.data = "老师"
        self.id.data = user.id
        if user.sex == 0:
            self.sex.data = '男'
        elif user.sex:
            self.sex.data = '女'
class AdminForm(FlaskForm):
    """Profile form for administrators: read-only identity fields plus a
    password-change pair."""
    stype = StringField("角色",render_kw={'readonly':'readonly'})
    id = StringField("工号",render_kw={'readonly':'readonly'},)
    sex = StringField("性别",render_kw={'readonly':'readonly'})
    passwd = PasswordField("Password")
    passwd2 = PasswordField("Confirm Password",validators=[EqualTo('passwd',message='密码不一致')])
    submit = SubmitField("修改信息")

    def __init__(self, user):
        super().__init__()
        # Pre-fill the read-only display fields from the admin record.
        self.stype.data = "管理员"
        self.id.data = user.id
        if user.sex == 0:
            self.sex.data = '男'
        elif user.sex:
            self.sex.data = '女'
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,548
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/statistic/views.py
|
from . import statistic
from flask import render_template,flash,redirect,url_for,request
from flask_login import login_user,current_user,login_required,logout_user
from config import Config
from app.decorators import permission_required
from config import Permission,RolePermission
import json
from sqlalchemy import desc,asc
from app.models import Student,Teacher,Course,Course_Teach_Stu,Admin
from app import db
@statistic.route('/student')
@login_required
@permission_required(Permission.STATISTIC_INFO)
def studentStatistic():
    # Per-student grade statistics page; the page fetches its grid
    # configuration from the 'mainStudentData' endpoint.
    return render_template('statistic/index.html',mainUrl='mainStudentData')
@statistic.route('/mainStudentData')
@login_required
@permission_required(Permission.STATISTIC_INFO)
def mainStudentData():
    """JSON grid configuration for the student statistics table."""
    data = {
        'dataUrl': 'studentDataForAdmin',
        'operateUrls': '',
        'dataFieldes': [],
        'dataTitles': [],
        'addFieldes': [],
        'editFieldes': [],
    }
    # Only administrators (type 2) get the column layout.
    if current_user.type == 2:
        data['dataTitles'] = ['学号','姓名','班级','学期','平均分']
        data['dataFieldes'] = ['StudentId','StudentName','ClassName','Semester','GAvg']
    return json.dumps(data)
@statistic.route('/studentDataForAdmin')
@login_required
@permission_required(RolePermission.ADMIN)
def studentDataForAdmin():
    """Paged rows from the stu_semes view for the admin statistics grid.

    `sort` and `sortOrder` come straight from the request; they are now
    validated against whitelists before being interpolated into the SQL
    text — the original concatenated them verbatim (SQL injection).
    ORDER BY identifiers cannot be bound as parameters, hence the
    whitelist rather than a placeholder.
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','StudentId')
    sortOrder = request.args.get('sortOrder','asc')
    if sort not in ('StudentId','StudentName','ClassName','Semester','GAvg'):
        sort = 'StudentId'
    if sortOrder not in ('asc','desc'):
        sortOrder = 'asc'
    selectResult = db.session.execute(
        'select * from stu_semes order by ' + sort + ' ' + sortOrder
        + ' limit ' + str(rows*(page-1)) + ',' + str(rows))
    datas = []
    for item in selectResult:
        temp = {'StudentId':item[0],'StudentName':item[1],'ClassName':item[2],'Semester':item[3],'GAvg':str(item[4])}
        datas.append(temp)
    datas = {'total':next(db.session.execute('select count(*) from stu_semes'))[0],'rows':datas}
    return str(json.dumps(datas))
@statistic.route('/class')
@login_required
@permission_required(Permission.STATISTIC_INFO)
def classStatistic():
    # Per-class grade statistics page; the page fetches its grid
    # configuration from the 'mainClassData' endpoint.
    return render_template('statistic/index.html',mainUrl='mainClassData')
@statistic.route('/mainClassData')
@login_required
@permission_required(Permission.STATISTIC_INFO)
def mainClassData():
    """JSON grid configuration for the class statistics table."""
    data = {
        'dataUrl': 'classDataForAdmin',
        'operateUrls': '',
        'dataFieldes': [],
        'dataTitles': [],
        'addFieldes': [],
        'editFieldes': [],
    }
    # Only administrators (type 2) get the column layout.
    if current_user.type == 2:
        data['dataTitles'] = ['班级ID','班级','学期','课程名','平均分','最高分','最低分','及格人数','及格率(%)']
        data['dataFieldes'] = ['ClassId','ClassName','Semester','CourseName','GAvg','GMax','GMin','PassNumber','PassRate']
    return json.dumps(data)
@statistic.route('/classDataForAdmin')
@login_required
@permission_required(RolePermission.ADMIN)
def classDataForAdmin():
    """Paged rows from the class_semes view for the admin statistics grid.

    `sort` and `sortOrder` come straight from the request; they are now
    validated against whitelists before being interpolated into the SQL
    text — the original concatenated them verbatim (SQL injection).
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','ClassId')
    sortOrder = request.args.get('sortOrder','asc')
    if sort not in ('ClassId','ClassName','Semester','CourseName','GAvg',
                    'GMax','GMin','PassNumber','PassRate'):
        sort = 'ClassId'
    if sortOrder not in ('asc','desc'):
        sortOrder = 'asc'
    selectResult = db.session.execute(
        'select * from class_semes order by ' + sort + ' ' + sortOrder
        + ' limit ' + str(rows*(page-1)) + ',' + str(rows))
    datas = []
    for item in selectResult:
        temp = {'ClassId':item[0],'ClassName':item[1],'Semester':item[2],'CourseName':item[3],'GAvg':str(item[4]),'GMax':str(item[5]),'GMin':str(item[6]),'PassNumber':str(item[7]),'PassRate':str(item[8])}
        datas.append(temp)
    datas = {'total':next(db.session.execute('select count(*) from class_semes'))[0],'rows':datas}
    return str(json.dumps(datas))
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,549
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/personal/views.py
|
from . import personal
from .forms import StuForm,TeachForm,AdminForm
from flask import render_template,flash,redirect,url_for
from flask_login import login_user,current_user,login_required,logout_user
from app.decorators import permission_required
from config import Permission
from app.models import Student,Teacher,Admin
@personal.route('/index',methods=['GET','POST'])
@login_required
@permission_required(Permission.PERSONAL_INFO)
def index():
if current_user.type == 0:
form = StuForm(current_user)
elif current_user.type == 1:
form = TeachForm(current_user)
elif current_user.type == 2:
form = AdminForm(current_user)
if form.validate_on_submit():
result = current_user.modifyBaseInfo(form.passwd.data)
if result == None:
flash("修改成功")
else:
flash("修改失败")
return render_template('personal/index.html',form=form)
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,550
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/source/views.py
|
from . import source
from flask import render_template,flash,redirect,url_for,request
from flask_login import login_user,current_user,login_required,logout_user
from config import Config
from app.decorators import permission_required
from config import Permission,RolePermission
import json
from sqlalchemy import desc,asc,and_
from app.models import Student,Teacher,Course,Course_Teach_Stu,_class
from app import db
@source.route('/index')
@login_required
@permission_required(Permission.SOURCE_INFO)
def index():
    # Scores page; the page fetches its grid configuration from the
    # 'mainData' endpoint.
    return render_template('source/index.html',mainUrl='mainData')
@source.route('/mainData')
@login_required
@permission_required(Permission.SOURCE_INFO)
def mainData():
    """JSON grid configuration for the scores table, varying by role."""
    data = {'dataUrl':'data','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}
    role = current_user.type
    if role == 1:
        # Teachers may only edit the score of an existing row.
        data['operateUrls'] = {'addUrl':'','editUrl':'editSource','delUrl':''}
        data['dataTitles'] = ['Id','姓名','学号','性别','班级','班级ID','课程名','课程ID','开课学期','成绩']
        data['dataFieldes'] = ['Id','StudentName','StudentId','Sex','ClassName','ClassId','CourseName','CourseId','Semester','Source']
        data['editFieldes'] = ['Source']
    if role == 2:
        # Administrators get full add/edit/delete and the teacher columns.
        data['operateUrls'] = {'addUrl':'addSource','editUrl':'editSource','delUrl':'delSource'}
        data['dataTitles'] = ['Id','姓名','学号','性别','班级','班级ID','老师','老师工号','课程名','课程ID','开课学期','成绩']
        data['dataFieldes'] = ['Id','StudentName','StudentId','Sex','ClassName','ClassId','TeacherName','TeacherId','CourseName','CourseId','Semester','Source']
        data['addFieldes'] = ['StudentId','TeacherId','CourseId','Semester','Source']
        data['editFieldes'] = ['StudentId','TeacherId','CourseId','Semester','Source']
    return json.dumps(data)
@source.route('/data')
@login_required
@permission_required(Permission.SOURCE_INFO)
def data():
    """Dispatch the grid-data request according to the caller's role."""
    role = current_user.type
    if role == 1:
        return getDataForTeacher()
    if role == 2:
        return getDataForAdmin()
    # Students and unknown roles get no score-grid data.
    return None
@permission_required(RolePermission.TEACHER)
def getDataForTeacher():
    """Paged score rows restricted to the logged-in teacher's courses.

    Fix: for an unknown `sort` key the original fell back to the raw
    string 'StudentName', which SQLAlchemy renders as a nonexistent SQL
    identifier; fall back to the Student.name column instead.
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','StudentName')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getCoursesInfo()
    # Map the grid's sort keys to real ORM columns.
    targetDict = {'StudentName':Student.name,'StudentId':Student.id,'ClassId':_class._id,'CourseName':Course.name,'CourseId':Course.id,'Source':Course_Teach_Stu.source,'Semester':Course_Teach_Stu.semester,'ClassName':_class.name,'Id':Course_Teach_Stu._id}
    sortColumn = targetDict.get(sort, Student.name)
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(sortColumn))
    else:
        queryResult = queryResult.order_by(desc(sortColumn))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    for item in pagination.items:
        # item is a (Student, Teacher, Course, Course_Teach_Stu, _class) tuple.
        temp = {'StudentName':item[0].name,'StudentId':item[0].id,'ClassId':item[4]._id,'CourseName':item[2].name,'CourseId':item[2].id,'Source':item[3].source,'Semester':item[3].semester,'ClassName':item[4].name,'Id':item[3]._id}
        datas.append(temp)
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
@permission_required(RolePermission.ADMIN)
def getDataForAdmin():
    """Paged score rows over all courses for the admin grid.

    Fix: for an unknown `sort` key the original fell back to the raw
    string 'name', which SQLAlchemy renders as a nonexistent/ambiguous
    SQL identifier; fall back to Course_Teach_Stu._id, matching the
    default sort key 'Id'.
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','Id')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getCoursesInfo()
    # Map the grid's sort keys to real ORM columns.
    targetDict = {'StudentName':Student.name,'StudentId':Student.id,'ClassId':_class._id,'CourseName':Course.name,'CourseId':Course.id,'Source':Course_Teach_Stu.source,'Id':Course_Teach_Stu._id,'TeacherId':Teacher.id,'TeacherName':Teacher.name,'Semester':Course_Teach_Stu.semester,'ClassName':_class.name}
    sortColumn = targetDict.get(sort, Course_Teach_Stu._id)
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(sortColumn))
    else:
        queryResult = queryResult.order_by(desc(sortColumn))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    for item in pagination.items:
        # item is a (Student, Teacher, Course, Course_Teach_Stu, _class) tuple.
        temp = {'StudentName':item[0].name,'StudentId':item[0].id,'ClassId':item[4]._id,'CourseName':item[2].name,'CourseId':item[2].id,'Source':item[3].source,'Id':item[3]._id,'TeacherId':item[1].id,'TeacherName':item[1].name,'Semester':item[3].semester,'ClassName':item[4].name}
        datas.append(temp)
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
@source.route('/editSource',methods=['POST'])
@login_required
@permission_required(RolePermission.TEACHER)
def editSource():
    # Dispatch the score edit by role.  NOTE(review): the decorator checks
    # RolePermission.TEACHER only — presumably the admin permission bits
    # are a superset so admins also pass; confirm against config.RolePermission.
    if(current_user.type==1):
        return editSourceForTeacher()
    if(current_user.type==2):
        return editSourceForAdmin()
@permission_required(RolePermission.TEACHER)
def editSourceForTeacher():
    # Teachers may only change the score (Source) and only on rows they
    # own (teach == current_user.id).  Returns a JSON {code, result} string.
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        course_teach_stu = db.session.query(Course_Teach_Stu).filter(and_(Course_Teach_Stu._id==id,Course_Teach_Stu.teach==current_user.id)).first()
        course_teach_stu.source = request.form.get('Source',course_teach_stu.source)
        db.session.add(course_teach_stu)
        db.session.commit()
    except Exception as e:
        # Row not found (None.source) or DB error: report a generic failure.
        result['code'] = 0
        result['result'] = '修改失败'
        print(e)
    return str(json.dumps(result))
@permission_required(RolePermission.ADMIN)
def editSourceForAdmin():
    # Admins may rewrite every field of an enrollment row.  Each field
    # falls back to its current value when absent from the form.
    # Returns a JSON {code, result} string.
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        course_teach_stu = db.session.query(Course_Teach_Stu).filter(Course_Teach_Stu._id==id).first()
        course_teach_stu.source = request.form.get('Source',course_teach_stu.source)
        course_teach_stu.stu = request.form.get('StudentId',course_teach_stu.stu)
        course_teach_stu.teach = request.form.get('TeacherId',course_teach_stu.teach)
        course_teach_stu.course = request.form.get('CourseId',course_teach_stu.course)
        course_teach_stu.semester = request.form.get('Semester',course_teach_stu.semester)
        db.session.add(course_teach_stu)
        db.session.commit()
    except Exception as e:
        # Row not found or DB error: report a generic failure.
        result['code'] = 0
        result['result'] = '修改失败'
        print(e)
    return str(json.dumps(result))
@source.route('/addSource',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def addSource():
    # Create a new enrollment/score row from the posted form fields.
    # Returns a JSON {code, result} string.
    result={'code':1,'result':'success'}
    try:
        course_teach_stu = Course_Teach_Stu()
        course_teach_stu.stu = request.form.get('StudentId',course_teach_stu.stu)
        course_teach_stu.teach = request.form.get('TeacherId',course_teach_stu.teach)
        course_teach_stu.course = request.form.get('CourseId',course_teach_stu.course)
        course_teach_stu.source = request.form.get('Source',course_teach_stu.source)
        course_teach_stu.semester = request.form.get('Semester',course_teach_stu.semester)
        # An empty score field means "not graded yet" (NULL column).
        if(course_teach_stu.source==''):
            course_teach_stu.source=None
        db.session.add(course_teach_stu)
        db.session.commit()
    except Exception as e:
        # Constraint violation or DB error: report a generic failure.
        result['code'] = 0
        result['result'] = '添加失败'
        print(e)
    return str(json.dumps(result))
@source.route('/delSource',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def delSource():
    # Delete the enrollment row whose primary key was posted as 'Id'.
    # Returns a JSON {code, result} string.
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        course_teach_stu = db.session.query(Course_Teach_Stu).filter(Course_Teach_Stu._id==id).first()
        db.session.delete(course_teach_stu)
        db.session.commit()
    except Exception as e:
        # Row not found (delete(None)) or DB error: report a generic failure.
        result['code'] = 0
        result['result'] = '删除失败'
        print(e)
    return str(json.dumps(result))
def str_to_bool(value):
    """Parse a form string into a bool.

    Case-insensitive: 'true' -> True, 'false' -> False, anything else ->
    None.  (The parameter was renamed from `str`, which shadowed the
    builtin; all in-file callers pass it positionally.)
    """
    lowered = value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return None
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,551
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/teacher/views.py
|
from . import teacher
from flask import render_template,flash,redirect,url_for,request
from flask_login import login_user,current_user,login_required,logout_user
from config import Config
from app.decorators import permission_required
from config import Permission,RolePermission
import json
from sqlalchemy import desc,asc
from app.models import Student,Teacher,Course,Course_Teach_Stu
from app import db
@teacher.route('/index')
@login_required
@permission_required(Permission.TEACHER_INFO)
def index():
    # Teacher-management page; the page fetches its grid configuration
    # from the 'mainData' endpoint.
    return render_template('teacher/index.html',mainUrl='mainData')
@teacher.route('/mainData')
@login_required
@permission_required(Permission.TEACHER_INFO)
def mainData():
    """JSON grid configuration for the teacher-management table."""
    data = {
        'dataUrl': 'data',
        'operateUrls': '',
        'dataFieldes': [],
        'dataTitles': [],
        'addFieldes': [],
        'editFieldes': [],
    }
    # Only administrators (type 2) may manage teachers.
    if current_user.type == 2:
        data['operateUrls'] = {'addUrl':'addTeacher','editUrl':'editTeacher','delUrl':'delTeacher'}
        data['dataTitles'] = ['Id','姓名','工号','性别','密码']
        data['dataFieldes'] = ['Id','TeacherName','TeacherId','Sex','Passwd']
        data['addFieldes'] = ['TeacherName','TeacherId','Sex','Passwd']
        data['editFieldes'] = ['TeacherName','TeacherId','Sex','Passwd']
    return json.dumps(data)
@teacher.route('/data')
@login_required
@permission_required(Permission.TEACHER_INFO)
def data():
    """Serve teacher-grid rows; only admins (type 2) get data."""
    if current_user.type != 2:
        return None
    return getDataForAdmin()
@permission_required(RolePermission.ADMIN)
def getDataForAdmin():
    """Return all teachers as a JSON grid payload for an admin.

    Reads easyui-style paging/sorting query params and responds with a
    JSON string {'total': <count>, 'rows': [...]}.
    """
    # Paging/sorting parameters from the query string.
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','TeacherName')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getAllTeacher()
    # Map grid field names to model columns for order_by.
    targetDict = {'TeacherName':Teacher.name,'TeacherId':Teacher.id,'Sex':Teacher.sex,'Id':Teacher._id}
    # NOTE(review): for an unknown sort key the fallback is the plain
    # string 'TeacherName', handed to order_by as a textual column name —
    # verify the teacher table actually has a column by that name.
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(targetDict.get(sort,'TeacherName')))
    else:
        queryResult = queryResult.order_by(desc(targetDict.get(sort,'TeacherName')))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    for item in pagination.items :
        # The stored password is never sent to the client; 'Passwd' is blanked.
        temp = {'TeacherName':item.name,'TeacherId':item.id,'Sex':item.sex,'Id':item._id,'Passwd':''}
        datas.append(temp)
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
@teacher.route('/editTeacher',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def editTeacher():
    """Update the teacher row whose primary key is posted as 'Id'.

    Missing form fields keep their current values; an empty 'Passwd'
    leaves the password unchanged. Returns a JSON string
    {'code': 1|0, 'result': message}; any exception yields code 0.
    """
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        teacher = db.session.query(Teacher).filter(Teacher._id==id).first()
        teacher.id = request.form.get('TeacherId',teacher.id)
        teacher.name = request.form.get('TeacherName',teacher.name)
        teacher.sex = str_to_bool(request.form.get('Sex',teacher.sex))
        # Only overwrite the password when a non-empty value was posted.
        if(request.form.get('Passwd','')!=''):
            teacher.passwd = request.form.get('Passwd')
        db.session.add(teacher)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '修改失败'
        print(e)
    return str(json.dumps(result))
@teacher.route('/addTeacher',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def addTeacher():
    """Create a new teacher from the posted form fields (admin only).

    Returns a JSON string {'code': 1|0, 'result': message}; any
    exception during insert yields code 0.
    """
    result={'code':1,'result':'success'}
    try:
        # Fresh model instance; unset fields keep the model's defaults.
        teacher = Teacher()
        teacher.id = request.form.get('TeacherId',teacher.id)
        teacher.name = request.form.get('TeacherName',teacher.name)
        teacher.sex = str_to_bool(request.form.get('Sex',teacher.sex))
        # Only set a password when a non-empty value was posted.
        if(request.form.get('Passwd','')!=''):
            teacher.passwd = request.form.get('Passwd')
        db.session.add(teacher)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '添加失败'
        print(e)
    return str(json.dumps(result))
@teacher.route('/delTeacher',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def delTeacher():
    """Delete the teacher whose primary key is posted as 'Id' (admin only).

    Returns a JSON string {'code': 1|0, 'result': message}.
    """
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        teacher = db.session.query(Teacher).filter(Teacher._id==id).first()
        # If no row matches, delete(None) raises and is reported as failure.
        db.session.delete(teacher)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '删除失败'
        print(e)
    return str(json.dumps(result))
def str_to_bool(value):
    """Parse a posted form string into a bool.

    Returns True for 'true', False for 'false' (case-insensitive) and
    None for anything else. The parameter was renamed from ``str`` —
    shadowing the builtin made the old body fragile.
    """
    lowered = value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return None
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,552
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/aclass/views.py
|
from . import aclass
from flask import render_template,flash,redirect,url_for,request
from flask_login import login_user,current_user,login_required,logout_user
from config import Config
from app.decorators import permission_required
from config import Permission,RolePermission
import json
from sqlalchemy import desc,asc
from app.models import Student,Teacher,Course,Course_Teach_Stu,_class
from app import db
@aclass.route('/index')
@login_required
@permission_required(Permission.CLASS_INFO)
def index():
    """Render the class-management page; the grid config comes from mainData."""
    template_name = 'class/index.html'
    return render_template(template_name, mainUrl='mainData')
@aclass.route('/mainData')
@login_required
@permission_required(Permission.CLASS_INFO)
def mainData():
    """Return the class-grid configuration as JSON (populated for admins only)."""
    config = {
        'dataUrl': 'data',
        'operateUrls': '',
        'dataFieldes': [],
        'dataTitles': [],
        'addFieldes': [],
        'editFieldes': [],
    }
    if current_user.type == 2:
        config['operateUrls'] = {
            'addUrl': 'addClass',
            'editUrl': 'editClass',
            'delUrl': 'delClass',
        }
        config['dataTitles'] = ['Id', '班级']
        config['dataFieldes'] = ['Id', 'ClassName']
        config['addFieldes'] = ['ClassName']
        config['editFieldes'] = ['ClassName']
    return json.dumps(config)
@aclass.route('/data')
@login_required
@permission_required(Permission.CLASS_INFO)
def data():
    """Serve class-grid rows; only admins (type 2) get data."""
    if current_user.type != 2:
        return None
    return getDataForAdmin()
@permission_required(RolePermission.ADMIN)
def getDataForAdmin():
    """Return all classes as a JSON grid payload for an admin.

    Reads easyui-style paging/sorting query params and responds with a
    JSON string {'total': <count>, 'rows': [...]}.
    """
    # Paging/sorting parameters from the query string.
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','ClassName')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getAllClass()
    # Map grid field names to model columns for order_by.
    targetDict = {'ClassName':_class.name,'Id':_class._id}
    # NOTE(review): unknown sort keys fall back to the textual name
    # 'ClassName' — verify the class table has a column by that name.
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(targetDict.get(sort,'ClassName')))
    else:
        queryResult = queryResult.order_by(desc(targetDict.get(sort,'ClassName')))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    for item in pagination.items :
        temp = {'ClassName':item.name,'Id':item._id}
        datas.append(temp)
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
@aclass.route('/editClass',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def editClass():
    """Rename the class whose primary key is posted as 'Id' (admin only).

    Returns a JSON string {'code': 1|0, 'result': message}.
    """
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        # Local name shadows the blueprint `aclass` inside this function only.
        aclass = db.session.query(_class).filter(_class._id==id).first()
        aclass.name = request.form.get('ClassName',aclass.name)
        db.session.add(aclass)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '修改失败'
        print(e)
    return str(json.dumps(result))
@aclass.route('/addClass',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def addClass():
    """Create a new class record from the posted 'ClassName' (admin only).

    Returns a JSON string {'code': 1|0, 'result': message}.

    Fix: the previous version contained a copy-paste leftover from the
    teacher views — ``teacher.passwd = request.form.get('Passwd')`` with
    ``teacher`` undefined — which raised NameError whenever a non-empty
    'Passwd' field was posted. Classes have no password, so that branch
    is removed.
    """
    result = {'code': 1, 'result': 'success'}
    try:
        # Fresh model instance; unset fields keep the model's defaults.
        aclass = _class()
        aclass.name = request.form.get('ClassName', aclass.name)
        db.session.add(aclass)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '添加失败'
        print(e)
    return str(json.dumps(result))
@aclass.route('/delClass',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def delClass():
    """Delete the class whose primary key is posted as 'Id' (admin only).

    Returns a JSON string {'code': 1|0, 'result': message}.
    """
    outcome = {'code': 1, 'result': 'success'}
    try:
        class_pk = request.form.get('Id', None)
        target = db.session.query(_class).filter(_class._id == class_pk).first()
        db.session.delete(target)
        db.session.commit()
    except Exception as err:
        outcome['code'] = 0
        outcome['result'] = '删除失败'
        print(err)
    return str(json.dumps(outcome))
def str_to_bool(value):
    """Parse a posted form string into a bool.

    Returns True for 'true', False for 'false' (case-insensitive) and
    None for anything else. The parameter was renamed from ``str`` —
    shadowing the builtin made the old body fragile.
    """
    lowered = value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return None
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,553
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/course/views.py
|
from app.course import course
from flask import render_template,flash,redirect,url_for,request
from flask_login import login_user,current_user,login_required,logout_user
from config import Config
from app.decorators import permission_required
from config import Permission,RolePermission
import json
from sqlalchemy import desc,asc
from app.models import Student,Teacher,Course,Course_Teach_Stu,_class
from app import db
@course.route('/index')
@login_required
@permission_required(Permission.COURSE_INFO)
def index():
    """Render the course-management page; the grid config comes from mainData."""
    template_name = 'course/index.html'
    return render_template(template_name, mainUrl='mainData')
@course.route('/data')
@login_required
@permission_required(Permission.COURSE_INFO)
def data():
    """Dispatch the course-grid data request by role (0=student, 1=teacher, 2=admin)."""
    handlers = {
        0: getDataForStudent,
        1: getDataForTeacher,
        2: getDataForAdmin,
    }
    handler = handlers.get(current_user.type)
    if handler is None:
        return None
    return handler()
@course.route('/mainData')
@login_required
@permission_required(Permission.COURSE_INFO)
def mainData():
    """Return the course-grid configuration for the current user's role."""
    config = {
        'dataUrl': 'data',
        'operateUrls': '',
        'dataFieldes': [],
        'dataTitles': [],
        'addFieldes': [],
        'editFieldes': [],
    }
    role = current_user.type
    if role == 0:
        config['dataTitles'] = ['课程名', '课程号', '开课学院', '学期', '成绩']
        config['dataFieldes'] = ['CourseName', 'CourseId', 'College', 'Semester', 'Source']
    if role == 1:
        config['dataTitles'] = ['课程名', '课程号', '开课学院', '学期', '班级']
        config['dataFieldes'] = ['CourseName', 'CourseId', 'College', 'Semester', 'ClassName']
    if role == 2:
        config['operateUrls'] = {
            'addUrl': 'addCourse',
            'editUrl': 'editCourse',
            'delUrl': 'delCourse',
        }
        config['dataTitles'] = ['Id', '课程名', '课程号', '开课学院']
        config['dataFieldes'] = ['Id', 'CourseName', 'CourseId', 'College']
        editable = ['CourseName', 'CourseId', 'College']
        config['addFieldes'] = list(editable)
        config['editFieldes'] = list(editable)
    return json.dumps(config)
@course.route('/delCourse',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def delCourse():
    """Delete the course whose primary key is posted as 'Id' (admin only).

    Returns a JSON string {'code': 1|0, 'result': message}.
    """
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        # Local name shadows the blueprint `course` inside this function only.
        course = db.session.query(Course).filter(Course._id==id).first()
        db.session.delete(course)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '删除失败'
        print(e)
    return str(json.dumps(result))
@course.route('/editCourse',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def editCourse():
    """Update the course whose primary key is posted as 'Id' (admin only).

    Missing form fields keep their current values. Returns a JSON
    string {'code': 1|0, 'result': message}.
    """
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        course = db.session.query(Course).filter(Course._id==id).first()
        course.id = request.form.get('CourseId',course.id)
        course.name = request.form.get('CourseName',course.name)
        course.college = request.form.get('College',course.college)
        db.session.add(course)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '修改失败'
        print(e)
    return str(json.dumps(result))
@course.route('/addCourse',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def addCourse():
    """Create a new course from the posted form fields (admin only).

    Returns a JSON string {'code': 1|0, 'result': message}.
    """
    result={'code':1,'result':'success'}
    try:
        course = Course()
        # No defaults here: missing form keys set the column to None.
        course.id = request.form.get('CourseId')
        course.name = request.form.get('CourseName')
        course.college = request.form.get('College')
        db.session.add(course)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '添加失败'
        print(e)
    return str(json.dumps(result))
@permission_required(RolePermission.STUDENT)
def getDataForStudent():
    """Return the current student's courses (with grade) as a JSON grid payload.

    Query rows are tuples; indexing below assumes item[2] is the Course
    entity and item[3] the Course_Teach_Stu link — TODO confirm against
    the model's getCoursesInfo() select order.
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','CourseName')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getCoursesInfo()
    targetDict = {'CourseName':Course.name,'CourseId':Course.id,'College':Course.college,'Semester':Course_Teach_Stu.semester,'Source':Course_Teach_Stu.source}
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(targetDict.get(sort,'CourseName')))
    else:
        queryResult = queryResult.order_by(desc(targetDict.get(sort,'CourseName')))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    oldItem = []
    for item in pagination.items :
        # Skip exact consecutive duplicates of the previous row tuple;
        # distinct join rows always pass this check.
        if oldItem != item:
            temp = {'CourseName':item[2].name,'CourseId':item[2].id,'College':item[2].college,'Semester':item[3].semester,'Source':item[3].source}
            datas.append(temp)
        oldItem = item
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
@permission_required(RolePermission.TEACHER)
def getDataForTeacher():
    """Return the current teacher's courses (with class name) as a JSON grid payload.

    Query rows are tuples; indexing below assumes item[2] is the Course
    entity, item[3] the Course_Teach_Stu link and item[4] the class —
    TODO confirm against the model's getCoursesInfo() select order.
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','CourseName')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getCoursesInfo()
    targetDict = {'CourseName':Course.name,'CourseId':Course.id,'College':Course.college,'Semester':Course_Teach_Stu.semester,'ClassName':_class.name}
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(targetDict.get(sort,'CourseName')))
    else:
        queryResult = queryResult.order_by(desc(targetDict.get(sort,'CourseName')))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    oldItem = []
    for item in pagination.items :
        # Collapse consecutive rows only when BOTH the class name and the
        # course name differ from the previous row (note the `and`) —
        # presumably meant to deduplicate per (course, class) pair; verify.
        if oldItem==[] or (oldItem[4].name != item[4].name and oldItem[2].name != item[2].name):
            temp = {'CourseName':item[2].name,'CourseId':item[2].id,'College':item[2].college,'Semester':item[3].semester,'ClassName':item[4].name}
            datas.append(temp)
        oldItem = item
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
@permission_required(RolePermission.ADMIN)
def getDataForAdmin():
    """Return all courses as a JSON grid payload for an admin.

    Responds with a JSON string {'total': <count>, 'rows': [...]}.
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','CourseName')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getAllCourse()
    targetDict = {'CourseName':Course.name,'CourseId':Course.id,'College':Course.college,'Id':Course._id}
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(targetDict.get(sort,'CourseName')))
    else:
        queryResult = queryResult.order_by(desc(targetDict.get(sort,'CourseName')))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    oldItem = []
    for item in pagination.items :
        # NOTE(review): this dedup check compares distinct ORM entities,
        # so it is always true here — harmless leftover from the joined
        # variants above.
        if oldItem != item:
            temp = {'CourseName':item.name,'CourseId':item.id,'College':item.college,'Id':item._id}
            datas.append(temp)
        oldItem = item
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,554
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/student/views.py
|
from . import student
from flask import render_template,flash,redirect,url_for,request
from flask_login import login_user,current_user,login_required,logout_user
from config import Config
from app.decorators import permission_required
from config import Permission,RolePermission
import json
from sqlalchemy import desc,asc
from app.models import Student,Teacher,Course,Course_Teach_Stu,_class
from app import db
@student.route('/index')
@login_required
@permission_required(Permission.STUDENT_INFO)
def index():
    """Render the student-management page; the grid config comes from mainData."""
    template_name = 'student/index.html'
    return render_template(template_name, mainUrl='mainData')
@student.route('/mainData')
@login_required
@permission_required(Permission.STUDENT_INFO)
def mainData():
    """Return the student-grid configuration for the current role as JSON.

    Fix: the previous version called getDataForStudent() for type 0, a
    name that does not exist in this module, raising NameError on every
    student request. Students now receive the empty base configuration,
    consistent with data(), which also has no student branch.
    """
    data = {'dataUrl':'data','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}
    if current_user.type == 1:
        data['dataTitles'] = ['姓名','学号','性别','班级','课程名','学期']
        data['dataFieldes'] = ['StudentName','StudentId','Sex','ClassName','CourseName','Semester']
    if current_user.type == 2:
        data['operateUrls'] = {'addUrl':'addStudent','editUrl':'editStudent','delUrl':'delStudent'}
        data['dataTitles'] = ['Id','姓名','学号','性别','班级','班级ID','密码']
        data['dataFieldes'] = ['Id','StudentName','StudentId','Sex','ClassName','ClassId','Passwd']
        data['addFieldes'] = ['StudentName','StudentId','Sex','ClassId','Passwd']
        data['editFieldes'] = ['StudentName','StudentId','Sex','ClassId','Passwd']
    return json.dumps(data)
@student.route('/data')
@login_required
@permission_required(Permission.STUDENT_INFO)
def data():
    """Dispatch the student-grid data request by role (1=teacher, 2=admin)."""
    handlers = {1: getDataForTeacher, 2: getDataForAdmin}
    handler = handlers.get(current_user.type)
    if handler is None:
        return None
    return handler()
@permission_required(RolePermission.TEACHER)
def getDataForTeacher():
    """Return the students on the current teacher's courses as a JSON grid payload.

    Query rows are tuples; indexing assumes item[0] is the Student,
    item[2] the Course and item[4] the class — TODO confirm against the
    model's getCoursesInfo() select order.
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','StudentName')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getCoursesInfo()
    # NOTE(review): semester is read from Course here, while the course
    # views read it from Course_Teach_Stu — verify Course has that column.
    targetDict = {'StudentName':Student.name,'StudentId':Student.id,'Sex':Student.sex,'ClassName':_class.name,'CourseName':Course.name,'Semester':Course.semester}
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(targetDict.get(sort,'StudentName')))
    else:
        queryResult = queryResult.order_by(desc(targetDict.get(sort,'StudentName')))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    # oldItem is assigned but never used in this variant (leftover from
    # the dedup logic in the course views).
    oldItem = []
    for item in pagination.items :
        temp = {'StudentName':item[0].name,'StudentId':item[0].id,'Sex':item[0].sex,'ClassName':item[4].name,'CourseName':item[2].name,'Semester':item[2].semester}
        datas.append(temp)
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
@permission_required(RolePermission.ADMIN)
def getDataForAdmin():
    """Return all students (joined with their class) as a JSON grid payload.

    Query rows are tuples; indexing assumes item[0] is the Student and
    item[1] its class — TODO confirm against getAllStudent()'s select order.
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    # NOTE(review): the default sort key here is 'name', not a key of
    # targetDict, so the fallback textual column 'name' is used — verify.
    sort = request.args.get('sort','name')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getAllStudent()
    targetDict = {'StudentName':Student.name,'StudentId':Student.id,'Sex':Student.sex,'Id':Student._id,'ClassId':_class._id,'ClassName':_class.name}
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(targetDict.get(sort,'name')))
    else:
        queryResult = queryResult.order_by(desc(targetDict.get(sort,'name')))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    for item in pagination.items :
        # The stored password is never sent to the client; 'Passwd' is blanked.
        temp = {'StudentName':item[0].name,'StudentId':item[0].id,'Sex':item[0].sex,'Id':item[0]._id,'ClassId':item[1]._id,'ClassName':item[1].name,'Passwd':''}
        datas.append(temp)
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
@student.route('/editStudent',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def editStudent():
    """Update the student whose primary key is posted as 'Id' (admin only).

    Missing form fields keep their current values; an empty 'Passwd'
    leaves the password unchanged. Returns a JSON string
    {'code': 1|0, 'result': message}.
    """
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        student = db.session.query(Student).filter(Student._id==id).first()
        student.id = request.form.get('StudentId',student.id)
        student.name = request.form.get('StudentName',student.name)
        student.sex = str_to_bool(request.form.get('Sex',student.sex))
        # NOTE(review): ClassId is stored as the raw form string here,
        # while addStudent casts it to int — confirm the column coerces.
        student._class = request.form.get('ClassId',student._class)
        if(request.form.get('Passwd','')!=''):
            student.passwd = request.form.get('Passwd')
        db.session.add(student)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '修改失败'
        print(e)
    return str(json.dumps(result))
@student.route('/addStudent',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def addStudent():
    """Create a new student from the posted form fields (admin only).

    Returns a JSON string {'code': 1|0, 'result': message}.
    """
    result={'code':1,'result':'success'}
    try:
        # Fresh model instance; unset fields keep the model's defaults.
        student = Student()
        student.id = request.form.get('StudentId',student.id)
        student.name = request.form.get('StudentName',student.name)
        student.sex = str_to_bool(request.form.get('Sex',student.sex))
        # Class and password are optional; only set when non-empty.
        if(request.form.get('ClassId','')!=''):
            student._class = int(request.form.get('ClassId'))
        if(request.form.get('Passwd','')!=''):
            student.passwd = request.form.get('Passwd')
        db.session.add(student)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '添加失败'
        print(e)
    return str(json.dumps(result))
@student.route('/delStudent',methods=['POST'])
@login_required
@permission_required(RolePermission.ADMIN)
def delStudent():
    """Delete the student whose primary key is posted as 'Id' (admin only).

    Returns a JSON string {'code': 1|0, 'result': message}.
    """
    result={'code':1,'result':'success'}
    try:
        id = request.form.get('Id',None)
        student = db.session.query(Student).filter(Student._id==id).first()
        db.session.delete(student)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '删除失败'
        print(e)
    return str(json.dumps(result))
def str_to_bool(value):
    """Parse a posted form string into a bool.

    Returns True for 'true', False for 'false' (case-insensitive) and
    None for anything else. The parameter was renamed from ``str`` —
    shadowing the builtin made the old body fragile.
    """
    lowered = value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return None
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,555
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/student/__init__.py
|
# Blueprint package for the student-management pages.
from flask import Blueprint
student = Blueprint('student',__name__)
# Import after the blueprint exists so views can register their routes on it.
from . import views
from ..main import errors
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,556
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/admin/views.py
|
from . import admin
from flask import render_template,flash,redirect,url_for,request
from flask_login import login_user,current_user,login_required,logout_user
from config import Config
from app.decorators import permission_required
from config import Permission,RolePermission
import json
from sqlalchemy import desc,asc
from app.models import Student,Teacher,Course,Course_Teach_Stu,Admin
from app import db
@admin.route('/index')
@login_required
@permission_required(Permission.ADMIN_INFO)
def index():
    """Render the admin-management page; the grid config comes from mainData."""
    template_name = 'admin/index.html'
    return render_template(template_name, mainUrl='mainData')
@admin.route('/mainData')
@login_required
@permission_required(Permission.ADMIN_INFO)
def mainData():
    """Return the admin-grid configuration as JSON.

    Fix: the previous version called getDataForStudent() /
    getDataForTeacher() for types 0/1 — names that do not exist in this
    module — raising NameError. Those roles now receive the empty base
    configuration, consistent with data(), which only serves type 2.
    """
    data = {'dataUrl':'data','operateUrls':'','dataFieldes':[],'dataTitles':[],'addFieldes':[],'editFieldes':[]}
    if current_user.type == 2:
        data['operateUrls'] = {'addUrl':'addAdmin','editUrl':'editAdmin','delUrl':'delAdmin'}
        data['dataTitles'] = ['Id','姓名','工号','性别','权限','密码']
        data['dataFieldes'] = ['Id','AdminName','AdminId','Sex','Permission','Passwd']
        data['addFieldes'] = ['AdminName','AdminId','Sex','Passwd']
        data['editFieldes'] = ['AdminName','AdminId','Sex','Passwd']
    return json.dumps(data)
@admin.route('/data')
@login_required
@permission_required(Permission.ADMIN_INFO)
def data():
    """Serve admin-grid rows; only admins (type 2) get data."""
    if current_user.type != 2:
        return None
    return getDataForAdmin()
@permission_required(RolePermission.ROOT)
def getDataForAdmin():
    """Return all admin accounts as a JSON grid payload (root permission).

    Responds with a JSON string {'total': <count>, 'rows': [...]}.
    """
    page = request.args.get('page',1,type=int)
    rows = request.args.get('rows',Config.POSTS_PER_PAGE,type=int)
    sort = request.args.get('sort','AdminName')
    sortOrder = request.args.get('sortOrder','asc')
    queryResult = current_user.getAllAdmin()
    # Map grid field names to model columns for order_by.
    targetDict = {'AdminName':Admin.name,'AdminId':Admin.id,'Sex':Admin.sex,'Id':Admin._id,'Permission':Admin.permission}
    if sortOrder=='asc':
        queryResult = queryResult.order_by(asc(targetDict.get(sort,'AdminName')))
    else:
        queryResult = queryResult.order_by(desc(targetDict.get(sort,'AdminName')))
    pagination = queryResult.paginate(page,per_page=rows,error_out=False)
    datas = []
    for item in pagination.items :
        # The stored password is never sent to the client; 'Passwd' is blanked.
        temp = {'AdminName':item.name,'AdminId':item.id,'Sex':item.sex,'Id':item._id,'Passwd':'','Permission':item.permission}
        datas.append(temp)
    datas = {'total':pagination.total,'rows':datas}
    return str(json.dumps(datas))
@admin.route('/editAdmin',methods=['POST'])
@login_required
@permission_required(RolePermission.ROOT)
def editAdmin():
    """Update the admin account whose primary key is posted as 'Id' (root only).

    Empty 'Passwd'/'Permission' values leave those columns unchanged.
    Returns a JSON string {'code': 1|0, 'result': message}.

    Fix: the previous version read the staff number from 'AdminName'
    and the display name from 'Name', while the grid posts 'AdminId'
    and 'AdminName' (see mainData's editFieldes), so edits wrote the
    wrong values.
    """
    result = {'code': 1, 'result': 'success'}
    try:
        id = request.form.get('Id', None)
        admin = db.session.query(Admin).filter(Admin._id == id).first()
        admin.id = request.form.get('AdminId', admin.id)
        admin.name = request.form.get('AdminName', admin.name)
        admin.sex = str_to_bool(request.form.get('Sex', admin.sex))
        # Only overwrite password/permission when non-empty values were posted.
        if(request.form.get('Passwd','') != ''):
            admin.passwd = request.form.get('Passwd')
        if(request.form.get('Permission','') != ''):
            admin.permission = request.form.get('Permission')
        db.session.add(admin)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '修改失败'
        print(e)
    return str(json.dumps(result))
@admin.route('/addAdmin',methods=['POST'])
@login_required
@permission_required(RolePermission.ROOT)
def addAdmin():
    """Create a new admin account from the posted form fields (root only).

    Returns a JSON string {'code': 1|0, 'result': message}.

    Fix: the previous version assigned the posted 'Permission' value to
    ``admin.passwd``, silently overwriting the password instead of
    setting the permission column.
    """
    result = {'code': 1, 'result': 'success'}
    try:
        # Fresh model instance; unset fields keep the model's defaults.
        admin = Admin()
        admin.id = request.form.get('AdminId', admin.id)
        admin.name = request.form.get('AdminName', admin.name)
        admin.sex = str_to_bool(request.form.get('Sex', admin.sex))
        # Password and permission are optional; only set when non-empty.
        if(request.form.get('Passwd','') != ''):
            admin.passwd = request.form.get('Passwd')
        if(request.form.get('Permission','') != ''):
            admin.permission = request.form.get('Permission')
        db.session.add(admin)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '添加失败'
        print(e)
    return str(json.dumps(result))
@admin.route('/delAdmin',methods=['POST'])
@login_required
@permission_required(RolePermission.ROOT)
def delAdmin():
    """Delete the admin account whose primary key is posted as 'Id' (root only).

    Refuses to delete the currently logged-in account. Returns a JSON
    string {'code': 1|0, 'result': message}.

    Fixes: the self-delete guard returned the raw dict instead of the
    JSON string every other handler returns, and compared the posted
    string 'Id' with the (presumably numeric) ``current_user._id`` —
    a comparison that could never match; both sides are now stringified.
    """
    result = {'code': 1, 'result': 'success'}
    try:
        id = request.form.get('Id', None)
        if id is not None and str(id) == str(current_user._id):
            result['code'] = 0
            result['result'] = '不能把自己删了'
            return str(json.dumps(result))
        admin = db.session.query(Admin).filter(Admin._id == id).first()
        db.session.delete(admin)
        db.session.commit()
    except Exception as e:
        result['code'] = 0
        result['result'] = '删除失败'
        print(e)
    return str(json.dumps(result))
def str_to_bool(value):
    """Parse a posted form string into a bool.

    Returns True for 'true', False for 'false' (case-insensitive) and
    None for anything else. The parameter was renamed from ``str`` —
    shadowing the builtin made the old body fragile.
    """
    lowered = value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return None
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,557
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/main/views.py
|
from flask import render_template, session, redirect, url_for, current_app
from flask_login import current_user,login_required
from app import db
from app.main import main
@main.route('/', methods=['GET', 'POST'])
@login_required
def index():
    """Render the home page for any logged-in user."""
    return render_template('index.html')
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,558
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/auth/views.py
|
from flask import render_template,flash,redirect,url_for
from flask_login import login_user,current_user,login_required,logout_user
from . import auth
from app.auth.forms import LoginForm
from app.models import User
@auth.route('/login',methods=['GET','POST'])
def login():
    """Show the login page and authenticate the user on a valid submission.

    GET (or invalid POST) renders the form; a valid POST looks the account
    up by role + id, verifies the password and redirects to the home page,
    flashing an error and re-rendering the form on failure.
    """
    form = LoginForm()
    if form.validate_on_submit():
        # Look up the account by [role, id] and verify the password.
        user = User.query_user([form.type.data,form.id.data])
        if user is not None and user.verify_passwd(form.passwd.data):
            login_user(user,form.remember.data)
            return redirect(url_for("main.index"))
        # Authentication failed: flash and fall through to re-render the form.
        flash("登陆失败",'error')
    return render_template('auth/login.html',form = form)
@auth.route('/logout',methods=['GET','POST'])
@login_required
def logout():
    """End the current session and redirect back to the login page."""
    logout_user()
    # Flash a "logged out" confirmation shown on the next rendered page.
    flash("已退出登陆!")
    return redirect(url_for("auth.login"))
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,559
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/aclass/__init__.py
|
from flask import Blueprint

# Blueprint for the class ("aclass") management section of the app.
aclass = Blueprint('aclass',__name__)

# Imported after the blueprint is created so the view/error-handler modules
# can register themselves on it (standard Flask circular-import pattern).
from . import views
from ..main import errors
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,560
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/auth/forms.py
|
from flask_wtf import FlaskForm
from wtforms import IntegerField,StringField,PasswordField,SubmitField,SelectField,BooleanField
from wtforms.validators import Required,Length
class LoginForm(FlaskForm):
    """Login form: account id, password, role selector and remember-me flag."""
    id = StringField("ID",validators=[Required()])
    passwd = PasswordField("Password",validators=[Required()])
    # Role decides which account table the id is checked against:
    # 0 = student, 1 = teacher, 2 = admin (coerced to int from the select value).
    type = SelectField("角色",choices=[(0,"学生"),(1,'老师'),(2,'管理员')],coerce=int)
    remember = BooleanField("记住登陆")
    submit = SubmitField("Login in")
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,561
|
dalaomai/stuInfoManag
|
refs/heads/master
|
/app/source/__init__.py
|
from flask import Blueprint

# Blueprint for the "source" section of the app.
source = Blueprint('source',__name__)

# Imported after the blueprint is created so the view/error-handler modules
# can register themselves on it (standard Flask circular-import pattern).
from . import views
from ..main import errors
|
{"/app/__init__.py": ["/config.py", "/app/personal/__init__.py", "/app/course/__init__.py", "/app/student/__init__.py", "/app/source/__init__.py", "/app/teacher/__init__.py", "/app/admin/__init__.py", "/app/aclass/__init__.py", "/app/statistic/__init__.py"], "/flasky.py": ["/app/__init__.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/app/statistic/views.py": ["/app/statistic/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/personal/views.py": ["/app/personal/__init__.py", "/app/personal/forms.py", "/config.py", "/app/models.py"], "/app/source/views.py": ["/app/source/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/teacher/views.py": ["/app/teacher/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/aclass/views.py": ["/app/aclass/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/course/views.py": ["/app/course/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/student/views.py": ["/app/student/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/admin/views.py": ["/app/admin/__init__.py", "/config.py", "/app/models.py", "/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"], "/app/auth/views.py": ["/app/auth/forms.py", "/app/models.py"]}
|
4,571
|
TheoLvs/carbonify
|
refs/heads/main
|
/carbonify/basecarbone.py
|
import pandas as pd
import plotly.express as px
from nltk.tokenize import wordpunct_tokenize
class BaseCarbone:
    """Wrapper around the ADEME "Base Carbone" emissions dataset.

    Loads the semicolon-separated CSV export, cleans and tokenizes the
    text columns for searching, and provides visualisation (treemap,
    sunburst, icicle), text/id search, element comparison and simple
    emission calculators on top of the data.
    """
    def __init__(self,path,lang = "français"):
        # lang selects which language-specific columns of the export are used.
        self.lang = lang
        self.data,self._category_cols = self._prepare_data(path,lang)
    @property
    def categories(self):
        # Only the hierarchical category columns (category1..categoryN).
        return self.data[self._category_cols]
    def _prepare_data(self,path,lang):
        """Read the raw CSV and return (cleaned dataframe, category column names)."""
        def clean_text_split(text):
            # Tokenize and keep only tokens longer than 2 characters
            # (drops punctuation, articles, short noise words).
            tokens = wordpunct_tokenize(text)
            tokens = [x for x in tokens if len(x) > 2]
            return tokens
        # Reading and filtering columns in other languages
        data = pd.read_csv(path,sep = ";",encoding = "latin1",low_memory = False)
        data = data[[x for x in data.columns if "espagnol" not in x and "anglais" not in x]]
        # Rename columns to more user friendly col names
        data = data.rename(columns = {
            f"Tags {lang}":"tags",
            "Code de la catégorie":"category",
            f"Nom base {lang}":"name_base",
            f"Nom attribut {lang}":"name_attribute",
            f"Nom frontière {lang}":"name_attribute2",
            "Type de l'élément":"element_type",
            "Statut de l'élément":"element_status",
            "Structure":"structure",
            "Identifiant de l'élément":"element_id",
            "Type Ligne":"row_type",
            f"Unité {lang}":"unit",
            "Localisation géographique":"geography",
            f"Sous-localisation géographique {lang}":"subgeography",
            "Total poste non décomposé":"emissions",
        })
        # Filter archived rows: keep only rows whose status contains "valide"
        data = data.loc[data["element_status"].str.lower().str.contains("valide")]
        data = data.reset_index(drop = True)
        # Clean text fields, concatenate and propertly tokenize for indexation
        data["name_base"] = data["name_base"].str.replace('"',"").str.strip()
        data["tags"] = data["tags"].str.replace('"',"").str.strip()
        data["text"] = data[["name_base","name_attribute","tags","category"]].apply(lambda x : " ".join(x.dropna()),axis = 1).str.lower()
        data["text_split"] = data["text"].map(clean_text_split)
        data["name"] = data[["name_base","name_attribute"]].apply(lambda x : " ".join(x.dropna()),axis = 1)
        data["full_name"] = data[["name_base","name_attribute","name_attribute2"]].apply(lambda x : " ".join(x.dropna()),axis = 1)
        data["emissions_clean"] = data[["unit","emissions"]].apply(lambda x : f"{x['emissions']} ({x['unit']})",axis = 1)
        # Convert emissions to numeric (French decimal comma -> dot);
        # values that still fail to parse are kept as-is.
        def convert_to_num(x):
            try:
                return float(x.replace(",","."))
            except:
                return x
        data["emissions"] = data["emissions"].map(convert_to_num)
        # Add categories to the columns: split the " > "-separated hierarchy
        # into one column per level (category1, category2, ...).
        categories = (data["category"]
            .str.split(" > ")
            .apply(pd.Series)
        )
        category_cols = [f"category{i+1}" for i in range(len(categories.columns))]
        categories.columns = category_cols
        data = pd.concat([data,categories],axis = 1)
        return data,category_cols
    def show_data(self,data = None,kind = "treemap",detailed_path = False,color_by_emissions=True,**kwargs):
        """Build a plotly hierarchy figure (treemap / sunburst / icicle) of the data.

        Keyword Arguments:
            data -- subset dataframe to plot; defaults to the whole dataset.
            kind -- one of "treemap", "sunburst", "icicle".
            detailed_path -- include attribute/geography levels in the hierarchy.
            color_by_emissions -- colour leaves by the emissions value.
        Returns:
            a plotly figure.
        """
        assert kind in ["treemap","sunburst","icicle"]
        # Take all data if not provided
        if data is None:
            data = self.data
        # Remove category cols with all NaNs
        all_nans = data[self._category_cols].isnull().sum() == len(data)
        all_nans = all_nans[all_nans].index.tolist()
        data = data.drop(columns = all_nans)
        category_cols = [x for x in self._category_cols if x not in all_nans]
        # Fill NaN for visualization
        data = data.fillna(" ")
        if detailed_path:
            path = [px.Constant("all")] + category_cols + ["name_base","name_attribute","name_attribute2","geography","subgeography"]
        else:
            path = [px.Constant("all")] + category_cols + ["name_base"]
        params = {
            # "values":"emissions",
            "hover_data":["emissions","unit"],
            "custom_data":["element_id","emissions","unit"],
        }
        if color_by_emissions:
            params.update({
                "color":"emissions",
                "color_continuous_scale":"RdBu_r",
            })
        # Treemap visualization (also called Mondrian)
        if kind == "treemap":
            fig = px.treemap(data,path = path,maxdepth = 6,**params,**kwargs)
            if color_by_emissions:
                fig.update_traces(
                    root_color="lightgrey",
                    hovertemplate="<b>%{label}</b> - Count: %{value}<br>Emissions: %{color:.5f} %{customdata[2]}<br>Id: %{customdata[0]}"
                )
            return fig
        # Sunburst visualization (circular structure chart)
        elif kind == "sunburst":
            fig = px.sunburst(data,path = path[1:],maxdepth = 4,**params,**kwargs)
            return fig
        # icicle visualization (rectangular structure chart)
        elif kind == "icicle":
            fig = px.icicle(data,path = path[1:],maxdepth = 4,**params,**kwargs)
            fig.update_traces(root_color="lightgrey")
            return fig
    def search(self,query,kind = None,without_split = True,color_by_emissions = True,**kwargs):
        """Search the dataset for rows whose tokenized text contains `query`.

        Returns the matching dataframe, plus a figure if `kind` is given.
        """
        # Exact token match against the pre-tokenized text column.
        results = self.data.loc[self.data["text_split"].map(lambda x : query in x)].copy()
        if without_split:
            results = results.query("row_type=='Elément'")
        # If no visualization
        if kind is None:
            return results
        else:
            fig = self.show_data(data = results.copy(),kind = kind,detailed_path = True,color_by_emissions = color_by_emissions,**kwargs)
            fig.update_layout(title=f"Base Carbone results for query='{query}'")
            return results,fig
    def search_word(self,query):
        """Substring search over the concatenated lower-cased text column."""
        return self.data.loc[self.data["text"].str.contains(query)]
    def search_by_id(self,element_id,return_value = False,print_unit = True):
        """Look up a single 'Elément' row by its Base Carbone id.

        Returns the raw emissions value if return_value is True, otherwise
        a dict with full_name, emissions and unit. Asserts exactly one match.
        """
        results = self.data.query(f"element_id=={element_id} and row_type=='Elément'")
        assert len(results) == 1
        results = results.iloc[0]
        name = results["full_name"]
        value = results["emissions"]
        unit = results["unit"]
        if return_value:
            if print_unit: print(results["unit"])
            return results["emissions"]
        else:
            return results[["full_name","emissions","unit"]].to_dict()
    def compare(self,element_id,with_id,raise_unit_error = True,metadata = True):
        """Ratio of emissions between two elements (first / second).

        Raises (or prints a warning, if raise_unit_error is False) when the
        two elements are not expressed in the same unit. With metadata=True
        also returns both element dicts.
        """
        element = self.search_by_id(element_id,return_value = False)
        with_element = self.search_by_id(with_id,return_value = False)
        if element["unit"] != with_element["unit"]:
            message = f"Warning - First element unit is {element['unit']} and second one is {with_element['unit']}"
            if raise_unit_error:
                raise Exception(message)
            else:
                print(message)
        comparison = element["emissions"] / with_element["emissions"]
        if metadata:
            return comparison,element,with_element
        else:
            return comparison
    def evaluate_transportation_by_plane(self,distance,condensation_trails = True,round_trip = False,cargo = False):
        """Estimate plane-travel emissions for a distance in km.

        HYPOTHESIS
        > Short, medium and long haul
        - Short-haul planes have a range of roughly 500 km (e.g. propeller
          planes): typically domestic French connections.
        - Medium-haul planes have a range of about 5000 km (for Air France,
          the flights serving Europe and North Africa). Example: A320.
        - Long-haul airliners can fly distances up to 15,000 km, e.g.
          transoceanic flights. Example: A340.
        Source https://www.bilans-ges.ademe.fr/forum/viewtopic.php?t=4192
        > Trails
        https://www.carbone4.com/trainees-de-condensation-impact-climat
        > Cargo
        We assume big cargos above 100T
        We also suppose cargos are full with 100T load
        """
        # Ids in the Base Carbone for plane transportation
        # (each pair is: with condensation trails, without).
        if not cargo:
            SHORT_IDS = (28130,28129)
            MID_IDS = (28132,28131)
            LONG_IDS = (28134,28133)
        else:
            SHORT_IDS = (28065,28066)
            MID_IDS = (28063,28064)
            LONG_IDS = (28055,28056)
        # Condensation trails filter
        condensation_idx = 0 if condensation_trails else 1
        # Find the right id for short, medium and long trips
        if distance < 500:
            element_id = SHORT_IDS[condensation_idx]
        elif distance < 5000:
            element_id = MID_IDS[condensation_idx]
        else:
            element_id = LONG_IDS[condensation_idx]
        # Prepare emissions ratio (emissions per unit of distance)
        emissions_ratio = self.search_by_id(element_id)["emissions"]
        # Compute final emissions
        emissions = emissions_ratio * distance
        # Add round trip bonus
        if round_trip:
            emissions *= 2
        return emissions
    def evaluate_transportation_by_train(self,distance,tgv = True):
        # Not implemented yet.
        pass
|
{"/carbonify/__init__.py": ["/carbonify/basecarbone.py"], "/index.py": ["/carbonify/__init__.py"]}
|
4,572
|
TheoLvs/carbonify
|
refs/heads/main
|
/carbonify/__init__.py
|
from .basecarbone import BaseCarbone
|
{"/carbonify/__init__.py": ["/carbonify/basecarbone.py"], "/index.py": ["/carbonify/__init__.py"]}
|
4,573
|
TheoLvs/carbonify
|
refs/heads/main
|
/index.py
|
import streamlit as st
# Page Configuration (must be the first Streamlit call in the script)
st.set_page_config(page_title="Carbonify Tool",page_icon="🌎",layout="wide",initial_sidebar_state="expanded")
from carbonify import BaseCarbone
#------------------------------------------------------------------------------------------
# PARAMETERS
#------------------------------------------------------------------------------------------
# Retrieving data from base carbone and caching the result for streamlit reuse
@st.cache(allow_output_mutation=True)
def get_basecarbone():
    """Load (and cache) the Base Carbone dataset wrapper from the local CSV."""
    PATH = "data/raw/base_carbone.csv"
    baca = BaseCarbone(PATH)
    return baca
baca = get_basecarbone()
#------------------------------------------------------------------------------------------
# SIDEBAR
#------------------------------------------------------------------------------------------
st.sidebar.image("docs/logo-blanc-jaune.svg")
st.sidebar.write("## CARBONIFY 🌎")
#------------------------------------------------------------------------------------------
# MAIN PAGE
#------------------------------------------------------------------------------------------
st.write("# Carbonify - Base Carbone")
st.write("## Rechercher une donnée carbone")
st.write("Recherchez une information particulière pour observer la visualisation et facilement trouver votre donnée carbone.\nEssayez avec *train* 🚅 ou *avion* ✈")
st.write("")
# Free-text search: show a treemap of the matching rows plus the raw table.
query = st.text_input("Recherche carbone")
if query != "":
    results,fig = baca.search(query,kind = "treemap",color_by_emissions = True,height = 600)
    st.plotly_chart(fig,use_container_width = True)
    st.write("Retrouvez ces mêmes informations dans un tableau")
    st.write(results)
# Emission comparator: ratio between two Base Carbone elements by id.
st.write("## Comparateur d'émissions")
comp1 = st.text_input("Entrez un ID de la base carbone")
comp2 = st.text_input("Entrez un autre ID de la base carbone à comparer avec le premier")
if comp1 != "" and comp2 != "":
    comp1 = int(comp1)
    comp2 = int(comp2)
    comparison,element1,element2 = baca.compare(comp1,comp2,metadata = True,raise_unit_error = False)
    # Normalise so the message always reads "X emits N times more than Y".
    if comparison < 1:
        comparison = 1/comparison
        element1,element2 = element2,element1
    st.success(f"{element1['full_name']} ({element1['unit']}) émet {comparison:.3f} fois plus que {element2['full_name']} ({element2['unit']}) ")
# Emission calculator: emissions = per-unit ratio (by id) * user factor.
st.write("## Calculateur d'émissions")
ratio_id = st.text_input("Entrez un ID de la base carbone à considérer pour le ratio_id")
factor = st.number_input("Entrez la valeur à multiplier au ratio_id pour obtenir les émissions (par exemple la distance pour des émissions / km")
if ratio_id != "":
    ratio_id = int(ratio_id)
    emissions_ratio = baca.search_by_id(ratio_id)
    emissions = emissions_ratio["emissions"] * factor
    st.success(f"**{emissions:.3f}** kCO2eq émis en utilisant le ratio **{emissions_ratio['full_name']}** *(en {emissions_ratio['unit']}*)")
# Full-dataset exploration treemap.
st.write("## Exploration de la base carbone")
st.write("La [Base Carbone](https://data.ademe.fr/datasets/base-carbone(r)) de l'ADEME contient de nombreuses données carbone catégorisées dans une hiérarchie complexe:")
fig = baca.show_data(kind = "treemap",color_by_emissions = False,height = 800)
st.plotly_chart(fig,use_container_width = True)
|
{"/carbonify/__init__.py": ["/carbonify/basecarbone.py"], "/index.py": ["/carbonify/__init__.py"]}
|
4,574
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/data_preparation/testbatchgeneratortrain.py
|
from __future__ import absolute_import
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.preprocessing import sequence
import sklearn
import argparse
import cPickle
import gzip
import json
from tensorflow.python.lib.io import file_io
import random
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, Activation, BatchNormalization, Conv2D, MaxPooling2D, Dropout, Flatten, MaxPool2D
from keras import optimizers
from batch_generator import BatchGenerator, BatchSequence
from PIL import Image
import tensorflow as tf
def load_data(path):
    """Load the gzip-compressed, pickled label arrays for train and validation.

    Arguments:
        path {string} -- base path (local or GCS; file_io supports both)
            containing a 'data/' folder with the pickle files.

    Returns:
        (y_train, y_validation) -- the unpickled label arrays.
    """
    # Load and decompress training labels
    with file_io.FileIO(path + 'data/y_train.pickle', mode='rb') as fp:
        data = gzip.GzipFile(fileobj=fp)
        y_train = cPickle.load(data)
    # Load and decompress validation labels
    with file_io.FileIO(path + 'data/y_validation.pickle', mode='rb') as fp:
        data = gzip.GzipFile(fileobj=fp)
        y_validation = cPickle.load(data)
    return y_train, y_validation
def preprocessing(dir):
    """Placeholder for an image preprocessing step; currently a no-op.

    NOTE: the parameter shadows the builtin `dir`; kept for interface
    compatibility with existing callers.
    """
    return None
def create_model():
    """Build and compile a small fully-connected multi-label classifier.

    Returns:
        a compiled keras Sequential model (42 relu units -> 6 sigmoid outputs)
        using binary cross-entropy so each output is penalised independently.
    """
    model = Sequential()
    model.add(Dense(42, activation='relu'))
    model.add(Dense(6, activation='sigmoid'))
    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        metrics=['accuracy'],
    )
    model.summary()
    return model
def main(train_file, test_file, job_dir):
    """Smoke-test the BatchGenerator: load labels and pull a single batch.

    Arguments:
        train_file {string} -- base path (local or GCS) holding 'data/' with
            labels and images.
        test_file {string} -- unused here; kept for CLI symmetry.
        job_dir {string} -- unused here; kept for CLI symmetry.
    """
    y_train, y_validation = load_data(train_file)
    print("test1")
    # Fix: images_path_train was referenced but never defined (NameError).
    # Build it from train_file, matching how load_data builds its label paths.
    images_path_train = train_file + 'data/train/'
    training_gen = BatchGenerator(
        input_dir=images_path_train,
        y=y_train,
        batch_size=32,
        shuffle=False,
        img_size=290
    )
    # Pull exactly one batch to verify the generated shapes, then stop.
    for batch_x, batch_y in training_gen:
        print(batch_x.shape)
        print(batch_y.shape)
        break
    print("succes")
    print(y_train.shape,y_validation.shape)
if __name__ == '__main__':
    # NOTE: the string below is a bare statement, not a real docstring.
    """
    The argparser can also be extended to take --n-epochs or --batch-size arguments
    """
    parser = argparse.ArgumentParser()
    # Input Arguments
    parser.add_argument(
        '--train-file',
        help='GCS or local paths to training data',
        required=True
    )
    parser.add_argument(
        '--test-file',
        help='GCS or local paths to test data',
        required=False
    )
    parser.add_argument(
        '--job-dir',
        help='GCS location to write checkpoints and export models',
        required=True
    )
    args = parser.parse_args()
    arguments = args.__dict__
    print('args: {}'.format(arguments))
    main(args.train_file, args.test_file, args.job_dir)
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,575
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/model/trainer/confusion_matrix.py
|
import numpy as np
# Computes the confusion matrix given 'many-hot' encoded predictions and labels.
# Rows in the confusion matrix are in the following order: tp, fp, tn, fn.
def confusion_matrix(y_true, y_pred):
    """Per-label confusion counts for 'many-hot' encoded predictions.

    Arguments:
        y_true -- binary ndarray of shape (n_labels, n_samples).
        y_pred -- binary ndarray with the same shape as y_true.

    Returns:
        int ndarray of shape (n_labels, 4); the four counts per label are,
        in order: tp, fp, tn, fn.
    """
    # Generalized: use the actual number of labels (was hard-coded to 228).
    n_labels = y_true.shape[0]
    # Fix: np.zeros takes the shape as a tuple; np.zeros(228,4) is a TypeError.
    confusion = np.zeros((n_labels, 4), dtype=int)
    for i in range(n_labels):
        # Fix: write row i (the original always overwrote row 0) and use
        # y_pred[i] consistently (two terms used the whole y_pred array).
        confusion[i][0] = sum(y_true[i] & y_pred[i])              # tp
        confusion[i][1] = sum((1 - y_true[i]) & y_pred[i])        # fp
        # Fix: tn is "neither true nor predicted"; the original counted
        # everything that was not a tp.
        confusion[i][2] = sum((1 - y_true[i]) & (1 - y_pred[i]))  # tn
        confusion[i][3] = sum(y_true[i] & (1 - y_pred[i]))        # fn
    return confusion
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,576
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/model/trainer/train.py
|
from __future__ import absolute_import
import numpy as np
import pandas as pd
import tensorflow as tf
import sklearn
import argparse
import cPickle
import gzip
import json
import random
import os
from tensorflow.python.lib.io import file_io
from keras.models import Model
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
from keras.applications import Xception, VGG16, VGG19, ResNet50, InceptionV3
from data_preparation.batchgenerator import BatchGenerator, BatchSequence
from exception_callbacks.callbacks import all_call_backs
def load_data(path):
    """Load the gzip-compressed, pickled label arrays for train and validation.

    Arguments:
        path {string} -- base path (local or GCS; file_io supports both)
            containing a 'data/' folder with the pickle files.

    Returns:
        (y_train, y_validation) -- the unpickled label arrays.
    """
    # Load and decompress training labels
    with file_io.FileIO(path + 'data/y_train.pickle', mode='rb') as fp:
        data = gzip.GzipFile(fileobj=fp)
        y_train = cPickle.load(data)
    # Load and decompress validation labels
    with file_io.FileIO(path + 'data/y_validation.pickle', mode='rb') as fp:
        data = gzip.GzipFile(fileobj=fp)
        y_validation = cPickle.load(data)
    return y_train, y_validation
def preprocessing(dir):
    """Placeholder for an image preprocessing step; currently a no-op.

    NOTE: the parameter shadows the builtin `dir`; kept for interface
    compatibility with existing callers.
    """
    return None
def fine_tune_model(base_model):
    """Attach a 228-label classification head and unfreeze the last layers.

    Arguments:
        base_model -- a keras application model built with include_top=False.

    Returns:
        a compiled keras Model from base_model.input to 228 independent
        per-label probabilities, trained with binary cross-entropy.
    """
    # Adding the last two fully-connected layers
    x = base_model.output
    x = GlobalAveragePooling2D()(x) # global average pooling (flatten)
    x = Dense(1024, activation='relu')(x) # should be rather large with 228 output labels
    # Fix: the original used softmax while its own comment (and the
    # binary_crossentropy loss below) call for independent per-label
    # probabilities — softmax would force the 228 outputs to sum to 1.
    y = Dense(228, activation='sigmoid')(x) # sigmoid for independent probabilities
    model = Model(inputs=base_model.input, outputs=y)
    # Unfreeze last few layers
    for layer in base_model.layers[:-4]:
        layer.trainable = False
    for layer in base_model.layers[-4:]:
        layer.trainable = True
    # Use binary loss instead of categorical loss to penalize each output independently
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model
def get_models():
    """Get all five pretrained models.

    Builds the same five ImageNet backbones as before, in the same order,
    each wrapped by fine_tune_model.
    """
    # Looping over the constructors avoids repeating identical boilerplate;
    # order matters and matches the original: Xception, VGG16, VGG19,
    # ResNet50, InceptionV3.
    backbones = (Xception, VGG16, VGG19, ResNet50, InceptionV3)
    models = []
    for build_backbone in backbones:
        base = build_backbone(weights='imagenet', include_top=False, input_shape=(290,290,3))
        models.append(fine_tune_model(base))
    return models
def main(train_file, test_file, job_dir, n_epochs):
    """Fine-tune every pretrained model on the fashion dataset.

    Arguments:
        train_file {string} -- base path (local or GCS) holding 'data/' with
            labels and images.
        test_file {string} -- unused here; kept for CLI symmetry.
        job_dir {string} -- unused here; kept for CLI symmetry.
        n_epochs -- number of training epochs (argparse passes it as a string).
    """
    y_train, y_validation = load_data(train_file)
    images_path_train = os.path.join(train_file, 'data/train/')
    images_path_validation = os.path.join(train_file, 'data/validation/')
    # Fix: honour the --n-epochs CLI argument instead of hard-coding 30
    # (the parameter was previously accepted but ignored).
    epochs = int(n_epochs)
    callbacks = all_call_backs()
    batch_size = 128
    training_gen = BatchGenerator(
        input_dir=images_path_train,
        y=y_train,
        batch_size=batch_size,
        shuffle=True,
        img_size=290
    )
    validation_gen = BatchSequence(
        input_dir=images_path_validation,
        y=y_validation,
        batch_size=batch_size,
        shuffle=True,
        img_size=290
    )
    # Initialize some pretrained keras model, add more models if want to stack/ensemble them
    models = get_models()
    # Train all models
    for model in models:
        # Need to still define keras.utils.Sequence to use fit_generator
        model.fit_generator(
            generator=training_gen,
            callbacks=callbacks,
            steps_per_epoch=int(len(y_train)/batch_size),
            epochs=epochs,
            validation_data=validation_gen,
            validation_steps=int(len(y_validation)/batch_size)
        )
    print("main success")
if __name__ == '__main__':
    # NOTE: the string below is a bare statement, not a real docstring.
    """
    The argparser can also be extended to take --n-epochs or --batch-size arguments
    """
    parser = argparse.ArgumentParser()
    # Input Arguments
    parser.add_argument(
        '--train-file',
        help='GCS or local paths to training data',
        required=True
    )
    parser.add_argument(
        '--test-file',
        help='GCS or local paths to test data',
        required=True
    )
    parser.add_argument(
        '--job-dir',
        help='GCS location to write checkpoints and export models',
        required=True
    )
    parser.add_argument(
        '--n-epochs',
        help='Number of epochs to train the model for',
        required=True
    )
    args = parser.parse_args()
    arguments = args.__dict__
    print('args: {}'.format(arguments))
    main(args.train_file, args.test_file, args.job_dir, args.n_epochs)
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,577
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/pretrained_network/Pretrained-networks/ResNet50/ResNet50.py
|
from os.path import join
from keras.applications import ResNet50
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
from keras.models import Model, load_model
from keras.utils.np_utils import to_categorical
import pandas as pd
import csv
import os
import numpy as np
import json
from matplotlib import pyplot as plt
import sys
sys.path.append("../../data_preparation/")
from batch_generator import BatchGenerator, BatchSequence
from sklearn.metrics import recall_score, precision_score, f1_score
#datadir = os.getcwd()
input_path = os.path.abspath('../../../mlipdata/')

# Load the raw train/test/validation metadata from the JSON exports.
train={}
test={}
validation={}
with open(os.path.join(input_path, 'train.json')) as json_data:
    train= json.load(json_data)
with open(os.path.join(input_path, 'test.json')) as json_data:
    test= json.load(json_data)
with open(os.path.join(input_path, 'validation.json')) as json_data:
    validation = json.load(json_data)
print('Train No. of images: %d'%(len(train['images'])))
print('Test No. of images: %d'%(len(test['images'])))
print('Validation No. of images: %d'%(len(validation['images'])))

# JSON TO PANDAS DATAFRAME
# train data: join image urls with their annotations on imageId
train_img_url=train['images']
train_img_url=pd.DataFrame(train_img_url)
train_ann=train['annotations']
train_ann=pd.DataFrame(train_ann)
train=pd.merge(train_img_url, train_ann, on='imageId', how='inner')
# test data
test=pd.DataFrame(test['images'])
# Validation Data
val_img_url=validation['images']
val_img_url=pd.DataFrame(val_img_url)
val_ann=validation['annotations']
val_ann=pd.DataFrame(val_ann)
validation=pd.merge(val_img_url, val_ann, on='imageId', how='inner')

datas = {'Train': train, 'Test': test, 'Validation': validation}
for data in datas.values():
    data['imageId'] = data['imageId'].astype(np.uint32)

images_path_train = os.path.abspath('../../../mlipdata/files/train/')

from sklearn.preprocessing import MultiLabelBinarizer
mlb = MultiLabelBinarizer()
# loading labels
y_train = np.array(train.labelId)
y_validation = np.array(validation.labelId)
y_train1000 = mlb.fit_transform(y_train)[:1000]
# Fix: use transform (not fit_transform) so the validation labels share the
# column mapping learned from the training labels; re-fitting the binarizer
# would produce an encoding inconsistent with the training one.
y_validation500 = mlb.transform(y_validation)[:500]

# load the generator
training_gen = BatchGenerator(input_dir=images_path_train, y=y_train1000, batch_size=64)

base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(290,290,3))
# Adding the last two fully-connected layers
x = base_model.output
x = GlobalAveragePooling2D()(x) # global average pooling (flatten)
x = Dense(1024, activation='relu')(x) # should be rather large with 228 output labels
#x = Dropout(0.5)(x)
# Fix: sigmoid (not softmax) as stated by the original comment — the
# binary_crossentropy loss below needs independent per-label probabilities.
y = Dense(228, activation='sigmoid')(x) # sigmoid instead of softmax to have independent probabilities
model = Model(inputs=base_model.input, outputs=y)
# Train only the top layer
for layer in base_model.layers:
    layer.trainable = False
# Use binary loss instead of categorical loss to penalize each output independently
model.compile(optimizer='adam', loss='binary_crossentropy')
# 1000 steps = 640000 random images per epoch
model.fit_generator(training_gen, steps_per_epoch=100, epochs=10)
model.save('./ResNet50.h5')
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,578
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/data_preparation/batchgeneratorv2.py
|
"""
Module to generate batches for both the training and validation set.
For training, use the BatchGenerator.
For validation, use the BatchSequence.
"""
from os import listdir
from os.path import join
from math import ceil, floor
import numpy as np
import sklearn
from keras.utils import Sequence
from keras.preprocessing import image
import tensorflow as tf
#from PIL import Image
#from google.appengine.api import images
# standard input of exception
DESIRED_IMAGE_SIZE = 290
def image_to_ndarray(path, session, desired_size=DESIRED_IMAGE_SIZE):
    """
    Load a .jpg image.
    Arguments:
        path {string} -- file location.
        session {tf.Session} -- session used to evaluate the decode/resize tensors.
    Keyword Arguments:
        desired_size {int} -- the returned image needs to be a square, this denotes the number of pixels on each side. (default: {DESIRED_IMAGE_SIZE})
    Returns:
        ndarray -- image in numpy array.
    """
    #session = tf.Session()
    # Read the raw file bytes and decode them into an image tensor.
    file = tf.read_file(path)
    img = tf.image.decode_image(file)
    # Delegate resizing/padding to the shared formatting helper.
    return get_right_format(img, session, desired_size=desired_size)
def get_right_format(img, session, desired_size=DESIRED_IMAGE_SIZE, color=(255, 255, 255)):
    """
    Getting the image in the correct format.
    This is done by either downsampling pictures that are too big, or by padding images that are too small.
    Arguments:
        img {tensor} -- a decoded image tensor.
        session {tf.Session} -- session used to evaluate the tensors.
    Keyword Arguments:
        desired_size {int} -- the returned image needs to be a square, this denotes the number of pixels on each side. (default: {DESIRED_IMAGE_SIZE})
        color {(int, int, int)} -- the desired RGB values for padding.
            NOTE(review): currently unused — tf.image.resize_image_with_crop_or_pad
            does the padding below, so this value is ignored.
    Returns:
        [obj] -- image in the desired size.
    """
    # Fix: the parameter is named `img`; the original evaluated the undefined
    # name `tfimg`, which raised a NameError on every call.
    imgrun = session.run(img)
    # Spatial dimensions only (drop the trailing channel axis).
    old_size = imgrun.shape[0:-1]
    # Scale so the longest side equals desired_size, keeping the aspect ratio.
    ratio = float(desired_size)/max(old_size)
    new_size = tuple([int(x*ratio) for x in old_size])
    im = tf.image.resize_images(imgrun, new_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    # Pad (or crop) to a desired_size x desired_size square.
    right_format = tf.image.resize_image_with_crop_or_pad(im, desired_size, desired_size)
    x = session.run(right_format)
    return x
class BatchGenerator(object):
    """
    This class generates batches that can be provided to a neural network.
    It can be used for training only. For validation use the BatchSequence class.
    """
    def __init__(self, input_dir, y, batch_size, session, shuffle=True, random=False, img_size=DESIRED_IMAGE_SIZE, augmentation_fn=None):
        """
        Constructor of the BatchGenerator.
        Arguments:
            input_dir {string} -- directory in which the images are stored.
            y {[rows=indices, cols=labels]} -- labels corresponding to the images in input_dir, in multilabel notation.
            batch_size {int} -- expected size of the generated batches.
            session {tf.Session} -- session used to evaluate the image-loading ops.
        Keyword Arguments:
            shuffle {boolean} -- if the dataset should be shuffled (default: {True})
            random {boolean} -- if the batches should pick random images from the dataset, or in a fixed order (default: {False})
            img_size {int} -- the returned image needs to be a square, this denotes the number of pixels on each side. (default: {DESIRED_IMAGE_SIZE})
            augmentation_fn {function} -- augmentor function for the data (default: {None})
                NOTE(review): stored but never applied anywhere in this class.
        """
        self.input_dir = input_dir
        self.session = session #tf.Session()
        self.random = random
        self.desired_size = img_size
        self.batch_size = batch_size # number of patches per batch
        self.augmentation_fn = augmentation_fn # augmentation function
        self.idx = 0 # to know what part of the data set to return in next()
        # Images on disk are assumed to be named 1.jpg, 2.jpg, ... matching the
        # rows of y -- TODO confirm against the data-preparation step.
        data = ['{}.jpg'.format(i+1) for i in range(y.shape[0])]
        labels = y
        if shuffle:
            data, labels = sklearn.utils.shuffle(data, labels)
        self.x = data
        self.y = labels
    def __iter__(self):
        """
        Make the object iterable.
        Returns:
            self.
        """
        return self
    def __next__(self):
        """
        Next iteration (Python 3 iterator protocol); delegates to next().
        Returns:
            (ndarray, ndarray) -- the next mini-batch.
        """
        return self.next()
    def __len__(self):
        """
        Denotes the number of batches per epoch.
        Returns:
            int -- the number of batches possible such that every sample of the class with the least samples is seen once.
        """
        return int(np.ceil(len(self.x) / float(self.batch_size)))
    def next(self):
        """
        Build a mini-batch.
        NOTE(review): this generator never raises StopIteration -- when the end
        of the data is reached it reshuffles and restarts. Iterating it with a
        plain `for` loop therefore never terminates; bound iteration by len(self).
        Returns:
            (ndarray, ndarray) -- a batch with training samples and a batch with the corresponding labels.
        """
        if self.random:
            # pick random values from the training set
            idxs = np.random.randint(0, len(self.x), self.batch_size)
        else:
            # check if end is reached
            if self.idx * self.batch_size >= len(self.x):
                # wrap around: reshuffle and restart from the beginning
                self.x, self.y = sklearn.utils.shuffle(self.x, self.y)
                self.idx = 0
            # create indices
            idx_min = self.idx * self.batch_size
            # make sure to never go out of bounds
            idx_max = np.min([idx_min + self.batch_size, len(self.x)])
            idxs = np.arange(idx_min, idx_max)
            self.idx += 1
        batch_x = [self.x[i] for i in idxs]
        batch_y = [self.y[i] for i in idxs]
        return np.array([
            image_to_ndarray(join(self.input_dir, x), self.session, desired_size=self.desired_size) for x in batch_x]), np.array(batch_y)
class BatchSequence(Sequence):
    """
    This class generates batches that can be provided to a neural network.
    It can be used for validation only. For training use the BatchGenerator class.
    Arguments:
        Sequence {class} -- a sequence never repeats items.
    """
    def __init__(self, input_dir, y, batch_size, desired_size=DESIRED_IMAGE_SIZE, session=None):
        """
        Constructor of the BatchSequence.
        Arguments:
            input_dir {string} -- directory in which the images are stored.
            y {[rows=indices, cols=labels]} -- labels corresponding to the images in input_dir, in multilabel notation.
            batch_size {int} -- expected size of the generated batches.
        Keyword arguments:
            desired_size {int} -- the returned image needs to be a square, this denotes the number of pixels on each side. (default: {DESIRED_IMAGE_SIZE})
            session {tf.Session} -- TensorFlow session used to evaluate the image
                ops in __getitem__. (default: {None})
        """
        self.input_dir = input_dir
        self.desired_size = desired_size
        # BUG FIX: __getitem__ reads self.session, but it was never assigned, so
        # every item access raised AttributeError. Accept the session as an
        # optional, backward-compatible keyword argument and store it.
        self.session = session
        # Images on disk are named 1.jpg, 2.jpg, ... matching the rows of y.
        self.x = ['{}.jpg'.format(i+1) for i in range(y.shape[0])]
        self.y = y
        self.batch_size = batch_size # number of patches per batch
    def __len__(self):
        """
        Denotes the number of batches per epoch.
        Returns:
            int -- the number of batches needed to see every sample exactly once.
        """
        return int(np.ceil(len(self.x) / float(self.batch_size)))
    def __getitem__(self, idx):
        """
        Get the next batch from the validation set. Since it is a sequence, it will never give records twice.
        Arguments:
            idx {int} -- offset
        Returns:
            (ndarray, ndarray) -- a batch with validation samples and a batch with the corresponding labels.
        """
        # create indices
        idx_min = idx * self.batch_size
        # make sure to never go out of bounds
        idx_max = np.min([idx_min + self.batch_size, len(self.x)])
        idxs = np.arange(idx_min, idx_max)
        batch_x = [self.x[i] for i in idxs]
        batch_y = [self.y[i] for i in idxs]
        return np.array([
            image_to_ndarray(join(self.input_dir, x), self.session, desired_size=self.desired_size)
            for x in batch_x]), np.array(batch_y)
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,579
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/pretrained_network/Pretrained-networks/vgg16/vgg16.py
|
# -*- coding: utf-8 -*-
"""Train a VGG16-based multi-label classifier for the iMaterialist fashion data.

Loads the training annotations, binarizes the labels and fine-tunes a new
classification head on top of a frozen ImageNet-pretrained VGG16 base.
"""
import os
import sys
sys.path.append("../../data_preparation/")
import json
import pickle
import numpy as np
import pandas as pd
from keras.applications import VGG16
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
from keras.models import Model, load_model
from batch_generator import BatchGenerator, BatchSequence
# Set the paths
input_path = os.path.abspath('../../../mlipdata/')
images_path_train = os.path.join(input_path, 'files/train/')
# Load the multilabel binarizer (fitted elsewhere; see ../binarizer.pickle)
with open('../binarizer.pickle', 'rb') as pickle_file:
    binarizer = pickle.load(pickle_file)
# Load training data from file
train = {}
with open(os.path.join(input_path, 'train.json')) as json_data:
    train = json.load(json_data)
train_img_url = train['images']
train_img_url = pd.DataFrame(train_img_url)
train_ann = train['annotations']
train_ann = pd.DataFrame(train_ann)
# Join image URLs with their annotations on the image id.
train = pd.merge(train_img_url, train_ann, on='imageId', how='inner')
train['imageId'] = train['imageId'].astype(np.uint32)
y_train = np.array(train.labelId)
y_train_bin = binarizer.transform(y_train)
# Load the generator
training_gen = BatchGenerator(input_dir=images_path_train, y=y_train_bin, batch_size=64)
# Init pre-trained network
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(290, 290, 3))
# Adding the last two fully-connected layers
x = base_model.output
x = GlobalAveragePooling2D()(x)  # global average pooling (flatten)
x = Dense(1024, activation='relu')(x)  # should be rather large with 228 output labels
# BUG FIX: the comment promised sigmoid (independent per-label probabilities,
# required for multi-label classification with binary cross-entropy), but the
# code used softmax. Use sigmoid, matching create_model() in train.py.
y = Dense(228, activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=y)
# Train only the top layer
for layer in base_model.layers:
    layer.trainable = False
# Use binary loss instead of categorical loss to penalize each output independently
model.compile(optimizer='adam', loss='binary_crossentropy')
# ~47 batches of 64 random images per epoch
model.fit_generator(training_gen, steps_per_epoch=int(3000/64), epochs=10)
model.save('./vgg16_cloud_model.h5')
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,580
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/model/trainer/exception.py
|
from __future__ import absolute_import
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from sklearn.model_selection import train_test_split
from keras.preprocessing import sequence
import sklearn
import argparse
import cPickle
import gzip
import json
from tensorflow.python.lib.io import file_io
def load_data(path):
    """Load the gzip-compressed, pickled label arrays for training and validation.

    Arguments:
        path -- base path (local or GCS) containing data/y_train.pickle and
            data/y_validation.pickle.
    Returns:
        (y_train, y_validation) -- the two unpickled label arrays.
    """
    with file_io.FileIO(path + 'data/y_train.pickle', mode='rb') as handle:
        y_train = cPickle.load(gzip.GzipFile(fileobj=handle))
    with file_io.FileIO(path + 'data/y_validation.pickle', mode='rb') as handle:
        y_validation = cPickle.load(gzip.GzipFile(fileobj=handle))
    return y_train, y_validation
def preprocessing():
    """Placeholder for a future preprocessing step; currently a no-op returning None."""
    return None
def create_model():
    """Build a small dense multi-label classifier (42 hidden units, 6 sigmoid outputs).

    Returns:
        A compiled Keras Sequential model using binary cross-entropy so that each
        of the 6 outputs is penalized independently.
    """
    model = Sequential()
    # NOTE(review): the first Dense layer has no input_dim/input_shape, so the
    # model is not built at this point and model.summary() may raise depending on
    # the Keras version -- confirm against the version pinned for this project.
    model.add(Dense(42, activation='relu'))
    model.add((Dense(6, activation='sigmoid')))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
def main(train_file, test_file, job_dir):
    """Load the labels, build the model, and export it to the job directory.

    Arguments:
        train_file -- base path (local or GCS) of the training data.
        test_file -- unused here; kept for interface compatibility with the CLI.
        job_dir -- GCS/local directory the saved model is copied into.
    """
    y_train, y_validation = load_data(train_file)
    # BUG FIX: `model` was used without ever being created; build it first.
    model = create_model()
    # Save model weights locally first ...
    model.save('model.h5')
    # ... then copy to the job dir (e.g. Google Cloud Storage).
    # BUG FIX: .h5 files are binary -- copy in binary mode, not text mode.
    with file_io.FileIO('model.h5', mode='rb') as input_f:
        with file_io.FileIO(job_dir + '/model.h5', mode='wb+') as output_f:
            output_f.write(input_f.read())
    print(y_train.shape, y_validation.shape)
if __name__ == '__main__':
    # The argparser can also be extended to take --n-epochs or --batch-size
    # arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-file', required=True,
                        help='GCS or local paths to training data')
    parser.add_argument('--test-file', required=False,
                        help='GCS or local paths to test data')
    parser.add_argument('--job-dir', required=True,
                        help='GCS location to write checkpoints and export models')
    args = parser.parse_args()
    arguments = args.__dict__
    print('args: {}'.format(arguments))
    main(args.train_file, args.test_file, args.job_dir)
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,581
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/exception_callbacks/callbacks.py
|
import numpy as np
import keras
# these have to be defined in a notebook
# class Metrics(Callback):
# def on_train_begin(self, logs={}):
# self.mean_f1s = []
# self.recalls = []
# self.precisions = []
# def on_epoch_end(self, epoch, logs={}):
# y_pred = (np.asarray(self.model.predict(self.validation_data[0]))).round()
# y_true = self.validation_data[1]
# mean_f1 = f1_score(y_true, y_pred, average='macro')
# recall = recall_score(y_true, y_pred, average='macro')
# precision = precision_score(y_true, y_pred, average='macro')
# self.mean_f1s.append(mean_f1)
# self.recalls.append(recall)
# self.precisions.append(precision)
# print('mean_F1: {} — precision: {} — recall: {}'.format(mean_f1, precision, recall))
# class PlotLosses(keras.callbacks.Callback):
# def on_train_begin(self, logs={}):
# self.i = 0
# self.x = []
# self.losses = []
# self.val_losses = []
# self.fig = plt.figure()
# self.logs = []
# def on_epoch_end(self, epoch, logs={}):
# self.logs.append(logs)
# self.x.append(self.i)
# self.losses.append(logs.get('loss'))
# self.val_losses.append(logs.get('val_loss'))
# self.i += 1
# clear_output(wait=True)
# plt.plot(self.x, self.losses, label="loss")
# plt.plot(self.x, self.val_losses, label="val_loss")
# plt.grid()
# plt.legend()
# plt.show()
def all_call_backs():
    """Build the standard Keras callback list for training.

    Returns:
        list -- [ReduceLROnPlateau, EarlyStopping, ModelCheckpoint], all keyed
        on validation loss.
    """
    # Shrink the learning rate when validation loss plateaus.
    reduce_lr = keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.15,
        patience=3,
        min_lr=0.0001
    )
    # Stop training after 8 epochs without validation-loss improvement.
    early_stop = keras.callbacks.EarlyStopping(
        monitor='val_loss',
        min_delta=0,
        patience=8,
        verbose=0,
        mode='auto'
    )
    # Keep only the best model seen so far on disk.
    checkpoint = keras.callbacks.ModelCheckpoint(
        filepath='/model-checkpoints/',
        monitor='val_loss',
        verbose=0,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1
    )
    # BUG FIX: the original ended with `return callbacks_list[a, b, c]`, which
    # indexes an *empty* list with a tuple and raises TypeError at runtime.
    # Return the callbacks directly.
    return [reduce_lr, early_stop, checkpoint]
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,582
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/data_preparation/multilabel_functions.py
|
'''
Module to import multilabels. It assumes that you have run the corresponding Notebook once.
Notebook: ./MultiLabelProcessor.ipynb
'''
def get_multilabels_train(filename):
    """Load the pre-computed multilabel matrix for the training set from a .npy file."""
    # BUG FIX: numpy is never imported in this module, so np.load raised
    # NameError. Import locally to keep the module's top level unchanged.
    import numpy as np
    return np.load(filename)
def get_multilabels_validation(filename):
    """Load the pre-computed multilabel matrix for the validation set from a .npy file."""
    # BUG FIX: numpy is never imported in this module, so np.load raised
    # NameError. Import locally to keep the module's top level unchanged.
    import numpy as np
    return np.load(filename)
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,583
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/pretrained_network/model/trainer/train.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
sys.path.append('../')
import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
from keras.applications import VGG16
import sklearn
import argparse
import cPickle
import gzip
import json
import logging
import tensorflow as tf
from tensorflow.python.lib.io import file_io
try:
from batch_generator import BatchGenerator, BatchSequence
except:
from .batch_generator import BatchGenerator, BatchSequence
def load_data(path):
    """Load the gzip-compressed, pickled label arrays for training and validation.

    Arguments:
        path -- base path (local or GCS) containing data/y_train.pickle and
            data/y_validation.pickle.
    Returns:
        (y_train, y_validation) -- the two unpickled label arrays.
    """
    with file_io.FileIO(path + 'data/y_train.pickle', mode='rb') as handle:
        y_train = cPickle.load(gzip.GzipFile(fileobj=handle))
    with file_io.FileIO(path + 'data/y_validation.pickle', mode='rb') as handle:
        y_validation = cPickle.load(gzip.GzipFile(fileobj=handle))
    return y_train, y_validation
def preprocessing():
    """Placeholder for a future preprocessing step; currently a no-op returning None."""
    return None
def create_model():
    """Build a VGG16-based multi-label classifier for 228 labels.

    Uses a frozen ImageNet-pretrained VGG16 base with a new trainable head
    (global average pooling -> Dense 1024 relu -> Dense 228 sigmoid), compiled
    with binary cross-entropy so each label probability is independent.

    Returns:
        A compiled Keras Model.
    """
    backbone = VGG16(weights='imagenet', include_top=False, input_shape=(290, 290, 3))
    # Freeze the convolutional base; only the new head is trained.
    for layer in backbone.layers:
        layer.trainable = False
    head = GlobalAveragePooling2D()(backbone.output)
    head = Dense(1024, activation='relu')(head)
    predictions = Dense(228, activation='sigmoid')(head)
    model = Model(inputs=backbone.input, outputs=predictions)
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model
def main(train_file, test_file, job_dir, session):
    """Train the VGG16 multi-label model and save it to the job directory.

    Arguments:
        train_file -- base path (local or GCS) of the label pickles.
        test_file -- unused here; kept for interface compatibility with the CLI.
        job_dir -- base path for image data and model output.
        session -- tf.Session used by the batch generators.
    """
    y_train, y_validation = load_data(train_file)
    # Drop the first label column (presumably an id/offset column -- TODO confirm
    # against the data-preparation step).
    y_train = np.array([row[1:] for row in y_train])
    y_validation = np.array([row[1:] for row in y_validation])
    epochs = 10
    batch_size = 64
    training_gen = BatchGenerator(input_dir=job_dir+'data/train',
                                  y=y_train,
                                  epochs=epochs,
                                  batch_size=batch_size,
                                  session=session)
    validation_gen = BatchSequence(input_dir=job_dir+'data/validation',
                                   y=y_validation,
                                   batch_size=batch_size,
                                   session=session)
    model = create_model()
    # BUG FIX: `for batch_x, batch_y in training_gen` never terminates, because
    # the generator reshuffles and restarts instead of raising StopIteration.
    # Bound each epoch to one pass over the training data instead.
    batches_per_epoch = int(np.ceil(len(y_train) / float(batch_size)))
    for _ in range(epochs):
        for _ in range(batches_per_epoch):
            batch_x, batch_y = next(training_gen)
            model.fit(batch_x, batch_y)
    model.save(job_dir + 'models/vgg16.h5')
if __name__ == '__main__':
    # The argparser can also be extended to take --n-epochs or --batch-size
    # arguments.
    parser = argparse.ArgumentParser()
    LOGGER = logging.getLogger('trainer')
    LOGGER.info('TESTING LOGGER ITSELF')
    # Input Arguments
    parser.add_argument(
        '--train-file',
        help='GCS or local paths to training data',
        required=True
    )
    parser.add_argument(
        '--test-file',
        help='GCS or local paths to test data',
        required=False
    )
    parser.add_argument(
        '--job-dir',
        help='GCS location to write checkpoints and export models',
        required=True
    )
    args = parser.parse_args()
    arguments = args.__dict__
    print('args: {}'.format(arguments))
    # BUG FIX: the original wrapped the call as `session.run(main(...))`, but
    # main() returns None and tf.Session.run(None) raises TypeError ("fetch
    # argument None"). main() already uses the session internally, so just call
    # it inside the session context.
    with tf.Session() as session:
        main(args.train_file, args.test_file, args.job_dir, session)
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,584
|
simgeekiz/FashionChallenge
|
refs/heads/master
|
/pretrained_network/model/setup.py
|
from setuptools import find_packages
from setuptools import setup
# Runtime dependencies installed on the Cloud ML Engine training workers.
# NOTE(review): 'sklearn' is the deprecated PyPI alias for scikit-learn.
REQUIRED_PACKAGES = ['sklearn', 'numpy>=1.13.3', 'pandas', 'keras', 'tensorflow', 'h5py', 'pillow', 'google-gax<=0.13.0']
# Package definition consumed by `gcloud ml-engine jobs submit training`.
setup(
    name='trainer',
    version='0.1',
    install_requires=REQUIRED_PACKAGES,
    packages=find_packages(),
    include_package_data=True,
    description='iMaterialist Challenge (Fashion) model on Cloud ML Engine'
)
|
{"/model/trainer/train.py": ["/exception_callbacks/callbacks.py"]}
|
4,610
|
mfs6174/Twitdao11
|
refs/heads/master
|
/image_proxy.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from base import BaseHandler
from django.utils import simplejson as json
from datetime import datetime
import md
import logging
import urllib
_cached_headers=['last-modified', 'etag', 'cache-control', 'expires', 'content-type']
class ImageProxy(BaseHandler):
    # Base handler that fetches a remote image, serves it to the client, and
    # memcaches its HTTP caching headers so conditional requests (If-Modified-Since
    # / If-None-Match) can be answered with 304 without refetching.
    def initialize(self, request, response):
        BaseHandler.initialize(self, request, response)
        self.image_proxy_config = md.get_image_proxy_config()
    def get_image(self, image_url, cache_id=None):
        # Fetch image_url and write it to the response; cache_id keys the cached
        # headers in memcache (defaults to the URL itself).
        if not cache_id: cache_id=image_url
        _cache=memcache.get(cache_id)
        if _cache:
            # Conditional GET, variant 1: If-Modified-Since vs cached Last-Modified.
            if self.request.if_modified_since and 'last-modified' in _cache:
                since = self.request.if_modified_since
                last = datetime.strptime(_cache['last-modified'], '%a, %d %b %Y %H:%M:%S GMT')
                # Compare naive datetimes; strptime gives a naive value.
                if not last.tzinfo:
                    since=since.replace(tzinfo=None)
                if last<=since:
                    logging.debug('[ImageProxy] Hit Cache: last-modified')
                    self.response.set_status(304)
                    if 'content-type' in _cache:
                        self.response.headers['Content-Type']=_cache['content-type']
                    return
            # Conditional GET, variant 2: If-None-Match vs cached ETag.
            if self.request.if_none_match and 'etag' in _cache:
                if str(self.request.if_none_match) == _cache['etag']:
                    logging.debug('[ImageProxy] Hit Cache: etag')
                    self.response.set_status(304)
                    if 'content-type' in _cache:
                        self.response.headers['Content-Type']=_cache['content-type']
                    return
        # Cache miss (or unconditional request): fetch and relay the image,
        # copying the cacheable headers to both the response and memcache.
        image=urlfetch.fetch(image_url)
        logging.debug('[ImageProxy] Response Headers: %s' % image.headers)
        _cache={}
        for h in _cached_headers:
            if h in image.headers:
                _cache[h]=image.headers[h]
                self.response.headers[h]=image.headers[h]
        memcache.set(cache_id, _cache)
        logging.debug('[ImageProxy] Cached Header: %s' % _cache)
        self.response.out.write(image.content)
def b58decode(s):
    """Decode a Flickr-style base58 short photo id into its integer value."""
    alphabet = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
    base = len(alphabet)
    value = 0
    # Horner's rule, scanning left to right (most significant digit first).
    for ch in s:
        value = value * base + alphabet.index(ch)
    return value
def flickr_rest(api_url, **params):
    """Call a Flickr REST endpoint and return the decoded JSON response.

    An optional 'http_method' keyword selects the HTTP verb (default GET);
    all other keywords are sent as query parameters.
    """
    params.update( { 'format':'json', 'nojsoncallback':1 } )
    http_method = params.pop('http_method', urlfetch.GET)
    res = urlfetch.fetch('%s?%s' % (api_url, urllib.urlencode(params)), method=http_method)
    content = json.loads(res.content)
    logging.debug('[ImageProxy] Flickr REST: %s' % content)
    return content
class Flickr(ImageProxy):
    # Proxy for Flickr photos. Resolves both short (flic.kr, base58-encoded) and
    # long photo ids via the Flickr REST API, memcaching the resolved image URL.
    def get(self, link_type, image_id):
        api_key = self.image_proxy_config.flickr_api_key
        rest_api_url = self.image_proxy_config.flickr_rest_api_url
        if not api_key:
            # No API key configured: serve a placeholder instead of failing.
            self.redirect('/images/flickr-not-ready.png')
            return
        photo_id = image_id
        if link_type == 'short':
            # flic.kr short links carry the numeric photo id in base58.
            photo_id = b58decode(image_id)
        image_url = memcache.get('Image-Flickr-URL-%s-%s' % (link_type, image_id) )
        if not image_url:
            # Resolve the photo's farm/server/secret once, then cache the URL.
            fpi = flickr_rest(rest_api_url, method='flickr.photos.getInfo', api_key=api_key, photo_id=photo_id )
            if fpi['stat'] == 'fail':
                self.redirect('/images/flickr-not-ready.png')
                return
            p = fpi['photo']
            image_url = 'http://farm%s.static.flickr.com/%s/%s_%s_m.jpg' % (p['farm'], p['server'], p['id'], p['secret'])
            memcache.set('Image-Flickr-URL-%s-%s' % (link_type, image_id), image_url)
        cache_id = 'Image-Flickr-%s' % image_id
        self.get_image(image_url, cache_id)
class Twitpic(ImageProxy):
    """Proxy handler for twitpic.com images."""
    def get(self, image_size, image_id):
        # Thumb(150px x 150px max), Mini(75px x 75px max)
        # http://twitpic.com/show/[size]/[image-id]
        source = 'http://twitpic.com/show/%s/%s' % (image_size, image_id)
        self.get_image(source, 'Image-Twitpic-%s-%s' % (image_size, image_id))
class Twitgoo(ImageProxy):
    """Proxy handler for twitgoo.com images."""
    def get(self, image_size, image_id):
        # Thumb/mini (up to 160x160), Img (up to 1600x1600)
        # http://twitgoo.com/show/[size]/[gooid]
        source = 'http://twitgoo.com/show/%s/%s' % (image_size, image_id)
        self.get_image(source, 'Image-Twitgoo-%s-%s' % (image_size, image_id))
class Yfrog(ImageProxy):
    """Proxy handler for yfrog thumbnails (yfrog.<tld>/<id>.th.jpg)."""
    def get(self, domain_tail, image_id):
        source = 'http://yfrog.%s/%s.th.jpg' % (domain_tail, image_id)
        self.get_image(source, 'Image-Yfrog-%s-%s' % (domain_tail, image_id))
class Imgly(ImageProxy):
    """Proxy handler for img.ly images."""
    def get(self, image_size, image_id):
        # http://img.ly/show/[mini|thumb|medium|large|full]/<image-id>
        source = 'http://img.ly/show/%s/%s' % (image_size, image_id)
        self.get_image(source, 'Image-Imgly-%s-%s' % (image_size, image_id))
class Youtube(ImageProxy):
    """Proxy handler for YouTube video thumbnails."""
    def get(self, video_id):
        source = 'http://i.ytimg.com/vi/%s/1.jpg' % video_id
        self.get_image(source, 'Image-Youtube-%s' % video_id)
class Moby(ImageProxy):
    """Proxy handler for moby.to pictures."""
    def get(self, image_size, image_id):
        #full, square, view, medium, thumbnail, thumb
        source = 'http://moby.to/%s:%s' % (image_id, image_size)
        self.get_image(source, 'Image-Moby-%s-%s' % (image_size, image_id))
class Instagram(ImageProxy):
    """Redirect to Instagram's media endpoint (no local proxying/caching)."""
    def get(self, image_id, image_size):
        #size: One of t (thumbnail), m (medium), l (large). Defaults to m.
        size = image_size or 'l'
        self.redirect('http://instagr.am/p/%s/media/?size=%s' % (image_id, size))
def picplz_url(image_id, image_size):
    # Resolve a picplz short id to a direct image URL for the requested size.
    # Returns None on API failure, download error, or an unexpected payload.
    # See: https://sites.google.com/site/picplzapi/
    api_url='http://api.picplz.com/api/v2/pic.json'
    try:
        res=urlfetch.fetch('%s?shorturl_id=%s' % (api_url, image_id))
        img=json.loads(res.content)
        if img['result']!='ok':
            return None
        else:
            return img['value']['pics'][0]['pic_files'][image_size]['img_url']
    except urlfetch.DownloadError:
        return None
    except KeyError, e:
        # Payload did not contain the expected keys (e.g. unknown image_size).
        logging.warning(e)
        return None
class Picplz(ImageProxy):
    """Proxy handler for picplz.com photos (resolved via the picplz API)."""
    def get(self, image_id, image_size):
        # The default format list is: 640r, 320rh, 100sh
        size = image_size or '320rh'
        resolved = picplz_url(image_id, size)
        if not resolved:
            self.error(404)
        else:
            self.get_image(resolved)
class Plixi(ImageProxy):
    """Proxy handler for plixi.com photos via the Plixi API."""
    def get(self, image_id, image_size):
        # big - original
        # medium - 600px scaled
        # mobile - 320px scaled
        # small - 150px cropped
        # thumbnail - 79px cropped
        size = image_size or 'mobile'
        source = ('http://api.plixi.com/api/tpapi.svc/imagefromurl'
                  '?url=http://plixi.com/p/%s&size=%s' % (image_id, size))
        self.get_image(source)
def main():
    # URL routing table: each /i/<service>/... path maps to the handler that
    # proxies (or redirects to) that service's image.
    application = webapp.WSGIApplication([
        ('/i/twitpic/(thumb|mini)/([0-9a-zA-Z]+)', Twitpic),
        ('/i/twitgoo/(thumb|mini|img)/([0-9a-zA-Z]+)', Twitgoo),
        ('/i/yfrog/([\.a-zA-Z]+)/([0-9a-zA-Z]+)', Yfrog),
        ('/i/imgly/(mini|thumb|medium|large|full)/([0-9a-zA-Z]+)', Imgly),
        ('/i/flickr/(long|short)/([0-9a-zA-Z]+)', Flickr),
        ('/i/y2b/([0-9a-zA-Z_\-]+)', Youtube),
        ('/i/moby/(full|square|view|medium|thumbnail|thumb)/([0-9a-zA-Z]+)', Moby),
        ('/i/instagram/(?P<image_id>[0-9a-zA-Z_\-]+)(?:/(?P<image_size>t|m|l))?', Instagram),
        ('/i/picplz/([0-9a-zA-Z]+)(?:/(?P<image_size>640r|320rh|100sh))?', Picplz),
        ('/i/plixi/(?P<image_id>[0-9a-zA-Z]+)(?:/(?P<image_size>big|medium|mobile|small|thumbnail))?', Plixi),
    ], debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,611
|
mfs6174/Twitdao11
|
refs/heads/master
|
/templatetags/string.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from django.utils import simplejson as json
from django.utils.safestring import mark_safe
from django.template.defaultfilters import stringfilter
import ttp
import utils
import time
import calendar
import rfc822
import htmllib
import urllib
register = webapp.template.create_template_register()
@register.filter
@stringfilter
def twitter_text_py(text):
    """Render tweet text to linked HTML using the ttp parser."""
    parser = ttp.Parser()
    return parser.parse(text).html
@register.filter
@stringfilter
def tweet_id_encode(text):
    """Encode a tweet id via utils.tweet_id_encode; output is marked safe."""
    encoded = utils.tweet_id_encode(text)
    return encoded
tweet_id_encode.is_safe=True
@register.filter
@stringfilter
def tweet_id_decode(text):
    """Decode an encoded tweet id via utils.tweet_id_decode; output is marked safe."""
    decoded = utils.tweet_id_decode(text)
    return decoded
tweet_id_decode.is_safe=True
def _m_escape(text):
return ''.join({'&':'&', '"':'"', '\'':''', '>':'>', '<':'<'}.get(c, c) for c in text)
def _m_format_tag(tag, text):
    """Render a hashtag as a link to the mobile search page."""
    query = urllib.quote('#' + text.encode('utf-8'))
    return '<a href="/a/search?q=%s">%s%s</a>' % (query, tag, text)
def _m_format_username(at_char, user):
return '<a href="/m/u-%s">%s%s</a>' % (user, at_char, user)
def _m_format_list(at_char, user, list_name):
return '<a href="/m/l-%s/%s">%s%s/%s</a>' % (user, list_name, at_char, user, list_name)
def _m_google_format_url(url, text):
    """Link a URL through Google's GWT mobile transcoder."""
    target = urllib.quote(_m_escape(url).encode('utf-8'))
    return '<a target="_blank" href="http://www.google.com/gwt/n?u=%s">%s</a>' % (target, text)
def _m_baidu_format_url(url, text):
    """Link a URL through Baidu's mobile gateway transcoder."""
    target = urllib.quote(_m_escape(url).encode('utf-8'))
    return '<a target="_blank" href="http://gate.baidu.com/tc?from=opentc&src=%s">%s</a>' % (target, text)
def _m_format_url(url, text):
    """Link a URL directly, HTML-escaping the href attribute."""
    href = _m_escape(url)
    return '<a target="_blank" href="{}">{}</a>'.format(href, text)
@register.filter
@stringfilter
def m_twitter_text(text, op=None):
    """Render tweet text for the mobile UI.

    op selects how URLs are linked: 'google-gwt' or 'baidu-gate' route links
    through the respective mobile transcoder; anything else links directly.
    """
    url_formatters = {
        'google-gwt': _m_google_format_url,
        'baidu-gate': _m_baidu_format_url,
    }
    parser = ttp.Parser()
    parser.format_tag = _m_format_tag
    parser.format_username = _m_format_username
    parser.format_list = _m_format_list
    parser.format_url = url_formatters.get(op, _m_format_url)
    return parser.parse(text).html
@register.filter
@stringfilter
def human_readable(date_str):
    '''Get a human redable string representing the posting time
    Returns:
        A human readable string representing the posting time
    '''
    if not date_str:
        return ''#TODO: this probably needs a closer review.
    # The fudge factor widens each bucket slightly so boundary values read
    # naturally (e.g. 55s still shows as "seconds ago", not "a minute ago").
    # NOTE(review): `long` and the rfc822 module are Python 2 only.
    fudge = 1.25
    delta = long(time.time()) - long(calendar.timegm(rfc822.parsedate(date_str)))
    if delta < (1 * fudge):
        return 'a second ago'
    elif delta < (60 * (1/fudge)):
        return '%d seconds ago' % (delta)
    elif delta < (60 * fudge):
        return 'a minute ago'
    elif delta < (60 * 60 * (1/fudge)):
        return '%d minutes ago' % (delta / 60)
    elif delta < (60 * 60 * fudge):
        return 'about an hour ago'
    elif delta < (60 * 60 * 24 * (1/fudge)):
        return 'about %d hours ago' % (delta / (60 * 60))
    elif delta < (60 * 60 * 24 * fudge):
        return 'about a day ago'
    else:
        return 'about %d days ago' % (delta / (60 * 60 * 24))
human_readable.is_safe=True
@register.filter
@stringfilter
def time_format(date_str, fmt_str="%Y-%m-%d"):
    """Format an RFC822 date string with strftime; returns None on parse failure."""
    try:
        dtp=rfc822.parsedate(date_str)
        return time.strftime(fmt_str, dtp)
    except Exception:
        # FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. The deliberate best-effort behaviour (None on any
        # bad input) is preserved.
        return None
time_format.is_safe=True
@register.filter
@stringfilter
def milliseconds(date_str):
    """Convert an RFC822 date string to epoch milliseconds, or None on parse failure."""
    # NOTE(review): `long` and rfc822 are Python 2 only; time.mktime interprets
    # the parsed tuple as *local* time -- confirm that is intended for these dates.
    dtp=rfc822.parsedate(date_str)
    if dtp:
        return long(time.mktime(dtp)*1000)
    else:
        return None
milliseconds.is_safe=True
@register.filter
@stringfilter
def unescape(s):
    """Decode HTML entities in *s* via an htmllib.HTMLParser round-trip."""
    parser = htmllib.HTMLParser(None)
    parser.save_bgn()
    parser.feed(s)
    return parser.save_end()
@register.filter
def to_json(obj):
    """Serialize *obj* to JSON, marked safe for direct template output."""
    serialized = json.dumps(obj)
    return mark_safe(serialized)
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,612
|
mfs6174/Twitdao11
|
refs/heads/master
|
/templatetags/entities.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from django.template.defaultfilters import stringfilter
from google.appengine.api import urlfetch
from django.utils import simplejson as json
#from google.appengine.api import memcache
import re
import urllib
import ttp
register = webapp.template.create_template_register()
_twitpic=re.compile('https?://twitpic\.com/(?P<id>[0-9a-zA-Z]+)', re.I)
_twitgoo=re.compile('https?://twitgoo\.com/(?P<id>[0-9a-zA-Z]+)', re.I)
_imgly=re.compile('https?://img\.ly/(?P<id>[0-9a-zA-Z]+)', re.I)
_yfrog=re.compile('https?://yfrog\.(?P<tail>[^/]+)(/[a-z])?/(?P<id>[0-9a-zA-Z]{2,})', re.I)
_flic_kr=re.compile('https?://flic\.kr/p/(?P<id>[0-9a-zA-Z]+)', re.I)
_flickr_com=re.compile('https?://(www\.|)flickr\.com/photos/[0-9a-zA-Z_]+/(?P<id>[0-9]+)', re.I)
_youtu_be=re.compile('https?://youtu\.be/(?P<id>[0-9a-zA-Z_\-]+)', re.I)
_youtube_com=re.compile('https?://(www\.|)youtube\.com/((watch\?v=)|(v/))(?P<id>[0-9a-zA-Z_\-]+)', re.I)
_moby_to=re.compile('https?://moby\.to/(?P<id>[0-9a-zA-Z]+)', re.I)
_instagram=re.compile('https?://instagr\.am/p/(?P<id>[0-9a-zA-Z_\-]+)', re.I)
_instagramcom=re.compile('https?://instagram\.com/p/(?P<id>[0-9a-zA-Z_\-]+)', re.I)
_picplz=re.compile('https?://picplz\.com/(?P<id>[0-9a-zA-Z]+)', re.I)
_plixi=re.compile('https?://plixi\.com/p/(?P<id>[0-9a-zA-Z]+)', re.I)
_youku=re.compile('https?://v\.youku\.com/v_show/id_(?P<id>[0-9a-zA-Z_\-=]+)\.html', re.I)
_tudou=re.compile('https?://(www\.|)tudou\.com/programs/view/(?P<id>[0-9a-zA-Z_\-]+)', re.I)
_56=re.compile('https?://(www\.|)56\.com/([0-9a-zA-Z]+)/v_(?P<id>[0-9a-zA-Z_\-]+)\.html', re.I)
_ku6=re.compile('https?://v\.ku6\.com/show/(?P<id>[0-9a-zA-Z_\-]+)\.html', re.I)
_bitly = re.compile('http://bit\.ly/(?P<id>[0-9a-zA-Z_\-]+)', re.I)
_jmp = re.compile('http://j\.mp/(?P<id>[0-9a-zA-Z_\-]+)', re.I)
_tco = re.compile('http://t\.co/(?P<id>[a-z0-9]*)', re.I)
_tcn = re.compile('http://t\.cn/(?P<id>[a-z0-9]*)', re.I)
_isgd = re.compile('http://is\.gd/(?P<id>[0-9a-zA-Z_\-]+)', re.I)
_googl = re.compile('http://goo\.gl/(?P<id>[0-9a-zA-Z_\-]{3,})', re.I)
_googlfb = re.compile('http://goo\.gl/fb/(?P<id>[0-9a-zA-Z_\-]+)', re.I)
@register.filter
@stringfilter
def image_preview(url):
    ''' Expand a (possibly shortened) URL into an inline thumbnail/embed.

    First resolves known URL shorteners via url_unshort, then matches the
    resolved URL against the known photo/video hosts and returns the matching
    proxy <img> link or <embed>. Unrecognized URLs fall through to a plain link.
    '''
    try:
        url,is_short= url_unshort(url)
    except:
        # Best-effort: if unshortening blows up, just render a plain link.
        return '<span class="unshorturl"><a href="%s" target="_blank" rel="noreferrer">%s</a></span>' % (url,url)
    # --- photo hosts proxied through the /i/ handlers ---
    m=_twitpic.search(url)
    if m:
        twitpic_id=m.group('id')
        # These path segments are twitpic site pages, not photo ids.
        if twitpic_id.lower() in ['photos','events','places','widgets','upload','account','logout','doc']:
            return ''
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/twitpic/%s/%s" /></a>' % ( url, 'thumb', twitpic_id )
    m=_twitgoo.search(url)
    if m:
        twitgoo_id=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/twitgoo/%s/%s" /></a>' % ( url, 'thumb', twitgoo_id )
    m=_imgly.search(url)
    if m:
        imgly_id=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/imgly/%s/%s" /></a>' % ( url, 'medium', imgly_id )
    m=_yfrog.search(url)
    if m:
        yfrog_id=m.group('id')
        yfrog_tail=m.group('tail')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/yfrog/%s/%s" /></a>' % ( url, yfrog_tail, yfrog_id )
    m=_flic_kr.search(url)
    if m:
        flickr_id=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/flickr/short/%s" /></a>' % ( url, flickr_id )
    m=_flickr_com.search(url)
    if m:
        flickr_id=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/flickr/long/%s" /></a>' % ( url, flickr_id )
    m=_youtu_be.search(url)
    if m:
        youtube_id=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/y2b/%s" /></a>' % ( url, youtube_id )
    m=_youtube_com.search(url)
    if m:
        youtube_id=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/y2b/%s" /></a>' % ( url, youtube_id )
    m=_moby_to.search(url)
    if m:
        moby_id=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/moby/thumb/%s" /></a>' % ( url, moby_id )
    m=_instagram.search(url)
    if m:
        insid=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/instagram/%s" width="550" /></a>' % ( url, insid )
    m=_instagramcom.search(url)
    if m:
        insid=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/instagram/%s" width="550" /></a>' % ( url, insid )
    m=_picplz.search(url)
    if m:
        pic_id=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/picplz/%s" /></a>' % ( url, pic_id )
    m=_plixi.search(url)
    if m:
        pic_id=m.group('id')
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="/i/plixi/%s" /></a>' % ( url, pic_id )
    # --- Chinese video hosts rendered as flash embeds ---
    m=_youku.search(url)
    if m:
        youku_id=m.group('id')
        return '<embed src="http://player.youku.com/player.php/sid/%s/v.swf" quality="high" width="480" height="400" align="middle" allowScriptAccess="sameDomain" type="application/x-shockwave-flash"></embed>' % youku_id
    m=_tudou.search(url)
    if m:
        tudou_id=m.group('id')
        return '<embed src="http://www.tudou.com/v/%s/v.swf" type="application/x-shockwave-flash" allowscriptaccess="always" allowfullscreen="true" wmode="opaque" width="480" height="400"></embed>' % tudou_id
    m=_56.search(url)
    if m:
        _56_id=m.group('id')
        return '<embed src="http://player.56.com/v_%s.swf" type="application/x-shockwave-flash" width="480" height="395" allowNetworking="all" allowScriptAccess="always"></embed>' % _56_id
    m=_ku6.search(url)
    if m:
        ku6_id=m.group('id')
        return '<embed src="http://player.ku6.com/refer/%s/v.swf" quality="high" width="480" height="400" align="middle" allowScriptAccess="always" allowfullscreen="true" type="application/x-shockwave-flash"></embed>' % ku6_id
    # NOTE(review): the next branch is dead code -- both it and the fall-through
    # below return the identical plain-link markup regardless of is_short.
    if is_short == 1:
        return '<span class="unshorturl"><a href="%s" target="_blank" rel="noreferrer">%s</a></span>' % (url,url)
    return '<span class="unshorturl"><a href="%s" target="_blank" rel="noreferrer">%s</a></span>' % (url,url)
def url_unshort(url):
    """Expand a shortened URL via the unshort.me JSON API.

    Returns (expanded_url, 1) when *url* matched a known shortener and
    was resolved, otherwise (url, 0).  A DownloadError while talking to
    unshort.me aborts immediately with (url, 0), matching the original
    per-branch behaviour.
    """
    # (compiled pattern, template the captured id is substituted into).
    # Order matters and mirrors the original branch order; in particular
    # _googl is tried before _googlfb.
    shorteners = (
        (_bitly, 'http://bit.ly/%s'),
        (_jmp, 'http://j.mp/%s'),
        (_tco, 'http://t.co/%s'),
        (_tcn, 'http://t.cn/%s'),
        (_isgd, 'http://is.gd/%s'),
        (_googl, 'http://goo.gl/%s'),
        (_googlfb, 'http://goo.gl/fb/%s'),
    )
    for pattern, template in shorteners:
        m = pattern.search(url)
        if not m:
            continue
        short_url = template % m.group('id')
        try:
            res = urlfetch.fetch('http://api.unshort.me/?r=%s&t=json' % short_url)
            newurl = json.loads(res.content)['resolvedURL']
            # unshort.me answers with its own homepage when it cannot
            # resolve a link; in that case fall through to the next
            # pattern, exactly as the original branch chain did.
            if newurl != "http://unshort.me":
                return newurl, 1
        except urlfetch.DownloadError:
            return url, 0
    return url, 0
#def get_url_cache(self, short_service, cache_id=None):
def _m_google_gwt_url(url):
    """Wrap *url* in Google's GWT mobile transcoder."""
    template = 'http://www.google.com/gwt/n?u=%s'
    return template % urllib.quote(url)
def _m_baidu_gate_url(url):
    """Wrap *url* in Baidu's mobile gateway transcoder."""
    template = 'http://gate.baidu.com/tc?from=opentc&src=%s'
    return template % urllib.quote(url)
def _m_media_url(url, op=None):
    """Return an <a><img></a> preview snippet for *url*, or None.

    *op* optionally routes the link target through a mobile transcoding
    proxy ('google-gwt' or 'baidu-gate') before matching.  Returns ''
    for twitpic site pages (reserved path names) and None when no known
    media host matches.
    """
    if op == 'google-gwt':
        url = _m_google_gwt_url(url)
    elif op == 'baidu-gate':
        url = _m_baidu_gate_url(url)

    def _link(src):
        # Shared anchor/img wrapper identical to the original per-branch
        # markup.
        return '<a href="%s" target="_blank" rel="noreferrer"><img src="%s" /></a>' % (url, src)

    # twitpic needs a special case: some path components are site pages,
    # not image ids, and must render nothing.
    m = _twitpic.search(url)
    if m:
        twitpic_id = m.group('id')
        if twitpic_id.lower() in ['photos','events','places','widgets','upload','account','logout','doc']:
            return ''
        return _link('/i/twitpic/%s/%s' % ('thumb', twitpic_id))
    # (pattern, src builder) pairs, tried in the original branch order.
    services = (
        (_twitgoo, lambda m: '/i/twitgoo/%s/%s' % ('thumb', m.group('id'))),
        (_imgly, lambda m: '/i/imgly/%s/%s' % ('thumb', m.group('id'))),
        (_yfrog, lambda m: '/i/yfrog/%s/%s' % (m.group('tail'), m.group('id'))),
        (_flic_kr, lambda m: '/i/flickr/short/%s' % m.group('id')),
        (_flickr_com, lambda m: '/i/flickr/long/%s' % m.group('id')),
        (_youtu_be, lambda m: '/i/y2b/%s' % m.group('id')),
        (_youtube_com, lambda m: '/i/y2b/%s' % m.group('id')),
        (_moby_to, lambda m: '/i/moby/thumb/%s' % m.group('id')),
        (_instagram, lambda m: '/i/instagram/%s' % m.group('id')),
        (_picplz, lambda m: '/i/picplz/%s' % m.group('id')),
        (_plixi, lambda m: '/i/plixi/%s' % m.group('id')),
    )
    for pattern, make_src in services:
        m = pattern.search(url)
        if m:
            return _link(make_src(m))
    return None
@register.filter
@stringfilter
def m_media_preview(text, op=None):
    """Django filter: concatenated media previews for every URL in *text*."""
    parser = ttp.Parser()
    snippets = []
    for link in parser.parse(text).urls:
        snippet = _m_media_url(link, op)
        if snippet:
            snippets.append(snippet)
    return ''.join(snippets)
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,613
|
mfs6174/Twitdao11
|
refs/heads/master
|
/twitter.py
|
# -*- coding: utf-8 -*-
import oauth
from django.utils import simplejson as json
from google.appengine.api import urlfetch
import urllib
from cgi import parse_qsl
import mimetypes
import random
import logging
#default configs
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZE_URL = 'https://twitter.com/oauth/authorize'
AUTHENTICATE_URL = 'https://twitter.com/oauth/authenticate'
API_URL = 'https://api.twitter.com/1.1/'
SEARCH_API_URL = 'https://api.twitter.com/1.1/search/'
MAX_FETCH_COUNT = 5
_http_methods={
'GET':urlfetch.GET,
'POST':urlfetch.POST,
'HEAD':urlfetch.HEAD,
'PUT':urlfetch.PUT,
'DELETE':urlfetch.DELETE
}
def _generate_boundary(length=16):
s = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_'
a = []
for i in range(length):
a.append(random.choice(s))
return ''.join(a)
class Twitter:
    """Minimal OAuth 1.0a Twitter client built on GAE urlfetch.

    The last HTTP status, headers and body are kept on the instance
    (http_status / http_headers / http_body) so callers can inspect a
    failed request after the fact.
    """
    def __init__(self,
                 oauth_token=None,
                 oauth_token_secret=None,
                 consumer_key=CONSUMER_KEY,
                 consumer_secret=CONSUMER_SECRET,
                 request_token_url=REQUEST_TOKEN_URL,
                 access_token_url=ACCESS_TOKEN_URL,
                 authorize_url=AUTHORIZE_URL,
                 authenticate_url=AUTHENTICATE_URL,
                 api_url=API_URL,
                 search_api_url=SEARCH_API_URL
                 ):
        # A token pair is optional: the request-token dance starts with none.
        if oauth_token and oauth_token_secret:
            token = oauth.OAuthToken(oauth_token, oauth_token_secret)
        else:
            token = None
        self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
        self._signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
        self._oauth_token = token
        # Last-response bookkeeping, updated by _fetch().
        self.http_status = 0
        self.http_headers = {}
        self.http_body = ''
        #api config
        self.request_token_url = request_token_url
        self.access_token_url = access_token_url
        self.authorize_url = authorize_url
        self.authenticate_url = authenticate_url
        self.api_url = api_url
        self.search_api_url = search_api_url

    def _get_content_type(self, filename):
        # MIME type by extension; unknown extensions upload as raw bytes.
        return mimetypes.guess_type(filename)[0] or 'application/octet-stream'

    def _encode_multipart_formdata(self, fields, files=[]):
        """
        fields is a sequence of (name, value) elements for regular form fields.
        files is a sequence of (name, filename, value) elements for data to be uploaded as files
        Return (boundary, body)
        """
        boundary = _generate_boundary()
        crlf = '\r\n'
        l = []
        for k, v in fields:
            l.append('--' + boundary)
            l.append('Content-Disposition: form-data; name="%s"' % k)
            l.append('')
            l.append(v)
        for (k, f, v) in files:
            l.append('--' + boundary)
            l.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (k, f))
            l.append('Content-Type: %s' % self._get_content_type(f))
            l.append('')
            l.append(v)
        l.append('--' + boundary + '--')
        l.append('')
        body = crlf.join(l)
        return boundary, body

    def _fetch(self, method, url, params={}, headers={}, files=None):
        """Single HTTP round-trip; records status/headers/body on self.

        Any urlfetch exception is swallowed and reported as status 500
        with an empty body.
        """
        payload = None
        if method.upper() in ['POST', 'PUT']:
            if files and type(files) == list:
                # File upload: multipart body with a generated boundary.
                boundary, payload = self._encode_multipart_formdata(params.items(), files)
                headers['Content-Type'] = 'multipart/form-data; boundary=%s' % boundary
            else:
                payload = urllib.urlencode(params)
        try:
            res = urlfetch.fetch(url, payload, _http_methods[method.upper()], headers)
        except:
            self.http_status = 500
            return ''
        self.http_status = res.status_code
        self.http_headers = res.headers
        self.http_body = res.content
        logging.debug('[Twitter] Response Headers: %s' % res.headers)
        return res.content

    def _extend_fetch(self, method, url, params={}, headers={}, files=None):
        """_fetch() with retries: 499-599 responses and DownloadErrors are
        retried up to MAX_FETCH_COUNT times before giving up."""
        http_body = ''
        for count in range(MAX_FETCH_COUNT):
            try:
                http_body = self._fetch(method, url, params, headers, files)
                if self.http_status != 200:
                    logging.debug('[HTTP Status %s] body %s' % (self.http_status, http_body))
                    # Server-side errors are worth another attempt.
                    if self.http_status in range(499, 600):
                        continue
                logging.debug('[Twitter] fetch count: %s ' % str(count+1))
                return http_body
            except urlfetch.DownloadError, e:
                logging.warning('[Twitter] urlfetch: %s' % e)
                continue
        raise Exception('Max fetch count exceeded.')

    def oauth_request(self, url, params={}, method='GET', files=None):
        """Sign and send one OAuth request, returning the raw body.

        For multipart uploads the params are excluded from the signature
        base string (they travel in the body instead).
        """
        oauth_request = oauth.OAuthRequest.from_consumer_and_token(
            self._consumer,
            self._oauth_token,
            http_url=url,
            http_method=method,
            parameters=params if not files else {}
        )
        oauth_request.sign_request(
            self._signature_method,
            self._consumer,
            self._oauth_token
        )
        if method.upper() == 'GET':
            resp = self._extend_fetch(method, oauth_request.to_url())
        else:
            resp = self._extend_fetch(
                method,
                oauth_request.get_normalized_http_url(),
                params,
                headers=oauth_request.to_header(),
                files=files
            )
        return resp

    def fetch_request_token(self, callback=None):
        """returns {'oauth_token':'the-request-token',
        'oauth_token_secret':'the-request-secret',
        'oauth_callback_confirmed':'true'}"""
        param = {}
        if callback:
            param.update({'oauth_callback': callback})
        response_body = self.oauth_request(self.request_token_url, param)
        request_token = dict(parse_qsl(response_body))
        if 'oauth_token' not in request_token:
            return None
        # Remember the request token so the access-token exchange can sign
        # with it.
        self._oauth_token = oauth.OAuthToken(
            request_token['oauth_token'],
            request_token['oauth_token_secret']
        )
        return request_token

    def fetch_access_token(self, verifier):
        """returns {'oauth_token':'the-access-token',
        'oauth_token_secret':'the-access-secret',
        'user_id':'1234567',
        'screen_name':'darasion'}"""
        param = {}
        param.update({'oauth_verifier': verifier})
        response_body = self.oauth_request(self.access_token_url, param, 'POST')
        access_token = dict(parse_qsl(response_body))
        if 'oauth_token' not in access_token:
            return None
        self._oauth_token = oauth.OAuthToken(
            access_token['oauth_token'],
            access_token['oauth_token_secret']
        )
        return access_token

    def get_authenticate_url(self, request_token, force_login=False):
        # Sign-in-with-Twitter flow URL.
        if force_login:
            return "%s?oauth_token=%s&force_login=true" % (self.authenticate_url, request_token['oauth_token'])
        else:
            return "%s?oauth_token=%s" % (self.authenticate_url, request_token['oauth_token'])

    def get_authorize_url(self, request_token, force_login=False):
        # Full three-legged authorization flow URL.
        if force_login:
            return "%s?oauth_token=%s&force_login=true" % (self.authorize_url, request_token['oauth_token'])
        else:
            return "%s?oauth_token=%s" % (self.authorize_url, request_token['oauth_token'])

    def api_call(self, http_method, api_method, parameters={}, files=None):
        """Call a REST endpoint (api_method without the .json suffix) and
        return the decoded JSON; re-raises after logging on bad JSON."""
        try:
            return json.loads(self.oauth_request(''.join([
                self.api_url,
                api_method,
                '.json'
            ]), parameters, http_method, files))
        except:
            logging.warning('[Twitter] Still cant handle: Status: %s, Body: %s' % (self.http_status, self.http_body))
            raise

    def get_users_profile_image_url(self, screen_name, size='normal'):
        # The endpoint answers with a redirect to the actual image URL.
        res = urlfetch.fetch('%s/users/profile_image/%s?size=%s' % (self.api_url, screen_name, size), follow_redirects=False)
        if res.status_code == 302 or res.status_code == 301:
            return res.headers['location']
        return None

    def search_api_call(self, q, **params):
        """Unauthenticated search; returns the decoded JSON response."""
        pms = {'q': q}
        pms.update(params)
        data = urllib.urlencode(pms)
        return json.loads(urllib.urlopen(''.join([self.search_api_url, 'tweets.json']), data).read())

    def hacked_search(self, q, since_id=None, page=None):
        # since_id, page(next_page)
        # include_entities=1, contributor_details=true, domain=https://twitter.com, format=phoenix
        pms = {
            'q': q,
            'include_entities': '1',
            'contributor_details': 'true',
            'format': 'phoenix',
            'domain': 'https://twitter.com'
        }
        if since_id:
            pms['since_id'] = since_id
        if page:
            pms['page'] = page
        pms['rpp'] = 200
        data = urllib.urlencode(pms)
        url = "https://twitter.com/phoenix_search.phoenix"
        res = json.loads(self.oauth_request(''.join([url, '?', data]), pms, 'GET'))
        try:
            # Best-effort rate-limit logging; these headers may be absent.
            logging.debug('RateLimit Class: %s' % self.http_headers['X-RateLimit-Class'])
            logging.debug('RateLimit Limit: %s' % self.http_headers['X-RateLimit-Limit'])
            logging.debug('RateLimit Remaining: %s' % self.http_headers['X-RateLimit-Remaining'])
            logging.debug('RateLimit Reset: %s' % self.http_headers['X-RateLimit-Reset'])
        except:
            pass
        return res

    def hacked_following_followers_of(self, user_id):
        # Also followed by.
        # user_id, cursor=-1
        pms = {'user_id': user_id, 'cursor': '-1'}
        qs = urllib.urlencode(pms)
        url = 'https://twitter.com/users/following_followers_of.json'
        res = json.loads(self.oauth_request(''.join([url, '?', qs]), pms, 'GET'))
        try:
            # Best-effort rate-limit logging; these headers may be absent.
            logging.debug('RateLimit Class: %s' % self.http_headers['X-RateLimit-Class'])
            logging.debug('RateLimit Limit: %s' % self.http_headers['X-RateLimit-Limit'])
            logging.debug('RateLimit Remaining: %s' % self.http_headers['X-RateLimit-Remaining'])
            logging.debug('RateLimit Reset: %s' % self.http_headers['X-RateLimit-Reset'])
        except:
            pass
        return res

    def hacked_follows_in_common_with(self, user_id):
        # You both follow.
        # user_id, cursor=-1
        pms = {'user_id': user_id, 'cursor': '-1'}
        qs = urllib.urlencode(pms)
        url = 'https://twitter.com/users/follows_in_common_with.json'
        res = json.loads(self.oauth_request(''.join([url, '?', qs]), pms, 'GET'))
        try:
            # Best-effort rate-limit logging; these headers may be absent.
            logging.debug('RateLimit Class: %s' % self.http_headers['X-RateLimit-Class'])
            logging.debug('RateLimit Limit: %s' % self.http_headers['X-RateLimit-Limit'])
            logging.debug('RateLimit Remaining: %s' % self.http_headers['X-RateLimit-Remaining'])
            logging.debug('RateLimit Reset: %s' % self.http_headers['X-RateLimit-Reset'])
        except:
            pass
        return res
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,614
|
mfs6174/Twitdao11
|
refs/heads/master
|
/templatetags/tags.py
|
from google.appengine.ext import webapp
register = webapp.template.create_template_register()
from django.template import Node
from django.template import TemplateSyntaxError, VariableDoesNotExist, Variable
from datetime import datetime
import rfc822
@register.tag
def tweet_stats(parser, token):
    """Template tag: {% tweet_stats tweet_count created_at %} -> tweets/day."""
    try:
        # Unpacking inside the try: a wrong argument count raises
        # ValueError and is surfaced as a template syntax error.
        tag_name, tweet_count, created_at = token.split_contents()
    except ValueError, e:
        raise TemplateSyntaxError(e)
    return TweetStatsNode(tweet_count, created_at)
class TweetStatsNode(Node):
    """Renders the average tweets per day: count / account age in days."""

    def __init__(self, tweet_count, created_at):
        self.tweet_count = Variable(tweet_count)
        self.created_at = Variable(created_at)

    def render(self, context):
        try:
            count = float(self.tweet_count.resolve(context))
            raw_date = self.created_at.resolve(context)
            joined = datetime(*rfc822.parsedate(raw_date)[0:6])
            rate = count / (datetime.now() - joined).days
        except:
            # Any failure (unresolvable vars, bad date, zero-day-old
            # account dividing by zero) degrades to the literal 'NaN'.
            return 'NaN'
        return '%9.2f' % rate
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,615
|
mfs6174/Twitdao11
|
refs/heads/master
|
/md.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import users
import hashlib
import logging
import sys
import pickle
# Process-local (per GAE instance) cache of the AppConfig singleton.
_app_config_cache=None
class AppConfig(db.Model):
    """Singleton entity (key_name='app_config') holding the app's Twitter
    OAuth credentials and endpoint URLs."""
    consumer_key = db.StringProperty(default='')
    consumer_secret = db.StringProperty(default='')
    request_token_url = db.StringProperty(default='https://api.twitter.com/oauth/request_token')
    access_token_url = db.StringProperty(default='https://api.twitter.com/oauth/access_token')
    authorize_url = db.StringProperty(default='https://twitter.com/oauth/authorize')
    authenticate_url = db.StringProperty(default='https://twitter.com/oauth/authenticate')
    api_url = db.StringProperty(default='https://api.twitter.com/1.1/')
    search_api_url = db.StringProperty(default='https://api.twitter.com/1.1/search/')
    # API key for the twitpic image host integration.
    twitpic_api_key = db.StringProperty(default='')
def set_app_config(
    consumer_key=None,
    consumer_secret=None,
    request_token_url=None,
    access_token_url=None,
    authorize_url=None,
    authenticate_url=None,
    api_url=None,
    search_api_url=None,
    twitpic_api_key=None,
):
    """Persist the application's Twitter/OAuth configuration.

    Only truthy arguments override a stored value; arguments left at
    None (or '') keep the AppConfig model defaults.  The saved entity is
    written to the datastore, the module-level cache and memcache.
    Returns the AppConfig entity.
    """
    global _app_config_cache
    # Replaces nine copy-pasted "if x: params['x'] = x" branches.
    overrides = {
        'consumer_key': consumer_key,
        'consumer_secret': consumer_secret,
        'request_token_url': request_token_url,
        'access_token_url': access_token_url,
        'authorize_url': authorize_url,
        'authenticate_url': authenticate_url,
        'api_url': api_url,
        'search_api_url': search_api_url,
        'twitpic_api_key': twitpic_api_key,
    }
    params = {'key_name': 'app_config'}
    for name, value in overrides.items():
        if value:  # same truthiness rule as the original chain
            params[name] = value
    app_config = AppConfig(**params)
    logging.debug('[App Config] Set: %s' % params)
    app_config.put()
    _app_config_cache = app_config
    memcache.set('app_config', app_config)
    return app_config
def get_app_config():
    """Return the AppConfig singleton.

    Lookup order: module-level cache, memcache, datastore; a total miss
    creates a default entity via set_app_config().
    """
    global _app_config_cache
    if _app_config_cache:
        logging.debug('[MD] hit _app_config_cache %s' % _app_config_cache)
        return _app_config_cache
    app_config = memcache.get('app_config')
    _app_config_cache = app_config
    if not app_config:
        app_config = AppConfig.get_by_key_name('app_config')
        if not app_config:
            # Nothing stored anywhere yet: persist (and return) defaults.
            return set_app_config()
        _app_config_cache = app_config
        memcache.set('app_config', app_config)
    return app_config
# Process-local (per GAE instance) cache of the ImageProxyConfig singleton.
_image_proxy_config_cache=None
class ImageProxyConfig(db.Model):
    """Singleton entity (key_name='image_proxy_config') with Flickr API
    credentials for the image proxy."""
    flickr_api_key = db.StringProperty(default='')
    flickr_api_secret = db.StringProperty(default='')
    flickr_rest_api_url = db.StringProperty(default='http://api.flickr.com/services/rest/')
def set_image_proxy_config(
    flickr_api_key=None,
    flickr_api_secret=None,
    flickr_rest_api_url=None
):
    """Persist the Flickr image-proxy configuration.

    Only truthy arguments override stored values; the entity is written
    to the datastore, the module cache and memcache.  Returns the entity.
    """
    global _image_proxy_config_cache
    # Replaces the copy-pasted "if x: params['x'] = x" branches.
    overrides = {
        'flickr_api_key': flickr_api_key,
        'flickr_api_secret': flickr_api_secret,
        'flickr_rest_api_url': flickr_rest_api_url,
    }
    params = {'key_name': 'image_proxy_config'}
    for name, value in overrides.items():
        if value:  # same truthiness rule as the original chain
            params[name] = value
    image_proxy_config = ImageProxyConfig(**params)
    logging.debug('[ImageProxy Config] Set: %s' % params)
    image_proxy_config.put()
    _image_proxy_config_cache = image_proxy_config
    memcache.set('image_proxy_config', image_proxy_config)
    return image_proxy_config
def get_image_proxy_config():
    """Return the ImageProxyConfig singleton.

    Lookup order: module-level cache, memcache, datastore; a total miss
    creates a default entity via set_image_proxy_config().
    """
    global _image_proxy_config_cache
    if _image_proxy_config_cache:
        return _image_proxy_config_cache
    image_proxy_config = memcache.get('image_proxy_config')
    _image_proxy_config_cache = image_proxy_config
    if not image_proxy_config:
        image_proxy_config = ImageProxyConfig.get_by_key_name('image_proxy_config')
        if not image_proxy_config:
            # Nothing stored anywhere yet: persist (and return) defaults.
            return set_image_proxy_config()
        _image_proxy_config_cache = image_proxy_config
        memcache.set('image_proxy_config', image_proxy_config)
    return image_proxy_config
class PickledProperty(db.Property):
    """db.Property that pickles arbitrary Python values into a Blob."""
    data_type = db.Blob

    def get_value_for_datastore(self, model_instance):
        # Serialize on write; None is stored as None (implicit return).
        value = self.__get__(model_instance, model_instance.__class__)
        if value is not None:
            return db.Blob(pickle.dumps(value))

    def make_value_from_datastore(self, value):
        # Deserialize on read; None stays None.
        if value is not None:
            return pickle.loads(str(value))
class TwitdaoUser(db.Model):
    """An App Engine account; owns zero or more AccessTokens."""
    app_user = db.UserProperty(auto_current_user_add=True)
    # The user's preferred AccessToken; the reference can dangle if the
    # token entity is deleted (callers guard with try/except).
    default_token = db.ReferenceProperty(default=None)

    def __str__(self):
        return str(self.app_user)
# Settings every AccessToken starts with; _cleanup_settings() treats these
# keys as the complete set of valid setting names.
_default_token_settings = {
    'show_media': True,
    'm_show_avatar': False,
    'm_show_media': False,
    'm_optimizer': None
}
class AccessToken(db.Model):
    """A Twitter OAuth access token owned by a TwitdaoUser."""
    #twitdao info
    twitdao_user = db.ReferenceProperty(reference_class=TwitdaoUser, collection_name="access_tokens")
    first_auth_at = db.DateTimeProperty(auto_now_add=True)
    last_auth_at = db.DateTimeProperty(auto_now=True)
    # Per-token UI preferences; valid keys are those of
    # _default_token_settings.
    settings = PickledProperty(default=_default_token_settings)
    #access token
    user_id = db.IntegerProperty()
    screen_name = db.StringProperty()
    oauth_token = db.StringProperty()
    oauth_token_secret = db.StringProperty()

    def __str__(self):
        return '(%s, %s, key=%s)' % (self.user_id, self.screen_name, self.key())
class NoUserError(Exception):
    '''Raised when no App Engine user is signed in (we can't find any user).'''
    pass
def _default_app_user():
    """Return the signed-in App Engine user; raise NoUserError if none."""
    current = users.get_current_user()
    if not current:
        raise NoUserError('Have you logged in?')
    return current
def _app_user_key(app_user=None):
'''Identifier of the user. '''
if not app_user:
app_user = _default_app_user()
return 'token-%s-%s-%s-%s-%s' % (
app_user.nickname(),
app_user.email(),
app_user.user_id(),
app_user.federated_identity(),
app_user.federated_provider()
)
def set_default_access_token(access_token, app_user=None):
    '''
    Make *access_token* the default token for *app_user* (the currently
    signed-in user when omitted), persisting the choice and refreshing
    the memcache entry.  Returns the access_token.
    '''
    if not app_user:
        app_user = _default_app_user()
    twitdao_user = TwitdaoUser.all().filter('app_user =', app_user).get()
    # NOTE(review): assumes a TwitdaoUser row already exists for this
    # user; a missing row would raise AttributeError here -- confirm.
    twitdao_user.default_token = access_token
    twitdao_user.put()
    default_key = _app_user_key(app_user)
    memcache.set( default_key, access_token)
    return access_token
def get_access_tokens(size=50, cursor=None):
    '''
    Fetch up to *size* AccessToken entities together with a paging cursor.
    Returns (tokens, next_cursor).  next_cursor != None means more tokens
    remain; next_cursor == None means the result set is exhausted.
    '''
    q = AccessToken.all()
    if cursor:
        q.with_cursor(cursor)
    tokens = q.fetch(size)
    next_cursor = q.cursor()
    # A short page means there is nothing more to fetch.
    if len(tokens) < size:
        next_cursor = None
    return tokens, next_cursor
def get_user_access_tokens(app_user=None, size=10, cursor=None):
    '''
    Fetch up to *size* of *app_user*'s access tokens with a paging cursor.
    Returns (tokens, next_cursor); next_cursor == None means exhausted.
    Defaults to the currently signed-in user when app_user is omitted.
    Returns (None, None) when the user has no TwitdaoUser row at all.
    '''
    if not app_user:
        app_user = _default_app_user()
    tdu = TwitdaoUser.all().filter('app_user =', app_user).get()
    next_cursor = None
    tokens = None
    if tdu:
        if cursor:
            q = tdu.access_tokens.with_cursor(cursor)
        else:
            q = tdu.access_tokens
        tokens = q.fetch(size)
        next_cursor = q.cursor()
    else:
        return None, None
    # A short page means there is nothing more to fetch.
    if len(tokens) < size:
        next_cursor = None
    return tokens, next_cursor
def get_default_access_token(app_user=None):
    '''
    Fetch *app_user*'s default access token (memcache first, then the
    datastore).  Defaults to the currently signed-in user when app_user
    is omitted.  Returns None when the user has no usable default token.
    '''
    if not app_user:
        app_user = _default_app_user()
    default_key = _app_user_key(app_user)
    token = memcache.get(default_key)
    if not token:
        twitdao_user = TwitdaoUser.all().filter('app_user =', app_user).get()
        if twitdao_user:
            # Try to prevent the "ReferenceProperty failed to be resolved" error.
            try:
                token = twitdao_user.default_token
                if not token: return None
                # Prime both cache entries: by token key and by user key.
                memcache.set_multi({str(token.key()):token, default_key:token})
            except:
                logging.warning('Exception: %s' % sys.exc_info()[0])
                return None
        else:
            return None
    return token
def get_access_token(token_key=None, app_user=None):
    '''
    Fetch the access token stored under *token_key*.
    When *app_user* is given, return it only if it belongs to that user;
    otherwise fetch unconditionally.  Memcache-first in both cases.
    '''
    if app_user:
        token = memcache.get(str(token_key))
        if not token:
            token = AccessToken.get(token_key)
            if not token:
                return None
            elif token.twitdao_user.app_user != app_user:
                # Ownership check only on the datastore path.
                return None
            else:
                memcache.set(str(token_key), token)
        return token
    else:
        token = memcache.get(str(token_key))
        if not token:
            token = AccessToken.get(token_key)
            if not token: return None
            memcache.set(str(token_key), token)
        return token
def save_access_token(
    user_id,
    screen_name,
    oauth_token,
    oauth_token_secret,
    app_user
):
    """Create or refresh the AccessToken for (app_user, twitter user_id).

    Creates the TwitdaoUser row on first sight and promotes the token to
    default when the user has none (or the stored reference is broken).
    Returns the saved AccessToken.
    """
    tdu = TwitdaoUser.all().filter('app_user =', app_user).get()
    if not tdu:
        tdu = TwitdaoUser()
        tdu.put()
    tk = tdu.access_tokens.filter('user_id =', long(user_id)).get()
    if tk:
        # Existing token for this twitter account: refresh the credentials.
        tk.screen_name = screen_name
        tk.oauth_token = oauth_token
        tk.oauth_token_secret = oauth_token_secret
        tk.twitdao_user = tdu
        tk.put()
    else:
        tk = AccessToken(
            app_user = app_user,
            twitdao_user=tdu,
            user_id=long(user_id),
            screen_name=screen_name,
            oauth_token=oauth_token,
            oauth_token_secret=oauth_token_secret
        )
        tk.put()
    # Set the token as default only if default_token is None or the Error is raised.
    try:
        # Try to prevent the "ReferenceProperty failed to be resolved" error.
        if not tdu.default_token:
            tdu.default_token = tk
            tdu.put()
    except:
        logging.warning('Exception: %s' % sys.exc_info()[0])
        tdu.default_token = tk
        tdu.put()
    return tk
def delete_access_token(token_key=None, app_user=None):
    '''
    Delete the access token stored under *token_key*.
    When *app_user* is given, delete it only if it belongs to that user;
    otherwise delete unconditionally.  Also evicts the memcache entries.
    Returns the deleted token, or None when missing / not owned.
    '''
    token = AccessToken.get(token_key)
    if not token:
        return None
    if not app_user:
        memcache.delete_multi(keys=[str(token_key), _app_user_key(token.twitdao_user.app_user)])
        token.delete()
    elif token.twitdao_user.app_user != app_user:
        return None
    else:
        memcache.delete_multi(keys=[str(token_key), _app_user_key(app_user)])
        token.delete()
    return token
def _cleanup_settings(settings):
    """Restrict *settings* to the keys of _default_token_settings.

    Non-dict input yields a fresh copy of the defaults.  Bug fix: the
    original returned the shared _default_token_settings object itself,
    which callers (set_token_settings) then mutate in place via
    .update(), silently corrupting the module-wide defaults.
    """
    if not isinstance(settings, dict):
        return dict(_default_token_settings)
    # Snapshot the keys before deleting so the iteration stays valid.
    for key in list(settings.keys()):
        if key not in _default_token_settings:
            del settings[key]
    return settings
def set_token_settings(token_key, app_user=None, **settings):
    """Merge *settings* into the AccessToken at *token_key* and save it.

    When *app_user* is given the token must belong to that user,
    otherwise None is returned and nothing is written.  Unknown setting
    keys are dropped by _cleanup_settings().  The token's memcache
    entries are invalidated before the write.
    """
    token = AccessToken.get(token_key)
    if not token:
        return None
    # Ownership check collapses the two previously duplicated branches.
    if app_user and token.twitdao_user.app_user != app_user:
        return None
    owner = app_user if app_user else token.twitdao_user.app_user
    merged = _cleanup_settings(token.settings)
    merged.update(_cleanup_settings(settings))
    token.settings = merged
    # The original passed a dict to delete_multi(); only the keys matter,
    # so pass them explicitly.
    memcache.delete_multi(keys=[str(token_key), _app_user_key(owner)])
    token.put()
def get_proxy_access_token():
    # Hard-coded datastore key of the account used for unauthenticated
    # (proxy) timeline views; the empty app_user skips the ownership check.
    return get_proxy_token_impl() if False else get_access_token('agdnYWUtdHVpchILEgtBY2Nlc3NUb2tlbhipRgw','')
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,616
|
mfs6174/Twitdao11
|
refs/heads/master
|
/user.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import taskqueue
from base import BaseHandler
from twitdao import Twitdao
import md
import urllib
class ShowUserTimeline(BaseHandler):
    """Public view of a user's timeline at /user/<screen_name>, rendered
    with a shared proxy token instead of a visitor's own credentials."""
    def get(self, screen_name):
        params = self.params([
            'user_id',
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities',
        ],include_rts='true')
        #if screen_name== '':
        #    self.redirect('/')
        #    return
        token = md.get_proxy_access_token()
        #if not token:
        #    self.redirect('/')
        #    return
        td = Twitdao(token)
        owner_user = td.users_show_by_screen_name( screen_name=screen_name )
        # NOTE(review): token_user and friendship are fetched but no
        # longer passed to the template (see commented-out keys below).
        token_user = td.users_show_by_id(user_id = token.user_id)
        friendship = td.friendships_show(source_id=token.user_id, target_screen_name=screen_name)
        timeline = td.user_timeline(screen_name=screen_name, **params)
        self.render('user-timeline-proxy.html', {
            'token':token,
            #'token_user':'twittertwitter',# token_user
            'owner_user':owner_user,
            # Paging anchors derived from the first/last tweet in the page.
            'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'timeline':timeline,
            #'friendship':friendship,
            'where':'user',
        })
def main():
    """WSGI entry point: route /user/<screen_name> to ShowUserTimeline."""
    routes = [
        ('/user/([0-9a-zA-Z_]+)', ShowUserTimeline),
    ]
    application = webapp.WSGIApplication(routes, debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,617
|
mfs6174/Twitdao11
|
refs/heads/master
|
/main.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import taskqueue
from base import BaseHandler
from twitdao import Twitdao
import md
import urllib
#Home
class HomeTimeline(BaseHandler):
    """Render the signed-in user's home timeline (default token)."""
    def get(self):
        params = self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities'
        ])
        token = md.get_default_access_token()
        if not token:
            # No authorized account yet: send the user to settings.
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        timeline = td.home_timeline(**params)
        limit_rate = td.API_limit_rate()
        self.render('home-timeline.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'timeline':timeline,
            # Paging anchors derived from the first/last tweet in the page.
            'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'limit_rate':limit_rate,
            'where':'home'
        })
class Mentions(BaseHandler):
    """Render the signed-in user's mentions timeline (default token)."""
    def get(self):
        params = self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities'
        ])
        token = md.get_default_access_token()
        if not token:
            # No authorized account yet: send the user to settings.
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        timeline = td.mentions(**params)
        self.render('mentions-timeline.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'timeline':timeline,
            # Paging anchors derived from the first/last tweet in the page.
            'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'where':'mentions'
        })
class Retweets(BaseHandler):
    """Render one of the three retweet timelines, selected by *which*."""
    def get(self, which):
        params = self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_entities'
        ])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        timeline = []
        if which == 'retweeted_by_me':
            timeline = td.retweeted_by_me(**params)
            title = "retweeted by me"
        elif which == 'retweeted_to_me':
            timeline = td.retweeted_to_me(**params)
            title = "retweeted to me"
        elif which == 'retweeted_of_me':
            # NOTE(review): the branch key is 'retweeted_of_me' while the
            # API helper is retweets_of_me -- confirm the URL route uses
            # this same spelling, otherwise the branch is unreachable and
            # 'title' stays unbound below.
            timeline = td.retweets_of_me(**params)
            title = "retweeted of me"
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        self.render('retweets-timeline.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'timeline':timeline,
            # Paging anchors derived from the first/last tweet in the page.
            'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'where':which,
            'which':which,
            'title':title,
        })
class Retweet(BaseHandler):
    """Confirm (GET) and perform (POST) a native retweet of tweet *id*."""
    def get(self, id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        tweet = td.statuses_show(id=id, **params)
        self.render('retweet.html', {
            'token_user':token_user,
            'owner_user':owner_user,
            'tweet':tweet,
        })
    def post(self, id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        tweet = td.statuses_retweet(id=id, **params)
        # Refresh the cached profile asynchronously on the 'cache' queue.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/t')
class UserTimeline(BaseHandler):
    """Render the public timeline of the user named by *screen_name*."""
    def get(self, screen_name):
        # Whitelisted query parameters forwarded to the Twitter API;
        # retweets are included by default.
        params = self.params([
            'user_id',
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities',
        ],include_rts='true')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        owner_user = td.users_show_by_screen_name( screen_name=screen_name )
        token_user = td.users_show_by_id(user_id = token.user_id)
        # Relationship between the signed-in account and the page owner.
        friendship = td.friendships_show(source_id=token.user_id, target_screen_name=screen_name)
        timeline = td.user_timeline(screen_name=screen_name, **params)
        self.render('user-timeline.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            # max_id pages backwards (oldest id - 1); since_id pages forward.
            'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'timeline':timeline,
            'friendship':friendship,
            'where':'user',
        })
class UpdateStatus(BaseHandler):
    """Compose (GET) and post (POST) a status update or reply.

    GET renders the reply form: with a 'screen_name' query parameter it is
    a mention form; otherwise 'status_id' selects the tweet being replied to.
    """
    def get(self):
        screen_name = self.param('screen_name')
        status_id = self.param('status_id')
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        if screen_name:
            # Mention form: no original tweet to show.
            td = Twitdao(token)
            token_user = td.users_show_by_id(user_id = token.user_id)
            owner_user = token_user
            self.render('reply.html', {
                'token':token,
                'token_user':token_user,
                'owner_user':owner_user,
                'screen_name':screen_name,
            })
        else:
            # Reply form: fetch the tweet being replied to for context.
            td = Twitdao(token)
            token_user = td.users_show_by_id(user_id = token.user_id)
            owner_user = token_user
            tweet = td.statuses_show(id=status_id,**params)
            self.render('reply.html', {
                'token':token,
                'token_user':token_user,
                'owner_user':owner_user,
                'tweet':tweet,
            })
    def post(self):
        status = self.param('status')
        params = self.params([
            'in_reply_to_status_id',
            'lat',
            'long',
            'place_id',
            'display_coordinates',
            'trim_user',
            'include_entities',
        ])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        # NOTE(review): assumes 'status' is always a non-None unicode value
        # from self.param — confirm, otherwise .encode would raise here.
        td.statuses_update(status=status.encode('utf-8'), **params)
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/t')
class ShowStatus(BaseHandler):
    """Render a single tweet identified by *status_id*."""
    def get(self, status_id):
        opts = self.params(['trim_user', 'include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        self.render('tweet-show.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'tweet': dao.statuses_show(id=status_id, **opts),
        })
class DeleteStatus(BaseHandler):
    """Confirm (GET) and delete (POST) the caller's own tweet *id*."""
    def get(self, id):
        opts = self.params(['trim_user', 'include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        self.render('tweet-delete.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'tweet': dao.statuses_show(id=id, **opts),
        })
    def post(self, id):
        opts = self.params(['trim_user', 'include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).statuses_destroy(id=id, **opts)
        # The deleted tweet may still be cached; queue a refresh.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/t')
class Followers(BaseHandler):
    """Show *screen_name*'s followers, paged by a Twitter cursor."""
    def get(self, screen_name):
        # cursor=-1 requests the first page per the Twitter cursor API.
        params = self.params([
            'user_id',
            'cursor',
            'include_entities',
        ], cursor=-1)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        followers = td.statuses_followers(screen_name=screen_name, **params)
        # On API failure the response is a dict with an 'error' key instead
        # of the usual {'users': [...], cursors...} payload.
        self.render('followers.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'error': followers['error'] if 'error' in followers else False,
            'followers':followers if 'error' in followers else followers['users'],
            'next_cursor':None if 'error' in followers else followers['next_cursor'],
            'next_cursor_str':None if 'error' in followers else followers['next_cursor_str'],
            'previous_cursor':None if 'error' in followers else followers['previous_cursor'],
            'previous_cursor_str':None if 'error' in followers else followers['previous_cursor_str'],
            'where':'followers',
        })
class Following(BaseHandler):
    """Show the accounts *screen_name* follows, paged by a Twitter cursor."""
    def get(self, screen_name):
        # cursor=-1 requests the first page per the Twitter cursor API.
        params = self.params([
            'user_id',
            'cursor',
            'include_entities',
        ], cursor=-1)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        following = td.statuses_friends(screen_name=screen_name, **params)
        # On API failure the response is a dict with an 'error' key instead
        # of the usual {'users': [...], cursors...} payload.
        self.render('following.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'error': following['error'] if 'error' in following else False,
            'following':following if 'error' in following else following['users'],
            'next_cursor':None if 'error' in following else following['next_cursor'],
            'next_cursor_str':None if 'error' in following else following['next_cursor_str'],
            'previous_cursor':None if 'error' in following else following['previous_cursor'],
            'previous_cursor_str':None if 'error' in following else following['previous_cursor_str'],
            'where':'following',
        })
class Follow(BaseHandler):
    """Confirm (GET) and create (POST) a friendship with *screen_name*."""
    def get(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        target = dao.users_show_by_screen_name(screen_name=screen_name)
        self.render('follow.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'user': target,
        })
    def post(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).friendships_create(screen_name=screen_name)
        # Refresh the cached profiles of both sides of the new relationship.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        self.redirect('/t/%s?force_refresh=true' % screen_name)
class UnFollow(BaseHandler):
    """Confirm (GET) and destroy (POST) a friendship with *screen_name*."""
    def get(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        target = dao.users_show_by_screen_name(screen_name=screen_name)
        self.render('unfollow.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'user': target,
        })
    def post(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).friendships_destroy(screen_name=screen_name)
        # Refresh the cached profiles of both sides of the broken relationship.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        self.redirect('/t/%s?force_refresh=true' % screen_name)
class Block(BaseHandler):
    """Confirm (GET) and create (POST) a block on *screen_name*."""
    def get(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        target = dao.users_show_by_screen_name(screen_name=screen_name)
        self.render('block.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'user': target,
        })
    def post(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).blocks_create(screen_name=screen_name)
        # Blocking changes relationship state on both sides; refresh caches.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        self.redirect('/t/%s?force_refresh=true' % screen_name)
class UnBlock(BaseHandler):
    """Confirm (GET) and remove (POST) a block on *screen_name*."""
    def get(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        target = dao.users_show_by_screen_name(screen_name=screen_name)
        self.render('unblock.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'user': target,
        })
    def post(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).blocks_destroy(screen_name=screen_name)
        # Unblocking changes relationship state on both sides; refresh caches.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        self.redirect('/t/%s?force_refresh=true' % screen_name)
#Favorite
class Favorites(BaseHandler):
    """Show *screen_name*'s favorites with simple page-number paging."""
    def get(self, screen_name):
        params = self.params(['page', 'include_entities'])
        page = self.param('page')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        favorites = td.favorites(id=screen_name, **params)
        # Default paging when no 'page' query parameter was supplied.
        prev_page, next_page = None, 2
        if page:
            # Tolerate only a malformed 'page' value; the original bare
            # except also hid unrelated bugs. The arithmetic cannot raise,
            # so it lives in the else clause.
            try:
                page = int(page)
            except (TypeError, ValueError):
                pass
            else:
                prev_page = page-1 if page-1>0 else None
                next_page = page+1
        self.render('favorites.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'favorites':favorites,
            'prev_page':prev_page,
            'next_page':next_page,
            'where':'favorites',
        })
class FavoritesDestroy(BaseHandler):
    """Confirm (GET) and remove (POST) a favorite on tweet *id*."""
    def get(self, id):
        opts = self.params(['trim_user', 'include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        self.render('unfavorite.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'tweet': dao.statuses_show(id=id, **opts),
        })
    def post(self, id):
        opts = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).favorites_destroy(id=id, **opts)
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/t/%s/favorites' % token.screen_name)
class FavoritesCreate(BaseHandler):
    """Confirm (GET) and add (POST) a favorite on tweet *id*."""
    def get(self, id):
        opts = self.params(['trim_user', 'include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        self.render('favorite.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'tweet': dao.statuses_show(id=id, **opts),
        })
    def post(self, id):
        opts = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).favorites_create(id=id, **opts)
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/t/%s/favorites' % token.screen_name)
#direct message
class DirectMessages(BaseHandler):
    """Show the authenticated user's received direct messages (inbox)."""
    def get(self):
        params = self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'include_entities',
        ])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        direct_messages = td.direct_messages(**params)
        self.render('messages.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            # max_id pages backwards (oldest id - 1); since_id pages forward.
            'max_id':str(direct_messages[-1]['id']-1) if type(direct_messages)==list and len(direct_messages)>0 else None,
            'since_id':direct_messages[0]['id_str'] if type(direct_messages)==list and len(direct_messages)>0 else None,
            'messages':direct_messages,
            'where':'inbox',
        })
class DirectMessagesSent(BaseHandler):
    """Show the authenticated user's sent direct messages."""
    def get(self):
        params = self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'include_entities',
        ])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        direct_messages = td.direct_messages_sent(**params)
        self.render('messages-sent.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            # max_id pages backwards (oldest id - 1); since_id pages forward.
            'max_id':str(direct_messages[-1]['id']-1) if type(direct_messages)==list and len(direct_messages)>0 else None,
            'since_id':direct_messages[0]['id_str'] if type(direct_messages)==list and len(direct_messages)>0 else None,
            'messages':direct_messages,
            'where':'sent',
        })
class DirectMessagesNew(BaseHandler):
    """Compose (GET) and send (POST) a new direct message."""
    def get(self):
        # Optional recipient pre-filled from the query string.
        screen_name = self.param('screen_name')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        self.render('message-new.html',{
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'screen_name':screen_name,
        })
    def post(self):
        screen_name = self.param('screen_name')
        user_id = self.param('user_id')
        text = self.param('text')
        params = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        # NOTE(review): assumes 'text' is always a non-None unicode value
        # from self.param — confirm, otherwise .encode would raise here.
        message = td.direct_messages_new(user_id=user_id, screen_name=screen_name, text=text.encode('utf-8'), **params)
        self.redirect('/a/messages_sent')
class DirectMessagesDestroy(BaseHandler):
    """Confirm (GET) and delete (POST) a direct message by *id*."""
    def get(self, id):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        # Twitter has no "show a single DM" API, so the confirmation page
        # cannot display the message body.
        self.render('message-destroy.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'message': None,
        })
    def post(self, id):
        opts = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).direct_messages_destroy(id=id, **opts)
        self.redirect('/a/messages_sent')
class Lists(BaseHandler):
    """Show the lists owned by *screen_name*, paged by a Twitter cursor."""
    def get(self, screen_name):
        # cursor=-1 requests the first page per the Twitter cursor API.
        params = self.params(['cursor'],cursor=-1)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        lists = td.user_lists_get(screen_name = screen_name, **params)
        self.render('lists.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'lists':lists['lists'],
            'next_cursor':lists['next_cursor'],
            'next_cursor_str':lists['next_cursor_str'],
            'previous_cursor':lists['previous_cursor'],
            'previous_cursor_str':lists['previous_cursor_str'],
            'where':'lists',
        })
class ListsMemberships(BaseHandler):
    """Show the lists that include *screen_name* as a member."""
    def get(self, screen_name):
        # cursor=-1 requests the first page per the Twitter cursor API.
        params = self.params(['cursor'],cursor=-1)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        lists = td.user_list_memberships(screen_name = screen_name, **params)
        self.render('lists-memberships.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'lists':lists['lists'],
            'next_cursor':lists['next_cursor'],
            'next_cursor_str':lists['next_cursor_str'],
            'previous_cursor':lists['previous_cursor'],
            'previous_cursor_str':lists['previous_cursor_str'],
            'where':'list-memberships',
        })
class ListsSubscriptions(BaseHandler):
    """Show the lists that *screen_name* subscribes to."""
    def get(self, screen_name):
        # cursor=-1 requests the first page per the Twitter cursor API.
        params = self.params(['cursor'],cursor=-1)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        lists = td.user_list_subscriptions(screen_name = screen_name, **params)
        self.render('lists-subscriptions.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'lists':lists['lists'],
            'next_cursor':lists['next_cursor'],
            'next_cursor_str':lists['next_cursor_str'],
            'previous_cursor':lists['previous_cursor'],
            'previous_cursor_str':lists['previous_cursor_str'],
            'where':'list-subscriptions',
        })
class ListTimeline(BaseHandler):
    """Render the tweet timeline of the list *slug* owned by *screen_name*."""
    def get(self, screen_name, slug ):
        params = self.params(['since_id','max_id','per_page','page','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        # Fetch the list metadata and its statuses separately.
        ls = td.user_list_id_get(id=slug, screen_name=screen_name)
        timeline = td.user_list_id_statuses(id=slug, screen_name = screen_name, **params)
        self.render('list-timeline.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'list':ls,
            'timeline':timeline,
            # max_id pages backwards (oldest id - 1); since_id pages forward.
            'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'where':'list-timeline'
        })
class ListCreate(BaseHandler):
    """Show the list-creation form (GET) and create the list (POST)."""
    def get(self):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        self.render('list-create.html',{
            'token_user':token_user,
            'owner_user':owner_user,
        })
    def post(self):
        name = self.param('name')
        # Lists default to public visibility unless the form says otherwise.
        params = self.params(['mode','description'], mode='public')
        # UTF-8-encode user-supplied text before it hits the OAuth layer.
        name=name.encode('utf-8')
        if 'description' in params:
            params['description']=params['description'].encode('utf-8')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        lst = td.user_lists_post(name=name, **params)
        # The slug comes back from Twitter and may contain non-ASCII chars.
        self.redirect('/t/%s/%s' % (token_user['screen_name'], urllib.quote(lst['slug'].encode('utf-8'))))
class ListEdit(BaseHandler):
    """Show the edit form for list *lid* (GET) and apply changes (POST)."""
    def get(self, lid):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        lst = td.user_list_id_get(id=lid)
        self.render('list-edit.html',{
            'token_user':token_user,
            'owner_user':owner_user,
            'list':lst,
        })
    def post(self, lid):
        params = self.params(['name','mode','description'])
        # UTF-8-encode user-supplied text before it hits the OAuth layer.
        if 'name' in params:
            params['name']=params['name'].encode('utf-8')
        if 'description' in params:
            params['description']=params['description'].encode('utf-8')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        lst = td.user_lists_id_post(id=lid, **params)
        # NOTE(review): 'jedirect' — presumably a BaseHandler helper doing a
        # JavaScript-delayed redirect (time=2000 ms); confirm it exists and
        # is not a typo for 'redirect'.
        self.jedirect('/t/%s/%s' % (token_user['screen_name'], urllib.quote(lst['slug'].encode('utf-8'))), time=2000)
class ListDelete(BaseHandler):
    """Confirm (GET) and delete (POST) one of the caller's lists."""
    def get(self, lid):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        self.render('list-delete.html', {
            'token_user': me,
            'owner_user': me,
            'list': dao.user_list_id_get(id=lid),
        })
    def post(self, lid):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).user_list_id_delete(id=lid)
        self.redirect('/t/%s/lists' % token.screen_name)
class ListFollow(BaseHandler):
    """Confirm (GET) and subscribe to (POST) another user's list."""
    def get(self, screen_name, slug ):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        self.render('list-follow.html', {
            'token_user': me,
            'owner_user': me,
            'list': dao.user_list_id_get(id=slug, screen_name=screen_name),
        })
    def post(self, screen_name, slug ):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).user_list_id_subscribers_post(screen_name=screen_name, list_id=slug)
        self.redirect('/t/%s/%s' % (screen_name, slug) )
class ListUnFollow(BaseHandler):
    """Confirm (GET) and unsubscribe from (POST) another user's list."""
    def get(self, screen_name, slug ):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        self.render('list-unfollow.html', {
            'token_user': me,
            'owner_user': me,
            'list': dao.user_list_id_get(id=slug, screen_name=screen_name),
        })
    def post(self, screen_name, slug ):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        Twitdao(token).user_list_id_subscribers_delete(screen_name=screen_name, list_id=slug)
        self.redirect('/t/%s/%s' % (screen_name, slug) )
class ListAdd(BaseHandler):
    """Pick lists (GET) and add user *screen_name* to them (POST)."""
    def get(self, screen_name):
        # cursor=-1 requests the first page per the Twitter cursor API.
        params = self.params(['cursor'],cursor=-1)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        add_user = owner_user
        lists = td.user_lists_get(**params)
        self.render('lists-add-to.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'add_user':add_user,
            'lists':lists['lists'],
            'where':'lists',
        })
    def post(self, screen_name):
        # The form may check several lists; one API call per list is fanned
        # out to the task queue instead of running inline in the request.
        list_ids=self.request.get_all('list_ids')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        for list_id in list_ids:
            taskqueue.add(url="/q/list_add_user", params={'tk':token.key(), 'list_id':list_id, 'screen_name':screen_name}, method='GET')
            #td.user_list_id_members_post(token.screen_name, list_id, id=screen_name)
        self.redirect('/t/%s/lists' % token.screen_name)
class ListRemove(BaseHandler):
    """Confirm (GET) and remove (POST) *screen_name* from list *slug*."""
    def get(self, slug, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        remove_user = td.users_show_by_screen_name(screen_name = screen_name)
        # Only the caller's own lists can be edited, hence token.screen_name.
        lst = td.user_list_id_get(id=slug, screen_name=token.screen_name )
        self.render('list-remove-from.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'remove_user':remove_user,
            'list':lst,
            'where':'lists',
        })
    def post(self, slug, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        td.user_list_id_members_delete(screen_name=token.screen_name, list_id=slug, id=screen_name)
        self.redirect('/t/%s/%s/following' % (token.screen_name, slug) )
class ListFollowing(BaseHandler):
    """Show the members of list *slug* owned by *screen_name*."""
    def get(self, screen_name, slug):
        # cursor=-1 requests the first page per the Twitter cursor API.
        params = self.params(['cursor', 'include_entities'], cursor=-1)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        lst = td.user_list_id_get(id=slug, screen_name=screen_name )
        following = td.user_list_id_members_get(screen_name, slug, **params)
        # On API failure the response is a dict with an 'error' key instead
        # of the usual {'users': [...], cursors...} payload.
        self.render('list-following.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'error': following['error'] if 'error' in following else False,
            'following':following if 'error' in following else following['users'],
            'next_cursor':None if 'error' in following else following['next_cursor'],
            'next_cursor_str':None if 'error' in following else following['next_cursor_str'],
            'previous_cursor':None if 'error' in following else following['previous_cursor'],
            'previous_cursor_str':None if 'error' in following else following['previous_cursor_str'],
            'list':lst,
            'where':'list-following',
        })
class ListFollowers(BaseHandler):
    """Show the subscribers of list *slug* owned by *screen_name*."""
    def get(self, screen_name, slug):
        # cursor=-1 requests the first page per the Twitter cursor API.
        params = self.params(['cursor', 'include_entities'], cursor=-1)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        lst = td.user_list_id_get(id=slug, screen_name=screen_name )
        followers = td.user_list_id_subscribers_get(screen_name, slug, **params)
        # On API failure the response is a dict with an 'error' key instead
        # of the usual {'users': [...], cursors...} payload.
        self.render('list-followers.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'error': followers['error'] if 'error' in followers else False,
            'followers':followers if 'error' in followers else followers['users'],
            'next_cursor':None if 'error' in followers else followers['next_cursor'],
            'next_cursor_str':None if 'error' in followers else followers['next_cursor_str'],
            'previous_cursor':None if 'error' in followers else followers['previous_cursor'],
            'previous_cursor_str':None if 'error' in followers else followers['previous_cursor_str'],
            'list':lst,
            'where':'list-followers',
        })
class Blocking(BaseHandler):
    """List the accounts the authenticated user is blocking (paged)."""
    def get(self):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        params = self.params(['page', 'include_entities'])
        page = self.param('page')
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        blocking = td.blocks_blocking(**params)
        # Default paging when no 'page' query parameter was supplied.
        prev_page, next_page = None, 2
        if page:
            # Tolerate only a malformed 'page' value; the original bare
            # except also hid unrelated bugs. The arithmetic cannot raise,
            # so it lives in the else clause.
            try:
                page = int(page)
            except (TypeError, ValueError):
                pass
            else:
                prev_page = page-1 if page-1>0 else None
                next_page = page+1
        self.render('blocking.html',{
            'token_user':token_user,
            'owner_user':owner_user,
            'blocking':blocking,
            'prev_page':prev_page,
            'next_page':next_page,
        })
class ReportSpam(BaseHandler):
    """Confirm (GET) and report (POST) *screen_name* for spam."""
    def get(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        self.render('report-spam.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'title':'Report %s for spam?' % screen_name,
            'confirm':'Report',
            'where':'reportspam',
        })
    def post(self, screen_name):
        #user_id, screen_name, include_entities
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        td.report_spam(screen_name = screen_name)
        # Reporting also blocks the user, so refresh both cached profiles.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        self.redirect('/t/%s?force_refresh=true' % screen_name)
class SavedSearches(BaseHandler):
    """List the authenticated user's saved searches."""
    def get(self):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        dao = Twitdao(token)
        me = dao.users_show_by_id(user_id=token.user_id)
        self.render('saved_searches.html', {
            'token': token,
            'token_user': me,
            'owner_user': me,
            'searches': dao.saved_searches(),
        })
class Search(BaseHandler):
    """Run a Twitter search for query 'q' via the official search API."""
    def get(self):
        q = self.param('q')
        # NOTE(review): 'locate' looks like a typo for the search API's
        # 'locale' parameter — confirm against Twitdao.search.
        params = self.params([
            'lang',
            'locate',
            'rpp',
            'page',
            'since_id',
            'until',
            'geocode',
            'show_user',
            'result_type',
        ])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        limit_rate = td.API_limit_rate()
        # Only hit the search API when a query was actually supplied.
        searchd = None
        if q:
            q = q.encode('utf-8')
            searchd = td.search(q, **params)
        self.render('search.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'q':q,
            'limit_rate':limit_rate,
            'search_data':searchd
        })
class HackedSearch(BaseHandler):
    """Run a search via Twitdao's unofficial ('hacked') search endpoint."""
    def get(self):
        q = self.param('q')
        page = self.param('page')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        # Only hit the API when a query was actually supplied.
        searchd = None
        timeline=[]
        if q:
            searchd=td.hacked_search(q.encode('utf-8'), page=page)
            timeline=searchd['statuses']
        self.render('hacked_search.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'q':q,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'search_data':searchd
        })
def main():
    """Build the WSGI route table and run the webapp application.

    Route order is significant: literal suffixes like '/followers' and
    '/lists' must be listed before the generic '/t/<user>/<slug>' and
    '/t/<user>' patterns so they are matched first.
    """
    application = webapp.WSGIApplication([
        ('/t/?', HomeTimeline),
        ('/t/mentions', Mentions),
        ('/t/retweets/(retweeted_by_me)', Retweets),
        ('/t/retweets/(retweeted_to_me)', Retweets),
        ('/t/retweets/(retweeted_of_me)', Retweets),
        ('/a/retweet/([0-9]+)', Retweet),
        ('/t/statuses/update', UpdateStatus),
        ('/a/statuses/reply', UpdateStatus),
        ('/a/statuses/mention', UpdateStatus),
        ('/a/statuses/delete/([0-9]+)', DeleteStatus),
        ('/a/statuses/([0-9]+)', ShowStatus),
        ('/t/([0-9a-zA-Z_]+)/followers', Followers),
        ('/t/([0-9a-zA-Z_]+)/following', Following),
        ('/t/([0-9a-zA-Z_]+)/favorites', Favorites),
        ('/t/[0-9a-zA-Z_]+/favorites/create/([0-9]+)', FavoritesCreate),
        ('/t/[0-9a-zA-Z_]+/favorites/destroy/([0-9]+)', FavoritesDestroy),
        ('/t/([0-9a-zA-Z_]+)/lists', Lists),
        ('/t/([0-9a-zA-Z_]+)/lists/memberships', ListsMemberships),
        ('/t/([0-9a-zA-Z_]+)/lists/subscriptions', ListsSubscriptions),
        ('/t/([0-9a-zA-Z_]+)/([0-9a-zA-Z\-%]+)/?', ListTimeline),
        ('/t/([0-9a-zA-Z_]+)/([0-9a-zA-Z\-%]+)/following', ListFollowing),
        ('/t/([0-9a-zA-Z_]+)/([0-9a-zA-Z\-%]+)/followers', ListFollowers),
        ('/a/list_create', ListCreate),
        ('/a/list_edit/([0-9a-zA-Z\-%]+)', ListEdit),
        ('/a/list_delete/([0-9a-zA-Z\-%]+)', ListDelete),
        ('/a/list_follow/([0-9a-zA-Z_]+)/([0-9a-zA-Z\-%]+)', ListFollow),
        ('/a/list_unfollow/([0-9a-zA-Z_]+)/([0-9a-zA-Z\-%]+)', ListUnFollow),
        ('/a/list_add/([0-9a-zA-Z_]+)', ListAdd),
        ('/a/list_remove/([0-9a-zA-Z\-%]+)/([0-9a-zA-Z_]+)', ListRemove),
        ('/t/([0-9a-zA-Z_]+)', UserTimeline),
        ('/a/messages', DirectMessages),
        ('/a/messages_sent', DirectMessagesSent),
        ('/a/messages_new', DirectMessagesNew),
        ('/a/messages_destroy/([0-9]+)', DirectMessagesDestroy),
        ('/a/follow/([0-9a-zA-Z_]+)', Follow),
        ('/a/unfollow/([0-9a-zA-Z_]+)', UnFollow),
        ('/a/block/([0-9a-zA-Z_]+)', Block),
        ('/a/unblock/([0-9a-zA-Z_]+)', UnBlock),
        ('/a/blocking', Blocking),
        ('/a/report_spam/([0-9a-zA-Z_]+)', ReportSpam),
        #('/a/search', Search),
        ('/a/saved_searches', SavedSearches),
        ('/a/search', HackedSearch),
    ], debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,618
|
mfs6174/Twitdao11
|
refs/heads/master
|
/twitpic2.py
|
# -*- coding: utf-8 -*-
import mimetypes
import urllib
import random
import oauth
from django.utils import simplejson as json
from google.appengine.api import urlfetch
# Map HTTP verb names to the constants expected by
# google.appengine.api.urlfetch.fetch(method=...).
_http_methods={
    'GET':urlfetch.GET,
    'POST':urlfetch.POST,
    'HEAD':urlfetch.HEAD,
    'PUT':urlfetch.PUT,
    'DELETE':urlfetch.DELETE
}
_requires_authentication=[
'upload',
'comments/create',
'comments/delete',
'comments/create',
'comments/delete',
'faces/show',
'faces/create',
'faces/edit',
'faces/delete',
'event/create',
'event/delete',
'event/add',
'event/remove',
'tags/create',
'tags/delete'
]
def _generate_boundary(length=16):
s = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_'
a = []
for i in range(length):
a.append(random.choice(s))
return ''.join(a)
def _get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def _encode_multipart_formdata(fields, files=[]):
    """
    Build a multipart/form-data request body.

    fields is a sequence of (name, value) pairs for regular form fields.
    files is a sequence of (name, filename, value) triples for file uploads.
    Return (boundary, body).
    """
    boundary = _generate_boundary()
    parts = []
    for name, value in fields:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"' % name,
            '',
            str(value),
        ])
    for name, filename, value in files:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
            'Content-Type: %s' % _get_content_type(filename),
            '',
            str(value),
        ])
    # Closing boundary plus a trailing CRLF, exactly as the original emitted.
    parts.append('--' + boundary + '--')
    parts.append('')
    return boundary, '\r\n'.join(parts)
class TwitPic2(oauth.OAuthClient):
    """TwitPic API v2 client using OAuth Echo.

    Authenticated calls are signed against Twitter's verify_credentials
    endpoint and the resulting Authorization header is forwarded to
    TwitPic via the X-Verify-Credentials-Authorization header.
    """
    SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
    STATUS_UPDATE_URL = 'https://api.twitter.com/1.1/statuses/update.json'
    USER_INFO_URL = 'https://api.twitter.com/1.1/account/verify_credentials.json'
    FORMAT = 'json'
    SERVER = 'http://api.twitpic.com/2/'
    def __init__(self, consumer_key=None, consumer_secret=None,
                 service_key=None, access_token=None):
        """
        An object for interacting with the Twitpic API.
        The arguments listed below are generally required for most calls.
        Args:
            consumer_key:
                Twitter API Key [optional]
            consumer_secret:
                Twitter API Secret [optional]
            access_token:
                Authorized access_token in string format. [optional]
            service_key:
                Twitpic service key used to interact with the API. [optional]
        NOTE:
            The TwitPic OAuth Client does NOT support fetching
            an access_token. Use your favorite Twitter API Client to
            retrieve this.
        """
        self.server = self.SERVER
        self.consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
        self.signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
        self.service_key = service_key
        self.format = self.FORMAT
        # Details of the most recent HTTP exchange, for debugging callers.
        self.http_status = 0
        self.http_headers = {}
        self.http_body = ''
        if access_token:
            self.access_token = oauth.OAuthToken.from_string(access_token)
    def set_comsumer(self, consumer_key, consumer_secret):
        # NOTE: method name typo ("comsumer") preserved for caller compatibility.
        self.consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
    def set_access_token(self, access_token):
        # Fixed: the parameter was misspelled 'accss_token', so the body
        # raised NameError on every call.
        self.access_token = oauth.OAuthToken.from_string(access_token)
    def set_service_key(self, service_key):
        self.service_key = service_key
    def _fetch(self, method, url, params={}, headers={}, files=None):
        """Issue one urlfetch request; record status/headers/body on self."""
        payload = None
        if method.upper() in ['POST', 'PUT']:
            if files and type(files) == list:
                # File uploads go out as multipart/form-data.
                boundary, payload = _encode_multipart_formdata(params.items(), files)
                headers['Content-Type'] = 'multipart/form-data; boundary=%s' % boundary
            else:
                payload = urllib.urlencode(params)
        res = urlfetch.fetch(url, payload, _http_methods[method.upper()], headers)
        self.http_status = res.status_code
        self.http_headers = res.headers
        self.http_body = res.content
        return res.content
    def api_call(self, http_method, api_method, params={}, files=None):
        """Call a TwitPic API method and return the decoded JSON response.

        Methods listed in _requires_authentication are signed via OAuth
        Echo; others are fetched anonymously.
        """
        url = '%s%s.%s' % (self.server, api_method, self.format)
        if api_method not in _requires_authentication:
            # Fixed: 'headers' was referenced before assignment here;
            # anonymous calls need no extra headers.
            resp = self._fetch(http_method, url, params, {})
            return json.loads(resp)
        # NOTE(review): requires self.access_token to have been set via the
        # constructor or set_access_token — otherwise this raises AttributeError.
        oauth_request = oauth.OAuthRequest.from_consumer_and_token(
            self.consumer,
            self.access_token,
            http_url=self.USER_INFO_URL
        )
        # Sign our request before setting Twitpic-only parameters
        oauth_request.sign_request(self.signature_method, self.consumer, self.access_token)
        # Set TwitPic parameters
        oauth_request.set_parameter('key', self.service_key)
        for key, value in params.iteritems():
            oauth_request.set_parameter(key, value)
        # Build request body parameters.
        params = oauth_request.parameters
        # Get the oauth headers.
        oauth_headers = oauth_request.to_header(realm='http://api.twitter.com/')
        # Add the headers required by TwitPic and any additional headers.
        headers = {
            'X-Verify-Credentials-Authorization': oauth_headers['Authorization'],
            'X-Auth-Service-Provider': self.USER_INFO_URL,
        }
        resp = self._fetch(http_method, url, params, headers, files)
        return json.loads(resp)
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,619
|
mfs6174/Twitdao11
|
refs/heads/master
|
/ajax1.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import taskqueue
from base import BaseHandler
from django.utils import simplejson as json
from urllib import urlencode
from twitdao import Twitdao
import md
import twitpic2
class UserTimeline(BaseHandler):
    """Ajax view of a user's public timeline rendered with the shared proxy token."""
    def get(self, screen_name, slug):
        """Render timeline HTML for *screen_name*; *slug* is 'refresh' or 'more'."""
        # Twitter API parameters forwarded from the query string;
        # retweets are included by default.
        params = self.params([
            'user_id',
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities',
        ],include_rts='true')
        token = md.get_proxy_access_token()
        #if not token:
        #    token = md.get_proxy_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        owner_user = td.users_show_by_screen_name( screen_name=screen_name, **params)
        token_user = td.users_show_by_id(user_id = token.user_id)
        timeline = td.user_timeline(screen_name=screen_name, **params)
        tweets = self.render('ajax/user-user.html', {
            'token':token,
            #'token_user':token_user,
            'owner_user':owner_user,
            'timeline':timeline,
        },out=False)
        if slug == 'refresh':
            # Newer tweets: advance since_id to the newest id seen.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['since_id'] = str(timeline[0]['id'])
                count = len(timeline)
            else:
                tweets=''
                next_params['since_id'] = str(params['since_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count,
            }))
        else:
            # Older tweets ('more'): page backwards with max_id = oldest id - 1.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['max_id'] = str(timeline[-1]['id']-1)
                count = len(timeline)
            else:
                tweets=''
                next_params['max_id'] = str(params['max_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count,
                'href':'/user/%s?%s' % (screen_name, urlencode(next_params))
            }))
class ShowStatus(BaseHandler):
    """Render a single tweet page through the proxy access token."""
    def get(self, status_id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_proxy_access_token()
        if not token:
            # No proxy token configured: send the user to settings.
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        tweet = td.statuses_show(id=status_id,**params)
        self.render('tweet-show-proxy.html', {
            'token':token,
            #'token_user':token_user,
            'owner_user':owner_user,
            'tweet':tweet,
        })
class AjaxShowStatus(BaseHandler):
    """Return a single tweet as JSON-wrapped HTML for ajax insertion."""
    def get(self, id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        tweet = td.statuses_show(id=id, **params)
        tweet_html = self.render('ajax/user-tweet.html', {
            'token':token,
            #'token_user':token_user,
            'tweet':tweet,
        }, out=False)
        # An 'error' key in the API response signals failure.
        self.write(json.dumps({
            'tweet':tweet_html if 'error' not in tweet else None,
            'success':'error' not in tweet,
            'info':tweet['error'] if 'error' in tweet else 'OK',
        }))
def main():
    """WSGI entry point: map the ajax proxy URLs to their handlers."""
    application = webapp.WSGIApplication([
        ('/x1/user/([0-9a-zA-Z_]+)/(refresh|more)', UserTimeline),
        ('/x1/statuses/([0-9]+)', ShowStatus),
        ('/x1/show/([0-9]+)', AjaxShowStatus),
    ], debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,620
|
mfs6174/Twitdao11
|
refs/heads/master
|
/settings.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import memcache
from google.appengine.api import users
from base import BaseHandler
from twitdao import Twitdao
import md
import random
import os
def _generate_id(length=64):
'''Generate a cookie id. '''
s = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
a = []
for i in range(length):
a.append(random.choice(s))
return ''.join(a)
class Auth(BaseHandler):
    """Start the OAuth dance: fetch a request token and redirect to Twitter."""
    def get(self):
        url = self.param('url')
        if not url:
            # Default post-auth destination is the settings page.
            url='%s://%s/settings' % (self.request.scheme, os.environ['HTTP_HOST'])
        callback='%s://%s/settings/callback?url=%s' % (self.request.scheme, os.environ['HTTP_HOST'], url)
        td=Twitdao()
        request_token = td.fetch_request_token(callback=callback)
        if not request_token and users.is_current_user_admin():
            # Likely missing consumer config; admins can fix it.
            self.redirect('/config')
            return
        elif not request_token:
            self.redirect('/settings')
            return
        # Stash the request token in memcache keyed by a random cookie id,
        # so the callback can retrieve it.
        cookie_id = _generate_id()
        memcache.set(cookie_id, request_token)
        self.set_cookie('cid', cookie_id)
        self.redirect(td.get_authorize_url(request_token, force_login=True))
class AuthCallback(BaseHandler):
    """Finish the OAuth dance: exchange the verifier for an access token."""
    def get(self):
        denied = self.param('denied', default_value=None)
        if denied:
            # User refused authorization on Twitter's side.
            self.render('denied.html')
            return
        oauth_verifier = self.param('oauth_verifier')
        # Recover the request token saved by Auth via the 'cid' cookie.
        cookie_id = self.get_cookie('cid','')
        request_token = memcache.get(cookie_id)
        if not request_token or 'oauth_token' not in request_token:
            self.delete_cookie('cid')
            self.error(404)
            return
        td = Twitdao(md.AccessToken(
            oauth_token=request_token['oauth_token'],
            oauth_token_secret=request_token['oauth_token_secret']
        ))
        access_token = td.fetch_access_token(oauth_verifier)
        # Persist the access token for the signed-in App Engine user.
        md.save_access_token(
            user_id=access_token['user_id'],
            screen_name=access_token['screen_name'],
            oauth_token=access_token['oauth_token'],
            oauth_token_secret=access_token['oauth_token_secret'],
            app_user = users.get_current_user()
        )
        self.delete_cookie('cid')
        self.redirect(self.param('url'))
class Settings(BaseHandler):
    """List the current user's access tokens (paginated by datastore cursor)."""
    def get(self):
        cursor=self.param('cursor', default_value=None)
        default_token = md.get_default_access_token()
        tokens, cursor = md.get_user_access_tokens(users.get_current_user(), 10, cursor)
        self.render('settings.html', {
            'default_token':default_token,
            'tokens':tokens,
            'cursor':cursor,
            'where':'settings'
        })
class SetDefaultToken(BaseHandler):
    """Mark one of the user's access tokens as the default."""
    def post(self):
        token_key = self.param('token_key')
        # Lookup is scoped to the current user, so one user cannot
        # select another user's token.
        token = md.get_access_token(token_key, users.get_current_user())
        md.set_default_access_token(token)
        self.redirect('/settings')
class DeleteToken(BaseHandler):
    """Delete one of the current user's access tokens."""
    def post(self):
        token_key = self.param('token_key')
        t = md.delete_access_token(token_key, users.get_current_user())
        self.redirect('/settings')
class SettingsProfile(BaseHandler):
    """View (GET) and update (POST) the Twitter profile behind a token."""
    def get(self):
        tk=self.param('tk')
        if not tk:
            self.error(404)
            return
        token = md.get_access_token(tk, users.get_current_user())
        if not token:
            self.redirect('/settings')
            return
        td=Twitdao(token)
        # Bypass the cache so the page shows the freshest profile data.
        token_user=td.users_show_by_id(user_id=token.user_id, _twitdao_force_refresh=True)
        self.render('settings-profile.html', {
            'token_key':tk,
            'token':token,
            'token_user':token_user,
            'owner_user':token_user,
            'where':'settings-profile'
        })
    def post(self):
        tk=self.param('tk')
        if not tk:
            self.error(404)
            return
        token = md.get_access_token(tk, users.get_current_user())
        if not token:
            self.redirect('/settings')
            return
        td=Twitdao(token)
        image=self.param('picture')
        if image:
            # Upload a new avatar when a picture was posted.
            filename=self.request.POST[u'picture'].filename.encode('utf-8')
            td.account_update_profile_image(('image', filename, image))
        params=self.params(['name', 'url', 'location', 'description', 'include_entities'])
        for k in params:
            params[k]=params[k].encode('utf-8')
        td.account_update_profile(**params)
        self.redirect('/settings/profile?tk=%s' % tk)
class SettingsDesign(BaseHandler):
    """View (GET) and update (POST) the Twitter profile design for a token."""
    def get(self):
        tk=self.param('tk')
        if not tk:
            self.error(404)
            return
        token = md.get_access_token(tk, users.get_current_user())
        if not token:
            self.redirect('/settings')
            return
        td=Twitdao(token)
        token_user=td.users_show_by_id(user_id=token.user_id, _twitdao_force_refresh=True)
        self.render('settings-design.html', {
            'token_key':tk,
            'token':token,
            'token_user':token_user,
            'owner_user':token_user,
            'where':'settings-design'
        })
    def post(self):
        tk=self.param('tk')
        if not tk:
            self.error(404)
            return
        # ds_type selects which design aspect to update: 'colors' or 'background'.
        ds_type=self.param('ds_type')
        token = md.get_access_token(tk, users.get_current_user())
        if not token:
            self.redirect('/settings')
            return
        td=Twitdao(token)
        if ds_type == 'colors':
            params=self.params([
                'profile_background_color',
                'profile_text_color',
                'profile_link_color',
                'profile_sidebar_fill_color',
                'profile_sidebar_border_color',
                'include_entities',
            ])
            td.account_update_profile_colors(**params)
        elif ds_type == 'background':
            image=self.param('image')
            if image:
                params=self.params(['tile','include_entities'])
                for k in params:
                    params[k]=params[k].encode('utf-8')
                filename=self.request.POST[u'image'].filename.encode('utf-8')
                td.account_update_profile_background_image(('image', filename, image), **params)
        self.redirect('/settings/design?tk=%s' % tk)
class SettingsTwitdao(BaseHandler):
    """View (GET) and update (POST) Twitdao-local settings for a token."""
    def get(self):
        tk=self.param('tk')
        if not tk:
            self.error(404)
            return
        token = md.get_access_token(tk, users.get_current_user())
        if not token:
            self.redirect('/settings')
            return
        td=Twitdao(token)
        token_user=td.users_show_by_id(user_id=token.user_id)
        self.render('settings-twitdao.html', {
            'token_key':tk,
            'token':token,
            'token_user':token_user,
            'owner_user':token_user,
            'where':'settings-twitdao'
        })
    def post(self):
        tk=self.param('tk')
        if not tk:
            self.error(404)
            return
        ds_type=self.param('ds_type')
        token = md.get_access_token(tk, users.get_current_user())
        if not token:
            self.redirect('/settings')
            return
        # Only the show_media flag is stored; the posted value is the
        # literal string 'True' when enabled.
        show_media=self.param('show_media')
        settings={}
        settings['show_media']=True if show_media=='True' else False
        md.set_token_settings(tk, users.get_current_user(), **settings)
        self.redirect('/settings/twitdao?tk=%s' % tk)
def main():
    """WSGI entry point: map the /settings URLs to their handlers."""
    application = webapp.WSGIApplication([
        ('/settings', Settings),
        ('/settings/auth', Auth),
        ('/settings/callback', AuthCallback),
        ('/settings/delete_token', DeleteToken),
        ('/settings/set_default_token', SetDefaultToken),
        ('/settings/profile', SettingsProfile),
        ('/settings/design', SettingsDesign),
        ('/settings/twitdao', SettingsTwitdao),
        #('/settings/sync', SettingsSync), #TODO
    ], debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,621
|
mfs6174/Twitdao11
|
refs/heads/master
|
/index.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import users
from base import BaseHandler
import md
# Substrings matched against the User-Agent header to detect mobile browsers.
_mobile = [
    '2.0 MMP',
    '240x320',
    '400X240',
    'AvantGo',
    'BlackBerry',
    'Blazer',
    'Cellphone',
    'Danger',
    'DoCoMo',
    'Elaine/3.0',
    'EudoraWeb',
    'Googlebot-Mobile',
    'hiptop',
    'IEMobile',
    'KYOCERA/WX310K',
    'LG/U990',
    'MIDP-2.',
    'MMEF20',
    'MOT-V',
    'NetFront',
    'Newt',
    'Nintendo Wii',
    'Nitro', #Nintendo DS
    'Nokia',
    'Opera Mini',
    'Opera Mobi', #Opera Mobile
    'Palm',
    'PlayStation Portable',
    'portalmmm',
    'Proxinet',
    'ProxiNet',
    'SHARP-TQ-GX10',
    'SHG-i900',
    'Small',
    'SonyEricsson',
    'Symbian OS',
    'SymbianOS',
    'TS21i-10',
    'UP.Browser',
    'UP.Link',
    'webOS', #Palm Pre, etc.
    'Windows CE',
    'WinWAP',
    'YahooSeeker/M1A1-R2D2',
]
# User-Agent substrings for touch-screen devices; also treated as mobile.
_touch = [
    'iPhone',
    'iPod',
    'Android',
    'BlackBerry9530',
    'LG-TU915 Obigo', #LG touch browser
    'LGE VX',
    'webOS', #Palm Pre, etc.
    'Nokia5800',
]
def _is_mobile(ua):
    """Return True when *ua* contains any known mobile/touch browser token."""
    return any(token in ua for token in _mobile + _touch)
class Index(BaseHandler):
    """Front page: route visitors to login, mobile, desktop, or settings."""
    def get(self):
        if not users.get_current_user():
            # Anonymous visitor: show the landing page with a login link.
            login_url = users.create_login_url("/")
            self.render('index.html', {'login_url':login_url})
        else:
            default_token = md.get_default_access_token()
            if default_token:
                # Mobile browsers get the mobile home timeline.
                if _is_mobile(self.request.headers['user-agent']):
                    self.redirect('/m/u-/home')
                else:
                    self.redirect('/t')
                return
            else:
                # Signed in but no token yet: set one up first.
                self.redirect('/settings')
def main():
    """WSGI entry point: serve the front page at '/'."""
    application = webapp.WSGIApplication([
        ('/', Index),
    ], debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,622
|
mfs6174/Twitdao11
|
refs/heads/master
|
/ajax.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import taskqueue
from base import BaseHandler
from django.utils import simplejson as json
from urllib import urlencode
from twitdao import Twitdao
import md
import twitpic2
class UpdateStatus(BaseHandler):
    """Post a new tweet with the default token and return it as JSON."""
    def post(self):
        status = self.param('status')
        params = self.params([
            'in_reply_to_status_id',
            'lat',
            'long',
            'place_id',
            'display_coordinates',
            'trim_user',
            'include_entities',
        ])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        tweet = td.statuses_update(status=status.encode('utf-8'), **params)
        # Refresh the cached user profile (tweet count etc.) asynchronously.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.write(json.dumps({
            'success':'error' not in tweet,
            'info':tweet['error'] if 'error' in tweet else 'OK',
            'tweet':tweet if 'error' not in tweet else None,
        }))
class UploadImage(BaseHandler):
    """Upload a posted image to TwitPic via OAuth Echo; respond with JSON."""
    def post(self):
        media = self.param('media')
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        app_config = md.get_app_config()
        td = Twitdao(token)
        # TwitPic authenticates by echoing our signed Twitter credentials.
        twitpic = twitpic2.TwitPic2(
            consumer_key = app_config.consumer_key,
            consumer_secret = app_config.consumer_secret,
            access_token = 'oauth_token=%s&oauth_token_secret=%s' % (token.oauth_token, token.oauth_token_secret),
            service_key = app_config.twitpic_api_key,
        )
        try:
            if media:
                filename=self.request.POST[u'media'].filename.encode('utf-8')
                resp=twitpic.api_call('POST', 'upload', {'message':''}, files=[('media', filename, media)])
                # A successful upload response carries an 'id' field.
                self.write(json.dumps({
                    'success':'id' in resp,
                    'info':'OK',
                    'response':resp,
                }))
        except Exception, e:
            self.write(json.dumps({
                'success':False,
                'info':str(e),
                'response':None,
            }))
        except:
            # Catch-all for non-Exception raises (old-style exceptions).
            self.write(json.dumps({
                'success':False,
                'info':'Unkown Error.',
                'response':None,
            }))
class ShowStatus(BaseHandler):
    """Return a single tweet rendered as JSON-wrapped HTML."""
    def get(self, id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        tweet = td.statuses_show(id=id, **params)
        tweet_html = self.render('ajax/tweet.html', {
            'token':token,
            'token_user':token_user,
            'tweet':tweet,
        }, out=False)
        self.write(json.dumps({
            'tweet':tweet_html if 'error' not in tweet else None,
            'success':'error' not in tweet,
            'info':tweet['error'] if 'error' in tweet else 'OK',
        }))
class HomeTimeline(BaseHandler):
    """Ajax view of the signed-in user's home timeline ('refresh' or 'more')."""
    def get(self, slug):
        params=self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities'
        ])
        # Always request a large page regardless of what the client asked for.
        params['count'] = 100
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        timeline = td.home_timeline(**params)
        tweets = self.render('ajax/home.html', {
            'token':token,
            'token_user':token_user,
            'timeline':timeline,
        }, out=False)
        if slug == 'refresh':
            # Newer tweets: advance since_id to the newest id seen.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['since_id'] = str(timeline[0]['id'])
                count = len(timeline)
            else:
                tweets=''
                next_params['since_id'] = str(params['since_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count
            }))
        else:
            # Older tweets ('more'): page backwards with max_id = oldest id - 1.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['max_id'] = str(timeline[-1]['id']-1)
                count = len(timeline)
            else:
                # Fixed: this assigned 'tweet' instead of 'tweets', so an
                # empty/error timeline still sent the stale rendered HTML
                # (the sibling handlers all clear 'tweets' here).
                tweets=''
                next_params['max_id'] = str(params['max_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count,
                'href':'/t?%s' % urlencode(next_params)
            }))
class Mentions(BaseHandler):
    """Ajax view of the signed-in user's mentions ('refresh' or 'more')."""
    def get(self, slug):
        params=self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities'
        ])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        timeline = td.mentions(**params)
        tweets = self.render('ajax/mentions.html', {
            'token':token,
            'token_user':token_user,
            'timeline':timeline,
        }, out=False)
        if slug == 'refresh':
            # Newer tweets: advance since_id to the newest id seen.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['since_id'] = str(timeline[0]['id'])
                count = len(timeline)
            else:
                tweets=''
                next_params['since_id'] = str(params['since_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count
            }))
        else:
            # Older tweets ('more'): page backwards with max_id = oldest id - 1.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['max_id'] = str(timeline[-1]['id']-1)
                count = len(timeline)
            else:
                tweets=''
                next_params['max_id'] = str(params['max_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count,
                'href':'/t/mentions?%s' % urlencode(next_params)
            }))
class Retweets(BaseHandler):
    """Ajax view of one of the three retweet timelines ('refresh' or 'more')."""
    def get(self, which, slug):
        """*which* selects the timeline; *slug* is 'refresh' or 'more'."""
        params=self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_entities',
        ])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        timeline=[]
        if which == 'retweeted_by_me':
            timeline = td.retweeted_by_me(**params)
        elif which == 'retweeted_to_me':
            timeline = td.retweeted_to_me(**params)
        # NOTE(review): the URL slug is 'retweeted_of_me' but the API call is
        # retweets_of_me — confirm the route uses the same spelling.
        elif which == 'retweeted_of_me':
            timeline = td.retweets_of_me(**params)
        token_user = td.users_show_by_id(user_id = token.user_id)
        tweets = self.render('ajax/retweets.html', {
            'token':token,
            'token_user':token_user,
            'timeline':timeline,
        }, out=False)
        if slug == 'refresh':
            # Newer tweets: advance since_id to the newest id seen.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['since_id'] = str(timeline[0]['id'])
                count = len(timeline)
            else:
                tweets=''
                next_params['since_id'] = str(params['since_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count
            }))
        else:
            # Older tweets ('more'): page backwards with max_id = oldest id - 1.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['max_id'] = str(timeline[-1]['id']-1)
                count = len(timeline)
            else:
                tweets=''
                next_params['max_id'] = str(params['max_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count,
                'href':'/t/retweets/%s?%s' % (which, urlencode(next_params))
            }))
class RetweetedBy(BaseHandler):
    """Return the list of users who retweeted a tweet, as rendered HTML."""
    def get(self, tweet_id):
        params = self.params([
            'count',
            'page',
            'trim_user',
            'include_entities'
        ], include_entities='0')
        #default count number is 20.
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        # NOTE: shadows the module-level 'users' import inside this method.
        users = td.statuses_retweeted_by(id=tweet_id, **params)
        retweeted_by = self.render('ajax/retweeted-by.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'users':users,
        },out=False)
        self.write(json.dumps({
            'success':True,
            'info':'OK',
            'retweeted_by':retweeted_by,
        }))
class UserTimeline(BaseHandler):
    """Ajax view of another user's timeline using the default token."""
    def get(self, screen_name, slug):
        """Render timeline HTML for *screen_name*; *slug* is 'refresh' or 'more'."""
        params = self.params([
            'user_id',
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities',
        ],include_rts='true')
        token = md.get_default_access_token()
        #if not token:
        #    token = md.get_proxy_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        owner_user = td.users_show_by_screen_name( screen_name=screen_name, **params)
        token_user = td.users_show_by_id(user_id = token.user_id)
        timeline = td.user_timeline(screen_name=screen_name, **params)
        tweets = self.render('ajax/user.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'timeline':timeline,
        },out=False)
        if slug == 'refresh':
            # Newer tweets: advance since_id to the newest id seen.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['since_id'] = str(timeline[0]['id'])
                count = len(timeline)
            else:
                tweets=''
                next_params['since_id'] = str(params['since_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count,
            }))
        else:
            # Older tweets ('more'): page backwards with max_id = oldest id - 1.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['max_id'] = str(timeline[-1]['id']-1)
                count = len(timeline)
            else:
                tweets=''
                next_params['max_id'] = str(params['max_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count,
                'href':'/t/%s?%s' % (screen_name, urlencode(next_params))
            }))
class Favorite(BaseHandler):
    """Favorite or unfavorite a tweet; *slug* is 'create' or 'delete'."""
    def post(self, status_id, slug):
        params = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        # NOTE(review): tweet stays None for any other slug, and the
        # 'in tweet' checks below would then raise — presumably the route
        # regex restricts slug to create|delete; verify against main().
        tweet=None
        if slug=='create':
            tweet = td.favorites_create(id=status_id, **params)
        elif slug=='delete':
            tweet = td.favorites_destroy(id=status_id, **params)
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.write(json.dumps({
            'tweet':tweet if 'error' not in tweet else None,
            'success':'error' not in tweet,
            'info':tweet['error'] if 'error' in tweet else 'OK',
        }))
class Retweet(BaseHandler):
    """Retweet a tweet with the default token and return it as JSON."""
    def post(self, id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        tweet = td.statuses_retweet(id=id, **params)
        # Refresh the cached user profile asynchronously.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.write(json.dumps({
            'tweet':tweet if 'error' not in tweet else None,
            'success':'error' not in tweet,
            'info':tweet['error'] if 'error' in tweet else 'OK',
        }))
class DeleteStatus(BaseHandler):
    """Delete one of the user's own tweets and return the result as JSON."""
    def post(self, id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        tweet = td.statuses_destroy(id=id, **params)
        # Refresh the cached user profile asynchronously.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.write(json.dumps({
            'tweet':tweet if 'error' not in tweet else None,
            'success':'error' not in tweet,
            'info':tweet['error'] if 'error' in tweet else 'OK',
        }))
#TODO
#lists,
class Follow(BaseHandler):
    """Follow ('make') or unfollow (any other slug) a user."""
    def post(self,screen_name, slug):
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        fuser=None
        if 'make' == slug:
            fuser = td.friendships_create(screen_name = screen_name)
        else:
            fuser = td.friendships_destroy(screen_name = screen_name)
        # Refresh cached profiles for both sides of the relationship.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        if 'error' in fuser:
            self.write(json.dumps({
                'success':False,
                'info':fuser['error'],
            }))
        else:
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'user':fuser,
            }))
class Block(BaseHandler):
    """Block ('add') or unblock (any other slug) a user."""
    def post(self, screen_name, slug):
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        buser=None
        if 'add' == slug:
            buser = td.blocks_create(screen_name = screen_name)
        else:
            buser = td.blocks_destroy(screen_name = screen_name)
        # Refresh cached profiles for both sides of the relationship.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        if 'error' in buser:
            self.write(json.dumps({
                'success':False,
                'info':buser['error'],
            }))
        else:
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'user':buser,
            }))
class ReportSpam(BaseHandler):
    """Report a user as spam and return the reported user as JSON."""
    def post(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        ruser = td.report_spam(screen_name = screen_name)
        # Refresh cached profiles for both users.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        if 'error' in ruser:
            self.write(json.dumps({
                'success':False,
                'info':ruser['error'],
            }))
        else:
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'user':ruser,
            }))
class Blocking(BaseHandler):
    """Placeholder: list blocked users (not implemented)."""
    def get(self):
        pass
class SavedSearch(BaseHandler):
    """Placeholder: saved searches (not implemented)."""
    def get(self):
        pass
class MessageSend(BaseHandler):
    """Send a direct message and return the created message as JSON."""
    def post(self):
        screen_name = self.param('screen_name')
        user_id = self.param('user_id')
        text = self.param('text')
        params = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        message = td.direct_messages_new(user_id=user_id, screen_name=screen_name, text=text.encode('utf-8'), **params)
        if 'error' in message:
            self.write(json.dumps({
                'success':False,
                'info':message['error'],
            }))
        else:
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'message':message,
            }))
class MessageDestroy(BaseHandler):
    """Delete a direct message and return the deleted message as JSON."""
    def post(self, id):
        params = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'No access token avaliable.',
            }))
            return
        td = Twitdao(token)
        message = td.direct_messages_destroy(id=id, **params)
        if 'error' in message:
            self.write(json.dumps({
                'success':False,
                'info':message['error'],
            }))
        else:
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'message':message,
            }))
class ListTimeline(BaseHandler):
    """Ajax view of a Twitter list's timeline; *xlug* is 'refresh' or 'more'."""
    def get(self, screen_name, slug, xlug):
        """*screen_name* owns the list, *slug* is the list id/slug."""
        params = self.params(['since_id','max_id','per_page','page','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        #ls = td.user_list_id_get(id=slug, screen_name=screen_name)
        timeline = td.user_list_id_statuses(id=slug, screen_name = screen_name, **params)
        tweets=self.render('ajax/list.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            #'list':ls,
            'timeline':timeline,
        },out=False)
        if xlug == 'refresh':
            # Newer tweets: advance since_id to the newest id seen.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['since_id'] = str(timeline[0]['id'])
                count = len(timeline)
            else:
                tweets=''
                next_params['since_id'] = str(params['since_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count,
            }))
        else:
            # Older tweets ('more'): page backwards with max_id = oldest id - 1.
            next_params={}
            count=0
            if type(timeline) == list and len(timeline):
                next_params['max_id'] = str(timeline[-1]['id']-1)
                count = len(timeline)
            else:
                tweets=''
                next_params['max_id'] = str(params['max_id'])
                count = 0
            self.write(json.dumps({
                'success':True,
                'info':'OK',
                'tweets':tweets,
                'params':next_params,
                'count':count,
                'href':'/t/%s/%s?%s'% (screen_name, slug, urlencode(next_params))
            }))
class HackedSearch(BaseHandler):
    """AJAX endpoint backed by the scraped ("hacked") Twitter search."""
    def get(self, slug):
        q = self.param('q')
        since_id=self.param('since_id')
        page=self.param('page')
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'Token error.'
            }))
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        searchd=td.hacked_search(q.encode('utf-8'), since_id, page)
        timeline=searchd['statuses']
        count=0
        next_params={'q':q}
        if slug=='refresh':
            if type(timeline) == list and len(timeline):
                next_params['since_id'] = str(timeline[0]['id'])
            else:
                next_params['since_id'] = str(since_id)
        elif slug=='more':
            next_params['page'] = searchd['next_page']
        # NOTE(review): count is computed for both 'refresh' and 'more' here;
        # the original indentation is ambiguous -- confirm it was not meant to
        # apply only to the 'more' branch.
        count = len(timeline)
        tweets=self.render('ajax/hacked_search.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'timeline':timeline,
        },out=False)
        self.write(json.dumps({
            'success':True,
            'info':'OK',
            'tweets':tweets,
            'params':next_params,
            'count':count,
            'href': '/a/search?%s' % urlencode({'page':searchd['next_page'], 'q':q.encode('utf-8')})
        }))
class HackedFollowingFollowersOf(BaseHandler):
    """AJAX endpoint: render the scraped "following followers of" page as JSON HTML."""
    def get(self):
        user_id = self.param('user_id')
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'Token error.'
            }))
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        res=td.hacked_following_followers_of(user_id)
        # Render to a string (out=False) so it can be embedded in the JSON envelope.
        tweets=self.render('ajax/following_followers_of.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'res':res,
        },out=False)
        self.write(json.dumps({
            'success':True,
            'info':'OK',
            'html':tweets,
        }))
class HackedFollowsInCommonWith(BaseHandler):
    """AJAX endpoint: render the scraped "follows in common with" page as JSON HTML."""
    def get(self):
        user_id = self.param('user_id')
        token = md.get_default_access_token()
        if not token:
            self.write(json.dumps({
                'success':False,
                'info':'Token error.'
            }))
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        res=td.hacked_follows_in_common_with(user_id)
        # Render to a string (out=False) so it can be embedded in the JSON envelope.
        tweets=self.render('ajax/follows_in_common_with.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'res':res,
        },out=False)
        self.write(json.dumps({
            'success':True,
            'info':'OK',
            'html':tweets,
        }))
def main():
    """WSGI entry point: map all /x/* AJAX routes to their handler classes."""
    application = webapp.WSGIApplication([
        ('/x/update', UpdateStatus),
        ('/x/delete/([0-9]+)', DeleteStatus),
        ('/x/show/([0-9]+)', ShowStatus),
        ('/x/home/(refresh|more)', HomeTimeline),
        ('/x/mentions/(refresh|more)', Mentions),
        ('/x/retweets/(retweeted_by_me|retweeted_to_me|retweeted_of_me)/(refresh|more)', Retweets),
        ('/x/retweet/([0-9]+)', Retweet),
        ('/x/retweeted_by/([0-9]+)', RetweetedBy),
        ('/x/user/([0-9a-zA-Z_]+)/(refresh|more)', UserTimeline),
        ('/x/list/([0-9a-zA-Z_]+)/([0-9a-zA-Z\-%]+)/(refresh|more)', ListTimeline),
        ('/x/message_send', MessageSend),
        ('/x/message_destroy/([0-9]+)', MessageDestroy),
        ('/x/favorite/([0-9]+)/(create|delete)', Favorite),
        ('/x/friends/([0-9a-zA-Z_]+)/(make|break)', Follow),
        ('/x/block/([0-9a-zA-Z_]+)/(add|remove)', Block),
        ('/x/report/([0-9a-zA-Z_]+)', ReportSpam),
        ('/x/upload_image', UploadImage),
        ('/x/search/(refresh|more)', HackedSearch),
        ('/x/following_followers_of', HackedFollowingFollowersOf),
        ('/x/follows_in_common_with', HackedFollowsInCommonWith),
    ], debug=True)
    util.run_wsgi_app(application)

if __name__ == '__main__':
    main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,623
|
mfs6174/Twitdao11
|
refs/heads/master
|
/base.py
|
# -*- coding: utf-8 -*-
# Pin the Django version bundled with App Engine *before* anything imports
# django, then configure a minimal settings object so the template engine works.
from google.appengine.dist import use_library
use_library('django','1.2')
from django.conf import settings
settings.configure(INSTALLED_APPS=('zombie',))
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api import users
from Cookie import SimpleCookie
import os
# Register the custom template filter/tag libraries used by the HTML templates.
template.register_template_library('templatetags.string')
template.register_template_library('templatetags.fix')
template.register_template_library('templatetags.entities')
template.register_template_library('templatetags.tags')
class BaseHandler(webapp.RequestHandler):
    """Common request handler: template rendering, parameter and cookie helpers.

    All page handlers in this app subclass BaseHandler.
    """
    def initialize(self, request, response):
        webapp.RequestHandler.initialize(self, request, response)
        # Current request path; templates use it for navigation highlighting.
        self.current = os.environ['PATH_INFO']
        self.logout_url = users.create_logout_url("/")
        # Base template context; render() merges per-call values into this dict.
        self.template_vals = {
            'self':self
        }
    def render(self,tempalte_name, template_values={}, out=True):
        """Render templates/<tempalte_name> and return the HTML string.

        Writes the result to the response unless out=False.  Note: the
        parameter name is (mis)spelled 'tempalte_name'; kept as-is in case
        any caller passes it by keyword.  The mutable default for
        template_values is safe here because it is never mutated --
        self.template_vals receives the update instead.
        """
        self.template_vals.update(template_values)
        directory = os.path.dirname(__file__)
        path = os.path.join(directory, os.path.join('templates', tempalte_name))
        result = template.render(path, self.template_vals)
        if out:
            self.response.out.write(result)
        return result
    def param(self, name, **kw):
        # Single request parameter (query string or POST body).
        return self.request.get(name, **kw)
    def write(self, c):
        # Shorthand for writing to the response body.
        return self.response.out.write(c)
    def params(self, param_list, **default_vals):
        """Collect the named request parameters into a dict.

        Parameters absent from the request fall back to default_vals;
        'include_entities' additionally defaults to 't' (marked #temp below).
        """
        params={}
        for i in param_list:
            param=self.request.get(i)
            if param:
                params[i] = param
            elif i in default_vals:
                params[i]=default_vals[i]
            elif i=='include_entities': #temp
                params[i]='t'
        return params
    def jedirect(self, uri, time=5000, text="Redirecting..."):
        """Emit a JavaScript redirect to `uri` after `time` ms, showing `text`."""
        self.write('''<script type="text/javascript">
setTimeout(function(){window.location="%s"},%s)
</script>''' % (uri, time))
        self.write('%s' % text)
    def set_cookie(self, key, value='', max_age=None,
                   path='/', domain=None, secure=None, httponly=False,
                   version=None, comment=None):
        """Append a Set-Cookie header built with SimpleCookie.

        Attributes that are None or False are omitted, so e.g. the default
        httponly=False emits no HttpOnly flag at all.
        """
        cookies = SimpleCookie()
        cookies[key] = value
        for var_name, var_value in [
            ('max-age', max_age),
            ('path', path),
            ('domain', domain),
            ('secure', secure),
            ('HttpOnly', httponly),
            ('version', version),
            ('comment', comment),
        ]:
            if var_value is not None and var_value is not False:
                cookies[key][var_name] = str(var_value)
        # output(header='') yields "key=value; attrs"; strip the leading space.
        header_value = cookies[key].output(header='').lstrip()
        self.response.headers.add_header('Set-Cookie', header_value)
    def get_cookie(self, key, default=None):
        """Return the request cookie `key`, or `default` when absent."""
        if key in self.request.cookies:
            return self.request.cookies[key]
        else:
            return default
    def delete_cookie(self, key):
        # Expire the cookie immediately by setting max-age=0.
        self.set_cookie(key, '', max_age=0)
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,624
|
mfs6174/Twitdao11
|
refs/heads/master
|
/mobile.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import users
from google.appengine.api import taskqueue
from base import BaseHandler
from twitdao import Twitdao
import md
import utils
import twitpic2
import logging
class Home(BaseHandler):
    """Mobile home-timeline page."""
    def get(self):
        params=self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities'
        ])
        token = md.get_default_access_token()
        if not token:
            # No account configured yet: send the user to settings.
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        timeline = td.home_timeline(**params)
        if 'error' in timeline:
            # The API returned an error dict instead of a tweet list.
            timeline=[]
        self.render('mobile/home.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'timeline':timeline,
            # Paging ids from the oldest/newest tweet (None when the list is empty).
            'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'where':'home'
        })
class Mentions(BaseHandler):
    """Mobile mentions-timeline page (same shape as Home, different API call)."""
    def get(self):
        params=self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities'
        ])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        timeline = td.mentions(**params)
        if 'error' in timeline:
            # The API returned an error dict instead of a tweet list.
            timeline=[]
        self.render('mobile/mentions.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'timeline':timeline,
            # Paging ids from the oldest/newest tweet (None when the list is empty).
            'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'where':'mentions'
        })
class Favorites(BaseHandler):
    """Mobile favorites page for a user (defaults to the active account).

    Handles GET /m/u-<screen_name>/favs with an optional page-based
    ``page`` query parameter.
    """
    def get(self, screen_name):
        params = self.params(['page', 'include_entities'])
        page = self.param('page')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        if not screen_name:
            # Empty path segment means "the signed-in account".
            screen_name=token.screen_name
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        favorites = td.favorites(id=screen_name, **params)
        # Twitter's favorites API is page-based; derive prev/next page links.
        prev_page, next_page = None, 2
        if page:
            try:
                page = int(page)
                prev_page = page-1 if page-1>0 else None
                next_page = page+1
            except ValueError:
                # Bug fix: was a bare `except:` which swallowed *every*
                # exception; only a non-numeric `page` string can fail here,
                # in which case we keep the page-1 defaults.
                pass
        self.render('mobile/favorites.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'favorites':favorites,
            'prev_page':prev_page,
            'next_page':next_page,
            'where':'favorites',
        })
class Followers(BaseHandler):
    """Mobile followers list, cursor-paged (50 users per page)."""
    def get(self, screen_name):
        params = self.params([
            'user_id',
            'cursor',
            'include_entities',
            'count'
        ], cursor=-1, count=50)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        if not screen_name:
            # Empty path segment means "the signed-in account".
            screen_name=token.screen_name
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        followers = td.statuses_followers(screen_name=screen_name, **params)
        # On API failure `followers` is an error dict rather than the usual
        # {'users': [...], cursors...} payload; pass the raw dict plus an
        # 'error' flag to the template in that case.
        self.render('mobile/followers.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'error': followers['error'] if 'error' in followers else False,
            'followers':followers if 'error' in followers else followers['users'],
            'next_cursor':None if 'error' in followers else followers['next_cursor'],
            'next_cursor_str':None if 'error' in followers else followers['next_cursor_str'],
            'previous_cursor':None if 'error' in followers else followers['previous_cursor'],
            'previous_cursor_str':None if 'error' in followers else followers['previous_cursor_str'],
            'where':'followers',
        })
class Following(BaseHandler):
    """Mobile following (friends) list, cursor-paged (50 users per page)."""
    def get(self, screen_name):
        params = self.params([
            'user_id',
            'cursor',
            'include_entities',
            'count'
        ], cursor=-1, count=50)
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        if not screen_name:
            # Empty path segment means "the signed-in account".
            screen_name=token.screen_name
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = td.users_show_by_screen_name(screen_name = screen_name)
        following = td.statuses_friends(screen_name=screen_name, **params)
        # Same error-dict convention as Followers: on failure pass the raw
        # dict plus an 'error' flag instead of the users/cursors payload.
        self.render('mobile/following.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'error': following['error'] if 'error' in following else False,
            'following':following if 'error' in following else following['users'],
            'next_cursor':None if 'error' in following else following['next_cursor'],
            'next_cursor_str':None if 'error' in following else following['next_cursor_str'],
            'previous_cursor':None if 'error' in following else following['previous_cursor'],
            'previous_cursor_str':None if 'error' in following else following['previous_cursor_str'],
            'where':'following',
        })
class Messages(BaseHandler):
    """Mobile direct-message list; `mbox` is 'inbox' or 'sent' (else 404)."""
    def get(self, mbox):
        params = self.params([
            'since_id',
            'max_id',
            'count',
            'page',
            'include_entities',
        ])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        direct_messages = []
        if mbox=='inbox':
            direct_messages = td.direct_messages(**params)
        elif mbox=='sent':
            direct_messages = td.direct_messages_sent(**params)
        else:
            self.error(404)
            return
        self.render('mobile/messages.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            # Paging ids from the oldest/newest message (None when empty).
            'max_id':str(direct_messages[-1]['id']-1) if type(direct_messages)==list and len(direct_messages)>0 else None,
            'since_id':direct_messages[0]['id_str'] if type(direct_messages)==list and len(direct_messages)>0 else None,
            'messages':direct_messages,
            'where': 'messages',
            'at': mbox,
        })
class SendMessage(BaseHandler):
    """Mobile compose form (GET) and send action (POST) for direct messages."""
    def get(self):
        screen_name = self.param('screen_name')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        self.render('mobile/message-send.html',{
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'screen_name':screen_name,
        })
    def post(self):
        screen_name = self.param('screen_name')
        user_id = self.param('user_id')
        text = self.param('text')
        params = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        # Fire-and-forget: API errors are not surfaced; we redirect regardless.
        message = td.direct_messages_new(user_id=user_id, screen_name=screen_name, text=text.encode('utf-8'), **params)
        self.redirect('/m/m-sent')
class DeleteMessage(BaseHandler):
    """Mobile confirm page (GET) and delete action (POST) for a direct message."""
    def get(self):
        id=self.param('id')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        #No show single message api.
        message = None
        self.render('mobile/message-del.html',{
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'message':message,
            'id':id
        })
    def post(self):
        params = self.params(['include_entities'])
        id = self.param('id')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        # Fire-and-forget: API errors are not surfaced; we redirect regardless.
        message = td.direct_messages_destroy(id=id, **params)
        self.redirect('/m/m-inbox')
class User(BaseHandler):
    """Mobile profile page: a user's timeline plus the friendship status."""
    def get(self, screen_name):
        params = self.params([
            'user_id',
            'since_id',
            'max_id',
            'count',
            'page',
            'trim_user',
            'include_rts',
            'include_entities',
        ],include_rts='true')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        owner_user = td.users_show_by_screen_name( screen_name=screen_name )
        token_user = td.users_show_by_id(user_id = token.user_id)
        # Relationship between the signed-in account and the viewed user
        # (drives the follow/unfollow/block links in the template).
        friendship = td.friendships_show(source_id=token.user_id, target_screen_name=screen_name)
        timeline = td.user_timeline(screen_name=screen_name, **params)
        self.render('mobile/user.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            # Paging ids from the oldest/newest tweet (None when empty).
            'max_id':str(timeline[-1]['id']-1) if type(timeline)==list and len(timeline)>0 else None,
            'since_id':timeline[0]['id_str'] if type(timeline)==list and len(timeline)>0 else None,
            'timeline':timeline,
            'friendship':friendship,
            'where':'user',
        })
class ActionFollow(BaseHandler):
    """Mobile confirm page (GET) and follow action (POST) for a user."""
    def get(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        follow_user = td.users_show_by_screen_name(screen_name = screen_name)
        self.render('mobile/follow.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'user':follow_user,
        })
    def post(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        follow_user = td.friendships_create(screen_name = screen_name)
        # Refresh both cached profiles (friend/follower counts) in the background.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        self.redirect('/m/u-%s' % screen_name)
class ActionUnfollow(BaseHandler):
    """Mobile confirm page (GET) and unfollow action (POST) for a user."""
    def get(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        follow_user = td.users_show_by_screen_name(screen_name = screen_name)
        self.render('mobile/unfollow.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'user':follow_user,
        })
    def post(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        follow_user = td.friendships_destroy(screen_name = screen_name)
        # Refresh both cached profiles (friend/follower counts) in the background.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        self.redirect('/m/u-%s' % screen_name)
class ActionBlock(BaseHandler):
    """Mobile confirm page (GET) and block action (POST) for a user."""
    def get(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        block_user = td.users_show_by_screen_name(screen_name = screen_name)
        self.render('mobile/block.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'user':block_user,
        })
    def post(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        block_user = td.blocks_create(screen_name = screen_name)
        # Refresh both cached profiles in the background.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        self.redirect('/m/u-%s' % screen_name)
class ActionUnblock(BaseHandler):
    """Mobile confirm page (GET) and unblock action (POST) for a user."""
    def get(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        block_user = td.users_show_by_screen_name(screen_name = screen_name)
        self.render('mobile/unblock.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'user':block_user,
        })
    def post(self, screen_name):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        follow_user = td.blocks_destroy(screen_name = screen_name)
        # Refresh both cached profiles in the background.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'screen_name':screen_name}, method="GET" )
        self.redirect('/m/u-%s' % screen_name)
class ActionDelete(BaseHandler):
    """Mobile confirm page (GET) and delete action (POST) for one's own tweet.

    `tweet_id` arrives in the app's encoded form and is decoded via
    utils.tweet_id_decode before hitting the API.
    """
    def get(self, tweet_id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        id=utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        tweet = td.statuses_show(id=id, **params)
        self.render('mobile/tweet-del.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'tweet':tweet,
        })
    def post(self, tweet_id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        id=utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        tweet = td.statuses_destroy(id=id, **params)
        # Refresh the cached profile (statuses_count) in the background.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/m/u-/home')
class ActionTweet(BaseHandler):
    """Mobile compose/reply page (GET) and tweet-posting action (POST)."""
    def get(self):
        screen_name = self.param('screen_name')
        tweet_id = self.param('tweet_id')
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        tweet_id = utils.tweet_id_decode(tweet_id)
        if screen_name:
            # Replying to a user: pre-fill @screen_name in the compose form.
            td = Twitdao(token)
            token_user = td.users_show_by_id(user_id = token.user_id)
            owner_user = token_user
            self.render('mobile/reply.html', {
                'token':token,
                'token_user':token_user,
                'owner_user':owner_user,
                'screen_name':screen_name,
            })
        else:
            # Replying to a specific tweet: show the tweet being replied to.
            td = Twitdao(token)
            token_user = td.users_show_by_id(user_id = token.user_id)
            owner_user = token_user
            tweet = td.statuses_show(id=tweet_id,**params)
            self.render('mobile/reply.html', {
                'token':token,
                'token_user':token_user,
                'owner_user':owner_user,
                'tweet':tweet,
            })
    def post(self):
        status = self.param('status')
        params = self.params([
            'in_reply_to_status_id',
            'lat',
            'long',
            'place_id',
            'display_coordinates',
            'trim_user',
            'include_entities',
        ])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        td.statuses_update(status=status.encode('utf-8'), **params)
        # Refresh the cached profile (statuses_count) in the background.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/m/u-/home')
class ShowTweet(BaseHandler):
    """Mobile single-tweet view."""
    def get(self, tweet_id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        tweet_id = utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        tweet = td.statuses_show(id=tweet_id,**params)
        self.render('mobile/tweet-show.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'tweet':tweet,
        })
class ActionQuote(BaseHandler):
    """Mobile quote-tweet compose page (GET only; posting goes through ActionTweet)."""
    def get(self, tweet_id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        id = utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        tweet = td.statuses_show(id=id, **params)
        # NOTE(review): unlike the sibling handlers, 'token' is not passed to
        # the template here -- confirm mobile/quote.html does not need it.
        self.render('mobile/quote.html', {
            'token_user':token_user,
            'owner_user':owner_user,
            'tweet':tweet,
        })
class ActionRetweet(BaseHandler):
    """Mobile confirm page (GET) and retweet action (POST)."""
    def get(self, tweet_id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        id = utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        tweet = td.statuses_show(id=id, **params)
        # NOTE(review): 'token' is not passed to the template here -- confirm
        # mobile/retweet.html does not need it.
        self.render('mobile/retweet.html', {
            'token_user':token_user,
            'owner_user':owner_user,
            'tweet':tweet,
        })
    def post(self, tweet_id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        id = utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        tweet = td.statuses_retweet(id=id, **params)
        # Refresh the cached profile in the background.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/m/u-/home')
class ActionUndoRetweet(BaseHandler):
    """Placeholder for undoing a retweet (not yet implemented)."""
    def get(self, tweet_id):
        pass
class ActionFavorite(BaseHandler):
    """Mobile confirm page (GET) and favorite action (POST) for a tweet."""
    def get(self, tweet_id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        id = utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        tweet = td.statuses_show(id=id, **params)
        self.render('mobile/favorite.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'tweet':tweet,
        })
    def post(self, tweet_id):
        params = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        id = utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        tweet = td.favorites_create(id=id, **params)
        # Refresh the cached profile (favourites_count) in the background.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/m/u-%s/favs' % token.screen_name)
class ActionUnfavorite(BaseHandler):
    """Mobile confirm page (GET) and unfavorite action (POST) for a tweet."""
    def get(self, tweet_id):
        params = self.params(['trim_user','include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        id = utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        tweet = td.statuses_show(id=id, **params)
        self.render('mobile/unfavorite.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
            'tweet':tweet,
        })
    def post(self, tweet_id):
        params = self.params(['include_entities'])
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        id = utils.tweet_id_decode(tweet_id)
        td = Twitdao(token)
        tweet = td.favorites_destroy(id=id, **params)
        # Refresh the cached profile (favourites_count) in the background.
        taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        self.redirect('/m/u-%s/favs' % token.screen_name)
class Settings(BaseHandler):
    """Mobile settings page: switch accounts, media display, and optimizer."""
    def get(self, section):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        cursor=self.param('cursor', default_value=None)
        # First page (10 entries) of this Google user's access tokens.
        tokens, cursor = md.get_user_access_tokens(users.get_current_user(), 10, cursor)
        td=Twitdao(token)
        token_user=td.users_show_by_id(user_id=token.user_id)
        self.render('mobile/settings.html', {
            'token':token,
            'tokens':tokens,
            'token_user':token_user,
            'owner_user':token_user,
            'where':'settings'
        })
    def post(self, section):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        if section=='token':
            # Switch the default (active) access token.
            token_key = self.param('tk')
            token = md.get_access_token(token_key, users.get_current_user())
            md.set_default_access_token(token)
        elif section=='media':
            show_avatar=self.param('show_avatar')
            show_media=self.param('show_media')
            settings={}
            settings['m_show_avatar']=True if show_avatar=='t' else False
            settings['m_show_media']=True if show_media=='t' else False
            md.set_token_settings(token.key(), users.get_current_user(), **settings)
        elif section=='opti':
            opti=self.param('opti')
            settings={}
            # NOTE(review): `opti!='none' or opti==''` is True for every value
            # except 'none' (including '', which gets stored as '') -- the
            # intent was possibly `opti!='none' and opti!=''`; confirm.
            settings['m_optimizer']=opti if opti!='none' or opti=='' else None
            md.set_token_settings(token.key(), users.get_current_user(), **settings)
        self.redirect('/m/s-')
class UploadPhoto(BaseHandler):
    """Mobile photo upload: post the image to TwitPic, then tweet its URL."""
    def get(self):
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        td = Twitdao(token)
        token_user = td.users_show_by_id(user_id = token.user_id)
        owner_user = token_user
        self.render('mobile/upload.html', {
            'token':token,
            'token_user':token_user,
            'owner_user':owner_user,
        })
    def post(self):
        media = self.param('media')
        status = self.param('status')
        token = md.get_default_access_token()
        if not token:
            self.redirect('/settings')
            return
        app_config = md.get_app_config()
        td = Twitdao(token)
        # TwitPic authenticates via OAuth Echo using our consumer + user token.
        twitpic = twitpic2.TwitPic2(
            consumer_key = app_config.consumer_key,
            consumer_secret = app_config.consumer_secret,
            access_token = 'oauth_token=%s&oauth_token_secret=%s' % (token.oauth_token, token.oauth_token_secret),
            service_key = app_config.twitpic_api_key,
        )
        try:
            if media:
                filename=self.request.POST[u'media'].filename.encode('utf-8')
                resp=twitpic.api_call('POST', 'upload', {'message':status.encode('utf-8')}, files=[('media', filename, media)])
                full_status=status+" "+resp['url']
                tweet_status = full_status
                if len(full_status)-140>0:
                    # Truncate the text so "text... url" fits in 140 chars.
                    tweet_status = status[:140-len(resp['url'])-4]+"... "+resp['url']
                td.statuses_update(status=tweet_status.encode('utf-8'))
                taskqueue.add(queue_name='cache', url='/q/update_user_cache', params={'tk':token.key(), 'user_id':token.user_id}, method="GET" )
        except Exception, e:
            # Best effort: log the failure and still redirect home.
            logging.debug(e)
        except:
            # Non-Exception raises (Py2 old-style/system exits) propagate.
            raise
        self.redirect('/m/u-/home')
class UserAgentTest(BaseHandler):
    """Debug endpoint: echo the requester's User-Agent header as plain text."""
    def get(self):
        agent = self.request.headers['user-agent']
        self.response.headers['Content-Type'] = 'text/plain'
        self.write(agent)
def main():
    """WSGI entry point: map all mobile /m/* routes to their handler classes."""
    application = webapp.WSGIApplication([
        ('/m(?:|/|/u-/home)', Home),
        ('/m/u-/at', Mentions),
        ('/m/u-([0-9a-zA-Z_]*)/favs', Favorites),
        ('/m/u-([0-9a-zA-Z_]*)/foers', Followers),
        ('/m/u-([0-9a-zA-Z_]*)/foing', Following),
        ('/m/m-(inbox|sent)', Messages),
        ('/m/m-send', SendMessage),
        ('/m/m-del', DeleteMessage),
        ('/m/u-([0-9a-zA-Z_]+)', User),
        ('/m/u-([0-9a-zA-Z_]+)/fo', ActionFollow),
        ('/m/u-([0-9a-zA-Z_]+)/ufo', ActionUnfollow),
        ('/m/u-([0-9a-zA-Z_]+)/b', ActionBlock),
        ('/m/u-([0-9a-zA-Z_]+)/ub', ActionUnblock),
        ('/m/t-', ActionTweet),
        ('/m/t-([0-9a-zA-Z_\-\.]+)', ShowTweet),
        ('/m/t-([0-9a-zA-Z_\-\.]+)/qt', ActionQuote),
        ('/m/t-([0-9a-zA-Z_\-\.]+)/del', ActionDelete),
        ('/m/t-([0-9a-zA-Z_\-\.]+)/rt', ActionRetweet),
        ('/m/t-([0-9a-zA-Z_\-\.]+)/urt', ActionUndoRetweet),
        ('/m/t-([0-9a-zA-Z_\-\.]+)/fav', ActionFavorite),
        ('/m/t-([0-9a-zA-Z_\-\.]+)/ufav', ActionUnfavorite),
        ('/m/s-(token|media|opti|)', Settings),
        ('/m/p-', UploadPhoto),
        ('/m/uat-', UserAgentTest),
    ], debug=True)
    util.run_wsgi_app(application)

if __name__ == '__main__':
    main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,625
|
mfs6174/Twitdao11
|
refs/heads/master
|
/config.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import users
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from base import BaseHandler
import md
import os
import logging
class AppConfig(BaseHandler):
    """Admin page for the Twitter application's OAuth/API configuration."""
    def get(self):
        app_config = None
        # Only reveal the stored config to App Engine admins.
        if users.is_current_user_admin():
            app_config = md.get_app_config()
        self.render('app-config.html', {
            'app_config':app_config,
            'where':'twitdao-config'
        })
    def post(self):
        # NOTE(review): unlike get(), there is no admin check here; presumably
        # /config is restricted to admins via app.yaml -- confirm.
        params=self.params([
            'consumer_key',
            'consumer_secret',
            'request_token_url',
            'access_token_url',
            'authorize_url',
            'authenticate_url',
            'api_url',
            'search_api_url',
            'twitpic_api_key',
        ])
        md.set_app_config(**params)
        self.redirect('/config')
class ImageProxyConfig(BaseHandler):
    """Admin page for the Flickr-based image proxy configuration."""
    def get(self):
        image_proxy_config = None
        # Only reveal the stored config to App Engine admins.
        if users.is_current_user_admin():
            image_proxy_config = md.get_image_proxy_config()
        self.render('image-proxy-config.html', {
            'image_proxy_config':image_proxy_config,
            'where':'image_proxy-config'
        })
    def post(self):
        # NOTE(review): no admin check on POST (see AppConfig) -- confirm
        # app.yaml restricts /config/* to admins.
        params=self.params([
            'flickr_api_key',
            'flickr_api_secret',
            'flickr_rest_api_url',
        ])
        md.set_image_proxy_config(**params)
        self.redirect('/config/image_proxy')
class Memcache(BaseHandler):
    """Admin page: show memcache statistics and allow a full cache flush."""
    def get(self):
        stats = memcache.get_stats()
        self.render('memcache-config.html',{
            'stats':stats,
            # Bug fix: was self.params('success'), which iterates the
            # *characters* of the string 'success' as parameter names and
            # returns a dict; param() reads the single query value set by
            # the POST redirect below.
            'success':self.param('success'),
            'where':'memcache-config'
        })
    def post(self):
        # Flush the entire cache; flush_all() returns True on success.
        success = memcache.flush_all()
        self.redirect('/config/memcache?success=%s' % success)
class CleanUpAccesses(BaseHandler):
    """Admin task: fan out verification of every stored access token.

    POST walks the token datastore in pages of 50, enqueues one
    /q/verify_access task per token, and re-enqueues itself with the next
    cursor until exhausted.  When invoked manually (not from the task queue
    or cron), progress is also written to the response.
    """
    def get(self):
        self.render('clean-up-accesses.html',{'where':'clean-up-accesses'})
    def post(self):
        self.response.headers['Content-Type'] = 'text/plain'
        cursor = self.param('cursor', default_value=None)
        # Task-queue/cron requests carry these headers; anything else is a
        # manual invocation and gets progress output.
        manual = not ( 'X-AppEngine-QueueName' in self.request.headers or 'X-AppEngine-Cron' in self.request.headers )
        tokens, next_cursor = md.get_access_tokens(size=50, cursor=cursor)
        for token in tokens:
            taskqueue.add(queue_name='clean-up-accesses', url='/q/verify_access', params={'tk':token.key()}, method='GET')
            logging.debug('Add token: %s' % token)
            if manual:
                self.write('Add token: %s\n' % token)
        if next_cursor:
            # More pages remain: chain another batch via the task queue.
            taskqueue.add(queue_name='clean-up-accesses', url='/config/clean_up_accesses', params={'cursor':next_cursor}, method='POST')
            logging.debug('More cursor: %s' % next_cursor)
            if manual:
                self.write('\nMore cursor: %s\n' % next_cursor)
                self.write('\nThe program is still working, and will run for some time.\n')
                self.write('Go: [https://appengine.google.com/queuedetails?&app_id=%s&queue_name=clean-up-accesses] to watch details.' % os.environ['APPLICATION_ID'])
                self.write('\n'*20)
        else:
            logging.debug('No more accesses.')
            if manual:
                self.write('\nThe End.\n')
def main():
    """WSGI entry point: route the /config admin pages."""
    application = webapp.WSGIApplication([
        ('/config', AppConfig),
        ('/config/image_proxy', ImageProxyConfig),
        ('/config/memcache', Memcache),
        ('/config/clean_up_accesses', CleanUpAccesses),
    ], debug=True)
    util.run_wsgi_app(application)

if __name__ == '__main__':
    main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,626
|
mfs6174/Twitdao11
|
refs/heads/master
|
/templatetags/fix.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from django.template.defaultfilters import stringfilter
import re
register = webapp.template.create_template_register()
@register.filter
@stringfilter
def secure_image(image_url):
    ''' *.twimg.com to https://*.amazonaws.com '''
    # Intentionally disabled: returning early keeps the original URL
    # (the author's note below says to remove this line when HTTPS is needed).
    #comment this line when need https.
    return image_url
    # --- unreachable while the early return above is in place ---
    # Rewrite aN.twimg.com profile-image URLs to the S3 bucket behind them.
    m=re.search(r'a([0-9]+)\..+(/profile_images/.+)', image_url, re.I)
    if m:
        return 'https://s3.amazonaws.com/twitter_production%s' % m.group(2)
    return image_url
secure_image.is_safe=True
# Matches the Twitter thumbnail suffix (e.g. "..._normal.jpg") so it can be
# stripped to obtain the original-size image URL.  Raw strings fix the
# invalid "\." escape the plain literal produced (DeprecationWarning on
# modern Pythons, same compiled pattern).
_origin_image_re=re.compile(r'_(normal|mini|bigger)\.(png|gif|jpg|jpeg)$', re.I)
@register.filter
@stringfilter
def origin_image(image_url):
    """Rewrite a profile-image thumbnail URL to its original-size variant."""
    return _origin_image_re.sub(r'.\g<2>', image_url)
origin_image.is_safe=True
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,627
|
mfs6174/Twitdao11
|
refs/heads/master
|
/utils.py
|
# URL-safe characters used as the digits of a base-N tweet-id encoding
# (N == len(_urlsafe_chars)).
_urlsafe_chars='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.'
_urlsafe_chars_num=len(_urlsafe_chars)

def tweet_id_encode(n):
    """Encode a non-negative integer id as a short URL-safe string.

    Returns '' for 0.  Uses int() instead of the Python-2-only long() so the
    function runs under both Python 2 (where int auto-promotes to long) and
    Python 3, and divmod() to get quotient and remainder in one step.
    """
    tl, n = [], int(n)
    while n > 0:
        n, m = divmod(n, _urlsafe_chars_num)
        tl.insert(0, _urlsafe_chars[m])
    return ''.join(tl)
def tweet_id_decode(t):
    """Inverse of tweet_id_encode: URL-safe string back to an integer.

    Any character outside the alphabet invalidates the whole string (returns
    0); the empty string also decodes to 0.
    """
    text = str(t)
    value = 0
    # Horner's scheme: equivalent to summing digit * base**position.
    for ch in text:
        if ch not in _urlsafe_chars:
            return 0
        value = value * _urlsafe_chars_num + _urlsafe_chars.index(ch)
    return value
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,628
|
mfs6174/Twitdao11
|
refs/heads/master
|
/twitdao.py
|
# -*- coding: utf-8 -*-
from google.appengine.api import memcache
from twitter import Twitter
import md
USER_CACHE_TIME = 10*60
TWEET_CACHE_TIME = 60*60
class Twitdao():
def __init__(self, token=None):
self.token = token
config = md.get_app_config()
if token:
self.twitter = Twitter(
oauth_token=self.token.oauth_token,
oauth_token_secret=self.token.oauth_token_secret,
consumer_key=config.consumer_key,
consumer_secret=config.consumer_secret,
request_token_url=config.request_token_url,
access_token_url=config.access_token_url,
authorize_url=config.authorize_url,
authenticate_url=config.authenticate_url,
api_url=config.api_url,
search_api_url=config.search_api_url
)
else:
self.twitter = Twitter(
consumer_key=config.consumer_key,
consumer_secret=config.consumer_secret,
request_token_url=config.request_token_url,
access_token_url=config.access_token_url,
authorize_url=config.authorize_url,
authenticate_url=config.authenticate_url,
api_url=config.api_url,
search_api_url=config.search_api_url
)
def fetch_request_token(self, callback=None):
return self.twitter.fetch_request_token(callback)
def fetch_access_token(self, verifier):
access_token = self.twitter.fetch_access_token(verifier)
return access_token
def get_authenticate_url(self, request_token, force_login=False):
return self.twitter.get_authenticate_url(request_token, force_login)
def get_authorize_url(self, request_token, force_login=False):
return self.twitter.get_authorize_url(request_token, force_login)
#==========================================================================
def _cache_timeline(self, timeline, **params):
if not 'errors' in timeline:
trim_user=params['trim_user'] if 'trim_user' in params else None
include_entities=params['include_entities'] if 'include_entities' in params else None
td=dict(('%s-%s-%s' % (tweet['id_str'], trim_user, include_entities), tweet) for tweet in timeline)
return memcache.set_multi(td, time=TWEET_CACHE_TIME, key_prefix="tweet-")
return False
def _cache_tweet(self, tweet, **params):
if not 'errors' in tweet:
trim_user=params['trim_user'] if 'trim_user' in params else None
include_entities=params['include_entities'] if 'include_entities' in params else None
return memcache.set( 'tweet-%s-%s-%s' % (tweet['id_str'], trim_user, include_entities), tweet, time=TWEET_CACHE_TIME,)
return False
def _get_cached_tweet(self, id, **params):
trim_user=params['trim_user'] if 'trim_user' in params else None
include_entities=params['include_entities'] if 'include_entities' in params else None
return memcache.get( 'tweet-%s-%s-%s' % (id, trim_user, include_entities) )
def _del_cached_tweet(self, id, **params):
trim_user=params['trim_user'] if 'trim_user' in params else None
include_entities=params['include_entities'] if 'include_entities' in params else None
return memcache.delete( 'tweet-%s-%s-%s' % (id, trim_user, include_entities) )
    # "Seems not great." (translated from the original Chinese note)
    def _cache_users(self, users, **params):
        # Cache a list of user dicts under both id and screen_name keys.
        # NOTE(review): unlike _cache_user, the keys here lack the 'id-' /
        # 'screen_name-' part, so these entries are never read back by
        # _get_cached_user_by_id / _get_cached_user_by_screen_name -- which
        # may be what the original note laments; confirm before relying on it.
        if not 'errors' in users:
            include_entities = params['include_entities'] if 'include_entities' in params else None
            us=dict(('%s-%s' % (user['id_str'], include_entities), user) for user in users)
            us.update(dict(('%s-%s' % (user['screen_name'], include_entities), user) for user in users))
            return memcache.set_multi(us, key_prefix="user-", time=USER_CACHE_TIME)
        return False
def _cache_user(self, user, **params):
if not 'errors' in user:
include_entities = params['include_entities'] if 'include_entities' in params else None
return memcache.set_multi({
('id-%s-%s' % (user['id_str'], include_entities)):user,
('screen_name-%s-%s' % (user['screen_name'], include_entities)):user
}, key_prefix="user-", time=USER_CACHE_TIME)
return False
def _get_cached_user_by_id(self, id, **params):
include_entities = params['include_entities'] if 'include_entities' in params else None
return memcache.get('user-id-%s-%s' % (id, include_entities))
def _get_cached_user_by_screen_name(self, screen_name, **params):
include_entities = params['include_entities'] if 'include_entities' in params else None
return memcache.get('user-screen_name-%s-%s' % (screen_name, include_entities))
    # "Doesn't delete everything." (translated from the original Chinese note)
    # Only the id-keyed entry is removed; the screen_name-keyed copy of the
    # same user stays cached until it expires.
    def _del_cached_user_by_id(self, id, **params):
        include_entities = params['include_entities'] if 'include_entities' in params else None
        return memcache.delete('user-id-%s-%s' % (id, include_entities))
def _del_cached_user_by_screen_name(self, screen_name, **params):
include_entities = params['include_entities'] if 'include_entities' in params else None
return memcache.delete('user-screen_name-%s-%s' % (screen_name, include_entities))
def public_timeline(self, **params):
#trim_user, include_entities
timeline = self.twitter.api_call('GET','statuses/sample', params)
return timeline
def home_timeline(self, **params):
#since_id, max_id, count, page, trim_user, include_rts, include_entities
timeline = self.twitter.api_call('GET','statuses/home_timeline', params)
self._cache_timeline(timeline, **params)
return timeline
def friends_timeline(self, **params):
#since_id, max_id, count, page, trim_user, include_rts, include_entities
timeline = self.twitter.api_call('GET','statuses/friends_timeline', params)
self._cache_timeline(timeline, **params)
return timeline
def user_timeline(self, **params):
#user_id, screen_name, since_id, max_id, count, page, trim_user, include_rts, include_entities
timeline = self.twitter.api_call('GET','statuses/user_timeline', params)
self._cache_timeline(timeline, **params)
return timeline
def mentions(self, **params):
#since_id, max_id, count, page, trim_user, include_rts, include_entities
timeline = self.twitter.api_call('GET','statuses/mentions_timeline', params)
self._cache_timeline(timeline, **params)
return timeline
def retweeted_by_me(self, **params):
#since_id, max_id, count, page, trim_user, include_entities
timeline = self.twitter.api_call('GET','statuses/retweeted_by_me', params)
self._cache_timeline(timeline, **params)
return timeline
def retweeted_to_me(self, **params):
#since_id, max_id, count, page, trim_user, include_entities
timeline = self.twitter.api_call('GET','statuses/retweeted_to_me', params)
self._cache_timeline(timeline, **params)
return timeline
def retweets_of_me(self, **params):
#since_id, max_id, count, page, trim_user, include_entities
timeline = self.twitter.api_call('GET','statuses/retweets_of_me', params)
self._cache_timeline(timeline, **params)
return timeline
# Tweets Resources
def statuses_show(self, id, **params):
#trim_user, include_entities
tweet = self._get_cached_tweet(id, **params)
if not tweet:
tweet = self.twitter.api_call('GET', 'statuses/show/%s' % id, params)
self._cache_tweet(tweet, **params)
return tweet
def statuses_update(self, status, **params):
#in_reply_to_status_id, lat, long, place_id, display_coordinates, trim_user, include_entities
pms={'status':status}
pms.update(params)
tweet = self.twitter.api_call('POST', 'statuses/update', pms)
return tweet
def statuses_destroy(self, id, **params):
#trim_user, include_entities
tweet = self.twitter.api_call('POST', 'statuses/destroy/%s' % id, params)
self._del_cached_tweet(id, **params)
return tweet
def statuses_retweet(self, id, **params):
#trim_user, include_entities
tweet = self.twitter.api_call('POST', 'statuses/retweet/%s' % id, params)
return tweet
def statuses_retweets(self, id, **params):
#count, trim_user, include_entities
tweets = self.twitter.api_call('GET', 'statuses/retweets/%s' % id, params)
return tweets
def statuses_retweeted_by(self, id, **params):
#count, page, trim_user, include_entities
users = self.twitter.api_call('GET', 'statuses/%s/retweeted_by' % id, params)
return users
def statuses_retweeted_by_ids(self, id, **params):
#count, page, trim_user, include_entities
ids = self.twitter.api_call('GET', 'statuses/%s/retweeted_by/ids' % id, params)
return ids
#User resources
#users_show
def users_show_by_id(self, user_id, **params):
user=None
_tdfr=False
if '_twitdao_force_refresh' in params:
_tdfr=params['_twitdao_force_refresh']
del params['_twitdao_force_refresh']
if not _tdfr:
user=self._get_cached_user_by_id(user_id, **params)
if not user:
params.update({'user_id':user_id})
user = self.twitter.api_call('GET', 'users/show', params)
self._cache_user(user, **params)
return user
#users_show
def users_show_by_screen_name(self, screen_name, **params):
user=None
_tdfr=False
if '_twitdao_force_refresh' in params:
_tdfr=params['_twitdao_force_refresh']
del params['_twitdao_force_refresh']
if not _tdfr:
user=self._get_cached_user_by_screen_name(screen_name, **params)
if not user:
params.update({'screen_name':screen_name})
user = self.twitter.api_call('GET', 'users/show', params)
self._cache_user(user, **params)
return user
def users_lookup(self, user_id=None, screen_name=None, **params):
#include_entities
pms={}
if user_id:
pms = {'user_id':user_id}
elif screen_name:
pms ={'screen_name':screen_name}
pms.update(params)
users = self.twitter.api_call('POST', 'users/lookup', pms)
return users
def users_search(self, q, **params):
#per_page, page, include_entities
pms = {'q':q}
pms.update(params)
users = self.twitter.api_call('GET', 'users/search', pms)
return users
def users_suggestions(self):
sugs = self.twitter.api_call('GET', 'users/suggestions')
return sugs
def users_suggestions_slug(self, slug):
sugs = self.twitter.api_call('GET', 'users/suggestions/%s' % slug)
return sugs
def users_profile_image(self, screen_name, **params):
#size
url = self.twitter.api_call('GET', 'users/profile_image/%s' % screen_name, params)
return url
def statuses_friends(self, **params):
#user_id, screen_name, cursor, include_entities
friends = self.twitter.api_call('GET', 'friends/list', params)
return friends
def statuses_followers(self, **params):
#user_id, screen_name, cursor, include_entities
followers = self.twitter.api_call('GET', 'followers/list', params)
return followers
#List Resources
def user_lists_post(self, name, **params):
'''Creates a new list for the authenticated user. Accounts are limited to 20 lists.'''
#mode, description
pms = {'name':name}
pms.update(params)
ls = self.twitter.api_call('POST', '%s/lists' % self.token.screen_name, pms)
return ls
def user_lists_id_post(self, id, **params):
'''Updates the specified list.
#name, mode, description'''
ls = self.twitter.api_call('POST', '%s/lists/%s' % (self.token.screen_name, id), params)
return ls
def user_lists_get(self, screen_name=None, **params):
'''List the lists of the specified user. Private lists will be included if the authenticated users
is the same as the user who's lists are being returned.'''
#cursor
if not screen_name:
screen_name = self.token.screen_name
lists = self.twitter.api_call('GET', '%s/lists' % screen_name, params)
return lists
def user_list_id_get(self, id, screen_name=None):
'''Show the specified list. Private lists will only be shown if the authenticated user owns the specified list.'''
if not screen_name:
screen_name = self.token.screen_name
ls = self.twitter.api_call('GET', '%s/lists/%s' % (screen_name, id) )
return ls
def user_list_id_delete(self, id):
'''Deletes the specified list. Must be owned by the authenticated user.'''
ls = self.twitter.api_call('POST', '%s/lists/%s' % (self.token.screen_name, id), {'_method':'DELETE'})
return ls
def user_list_id_statuses(self, id, screen_name, **params):
'''Show tweet timeline for members of the specified list.'''
#since_id, max_id, per_page, page, include_entities
ls = self.twitter.api_call('GET', '%s/lists/%s/statuses' % (screen_name, id), params)
return ls
def user_list_memberships(self, screen_name, **params):
'''List the lists the specified user has been added to.'''
#cursor
lists = self.twitter.api_call('GET', '%s/lists/memberships' % screen_name, params)
return lists
def user_list_subscriptions(self, screen_name, **params):
'''List the lists the specified user follows.'''
#cursor
lists = self.twitter.api_call('GET', '%s/lists/subscriptions' % screen_name, params)
return lists
#List Subscribers Resources
def user_list_id_subscribers_get(self, screen_name, list_id, **params):
'''Returns the subscribers of the specified list.'''
#cursor, include_entities
users = self.twitter.api_call('GET', '%s/%s/subscribers' % (screen_name, list_id), params )
return users
def user_list_id_subscribers_post(self, screen_name, list_id):
'''Make the authenticated user follow the specified list.'''
return self.twitter.api_call('POST', '%s/%s/subscribers' % (screen_name, list_id) )
def user_list_id_subscribers_delete(self, screen_name, list_id, **params):
'''Unsubscribes the authenticated user form the specified list.'''
params['_method'] = 'DELETE'
return self.twitter.api_call('POST', '%s/%s/subscribers' % (screen_name, list_id), params )
def user_list_id_subscribers_id(self, screen_name, list_id, id, **params):
'''Check if a user is a subscriber of the specified list.'''
#include_entities
return self.twitter.api_call('POST', '%s/%s/subscribers/%s' % (screen_name, list_id, id), params )
#List Members Resources
def user_list_id_members_get(self, screen_name, list_id, **params):
''' Returns the members of the specified list. '''
#cursor, include_entities
return self.twitter.api_call('GET', '%s/%s/members' % (screen_name, list_id), params )
def user_list_id_members_post(self, screen_name, list_id, id):
'''Add a member to a list. The authenticated user must own the list to be able to add members to it.
Lists are limited to having 500 members.'''
params={}
params['id'] = id
return self.twitter.api_call('POST', '%s/%s/members' % (screen_name, list_id), params )
def user_list_id_members_create_all(self, screen_name, list_id, **params):
'''Adds multiple members to a list, by specifying a comma-separated list of member ids or screen names.
The authenticated user must own the list to be able to add members to it. Lists are limited to having 500 members,
and you are limited to adding up to 100 members to a list at a time with this method.'''
#screen_name, user_id
return self.twitter.api_call('POST', '%s/%s/create_all' %(screen_name, list_id) ,params )
def user_list_id_members_delete(self, screen_name, list_id, id):
'''Removes the specified member from the list. The authenticated user must be the list's owner to remove members from the list.'''
params={}
params['_method'] = 'DELETE'
params['id'] = id
return self.twitter.api_call('POST', '%s/%s/members' % (screen_name, list_id), params )
def user_list_id_members_id(self, screen_name, list_id, id, **params):
'''Check if a user is a member of the specified list.'''
#include_entities
return self.twitter.api_call('GET', '%s/%s/members/%s' % (screen_name, list_id, id), params )
#Direct Messages Resources
def direct_messages(self, **params):
#since_id, max_id, count, page, include_entities
messages = self.twitter.api_call('GET', 'direct_messages', params)
return messages
def direct_messages_sent(self, **params):
#since_id, max_id, count, page, include_entities
message = self.twitter.api_call('GET', 'direct_messages/sent', params)
return message
def direct_messages_new(self, screen_name, user_id, text, **params):
#include_entities
pms = {}
if user_id:
params['user_id'] = user_id
elif screen_name:
params['screen_name'] = screen_name
params['text'] = text
pms.update(params)
message = self.twitter.api_call('POST', 'direct_messages/new', pms)
return message
def direct_messages_destroy(self, id, **params):
#include_entities
message = self.twitter.api_call('POST', 'direct_messages/destroy/%s' % id, params)
return message
#Favorites Resources
def favorites(self, **params):
#id, page, include_entities
favorites = None
if 'id' in params:
id = params['id']
del params['id']
favorites = self.twitter.api_call('GET', 'favorites/%s' % id, params)
else:
favorites = self.twitter.api_call('GET', 'favorites/list', params)
return favorites
def favorites_create(self, id, **params):
#include_entities
tweet = self.twitter.api_call('POST', 'favorites/create/%s' % id, params)
return tweet
def favorites_destroy(self, id, **params):
#include_entities
tweet = self.twitter.api_call('POST', 'favorites/destroy/%s' % id, params)
return tweet
#Friendship Resources
def friendships_create(self, **params):
#user_id, screen_name, follow, include_entities
user = self.twitter.api_call('POST', 'friendships/create', params)
return user
def friendships_destroy(self, **params):
#user_id, screen_name, include_entities
user = self.twitter.api_call('POST', 'friendships/destroy', params)
return user
def friendships_show(self, **params):
#source_id, source_screen_name, target_id, target_screen_name
return self.twitter.api_call('GET', 'friendships/show', params)
#Account Resources
def account_verify_credentials(self, **params):
#include_entities
return self.twitter.api_call('GET', 'account/verify_credentials', params)
def account_rate_limit_status(self):
return self.twitter.api_call('GET', 'account/rate_limit_status')
def account_update_delivery_device(self, device, **params):
#device(sms, none), include_entities
return self.twitter.api_call('POST', 'account/update_delivery_device', params)
def account_update_profile_colors(self, **params):
#profile_background_color, profile_text_color, profile_link_color, profile_sidebar_fill_color, profile_sidebar_border_color, include_entities
return self.twitter.api_call('POST', 'account/update_profile_colors', params)
def account_update_profile_image(self, image, **params):
#include_entities
#image-> ('param_name', file_name, image_content)
return self.twitter.api_call('POST', 'account/update_profile_image', params, [image])
def account_update_profile_background_image(self, image, **params):
#tile, include_entities
#image-> ('param_name', file_name, image_content)
return self.twitter.api_call('POST', 'account/update_profile_background_image', params, [image])
def account_update_profile(self, **params):
#name, url, location, description, include_entities
return self.twitter.api_call('POST', 'account/update_profile', params)
#Block Resources
def blocks_create(self, **params):
#user_id, screen_name, include_entities
user = self.twitter.api_call('POST', 'blocks/create', params)
return user
def blocks_destroy(self, **params):
#user_id, screen_name, include_entities
user = self.twitter.api_call('POST', 'blocks/destroy', params)
return user
def blocks_blocking(self, **params):
#page, include_entities
blocking = self.twitter.api_call('GET', 'blocks/list', params)
return blocking#user list
#Spam Reporting resources
def report_spam(self, **params):
#user_id, screen_name, include_entities
user = self.twitter.api_call('POST', 'users/report_spam', params)
return user
#Saved Searches Resources
def saved_searches(self):
return self.twitter.api_call('GET','saved_searches/list')
def API_limit_rate(self):
return self.twitter.api_call('GET','account/rate_limit_status')
def saved_searches_show(self, id):
return self.twitter.api_call('GET','saved_searches/show/%s' % id)
def saved_searches_create(self, **params):
#query
return self.twitter.api_call('POST','saved_searches/create/%s', params)
def saved_searches_destroy(self, id):
return self.twitter.api_call('POST','saved_searches/destroy/%s' % id)
#Search API
def search(self, q, **params):
#lang, locate, rpp, page, since_id, until, geocode, show_user, result_type
timeline = self.twitter.search_api_call(q, **params)
return timeline
#Hacked Search
def hacked_search(self, q, since_id=None, page=None):
return self.twitter.hacked_search(q, since_id, page)
#Hacked
def hacked_following_followers_of(self, user_id):
# Also followed by.
return self.twitter.hacked_following_followers_of(user_id)
def hacked_follows_in_common_with(self, user_id):
# You both follow.
return self.twitter.hacked_follows_in_common_with(user_id)
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,629
|
mfs6174/Twitdao11
|
refs/heads/master
|
/queue.py
|
# -*- coding: utf-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from base import BaseHandler
from twitdao import Twitdao
import md
import urllib
import logging
class UpdateUserCache(BaseHandler):
    """Task-queue worker: refresh the memcached profile of a single user."""

    def get(self):
        tk = self.param('tk')
        screen_name = self.param('screen_name')
        user_id = self.param('user_id')
        # Private flag understood by Twitdao.users_show_*: bypass the cache
        # and re-fetch from the API.
        params={'_twitdao_force_refresh':True}
        include_entities = self.param('include_entities')
        if include_entities:
            params.update({'include_entities':include_entities})
        token = md.get_access_token(tk)
        td = Twitdao(token)
        user = None
        # user_id takes precedence over screen_name when both are supplied.
        if user_id:
            user=td.users_show_by_id(user_id=user_id, **params)
        elif screen_name:
            user=td.users_show_by_screen_name(screen_name=screen_name, **params)
        logging.debug(user)
        # Echo the result only for manual (non-task-queue) invocations.
        if 'X-AppEngine-QueueName' not in self.request.headers:
            self.write(repr(user))
class VerifyAccess(BaseHandler):
    """Task-queue worker: validate one access token, deleting it if revoked."""

    def get(self):
        tk = self.param('tk')
        token = md.get_access_token(tk)
        if not token:
            logging.debug('Token not found.')
            return
        td = Twitdao(token)
        token_user = td.account_verify_credentials()
        # NOTE(review): this checks for an 'error' key while the caching code
        # in twitdao checks 'errors' -- presumably matching the old
        # verify_credentials failure payload; confirm against the current API
        # response shape.
        if 'error' in token_user:
            logging.debug('Delete invalid token: %s' % token)
            md.delete_access_token(token.key())
        else:
            logging.debug('Verified token: %s' % token)
        # Echo only for manual (non-task-queue) invocations.
        if 'X-AppEngine-QueueName' not in self.request.headers:
            self.write(repr(token_user))
class ListAddUser(BaseHandler):
    """Task-queue worker: add one user to one of the token owner's lists."""

    def get(self):
        tk = self.param('tk')
        list_id = self.param('list_id')
        screen_name = self.param('screen_name')
        token = md.get_access_token(tk)
        td = Twitdao(token)
        # list_id may contain non-ASCII characters, hence the UTF-8 quoting
        # before it is embedded in the API path.
        lst=td.user_list_id_members_post(token.screen_name, urllib.quote(list_id.encode('utf-8')), id=screen_name)
        logging.debug(lst)
        # Echo only for manual (non-task-queue) invocations.
        if 'X-AppEngine-QueueName' not in self.request.headers:
            self.write(repr(lst))
def main():
    """WSGI entry point: route the /q/* task-queue workers."""
    application = webapp.WSGIApplication([
        ('/q/update_user_cache', UpdateUserCache),
        ('/q/verify_access', VerifyAccess),
        ('/q/list_add_user', ListAddUser),
    ], debug=True)
    util.run_wsgi_app(application)

if __name__ == '__main__':
    main()
|
{"/templatetags/string.py": ["/utils.py"], "/user.py": ["/base.py", "/twitdao.py", "/md.py"], "/main.py": ["/base.py", "/twitdao.py", "/md.py"], "/ajax1.py": ["/base.py", "/twitdao.py", "/md.py", "/twitpic2.py"], "/settings.py": ["/base.py", "/twitdao.py", "/md.py"], "/index.py": ["/base.py", "/md.py"], "/config.py": ["/base.py", "/md.py"], "/twitdao.py": ["/twitter.py", "/md.py"], "/queue.py": ["/base.py", "/twitdao.py", "/md.py"]}
|
4,630
|
chongchuanbing/api_demo
|
refs/heads/master
|
/app/utils/cache_utils.py
|
import random
from functools import wraps
from app import app
from app.config import CACHE_GLOBAL_PREFIX
from app.utils.redis_helper import redis_manage
class CachePrefix:
    """Namespace of cache-key prefixes.

    API_* entries key request-level caches, DB_* entries key query-result
    caches.  Every value ends with '/' so an identifier can be appended
    directly; the global prefix is prepended by the helpers below.
    """
    API_ADDRESS_BASE_LOC = 'address/baseLoc/'
    API_VER_CODE = 'verCode/'
    API_SALT = 'salt/'
    API_SID = 'sid/'
    API_TASK_DEVICE = 'taskDevice/'
    API_FREE_REGISTER = 'free/'
    API_DEVICE_UPGRADE = 'dUpgrade/'
    DB_VERSION_COUNT = 'versionCount/'
    DB_CDC_COUNT = 'cdcCount/'
    DB_VERSION_LIST = 'dvList/'
    DB_VERSION_ITEM = 'dvItem/'
    DB_DEVICE_CMD = 'deviceCmd/'
    DB_QR_GEN = 'qrGen/'
    DB_DEVICE = 'device/'
    DB_DEVICE_COUNT = 'deviceCount/'
    DB_DEVICE_CITY_LIST = 'deviceCityList/'
    DB_DEVICE_TYPE_MAP = 'deviceTypeMap/'
    DB_DEVICE_CHAN_CITY_LIST = 'deviceChanCityList/'
    DB_DEVICE_TYPE = 'deviceType/'
    DB_DEVICE_TYPE_AUTH = 'deviceTypeAuth/'
    DB_DEVICE_DEFINE = 'deviceDefine/'
    DB_DEVICE_CHANNEL = 'deviceChannel/'
    DB_DEVICE_CHANNEL_LIST = 'dcList/'
    DB_DEVICE_CHANNEL_MAP = 'dcMap/'
    DB_DEVICE_TYPE_DICT = 'deviceTypeDict/'
    DB_DEVICE_ALIAS = 'deviceAlias/'
    DB_CHAN_DEVICE_TYPE = 'chanDeviceType/'
    DB_USER_CMD = 'udCmd/'
    DB_USER_CMD_LIST = 'udCmdList/'
    DB_DEVICE_CHAN_BIND = 'deviceChanBind/'
    DB_MONITOR_LIST = 'monitorList/'
    DB_MONITOR_NEW_LIST = 'monitorNewList/'
    DB_DEVICE_DISCOUNT = 'deviceDiscount/'
    DB_DEVICE_ACTION = 'deviceAction/'
    DB_ORDER = 'order/'
    DB_ORDER_COUNT = 'orderCount/'
    DB_USER_LOGIN = 'userLogin/'
    DB_DEVICE_AGENT = 'deviceAgent/'
    DB_PERMISSION = 'permission/'
    DB_PERMISSION_LIST = 'permissionList/'
    DB_PERMISSION_BIND = 'permissionBind/'
    DB_ALARM_TODAY = 'alarmToday/'
    DB_CHAN_AGENT = 'deviceChanAgent/'
    DB_LOC_PROVINCE = 'locProvince/'
    DB_LOC_CITY = 'locCity/'
    DB_TAG = 'tag/'
    DB_DEVICE_TASK = 'task/'
    DB_DEVICE_BUFF = 'buff/'
    DB_GAME_ORDER = 'orderGame/'
    DB_WX_CONFIG = 'wxCfg/'
    DB_HX_PAY_ORDER = 'hxOrderId/'
    DB_WX_CODE = 'wxCode/'
    DB_CONFIG_LOG = 'configLog/'
    DB_TOTAL_INCOME = 'totalIncome/'
    DB_TODAY_INCOME = 'todayIncome/'
    DB_ALL_INCOME = 'allIncome/'
    DB_MONTH_INCOME = 'monthIncome/'
    DB_ALL_FANS = 'allFans/'
    DB_TODAY_FANS = 'todayFans/'
    DB_GRID_INFO = 'gridInfo/'
    DB_PLAYABLE = 'playable/'
    DB_PCD = 'pcd/'
    DB_DEVICE_START = 'deviceStart/'
    DB_GRID_ID_START = 'gridIdStart/'
    DB_CHAN_MONITOR_LIST = 'chanMonitorList/'
def api_cache(prefix='tm/', ignore_first=True, timeout=60, name='', noneable=False, random_timeout=None):
    """Decorator that memoizes a function's result in the Flask cache.

    prefix:         cache-key prefix (appended to CACHE_GLOBAL_PREFIX).
    ignore_first:   skip the first positional arg when building the key
                    (typically 'self' / 'cls').
    timeout:        TTL in seconds, unless random_timeout is given.
    name:           extra suffix appended to the key.
    noneable:       also cache None results, stored as a sentinel string.
    random_timeout: (base, spread) pair -> TTL = base + randint(0, spread),
                    useful to de-synchronize expirations.
    """
    def decorator(func):
        @wraps(func)
        def wrapper_fun(*args, **kwargs):
            none_flag = '!#$ None $#!'
            key_time = random_timeout[0] + random.randint(0, random_timeout[1]) if random_timeout else timeout
            # Key = global prefix + prefix + positional args + keyword values.
            pos_key = '/'.join(str(arg) for arg in args[1 if ignore_first else 0:])
            kwargs_key = '/'.join(str(kwargs[key]) for key in kwargs)
            cache_key = CACHE_GLOBAL_PREFIX + prefix + pos_key + ('/' + kwargs_key if pos_key else kwargs_key)
            if name:
                cache_key += name
            cache = app.flask_cache.get(cache_key)
            if noneable and type(cache) is str and cache == none_flag:
                return
            # Fixed: the original 'if cache:' treated any cached falsy value
            # (0, '', [], False) as a miss and re-executed the function on
            # every call; test against None instead so falsy results are
            # served from cache too.
            if cache is not None:
                return cache
            exe_res = func(*args, **kwargs)
            if exe_res is not None:
                app.flask_cache.set(cache_key, exe_res, timeout=key_time)
            elif noneable:
                app.flask_cache.set(cache_key, none_flag, timeout=key_time)
            return exe_res
        return wrapper_fun
    return decorator
def clear_api_cache(prefix='tm/', *args, **kwargs):
    """Evict the entry api_cache would have stored for these arguments
    (the key is rebuilt with the same positional/keyword joining rules)."""
    positional = '/'.join(str(a) for a in args)
    keyword = '/'.join(str(v) for v in kwargs.values())
    joined = positional + ('/' + keyword if positional else keyword)
    app.flask_cache.delete(CACHE_GLOBAL_PREFIX + prefix + joined)
def clear_cache_fuzzy(*fuzzy_keys):
    """Delete every cache entry whose key starts with one of fuzzy_keys.

    Works directly against Redis: KEYS 'flask_cache_<global><fk>*' per
    pattern, then DEL for each match.  KEYS scans the whole keyspace, so this
    is meant for maintenance use, not hot request paths.
    """
    if not fuzzy_keys:
        return
    pool = redis_manage.get_redis_pool()
    pipe = pool.pipeline()
    # First pipeline round: collect matching keys for all patterns at once.
    for fk in fuzzy_keys:
        pipe.keys('flask_cache_{}{}*'.format(CACHE_GLOBAL_PREFIX, fk))
    find = pipe.execute()
    # Second round: queue the deletes (keys come back as bytes).
    for line in find:
        for k in line:
            pipe.delete(k.decode())
    pipe.execute()
def get(cache_key):
    """Fetch the value stored under the globally-prefixed cache key."""
    full_key = CACHE_GLOBAL_PREFIX + cache_key
    return app.flask_cache.get(full_key)
def save(cache_key, data, timeout=50):
    """Store data under the globally-prefixed cache key for timeout seconds."""
    full_key = CACHE_GLOBAL_PREFIX + cache_key
    return app.flask_cache.set(full_key, data, timeout=timeout)
def remove(cache_key):
    """Evict the globally-prefixed cache key."""
    full_key = CACHE_GLOBAL_PREFIX + cache_key
    return app.flask_cache.delete(full_key)
if __name__ == '__main__':
    # Ad-hoc maintenance entry point: purge the device-channel caches.
    clear_cache_fuzzy(CachePrefix.DB_DEVICE_CHANNEL_LIST, CachePrefix.DB_DEVICE_CHANNEL_MAP)
|
{"/app/utils/img_util.py": ["/app/utils/auth_utils.py"]}
|
4,631
|
chongchuanbing/api_demo
|
refs/heads/master
|
/app/utils/img_util.py
|
import os
import random
import time
from urllib.request import urlopen
import io
import qrcode
from PIL import Image, ImageDraw, ImageFont
from app.config import CDN_SERVER, HOST_ID, APP_ROOT
from app.utils import ali_oss_helper
from app.utils.auth_utils import md5
# Upload whitelists, matched against the substring after the last dot of the
# file name by allowed_file / allowed_video below.
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'bmp'}
ALLOWED_VIDEO_EXTENSIONS = {'avi', 'rmvb', 'rm', 'asf', 'divx', 'mpg', 'mpeg', 'mpe', 'wmv', 'mp4', 'mkv', 'vob'}
class FileWrap:
    """Reads a file once at construction and replays its bytes on every read().

    Gives a seek-free, repeatedly-readable stand-in for a binary file object
    (used below to keep the bundled font in memory).
    """
    def __init__(self, fp):
        # 'with' fixes the original's leaked handle: open(fp, 'rb').read()
        # never closed the file descriptor.
        with open(fp, 'rb') as f:
            self.data = f.read()

    def read(self):
        """Return the full cached byte content; safe to call repeatedly."""
        return self.data
# Bundled monospace font, pre-loaded into memory so gen_sn_qr can reuse it
# without re-reading it from disk.
menlo = FileWrap(os.path.join(APP_ROOT, 'app', 'utils', 'Menlo.ttc'))
def allowed_file(filename):
    """True if filename carries an image extension from ALLOWED_EXTENSIONS.

    The extension is lower-cased before the lookup, so 'photo.PNG' is now
    accepted -- previously the comparison was case-sensitive and rejected it.
    """
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def allowed_video(filename):
    """True if filename carries a video extension from ALLOWED_VIDEO_EXTENSIONS.

    Lower-cases the extension first (case-insensitive match), consistent with
    allowed_file.
    """
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_VIDEO_EXTENSIONS
def get_and_save_img(img_url):
    """Download img_url into memory and store it via save_img.

    NOTE(review): save_img -> save_file reads new_file.filename, but a plain
    io.BytesIO carries no 'filename' attribute -- this path looks like it
    would raise AttributeError; verify against actual callers.
    Also: urlopen here has no timeout, so a stalled remote host blocks.
    """
    file = io.BytesIO(urlopen(img_url).read())
    return save_img(file)
def _gen_file_name():
    """Build a storage name: md5(millis + HOST_ID) followed by the millis.

    md5 comes from app.utils.auth_utils; HOST_ID scopes names per host.
    """
    now = str(int(time.time() * 1000))
    return md5(now + HOST_ID) + now
def save_img(img):
    """Alias for save_file, kept for call sites dealing with images."""
    return save_file(img)
def save_file(new_file):
    """Store an uploaded file under a generated name, keeping its extension.

    NOTE(review): if new_file.filename has no dot, rfind returns -1 and ext
    degrades to just the last character -- confirm callers always pass a name
    with an extension.
    """
    ext = new_file.filename[new_file.filename.rfind('.'):]
    img_name = _gen_file_name() + ext
    return ali_oss_helper.save_img(img_name, new_file)
def save(file_name, content):
    """Store *content* under a generated name keeping *file_name*'s extension."""
    dot = file_name.rfind('.')
    target_name = _gen_file_name() + file_name[dot:]
    return ali_oss_helper.save_img(target_name, content)
def get_img_url(thumb, img, high=-1, width=-1):
    """Build the CDN URL for *img*.

    Falsy *img* is returned unchanged. When both *high* and *width* are
    given (not -1), an explicit OSS resize is appended; otherwise *thumb*
    selects a fixed 100x120 thumbnail, else the raw CDN URL is returned.
    """
    if not img:
        return img
    base = CDN_SERVER + img
    if high != -1 and width != -1:
        return base + '?x-oss-process=image/resize,m_fill,h_{},w_{}'.format(high, width)
    if thumb:
        return base + '?x-oss-process=image/resize,m_fill,h_100,w_120'
    return base
def gen_sn_qr(sn, text='', output=''):
    """Render a QR code for a device SN and save it as '<sn>.png'.

    Args:
        sn: device serial number; drawn beneath the QR code and used as the
            output file name.
        text: QR payload. Empty means the SN itself; if it contains a
            '{sn}' placeholder, the SN is substituted into it.
        output: target directory; empty means the current directory.
    """
    if not text:
        payload = sn
    elif '{sn}' in text:
        payload = text.format(sn=sn)
    else:
        payload = text

    code = qrcode.QRCode(version=5, box_size=5, border=4)
    code.add_data(payload)
    code.make(fit=True)
    qr_img = code.make_image().convert("RGBA")

    canvas = Image.new('RGB', (300, 300), (255, 255, 255))
    canvas.paste(qr_img, ((300 - qr_img.width) // 2, 0))
    pen = ImageDraw.Draw(canvas)
    # NOTE(review): 'menlo' is a FileWrap, not a path or seekable file —
    # confirm ImageFont.truetype accepts it.
    label_font = ImageFont.truetype(menlo, 30)
    pen.text((100, qr_img.height), sn, font=label_font, fill='#000000')
    canvas.save(os.path.join(output if output else '.', sn + '.png'))
def save_cert(chan, local_file_name):
    """Upload a certificate file for channel *chan* under the 'cert/' prefix."""
    remote_key = 'cert/' + chan
    return ali_oss_helper.save_cert(remote_key, local_file_name)
|
{"/app/utils/img_util.py": ["/app/utils/auth_utils.py"]}
|
4,632
|
chongchuanbing/api_demo
|
refs/heads/master
|
/server.py
|
import os
import time
from flask import request, render_template
from flask import send_from_directory, redirect
from flask_cors import CORS
from app import init_app_br
from app.api.api_response import get_json_data
from app.api.api_base import get_docs
from app.app import create_app
from app.config import in_product
from app.utils import logger, cache_utils
from db_base import db
# Application bootstrap: create the Flask app and wire up extensions.
app = create_app()
CORS(app, supports_credentials=True)  # allow cross-origin requests with credentials
# doc.init(app)
logger.init(app)
db.init_app(app)
# presumably registers blueprints/routes on the app — confirm in app package
init_app_br(app)
@app.route('/test/cache', methods=['GET', 'POST', 'DELETE', 'PUT'])
def cache_test():
    """Demo endpoint: return a timestamped string cached for 5 minutes."""
    cached = cache_utils.get('testCache')
    if cached:
        return cached
    fresh = 'test page, curr time = ' + str(time.ctime())
    cache_utils.save('testCache', fresh, timeout=5 * 60)
    return fresh
@app.before_request
def call_before_request():
    # Runs before every request: echo to stdout outside production, and log
    # every non-preflight (non-OPTIONS) request to the API log file.
    if not in_product():
        print('Request path: {}, params: {}'.format(request.path, get_json_data()))
    # print('request.headers : ', request.headers)
    if request.method != 'OPTIONS':
        logger.api_logger.info('Request path: %s, params: %s', request.path, get_json_data())
@app.route('/api-json', methods=['GET'])
def api_doc_json():
    """Return the machine-readable API documentation produced by get_docs()."""
    return get_docs()
@app.route('/api', methods=['GET'])
def api_doc():
    """Render the human-readable API documentation page."""
    return render_template('api_doc.html')
if __name__ == '__main__':
    # Development entry point; production serves this app via gunicorn.
    app.run(host='0.0.0.0', port=12345)
|
{"/app/utils/img_util.py": ["/app/utils/auth_utils.py"]}
|
4,633
|
chongchuanbing/api_demo
|
refs/heads/master
|
/gun.py
|
import multiprocessing

# Gunicorn configuration file (values read by gunicorn at startup).

debug = False
# Bug fix: this setting was misspelled 'deamon', so gunicorn silently
# ignored it. False matches gunicorn's default, so correcting the name
# does not change runtime behavior.
daemon = False
bind = '0.0.0.0:12345'
max_requests = 50000
worker_connections = 50000
pidfile = '/home/log/tissue/tissue_gun.pid'
x_forwarded_for_header = "X-Real-IP"
# One worker process per CPU core.
workers = multiprocessing.cpu_count()
# workers = 3
worker_class = "gevent"
# Bug fix: loglevel was assigned 'info' and then immediately overridden by
# 'error'; only the effective value is kept.
loglevel = 'error'
accesslog = '/home/log/api/access.log'
access_log_format = '%({X-Real-IP}i)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
errorlog = '/home/log/api/error.log'
timeout = 60
|
{"/app/utils/img_util.py": ["/app/utils/auth_utils.py"]}
|
4,634
|
chongchuanbing/api_demo
|
refs/heads/master
|
/app/utils/ip2addr/__init__.py
|
import os
from app.config import APP_ROOT
from .ip2Region import Ip2Region

# Module-level singleton: the ip2region lookup database is opened once at
# package import time from the bundled ip2region.db file.
ip2region = Ip2Region(os.path.sep.join((APP_ROOT, 'app', 'utils', 'ip2addr', 'ip2region.db')))
|
{"/app/utils/img_util.py": ["/app/utils/auth_utils.py"]}
|
4,635
|
chongchuanbing/api_demo
|
refs/heads/master
|
/app/utils/auth_utils.py
|
# encoding=utf-8
import base64
import hashlib
from Crypto.Cipher import DES3
import time
# from app import app
from app import app
MATCH = 1
NOT_FIND = -1
TIME_OUT = -2
MISMATCH = -3
BS = DES3.block_size
def b64decode(content):
    """Decode base64 *content* and return the result as text."""
    raw = base64.b64decode(content)
    return raw.decode()
def b64encode(content):
    """Encode text *content* as base64 and return it as text."""
    raw = content.encode()
    return base64.b64encode(raw).decode()
def md5(src, upper=False):
    """Return the hex MD5 digest of text *src*.

    :param src: string to hash (UTF-8 encoded before hashing)
    :param upper: when True, return the digest in upper case
    """
    digest = hashlib.md5(src.encode(encoding='utf_8')).hexdigest()
    return digest.upper() if upper else digest
def sha1(src, upper=False):
    """Return the hex SHA-1 digest of text *src*; upper case when *upper*."""
    digest = hashlib.sha1(src.encode('utf-8')).hexdigest()
    return digest.upper() if upper else digest
def sha256(src, upper=False):
    """Return the hex SHA-256 digest of text *src*; upper case when *upper*."""
    digest = hashlib.sha256(src.encode('utf-8')).hexdigest()
    return digest.upper() if upper else digest
def check_ver_code(phone, edit_ver_code):
    """Validate an SMS verification code for *phone*.

    Returns MATCH on success (and consumes the cached code), NOT_FIND when
    no code is cached, MISMATCH when the input differs. The comparison
    upper-cases the user input, so cached codes are assumed upper-case.
    """
    cache_key = 'ver_code/' + phone
    ver_code = app.flask_cache.get(cache_key)
    if not ver_code:
        return NOT_FIND
    if ver_code != edit_ver_code.upper():
        return MISMATCH
    # NOTE(review): delete goes through flask_cache.cache while get goes
    # through flask_cache directly — confirm both hit the same backend.
    app.flask_cache.cache.delete(cache_key)
    return MATCH
def pad(s):
    """Right-pad bytes *s* to a multiple of BS, PKCS#7-style (pad byte = pad length)."""
    pad_len = BS - len(s) % BS
    return s + chr(pad_len).encode() * pad_len
def unpad(s):
    """Strip PKCS#7-style padding: drop as many trailing chars as the last char's ordinal."""
    pad_len = ord(s[-1])
    return s[0:-pad_len]
class Prpcrypt(object):
    """3DES-CBC encrypt/decrypt helper with base64 transport encoding.

    :param key: 24-byte triple-DES key, passed straight to DES3.new
    :param iv: 8-character initialization vector (encoded to bytes)
    """

    def __init__(self, key, iv):
        self.key = key
        self.mode = DES3.MODE_CBC
        self.iv = iv.encode()

    def encrypt(self, text):
        """Encrypt text *text*; return the ciphertext as base64 text.

        Bug fix: the original re-checked ``len(text) % 8`` after pad() and
        appended ``'\\0' * (8 - x)`` — that branch was unreachable because
        pad() already rounds up to the cipher block size, and it would have
        raised TypeError (bytes + str) had it ever run. The dead branch is
        removed; observable behavior is unchanged.
        """
        padded = pad(text.encode())
        cryptor = DES3.new(self.key, self.mode, self.iv)
        # Kept as an attribute for backward compatibility with any caller
        # that reads self.ciphertext after encrypt().
        self.ciphertext = cryptor.encrypt(padded)
        return base64.standard_b64encode(self.ciphertext).decode("utf-8")

    def decrypt(self, text):
        """Decrypt base64 *text*; return the plaintext with padding stripped."""
        cryptor = DES3.new(self.key, self.mode, self.iv)
        de_text = base64.standard_b64decode(text.encode())
        plain_text = cryptor.decrypt(de_text)
        # Decode leniently, drop NUL padding, then strip PKCS#7-style pad.
        st = str(plain_text.decode('utf-8', 'ignore')).rstrip('\0')
        return unpad(st)
def hash_code(data):
    """Print the md5/sha1/sha256 digests of *data* — debugging aid."""
    for label, digest_fn in (('md5', md5), ('sha1', sha1), ('sha256', sha256)):
        print('{}: '.format(label), digest_fn(data))
if __name__ == '__main__':
    # Ad-hoc manual test: decrypt a captured payload with a hard-coded key/IV.
    pc = Prpcrypt('OWJjQbOBkOt3MjtGRWPGYcgP', 'LScJ5bkE')
    # # print(sha1(''))
    # # print(md5('123456'))
    # e = pc.encrypt("华永星") # sample encryption input
    d = pc.decrypt('6A7Jsw3jPH+q+vigThqvpmtblLTWmu00s00VySK3Yhu/cT0lwJZZTDO4Ka2W/x7LO0fAQOlLAq0mhCP5s68y0RDzsTgFNcDsftgNS8SVj+uzeNGn8+vOUrrTxz+nBRJ6EjCMVVl924Ivux0p5gwE11feJi8ifvT1E2i7xboqNcdrIypNtzxMzHcClC6PuPC70WBU4tp+MP52tuez/X4CyqhxIQdKNDbrT7lEqEUQ9c3SY/V/UkxdzQmSVTqSYAK5YS4KOOmPy/w+Ql4bP+RUw8f07XC5uLKxdzPiB71hOx12lXvsqqU2qHyVreLW+bpq3hMal6TTpVZv5Nkg9SHG5v4EpQexyTzahgfa3RwnYapj19RHTbP00sfrRJdA97aX')
    print(d)
    # print("加密后%s,解密后%s" % (e, d))
    # hash_code('10004304' + '17')
    # print(sha1('jsapi_ticket=kgt8ON7yVITDhtdwci0qeW9NRvqUmgG_qWPadGfUN2F4NAQ8EEwdbmWSpN4trP1jPAR8D2hFNX42QaSLIyFDKg&noncestr=15887dc079e4f088bc84da1439be387f&timestamp=1543836951&url=http://tmp.beesmartnet.com/static/platform/index.html'))
    # print(sha1('ddddd'))
|
{"/app/utils/img_util.py": ["/app/utils/auth_utils.py"]}
|
4,651
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/training/refactor_equilibrium_training.py
|
import torch
import numpy as np
from solvers import new_equilibrium_utils as eq_utils
from torch import autograd
from utils import cg_utils
def _save_checkpoint(solver, epoch, optimizer, scheduler, save_location, use_dataparallel):
    """Serialize solver/optimizer/scheduler state to *save_location*.

    Note: scheduler.state_dict() is always called, matching the original
    code, so a None scheduler will raise here just as it did before.
    """
    state = solver.module.state_dict() if use_dataparallel else solver.state_dict()
    torch.save({'solver_state_dict': state,
                'epoch': epoch,
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict()
                }, save_location)


def train_solver(single_iterate_solver, train_dataloader, test_dataloader,
                 measurement_process, optimizer,
                 save_location, loss_function, n_epochs, deep_eq_module,
                 use_dataparallel=False, device='cpu', scheduler=None,
                 print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """Train a deep-equilibrium solver end to end.

    For each batch: form measurements y, set the solver's initial point,
    run the DEQ forward pass, and backprop the reconstruction loss.
    Checkpoints are written every save_every_n_epochs at the top of the
    epoch and unconditionally at the end of each epoch.

    Refactor: the original duplicated the torch.save block four times; it
    now lives in _save_checkpoint. Behavior is unchanged.
    """
    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(single_iterate_solver, epoch, optimizer, scheduler,
                             save_location, use_dataparallel)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        _save_checkpoint(single_iterate_solver, epoch, optimizer, scheduler,
                         save_location, use_dataparallel)
def train_solver_precond(single_iterate_solver, train_dataloader,
                         measurement_process, optimizer,
                         save_location, loss_function, n_epochs, deep_eq_module,
                         use_dataparallel=False, device='cpu', scheduler=None, noise_sigma=0.000001, precond_iterates=100,
                         print_every_n_steps=2, save_every_n_epochs=5, start_epoch=0, forward_operator=None,
                         test_dataloader=None):
    """Train a DEQ solver on (input, target) batches with divergence rollback.

    The dataloader must yield (input, target) pairs. When *forward_operator*
    is given, its adjoint of y warm-starts the equilibrium solve. A
    checkpoint is written every 200 steps; if the loss goes NaN or grows by
    more than 10x the epoch's first loss, the next epoch reloads the last
    checkpoint. noise_sigma / precond_iterates are accepted for interface
    compatibility with the CG-preconditioned variant.
    """
    previous_loss = 10.0
    reset_flag = False
    for epoch in range(start_epoch, n_epochs):
        if reset_flag:
            # Training diverged last epoch: roll back to the last checkpoint.
            save_state_dict = torch.load(save_location)
            single_iterate_solver.load_state_dict(save_state_dict['solver_state_dict'])
            optimizer.load_state_dict(save_state_dict['optimizer_state_dict'])
            reset_flag = False
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            # Bug fix: the original reassigned sample_batch to its first
            # element and then read sample_batch[1] from that tensor, so
            # target_img was row 1 of the input batch instead of the
            # dataloader's target. Unpack both elements before reassigning.
            inputs = sample_batch[0].to(device=device)
            target_img = sample_batch[1].to(device=device)
            y = measurement_process(inputs)
            if forward_operator is not None:
                # Warm-start the fixed-point solve with A^T y (no gradients).
                with torch.no_grad():
                    initial_point = forward_operator.adjoint(y)
                reconstruction = deep_eq_module.forward(y, initial_point=initial_point)
            else:
                reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, target_img)
            if np.isnan(loss.item()):
                # NaN loss: abandon the epoch and trigger a rollback.
                reset_flag = True
                break
            loss.backward()
            optimizer.step()
            if ii == 0:
                previous_loss = loss.item()  # baseline for the divergence test
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
            if ii % 200 == 0:
                # Periodic mid-epoch checkpoint (stored epoch is epoch + 1).
                if use_dataparallel:
                    torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                                'epoch': epoch + 1,
                                'optimizer_state_dict': optimizer.state_dict(),
                                'scheduler_state_dict': scheduler.state_dict()
                                }, save_location)
                else:
                    torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                                'epoch': epoch + 1,
                                'optimizer_state_dict': optimizer.state_dict(),
                                'scheduler_state_dict': scheduler.state_dict()
                                }, save_location)
            if (previous_loss - loss.item()) / previous_loss < -10.0 or np.isnan(loss.item()):
                # Loss exploded relative to the epoch's first loss: roll back.
                reset_flag = True
        if scheduler is not None:
            scheduler.step(epoch)
        if not reset_flag:
            # End-of-epoch checkpoint (skipped when a rollback is pending).
            if use_dataparallel:
                torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
            else:
                torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
def train_solver_precond1(single_iterate_solver, train_dataloader,
                          measurement_process, optimizer,
                          save_location, loss_function, n_epochs, deep_eq_module,
                          use_dataparallel=False, device='cpu', scheduler=None, noise_sigma=0.000001, precond_iterates=100,
                          print_every_n_steps=2, save_every_n_epochs=5, start_epoch=0, forward_operator=None,
                          test_dataloader=None):
    """Variant of train_solver_precond with a CG-preconditioned initial point.

    Differences from train_solver_precond: the dataloader yields plain
    tensors (no (input, target) pairs) and the loss compares the
    reconstruction against the input batch itself; the warm start is the
    regularized least-squares solution computed by conjugate gradient with
    *precond_iterates* iterations and regularization *noise_sigma*.
    Divergence handling (NaN / 10x blow-up -> rollback) is identical.
    """
    previous_loss = 10.0
    reset_flag = False
    for epoch in range(start_epoch, n_epochs):
        if reset_flag:
            # Training diverged last epoch: roll back to the last checkpoint.
            save_state_dict = torch.load(save_location)
            single_iterate_solver.load_state_dict(save_state_dict['solver_state_dict'])
            optimizer.load_state_dict(save_state_dict['optimizer_state_dict'])
            reset_flag = False
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            if forward_operator is not None:
                # Warm-start the equilibrium solve with the CG solution of
                # the regularized normal equations (no gradients needed).
                with torch.no_grad():
                    initial_point = cg_utils.conjugate_gradient(initial_point=forward_operator.adjoint(y),
                                                                ATA=forward_operator.gramian,
                                                                regularization_lambda=noise_sigma, n_iterations=precond_iterates)
                reconstruction = deep_eq_module.forward(y, initial_point=initial_point)
            else:
                reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            if np.isnan(loss.item()):
                # NaN loss: abandon the epoch and trigger a rollback.
                reset_flag = True
                break
            loss.backward()
            optimizer.step()
            if ii == 0:
                previous_loss = loss.item()  # baseline for the divergence test
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
            if ii % 200 == 0:
                # Periodic mid-epoch checkpoint (stored epoch is epoch + 1).
                if use_dataparallel:
                    torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                                'epoch': epoch+1,
                                'optimizer_state_dict': optimizer.state_dict(),
                                'scheduler_state_dict': scheduler.state_dict()
                                }, save_location)
                else:
                    torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                                'epoch': epoch+1,
                                'optimizer_state_dict': optimizer.state_dict(),
                                'scheduler_state_dict': scheduler.state_dict()
                                }, save_location)
            if (previous_loss - loss.item()) / previous_loss < -10.0 or np.isnan(loss.item()):
                # Loss exploded relative to the epoch's first loss: roll back.
                reset_flag = True
        if scheduler is not None:
            scheduler.step(epoch)
        if not reset_flag:
            # End-of-epoch checkpoint (skipped when a rollback is pending).
            if use_dataparallel:
                torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
            else:
                torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
def train_solver_mnist(single_iterate_solver, train_dataloader, test_dataloader,
                       measurement_process, optimizer,
                       save_location, loss_function, n_epochs,
                       use_dataparallel=False, device='cpu', scheduler=None,
                       print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0, max_iters=100):
    """Train a DEQ solver on (image, label) batches via Anderson acceleration.

    Builds a DEQFixedPoint around *single_iterate_solver* using the Anderson
    forward iterator (m=5, lam=1e-4, tol=1e-3, beta=1.5, up to *max_iters*
    iterations) and trains on the image component of each batch (labels are
    dropped). Checkpoints every save_every_n_epochs and at each epoch end.

    Dead-code removal: the original computed a per-epoch n_iterations
    schedule that was never read anywhere in this function — the solver is
    configured solely by max_iters. The unused schedule is deleted.
    """
    forward_iterator = eq_utils.anderson
    deep_eq_module = eq_utils.DEQFixedPoint(single_iterate_solver, solver=forward_iterator,
                                            m=5, lam=1e-4, max_iter=max_iters, tol=1e-3, beta=1.5)
    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            # Periodic checkpoint at the top of the epoch.
            if use_dataparallel:
                torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
            else:
                torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch[0].to(device=device)  # images only; labels dropped
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        # Unconditional end-of-epoch checkpoint.
        if use_dataparallel:
            torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
        else:
            torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
#####################TEST##########################
# loss_accumulator = []
# mse_loss = torch.nn.MSELoss()
# for ii, sample_batch in enumerate(test_dataloader):
# sample_batch = sample_batch.to(device=device)
# y = measurement_process(sample_batch)
# initial_point = y
# reconstruction = solver(initial_point, iterations=6)
#
# reconstruction = torch.clamp(reconstruction, -1 ,1)
#
# loss = mse_loss(reconstruction, sample_batch)
# loss_logger = loss.cpu().detach().numpy()
# loss_accumulator.append(loss_logger)
#
# loss_array = np.asarray(loss_accumulator)
# loss_mse = np.mean(loss_array)
# PSNR = -10 * np.log10(loss_mse)
# percentiles = np.percentile(loss_array, [25,50,75])
# percentiles = -10.0*np.log10(percentiles)
# print("TEST LOSS: " + str(sum(loss_accumulator) / len(loss_accumulator)), flush=True)
# print("MEAN TEST PSNR: " + str(PSNR), flush=True)
# print("TEST PSNR QUARTILES AND MEDIAN: " + str(percentiles[0]) +
# ", " + str(percentiles[1]) + ", " + str(percentiles[2]), flush=True)
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,652
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/training/equilibrium_training.py
|
import torch
import numpy as np
from solvers import equilibrium_utils as eq_utils
from torch import autograd
def train_solver(single_iterate_solver, train_dataloader, test_dataloader,
                 measurement_process, optimizer,
                 save_location, loss_function, n_epochs,
                 use_dataparallel=False, device='cpu', scheduler=None,
                 print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """Train a fixed-point solver, switching to implicit gradients after warm-up.

    Epochs 0-1 backpropagate directly through the unrolled fixed-point
    iterations; from epoch 2 on, the gradient is assembled via the implicit
    function theorem: dl/dz at the equilibrium is multiplied by an inverse
    Jacobian term approximated with 5 conjugate-gradient iterations.
    """
    # Per-epoch schedule of fixed-point iterations (grows as training stabilizes).
    n_iterations = [5]*n_epochs
    for ee in range(n_epochs):
        if ee >= 5:
            n_iterations[ee] = 5
        if ee >= 8:
            n_iterations[ee] = 8
        if ee >= 10:
            n_iterations[ee] = 10
        if ee >= 12:
            n_iterations[ee] = 15
        if ee >= 15:
            n_iterations[ee] = 20
    for epoch in range(start_epoch, n_epochs):
        # Periodic checkpoint at the top of the epoch.
        if epoch % save_every_n_epochs == 0:
            if use_dataparallel:
                torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
            else:
                torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            # Run the fixed-point iteration to (approximate) equilibrium.
            reconstruction = eq_utils.get_equilibrium_point(y, single_iterate_solver, max_iterations=n_iterations[epoch])
            reconstruction = torch.clamp(reconstruction, -1, 1)
            loss = loss_function(reconstruction, sample_batch)
            if epoch < 2:
                # Warm-up: ordinary backprop through the unrolled iterations.
                loss.backward()
                optimizer.step()
            else:
                # Implicit gradient: dl/dz at the equilibrium point...
                dell_delz = torch.autograd.grad(inputs=reconstruction, outputs=loss,
                                                grad_outputs=torch.ones_like(loss))[0]
                # ...times the inverse-Jacobian term, approximated by CG.
                delf_deltheta_invJ = eq_utils.conjugate_gradient_equilibriumgrad(b=dell_delz,
                                                                                 input_z=reconstruction,
                                                                                 f_function=single_iterate_solver,
                                                                                 n_iterations=5)
                torch.autograd.backward(tensors=reconstruction, grad_tensors=delf_deltheta_invJ)
                optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
#####################TEST##########################
# loss_accumulator = []
# mse_loss = torch.nn.MSELoss()
# for ii, sample_batch in enumerate(test_dataloader):
# sample_batch = sample_batch.to(device=device)
# y = measurement_process(sample_batch)
# initial_point = y
# reconstruction = solver(initial_point, iterations=6)
#
# reconstruction = torch.clamp(reconstruction, -1 ,1)
#
# loss = mse_loss(reconstruction, sample_batch)
# loss_logger = loss.cpu().detach().numpy()
# loss_accumulator.append(loss_logger)
#
# loss_array = np.asarray(loss_accumulator)
# loss_mse = np.mean(loss_array)
# PSNR = -10 * np.log10(loss_mse)
# percentiles = np.percentile(loss_array, [25,50,75])
# percentiles = -10.0*np.log10(percentiles)
# print("TEST LOSS: " + str(sum(loss_accumulator) / len(loss_accumulator)), flush=True)
# print("MEAN TEST PSNR: " + str(PSNR), flush=True)
# print("TEST PSNR QUARTILES AND MEDIAN: " + str(percentiles[0]) +
# ", " + str(percentiles[1]) + ", " + str(percentiles[2]), flush=True)
def train_solver_mnist(single_iterate_solver, train_dataloader, test_dataloader,
                       measurement_process, optimizer,
                       save_location, loss_function, n_epochs,
                       use_dataparallel=False, device='cpu', scheduler=None,
                       print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """MNIST variant: unrolled backprop for epochs < 10, implicit gradients after.

    NOTE(review): the implicit-gradient branch begins with exit(), so
    everything after it in that branch is dead — reaching epoch 10
    terminates the process. Presumably debugging residue; confirm intent.
    """
    # Per-epoch schedule of fixed-point iterations (grows over training).
    n_iterations = [5]*n_epochs
    for ee in range(n_epochs):
        if ee >= 20:
            n_iterations[ee] = 5
        if ee >= 23:
            n_iterations[ee] = 7
        if ee >= 28:
            n_iterations[ee] = 9
        if ee >= 38:
            n_iterations[ee] = 11
        if ee >= 44:
            n_iterations[ee] = 13
        if ee >= 50:
            n_iterations[ee] = 20
        if ee >= 58:
            n_iterations[ee] = 30
    for epoch in range(start_epoch, n_epochs):
        # Checkpoint at the top of every epoch.
        if use_dataparallel:
            torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
        else:
            torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch[0].to(device=device)  # images only; labels dropped
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            # Helper defined per-batch; computes a vector-Jacobian product.
            # Currently unused by the live code paths below.
            def jacobian_vector_product(f, z, v):
                z = z.detach().requires_grad_()
                v = v.detach().requires_grad_()
                vjp_val = autograd.grad(f(z), z, v, create_graph=True)[0]
                return vjp_val
            if epoch < 10:
                # Warm-up: ordinary backprop through the unrolled iterations.
                reconstruction = eq_utils.get_equilibrium_point(y, single_iterate_solver,
                                                                max_iterations=n_iterations[epoch])
                reconstruction = torch.clamp(reconstruction, 0, 1)
                loss = loss_function(reconstruction, sample_batch)
                loss.backward()
                optimizer.step()
            else:
                # NOTE(review): exit() makes the remainder of this branch dead code.
                exit()
                reconstruction = eq_utils.get_equilibrium_point(y, single_iterate_solver,
                                                                max_iterations=n_iterations[epoch])
                reconstruction = torch.clamp(reconstruction, 0, 1)
                loss = loss_function(reconstruction, sample_batch)
                # Implicit gradient: dl/dz times a CG-approximated inverse-Jacobian term.
                dell_delz = torch.autograd.grad(inputs=reconstruction, outputs=loss,
                                                grad_outputs=torch.ones_like(loss))[0]
                delf_deltheta_invJ = eq_utils.conjugate_gradient_equilibriumgrad(b=dell_delz,
                                                                                 input_z=reconstruction,
                                                                                 f_function=single_iterate_solver,
                                                                                 n_iterations=10)
                torch.autograd.backward(tensors=reconstruction, grad_tensors=-delf_deltheta_invJ)
                torch.nn.utils.clip_grad_norm_(single_iterate_solver.parameters(), 1.0)
                optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        # Checkpoint at the end of every epoch.
        if use_dataparallel:
            torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
        else:
            torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
#####################TEST##########################
# loss_accumulator = []
# mse_loss = torch.nn.MSELoss()
# for ii, sample_batch in enumerate(test_dataloader):
# sample_batch = sample_batch.to(device=device)
# y = measurement_process(sample_batch)
# initial_point = y
# reconstruction = solver(initial_point, iterations=6)
#
# reconstruction = torch.clamp(reconstruction, -1 ,1)
#
# loss = mse_loss(reconstruction, sample_batch)
# loss_logger = loss.cpu().detach().numpy()
# loss_accumulator.append(loss_logger)
#
# loss_array = np.asarray(loss_accumulator)
# loss_mse = np.mean(loss_array)
# PSNR = -10 * np.log10(loss_mse)
# percentiles = np.percentile(loss_array, [25,50,75])
# percentiles = -10.0*np.log10(percentiles)
# print("TEST LOSS: " + str(sum(loss_accumulator) / len(loss_accumulator)), flush=True)
# print("MEAN TEST PSNR: " + str(PSNR), flush=True)
# print("TEST PSNR QUARTILES AND MEDIAN: " + str(percentiles[0]) +
# ", " + str(percentiles[1]) + ", " + str(percentiles[2]), flush=True)
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,653
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/utils/bsd500.py
|
import torch
import h5py
import random
import numpy as np
import os
from PIL import Image
from torchvision import transforms
class Dataset(torch.utils.data.Dataset):
    """HDF5-backed patch dataset; values are rescaled from [0, 1] to [-1, 1].

    :param train: select the training file (True) or validation file (False)
    :param mode: 'S' for the default h5 files, 'B' for the *_B.h5 variants
    """

    def __init__(self, train=True, mode='S'):
        super(Dataset, self).__init__()
        self.train = train
        self.mode = mode
        self.data_loc = '/share/data/vision-greg2/users/gilton/train.h5'
        self.val_loc = '/share/data/vision-greg2/users/gilton/val.h5'
        h5f = self._open()
        self.keys = list(h5f.keys())
        # Shuffle once so sequential indexing visits patches in random order.
        random.shuffle(self.keys)
        h5f.close()

    def _open(self):
        """Open the h5 file matching (train, mode).

        Bug fix: the original left the file handle unbound for an unknown
        mode, producing a confusing NameError; raise ValueError instead.
        """
        if self.mode == 'S':
            return h5py.File(self.data_loc if self.train else self.val_loc, 'r')
        if self.mode == 'B':
            return h5py.File('train_B.h5' if self.train else 'val_B.h5', 'r')
        raise ValueError("mode must be 'S' or 'B', got {!r}".format(self.mode))

    def __len__(self):
        return len(self.keys)

    def __getitem__(self, index):
        # Re-open per item: h5py handles are not safely shared across
        # dataloader worker processes.
        h5f = self._open()
        key = self.keys[index]
        # scale from [0, 1] to [-1, 1]
        data = 2 * np.array(h5f[key]) - 1
        h5f.close()
        return torch.Tensor(data)
def directory_filelist(target_directory):
    """List non-hidden regular files in *target_directory*, sorted by name."""
    entries = sorted(os.listdir(target_directory))
    return [name for name in entries
            if os.path.isfile(os.path.join(target_directory, name))
            and not name.startswith('.')]
def load_img(file_name):
    """Open *file_name* and return it as a grayscale ('L') PIL image."""
    with open(file_name, 'rb') as fh:
        # convert() materializes pixel data, so closing the file is safe.
        return Image.open(fh).convert("L")
class EquilibriumDataset(torch.utils.data.Dataset):
    """Pairs each target image with a randomly chosen precomputed initial point.

    Targets come from *target_directory*; for each target '<stem>.<ext>' an
    initial point is loaded from *init_directory* + '<stem>_<k>.png' with k
    drawn uniformly from 1..4. Both images are scaled to [-1, 1] and given a
    zero second channel. (*validation_data* is accepted but unused, matching
    the original interface.)
    """

    def __init__(self, target_directory, init_directory, validation_data=False, transform=None):
        super(EquilibriumDataset, self).__init__()
        names = directory_filelist(target_directory)
        self.full_filelist = [target_directory + name for name in names]
        self.init_directory = init_directory
        self.transform = transform
        self.options = ['_1.png', '_2.png', '_3.png', '_4.png']

    def __len__(self):
        return len(self.full_filelist)

    def convert_to_2d(self, x):
        """Append a zero 'imaginary' channel below x along dim 0."""
        return torch.cat((x, torch.zeros_like(x)), dim=0)

    def _prepare(self, path):
        """Load, optionally transform, rescale to [-1, 1], and add the zero channel."""
        img = load_img(path)
        if self.transform is not None:
            img = self.transform(img)
        img = 2.0 * img - 1.0
        return self.convert_to_2d(img)

    def __getitem__(self, item):
        target_path = self.full_filelist[item]
        data = self._prepare(target_path)
        stem = os.path.splitext(os.path.split(target_path)[1])[0]
        suffix = random.choice(self.options)
        initial_point = self._prepare(self.init_directory + stem + suffix)
        return data, initial_point
if __name__=="__main__":
    # Smoke test for EquilibriumDataset.
    dataset_folder = "/Users/dgilton/PycharmProjects/provableplaying/training/data/train/"
    transform = transforms.Compose(
        [
            transforms.ToTensor(),
        ]
    )
    # Bug fix: the constructor requires init_directory (the original call
    # omitted it and raised TypeError), and __getitem__ returns a
    # (target, initial_point) pair, so the tuple must be unpacked before
    # reading .shape. Using the same folder for the initial points here is
    # a placeholder — point it at the real init directory when available.
    dataset = EquilibriumDataset(dataset_folder, dataset_folder, transform=transform)
    target, initial_point = dataset[0]
    print(target.shape, initial_point.shape)
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,654
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py
|
import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import operators.blurs as blurs
from operators.operator import OperatorPlusNoise
from utils.celeba_dataloader import CelebaTrainingDatasetSubset, CelebaTestDataset
from networks.normalized_equilibrium_u_net import UnetModel, DnCNN
from solvers.equilibrium_solvers import EquilibriumProxGrad
from training import refactor_equilibrium_training
from solvers import new_equilibrium_utils as eq_utils
# ---------------------------------------------------------------------------
# CLI arguments for the deblurring DEQ training run.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--and_maxiters', default=100)
parser.add_argument('--and_beta', type=float, default=1.0)
parser.add_argument('--and_m', type=int, default=5)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.9)
parser.add_argument('--lr_gamma', type=float, default=0.1)
parser.add_argument('--sched_step', type=int, default=10)
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_blur_save_inf.ckpt")
args = parser.parse_args()

# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0
batch_size = int(args.batch_size)
n_channels = 3
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 2
save_every_n_epochs = 1
# NOTE(review): initial_eta is hard-coded; the --etainit CLI flag is parsed but
# never used in this script.
initial_eta = 0.2

initial_data_points = 10000
# point this towards your celeba files
data_location = "/share/data/vision-greg2/mixpatch/img_align_celeba/"

# Gaussian blur forward-model parameters.
kernel_size = 5
kernel_sigma = 5.0
noise_sigma = 1e-2

# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
load_location = "/share/data/willett-group/users/gilton/denoisers/celeba_denoiser_normunet_3.ckpt"

# Probe the first 6 CUDA device ids; invalid ids raise AssertionError on the
# torch versions this was written for, which is caught below.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]

# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)

# Set up data and dataloaders
transform = transforms.Compose(
    [
        transforms.Resize((128, 128)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
)
celeba_train_size = 162770
total_data = initial_data_points
total_indices = random.sample(range(celeba_train_size), k=total_data)
initial_indices = total_indices

dataset = CelebaTrainingDatasetSubset(data_location, subset_indices=initial_indices, transform=transform)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True,
)

test_dataset = CelebaTestDataset(data_location, transform=transform)
test_dataloader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False, drop_last=True,
)

### Set up solver and problem setting
# Two identical blur operators: one defines the measurement process, the other
# is used inside the solver.
forward_operator = blurs.GaussianBlur(sigma=kernel_sigma, kernel_size=kernel_size,
                                      n_channels=3, n_spatial_dimensions=2).to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)

internal_forward_operator = blurs.GaussianBlur(sigma=kernel_sigma, kernel_size=kernel_size,
                                               n_channels=3, n_spatial_dimensions=2).to(device=device)

# standard u-net
# learned_component = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,
#                               drop_prob=0.0, chans=32)
learned_component = DnCNN(channels=n_channels)
# Warm-start the learned component from a pretrained denoiser checkpoint.
if os.path.exists(load_location):
    if torch.cuda.is_available():
        saved_dict = torch.load(load_location)
    else:
        saved_dict = torch.load(load_location, map_location='cpu')
    # NOTE(review): start_epoch read here is overwritten by start_epoch = 0
    # below; only the denoiser weights actually carry over.
    start_epoch = saved_dict['epoch']
    learned_component.load_state_dict(saved_dict['solver_state_dict'])

# learned_component = Autoencoder()
solver = EquilibriumProxGrad(linear_operator=internal_forward_operator, nonlinear_operator=learned_component,
                             eta=initial_eta, minval=-1, maxval = 1)

if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)

start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()

# Resume a previous training run if a checkpoint exists at save_location.
if os.path.exists(save_location):
    if not cpu_only:
        saved_dict = torch.load(save_location)
    else:
        saved_dict = torch.load(save_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])

# set up loss and train
lossfunction = torch.nn.MSELoss(reduction='sum')

# Anderson-accelerated fixed-point iteration for the DEQ forward pass.
forward_iterator = eq_utils.andersonexp
deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, m=anderson_m, beta=anderson_beta, lam=1e-2,
                                        max_iter=max_iters, tol=1e-5)

# forward_iterator = eq_utils.forward_iteration
# deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, max_iter=100, tol=1e-8)

# Do train
refactor_equilibrium_training.train_solver_precond1(
    single_iterate_solver=solver, train_dataloader=dataloader, test_dataloader=test_dataloader,
    measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
    deep_eq_module=deep_eq_module, loss_function=lossfunction, n_epochs=n_epochs,
    use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
    print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
    start_epoch=start_epoch, forward_operator = forward_operator, noise_sigma=noise_sigma,
    precond_iterates=60)
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,655
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py
|
import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
import operators.singlecoil_mri as mrimodel
from operators.operator import OperatorPlusNoise
from utils.fastmri_dataloader import singleCoilFastMRIDataloader
from networks.normalized_equilibrium_u_net import UnetModel, DnCNN
from solvers.equilibrium_solvers import EquilibriumProxGradMRI
from training import refactor_equilibrium_training
from solvers import new_equilibrium_utils as eq_utils
# ---------------------------------------------------------------------------
# CLI arguments for the single-coil MRI DEQ training run.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--and_maxiters', default=100)
parser.add_argument('--and_beta', type=float, default=1.0)
parser.add_argument('--and_m', type=int, default=5)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.4)
parser.add_argument('--lr_gamma', type=float, default=0.1)
parser.add_argument('--sched_step', type=int, default=10)
parser.add_argument('--acceleration', type=float, default=8.0)
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
parser.add_argument('--loadpath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
args = parser.parse_args()

# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0
batch_size = int(args.batch_size)
n_channels = 2
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 2
save_every_n_epochs = 1
initial_eta = float(args.etainit)

# k-space undersampling mask parameters.
dataheight = 320
datawidth = 320
mri_center_fraction = 0.04
mri_acceleration = float(args.acceleration)
mask = mrimodel.create_mask(shape=[dataheight, datawidth, 2], acceleration=mri_acceleration,
                            center_fraction=mri_center_fraction, seed=10)

noise_sigma = 1e-2

# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
load_location = "/share/data/willett-group/users/gilton/denoisers/mri_denoiser_unetnorm_4.ckpt"

# Probe the first 6 CUDA device ids; invalid ids raise AssertionError on the
# torch versions this was written for, which is caught below.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]

# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)

# Set up data and dataloaders
data_location = "/share/data/vision-greg2/users/gilton/singlecoil_curated_clean/"
trainset_size = 2000
total_data = 2194
random.seed(10)
# NOTE(review): all_indices is computed but never used; train_indices below is
# what actually selects the subset.
all_indices = list(range(trainset_size))
train_indices = random.sample(range(total_data), k=trainset_size)
dataset = singleCoilFastMRIDataloader(data_location, data_indices=train_indices)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
)

### Set up solver and problem setting
# Two identical MRI operators: one defines the measurement process, the other
# is used inside the solver.
forward_operator = mrimodel.cartesianSingleCoilMRI(kspace_mask=mask).to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
internal_forward_operator = mrimodel.cartesianSingleCoilMRI(kspace_mask=mask).to(device=device)

# standard u-net
# learned_component = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,
#                               drop_prob=0.0, chans=32)
learned_component = DnCNN(channels=n_channels)
cpu_only = not torch.cuda.is_available()
# Warm-start the learned component from a pretrained denoiser checkpoint.
if os.path.exists(load_location):
    if not cpu_only:
        saved_dict = torch.load(load_location)
    else:
        saved_dict = torch.load(load_location, map_location='cpu')
    learned_component.load_state_dict(saved_dict['solver_state_dict'])

# learned_component = Autoencoder()
solver = EquilibriumProxGradMRI(linear_operator=internal_forward_operator, nonlinear_operator=learned_component,
                                eta=initial_eta, minval=-1, maxval = 1)

if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)

start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()

# Resume a previous training run if a checkpoint exists at save_location.
if os.path.exists(save_location):
    if not cpu_only:
        saved_dict = torch.load(save_location)
    else:
        saved_dict = torch.load(save_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])

# set up loss and train
lossfunction = torch.nn.MSELoss(reduction='sum')

# Anderson-accelerated fixed-point iteration for the DEQ forward pass.
forward_iterator = eq_utils.andersonexp
deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, m=anderson_m, beta=anderson_beta, lam=1e-2,
                                        max_iter=max_iters, tol=1e-4)

# forward_iterator = eq_utils.forward_iteration
# deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, max_iter=max_iters, tol=1e-8)

# Do train
# NOTE(review): noise_sigma=0.3 passed here differs from the noise_sigma=1e-2
# used in the measurement process above — confirm this is intentional.
refactor_equilibrium_training.train_solver_precond(
    single_iterate_solver=solver, train_dataloader=dataloader,
    measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
    deep_eq_module=deep_eq_module, loss_function=lossfunction, n_epochs=n_epochs,
    use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
    print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
    start_epoch=start_epoch, forward_operator = forward_operator, noise_sigma=0.3,
    precond_iterates=50)
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,656
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/operators/singlecoil_mri.py
|
import torch, numbers, math
import torch.nn as nn
import torch.nn.functional as torchfunc
from operators.operator import LinearOperator
import numpy as np
import torch
def to_tensor(data):
    """Convert a numpy array to a PyTorch tensor.

    Complex arrays become real tensors with the (real, imaginary) parts
    stacked along a new last dimension.

    Args:
        data (np.array): Input numpy array

    Returns:
        torch.Tensor: PyTorch version of data
    """
    if not np.iscomplexobj(data):
        return torch.from_numpy(data)
    stacked = np.stack((data.real, data.imag), axis=-1)
    return torch.from_numpy(stacked)
def apply_mask(data, mask_func, seed=None, padding=None):
    """Subsample k-space data by multiplying with a generated mask.

    Args:
        data (torch.Tensor): k-space data with at least 3 dims; dims -3/-2 are
            spatial and dim -1 has size 2 (complex parts).
        mask_func (callable): maps (shape, seed) -> mask tensor.
        seed (int or 1-d array_like, optional): RNG seed passed to mask_func.
        padding (optional): (left, right) column bounds; columns outside
            [left, right) are zeroed in the mask.

    Returns:
        tuple: (masked k-space data, mask).
    """
    mask_shape = np.array(data.shape)
    mask_shape[:-3] = 1  # broadcast the mask over all batch dimensions
    mask = mask_func(mask_shape, seed)
    if padding is not None:
        mask[:, :, :padding[0]] = 0
        mask[:, :, padding[1]:] = 0  # padding value inclusive on right of zeros
    subsampled = data * mask + 0.0  # the + 0.0 removes the sign of the zeros
    return subsampled, mask
def mask_center(x, mask_from, mask_to):
    """Zero *x* everywhere except columns [mask_from, mask_to) of dim 3."""
    out = torch.zeros_like(x)
    out[:, :, :, mask_from:mask_to] = x[:, :, :, mask_from:mask_to]
    return out
def complex_mul(x, y):
    """Elementwise complex product of two (..., 2) real-view tensors."""
    assert x.shape[-1] == y.shape[-1] == 2
    xr, xi = x[..., 0], x[..., 1]
    yr, yi = y[..., 0], y[..., 1]
    real_part = xr * yr - xi * yi
    imag_part = xr * yi + xi * yr
    return torch.stack((real_part, imag_part), dim=-1)
def complex_conj(x):
    """Complex conjugate of a (..., 2) real-view tensor."""
    assert x.shape[-1] == 2
    real, imag = x[..., 0], x[..., 1]
    return torch.stack((real, -imag), dim=-1)
def fft2(data):
    """
    Apply centered 2 dimensional Fast Fourier Transform.
    Args:
        data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
            -3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
            assumed to be batch dimensions.
    Returns:
        torch.Tensor: The FFT of the input.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Shift the image center to index 0 before the FFT and back afterwards, so
    # the DC component ends up in the middle of the output ("centered" FFT).
    data = ifftshift(data, dim=[-3, -2])
    data = torch.view_as_real(
        torch.fft.fftn(  # type: ignore
            torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
        )
    )
    data = fftshift(data, dim=[-3, -2])
    return data
def dft_matrix(N, mask):
    """Build the rows of an N-point DFT matrix selected by *mask*.

    Returns a complex matrix W with entries exp(-2*pi*i*r*c/N)/sqrt(N),
    keeping only the frequency rows where the fftshifted first mask row is
    nonzero.
    """
    # Frequencies 0..N-1; marked trainable, presumably so sampled frequencies
    # can be learned — TODO confirm this is intentional.
    learnable_parameters = torch.arange(0,N, dtype=torch.float32)
    learnable_parameters.requires_grad_(True)
    mask_vec = fftshift(mask[0, :], dim=0)
    mask_vec = mask_vec > 0
    mask_vec = mask_vec.squeeze()
    masked_params = torch.masked_select(learnable_parameters, mask_vec)
    normalizer = np.sqrt(N)
    ii, jj = torch.meshgrid(masked_params, torch.arange(0,N, dtype=torch.float32))
    W = torch.exp(-2.0 * np.pi * 1j * ii*jj / N) / normalizer
    return W
def onedfft(data, dim):
    """Intended as a per-slice 1-D FFT along *dim*.

    NOTE(review): this looks broken — each loop iteration computes the FFT of
    the *entire* tensor (viewed as complex) and writes it into a single slice,
    rather than transforming one slice at a time. Verify before use.
    """
    # data = ifftshift(data, dim=dim)
    dim_size = data.shape[dim]
    for ii in range(dim_size):
        if dim==1:
            data[:,ii,:] = torch.fft.fftn(  # type: ignore
                torch.view_as_complex(data), dim=0, norm="ortho")
        else:
            data[ii, :, :] = torch.fft.fftn(  # type: ignore
                torch.view_as_complex(data), dim=1, norm="ortho")
    # data = ifftshift(data, dim=dim)
    return data
def onedifft(data, dim):
    """Intended as a per-slice 1-D inverse FFT along *dim*.

    NOTE(review): same concern as onedfft — each iteration applies the inverse
    FFT to the whole tensor and writes it into one slice. Verify before use.
    """
    # data = ifftshift(data, dim=dim)
    dim_size = data.shape[dim]
    for ii in range(dim_size):
        if dim==1:
            data[:,ii,:] = torch.fft.ifftn(  # type: ignore
                torch.view_as_complex(data), dim=0, norm="ortho")
        else:
            data[ii, :, :] = torch.fft.ifftn(  # type: ignore
                torch.view_as_complex(data), dim=1, norm="ortho")
    # data = ifftshift(data, dim=dim)
    return data
def ifft2(data):
    """
    Apply centered 2-dimensional Inverse Fast Fourier Transform.
    Args:
        data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
            -3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
            assumed to be batch dimensions.
    Returns:
        torch.Tensor: The IFFT of the input.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Mirror of fft2: shift, inverse transform over the spatial dims, shift back.
    data = ifftshift(data, dim=[-3, -2])
    data = torch.view_as_real(
        torch.fft.ifftn(  # type: ignore
            torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
        )
    )
    data = fftshift(data, dim=[-3, -2])
    return data
def complex_abs(data):
    """Magnitude of a complex-view tensor whose last dimension has size 2."""
    assert data.size(-1) == 2
    return data.pow(2).sum(dim=-1).sqrt()
def complex_abs_sq(data):
    """Squared magnitude of a complex-view tensor (last dim of size 2)."""
    assert data.size(-1) == 2
    return data.pow(2).sum(dim=-1)
def root_sum_of_squares(data, dim=0):
    """Root-sum-of-squares along *dim*: sqrt(sum(data**2, dim)).

    Args:
        data (torch.Tensor): The input tensor
        dim (int): The dimension along which to apply the RSS transform

    Returns:
        torch.Tensor: The RSS value
    """
    squared_sum = (data ** 2).sum(dim)
    return squared_sum.sqrt()
def root_sum_of_squares_complex(data, dim=0):
    """
    Root-sum-of-squares along *dim* for complex-view data (last dim of size 2):
    sqrt(sum of squared complex magnitudes over dim).
    Args:
        data (torch.Tensor): The input tensor; last dimension holds (re, im)
        dim (int): The dimension along which to apply the RSS transform
    Returns:
        torch.Tensor: The RSS value
    """
    return torch.sqrt(complex_abs_sq(data).sum(dim))
def center_crop(data, shape):
    """Center-crop the last two dimensions of *data* to *shape*.

    Args:
        data (torch.Tensor): at least 2-D; cropping applies to the last two dims.
        shape (int, int): output (height, width); must fit within data.

    Returns:
        torch.Tensor: The center cropped image
    """
    assert 0 < shape[0] <= data.shape[-2]
    assert 0 < shape[1] <= data.shape[-1]
    top = (data.shape[-2] - shape[0]) // 2
    left = (data.shape[-1] - shape[1]) // 2
    return data[..., top:top + shape[0], left:left + shape[1]]
def complex_center_crop(data, shape):
    """Center-crop a complex-view image along dims -3 and -2.

    Args:
        data (torch.Tensor): at least 3-D; dims -3/-2 are spatial and the last
            dimension has size 2 (complex parts).
        shape (int, int): output (height, width); must fit within data.

    Returns:
        torch.Tensor: The center cropped image
    """
    assert 0 < shape[0] <= data.shape[-3]
    assert 0 < shape[1] <= data.shape[-2]
    top = (data.shape[-3] - shape[0]) // 2
    left = (data.shape[-2] - shape[1]) // 2
    return data[..., top:top + shape[0], left:left + shape[1], :]
def center_crop_to_smallest(x, y):
    """
    Apply a center crop on the larger image to the size of the smaller image.

    Both tensors are cropped (over their last two dims) to the elementwise
    minimum of their heights and widths, and returned as a pair.
    """
    smallest_width = min(x.shape[-1], y.shape[-1])
    smallest_height = min(x.shape[-2], y.shape[-2])
    x = center_crop(x, (smallest_height, smallest_width))
    y = center_crop(y, (smallest_height, smallest_width))
    return x, y
def normalize(data, mean, stddev, eps=0.):
    """Standardize *data*: (data - mean) / (stddev + eps).

    Args:
        data (torch.Tensor): Input data to be normalized
        mean (float): Mean value
        stddev (float): Standard deviation
        eps (float): Added to stddev to prevent dividing by zero

    Returns:
        torch.Tensor: Normalized tensor
    """
    shifted = data - mean
    return shifted / (stddev + eps)
def normalize_instance(data, eps=0.):
    """
    Normalize the given tensor using:
        (data - mean) / (stddev + eps)
    where mean and stddev are computed from the data itself.
    Args:
        data (torch.Tensor): Input data to be normalized
        eps (float): Added to stddev to prevent dividing by zero
    Returns:
        tuple: (normalized tensor, mean, std) so the caller can un-normalize.
    """
    mean = data.mean()
    std = data.std()
    return normalize(data, mean, std, eps), mean, std
# Helper functions
def roll(x, shift, dim):
    """Circularly shift *x* by *shift* along *dim* (like np.roll).

    *shift* and *dim* may be matching tuples/lists to roll several
    dimensions in sequence.
    """
    if isinstance(shift, (tuple, list)):
        assert len(shift) == len(dim)
        for amount, axis in zip(shift, dim):
            x = roll(x, amount, axis)
        return x
    shift = shift % x.size(dim)
    if shift == 0:
        return x
    # Split at the wrap point and swap the two pieces.
    head = x.narrow(dim, 0, x.size(dim) - shift)
    tail = x.narrow(dim, x.size(dim) - shift, shift)
    return torch.cat((tail, head), dim=dim)
def fftshift(x, dim=None):
    """
    Similar to np.fft.fftshift but applies to PyTorch Tensors
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        # NOTE(review): the comprehension variable shadows `dim`; it works
        # because `dim` was already consumed into the tuple above, but it is
        # easy to misread.
        shift = [dim // 2 for dim in x.shape]
    elif isinstance(dim, int):
        shift = x.shape[dim] // 2
    else:
        shift = [x.shape[i] // 2 for i in dim]
    return roll(x, shift, dim)
def ifftshift(x, dim=None):
    """
    Similar to np.fft.ifftshift but applies to PyTorch Tensors
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        # NOTE(review): comprehension variable shadows `dim` (see fftshift);
        # the (size + 1) // 2 offset makes this the exact inverse of fftshift
        # for odd sizes.
        shift = [(dim + 1) // 2 for dim in x.shape]
    elif isinstance(dim, int):
        shift = (x.shape[dim] + 1) // 2
    else:
        shift = [(x.shape[i] + 1) // 2 for i in dim]
    return roll(x, shift, dim)
class ApplyKSpaceMask(nn.Module):
    """Mask an image in k-space: centered FFT -> multiply by mask -> inverse FFT."""
    def __init__(self, mask):
        super(ApplyKSpaceMask, self).__init__()
        # plain attribute (not a registered buffer): the mask does not follow
        # .to(device) automatically — NOTE(review): confirm callers handle this.
        self.mask = mask
    def forward(self, input):
        kspace_data = fft2(ifftshift(input))
        masked_kspace_data = kspace_data * self.mask + 0.0  # + 0.0 clears signed zeros
        visual_data = fftshift(ifft2(masked_kspace_data))
        return visual_data
def gaussian_oned(x):
    """Standard normal probability density evaluated at x."""
    coefficient = 1.0 / np.sqrt(2.0 * np.pi)
    return coefficient * np.exp(-0.5 * x ** 2)
def find_nearest(x, array):
    """Index of the element of *array* closest to scalar *x*."""
    distances = np.abs(array - x)
    return distances.argmin()
def exhaustive_sample(center_frac, acceleration, n_cols, seed):
    """Build a 1-D sampling pattern: a fully sampled low-frequency band plus
    Gaussian-weighted random columns, stopping once the target acceleration
    (undersampling ratio) is reached.

    Returns an array of length n_cols whose nonzero entries mark sampled columns.
    """
    grid = np.linspace(-3.0,3.0,n_cols)
    sample_grid = np.zeros((n_cols,))
    num_low_freqs = int(round(n_cols * center_frac))
    pad = (n_cols - num_low_freqs + 1) // 2
    # Always keep the central low-frequency band.
    sample_grid[pad:pad+num_low_freqs] = [True]*num_low_freqs
    rng = np.random.RandomState(seed=seed)
    while True:
        # Draw standard-normal samples and mark the nearest grid column,
        # which biases the sampling density toward center frequencies.
        sample_point = rng.standard_normal()
        if np.abs(sample_point) < 3.0:
            nearest_index = find_nearest(sample_point, grid)
            sample_grid[nearest_index] = True
            ratio_sampled = n_cols / sum(sample_grid)
            # Stop once the achieved ratio (total / sampled) drops below the
            # requested acceleration.
            if acceleration > ratio_sampled:
                return sample_grid
def create_mask(shape, center_fraction, acceleration, seed=0, flipaxis=False):
    """Create a broadcastable 1-D k-space column mask for *shape*.

    The mask is generated by exhaustive_sample over dimension -2 of *shape*
    (or dimension 0 when flipaxis=True) and returned as a float tensor with
    singleton dims elsewhere so it broadcasts over the data.
    """
    num_cols = shape[-2]

    # Create the mask
    mask = exhaustive_sample(center_fraction, acceleration, num_cols, seed)
    # num_low_freqs = int(round(num_cols * center_fraction))
    # prob = (num_cols / acceleration - num_low_freqs) / (num_cols - num_low_freqs)
    # rng = np.random.RandomState(seed=seed)
    #
    # mask = rng.standard_normal(size=num_cols) < prob
    # pad = (num_cols - num_low_freqs + 1) // 2
    # mask[pad:pad + num_low_freqs] = True

    # Reshape the mask so it broadcasts along every other dimension.
    mask_shape = [1 for _ in shape]
    if flipaxis:
        mask_shape[0] = num_cols
    else:
        mask_shape[-2] = num_cols
    # mask = mask.astype(np.float32)
    mask = mask.reshape(*mask_shape).astype(np.float32)
    # print(mask.shape)
    # exit()
    mask = torch.tensor(mask, requires_grad=False)
    return mask
class toKspace(nn.Module):
    """Image -> (optionally masked) k-space, channels-first (N, 2, H, W) views."""
    def __init__(self, mask=None):
        super(toKspace, self).__init__()
        if mask is None:
            self.mask = mask
        else:
            # register as buffer so the mask follows .to(device) / state_dict
            self.register_buffer('mask', tensor=mask)
    def forward(self, input):
        # (N, 2, H, W) -> (N, H, W, 2) real view expected by fft2
        kspace_data = fft2(ifftshift(input.permute((0,2,3,1))))
        if self.mask is not None:
            kspace_data = kspace_data * self.mask + 0.0
        return kspace_data.permute((0,3,1,2))
class toKspaceMulti(nn.Module):
    """Like toKspace but cycles between several masks via advance_ii()."""
    def __init__(self, masks):
        super(toKspaceMulti, self).__init__()
        self.masks = masks
        self.ii = 0  # index of the currently active mask
    def advance_ii(self):
        # NOTE(review): hard-coded modulo 3 assumes exactly three masks.
        self.ii = (self.ii + 1) % 3
    def forward(self, input):
        kspace_data = fft2(ifftshift(input.permute((0,2,3,1))))
        mask = self.masks[self.ii]
        kspace_data = kspace_data * mask + 0.0
        return kspace_data.permute((0,3,1,2))
class fromKspace(nn.Module):
    """(Optionally masked) k-space -> image, channels-first (N, 2, H, W) views."""
    def __init__(self, mask=None):
        super(fromKspace, self).__init__()
        if mask is None:
            self.mask = mask
        else:
            # register as buffer so the mask follows .to(device) / state_dict
            self.register_buffer('mask', tensor=mask)
    def forward(self, input):
        if self.mask is not None:
            input = input.permute((0,2,3,1)) * self.mask + 0.0
        else:
            input = input.permute((0,2,3,1))
        image_data = ifftshift(ifft2(input))
        return image_data.permute((0,3,1,2))
class cartesianSingleCoilMRI(LinearOperator):
    """Masked centered 2-D Fourier forward model for single-coil MRI.

    Inputs/outputs of forward and gramian are channels-first real views of
    complex images: (N, 2, H, W).
    """
    def __init__(self, kspace_mask):
        super(cartesianSingleCoilMRI, self).__init__()
        # buffer: the mask follows .to(device) and is serialized with the module
        self.register_buffer('mask', tensor=kspace_mask)
    def forward(self, input):
        """A x: centered separable FFT of the image followed by k-space masking."""
        input = ifftshift(input.permute((0, 2, 3, 1)))
        complex_input = torch.view_as_complex(input)
        kspace = torch.fft.fftn(complex_input, dim=1, norm="ortho")
        kspace = torch.fft.fftn(kspace, dim=2, norm="ortho")
        kspace = fftshift(kspace)
        # NOTE(review): if self.mask were None, kspace_data below would be
        # unbound (NameError); a non-None mask appears to be assumed.
        if self.mask is not None:
            kspace_data = kspace * self.mask + 0.0
        kspace_data = ifftshift(kspace_data)
        return torch.view_as_real(kspace_data)
    def gramian(self, input):
        """A^T A x: forward transform, mask, then inverse transform in one pass."""
        input = ifftshift(input.permute((0, 2, 3, 1)))
        complex_input = torch.view_as_complex(input)
        kspace = torch.fft.fftn(complex_input, dim=1, norm="ortho")
        kspace = torch.fft.fftn(kspace, dim=2, norm="ortho")
        kspace = fftshift(kspace)
        if self.mask is not None:
            kspace_data = kspace * self.mask + 0.0
        kspace_data = ifftshift(kspace_data)
        kspace_data = torch.fft.ifftn(kspace_data, dim=1, norm="ortho")
        realspace = torch.fft.ifftn(kspace_data, dim=2, norm="ortho")
        realspace = torch.view_as_real(realspace)
        output = ifftshift(realspace).permute((0,3,1,2))
        return output
    def adjoint(self, input):
        """A^T y: inverse FFT of (already masked) k-space data.

        NOTE(review): unlike forward/gramian, this applies view_as_complex
        without first permuting a channels-first input — verify that callers
        pass tensors with the complex dimension last.
        """
        complex_input = torch.view_as_complex(input)
        complex_input = torch.fft.ifftn(complex_input, dim=1, norm="ortho")
        realspace = torch.fft.ifftn(complex_input, dim=2, norm="ortho")
        realspace = torch.view_as_real(realspace)
        output = ifftshift(realspace).permute((0, 3, 1, 2))
        return output
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,657
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/solvers/gradnet.py
|
import torch.nn as nn
import torch
from solvers.cg_utils import conjugate_gradient
from PIL import Image
import imageio
import numpy as np
tt = 0
class GradNet(nn.Module):
    """Unrolled gradient network.

    forward(y, iterations) runs a fixed number of update blocks
    x <- x - eta * (A^T A x - A^T y - R(x)), starting from a CG-preconditioned
    initial point, where A is the linear operator and R the learned component.
    """

    def __init__(self, linear_operator, nonlinear_operator, eta_initial_val=0.1):
        super(GradNet, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator

        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)

        # Learnable step size eta.
        self.register_parameter(name='eta', param=torch.nn.Parameter(torch.tensor(eta_initial_val), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    # This is a bit redundant
    def initial_point(self, y):
        # Plain A^T y starting iterate.
        return self._linear_adjoint(y)

    def initial_point_precond(self, y):
        # CG-preconditioned start: approx. (A^T A + eta I)^{-1} A^T y.
        initial_point = self._linear_adjoint(y)
        preconditioned_input = conjugate_gradient(initial_point, self.linear_op.gramian, regularization_lambda=self.eta,
                                                  n_iterations=60)
        return preconditioned_input

    def single_block(self, input, y):
        # Gradient of the data-fit term minus the learned regularizer output.
        grad_update = self.linear_op.gramian(input) - self._linear_adjoint(y) - self.nonlinear_op(input)
        return input - self.eta * grad_update

    def forward(self, y, iterations):
        """Run *iterations* gradient blocks from the preconditioned start.

        The previous revision carried ~30 lines of commented-out image-dump
        debugging code here; it has been removed with no behavior change.
        """
        running_term = self.initial_point_precond(y)
        for _ in range(iterations):
            running_term = self.single_block(running_term, y)
        return running_term
class PrecondNeumannNet(nn.Module):
    """Preconditioned Neumann network: accumulates the partial sums of a
    series of CG-preconditioned blocks."""
    def __init__(self, linear_operator, nonlinear_operator, lambda_initial_val=0.1, cg_iterations=10):
        super(PrecondNeumannNet,self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.cg_iterations = cg_iterations

        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)

        # Learnable regularization weight (registered under the name 'eta').
        self.register_parameter(name='eta', param=torch.nn.Parameter(torch.tensor(lambda_initial_val), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    # This is a bit redundant
    def initial_point(self, y):
        # CG solve of (A^T A + eta I) z = y; note y is used directly,
        # presumably already A^T y — TODO confirm against callers.
        preconditioned_input = conjugate_gradient(y, self.linear_op.gramian, regularization_lambda=self.eta,
                                                  n_iterations=self.cg_iterations)
        return preconditioned_input

    def single_block(self, input):
        # One Neumann-series term: eta * preconditioner(input) - R(input).
        preconditioned_step = conjugate_gradient(input, self.linear_op.gramian, regularization_lambda=self.eta,
                                                 n_iterations=self.cg_iterations)
        return self.eta * preconditioned_step - self.nonlinear_op(input)

    def forward(self, y, iterations):
        """Accumulate the Neumann-series partial sum over *iterations* blocks."""
        initial_point = self.eta * self.initial_point(y)
        running_term = initial_point
        accumulator = initial_point
        for bb in range(iterations):
            running_term = self.single_block(running_term)
            accumulator = accumulator + running_term
        return accumulator
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,658
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/solvers/equilibrium_nets.py
|
import torch.nn as nn
import torch
from solvers.cg_utils import conjugate_gradient
class EquilibriumGrad(nn.Module):
    """Gradient-step fixed-point cell: z <- clamp(z - eta * grad(z, y)).

    The gradient combines the measurement operator's data-fidelity term with
    a learned network term; the step size `eta` is a learnable parameter.
    """

    def __init__(self, linear_operator, nonlinear_operator, eta_initial_val=0.1, minval=-1, maxval=1):
        super().__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        # Re-register any learnable operator parameters on this module so the
        # optimizer picks them up alongside the network weights.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)
        eta_param = torch.nn.Parameter(torch.tensor(eta_initial_val), requires_grad=True)
        self.register_parameter(name='eta', param=eta_param)

    def _linear_op(self, x):
        """Forward measurement operator."""
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        """Adjoint measurement operator."""
        return self.linear_op.adjoint(x)

    def set_initial_point(self, y):
        """Cache the adjoint of y as the starting iterate."""
        self.initial_point = self._linear_adjoint(y)

    def get_gradient(self, z, y):
        """Data-fidelity gradient minus the learned network term."""
        data_grad = self.linear_op.gramian(z) - self._linear_adjoint(y)
        return data_grad - self.nonlinear_op(z)

    def forward(self, z, y):
        """One damped gradient step, clamped to [minval, maxval]."""
        stepped = z - self.eta * self.get_gradient(z, y)
        return torch.clamp(stepped, self.minval, self.maxval)
class PrecondNeumannNet(nn.Module):
    """Preconditioned Neumann network: a truncated series of CG-preconditioned
    terms mixed with a learned network, with a learnable scale `eta`."""
    def __init__(self, linear_operator, nonlinear_operator, lambda_initial_val=0.1, cg_iterations=10):
        """
        Args:
            linear_operator: measurement operator with forward/adjoint/gramian
                and a (possibly empty) parameters() iterable.
            nonlinear_operator: learned network applied in each series term.
            lambda_initial_val: initial value of the learnable scalar `eta`.
            cg_iterations: conjugate-gradient steps per preconditioning solve.
        """
        super(PrecondNeumannNet,self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.cg_iterations = cg_iterations
        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)
        # eta is trained jointly with the rest of the model.
        self.register_parameter(name='eta', param=torch.nn.Parameter(torch.tensor(lambda_initial_val), requires_grad=True))
    def _linear_op(self, x):
        """Apply the forward measurement operator to x."""
        return self.linear_op.forward(x)
    def _linear_adjoint(self, x):
        """Apply the adjoint of the measurement operator to x."""
        return self.linear_op.adjoint(x)
    # This is a bit redundant: single_block performs the same CG solve.
    def initial_point(self, y):
        """CG solve against y of the eta-regularized gramian system."""
        preconditioned_input = conjugate_gradient(y, self.linear_op.gramian, regularization_lambda=self.eta,
                                                  n_iterations=self.cg_iterations)
        return preconditioned_input
    def single_block(self, input):
        """One Neumann-series term: eta * CG-preconditioned input minus the network term."""
        preconditioned_step = conjugate_gradient(input, self.linear_op.gramian, regularization_lambda=self.eta,
                                                 n_iterations=self.cg_iterations)
        return self.eta * preconditioned_step - self.nonlinear_op(input)
    def forward(self, y, iterations):
        """Sum a truncated preconditioned Neumann series with `iterations` extra terms."""
        initial_point = self.eta * self.initial_point(y)
        running_term = initial_point
        accumulator = initial_point
        for bb in range(iterations):
            running_term = self.single_block(running_term)
            accumulator = accumulator + running_term
        return accumulator
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,659
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/solvers/equilibrium_solvers.py
|
import torch.nn as nn
import torch
import matplotlib
# matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from solvers.cg_utils import conjugate_gradient
class EquilibriumGrad(nn.Module):
    """Gradient-step fixed-point cell with a learnable step size `eta`.

    One application maps (z, y) to clamp(z - eta * grad(z, y), minval, maxval).
    """
    def __init__(self, linear_operator, nonlinear_operator, eta, minval = -1, maxval = 1):
        """
        Args:
            linear_operator: measurement operator with forward/adjoint/gramian.
            nonlinear_operator: learned network term of the gradient.
            eta: initial value for the learnable step size.
            minval/maxval: clamp range applied to each new iterate.
        """
        super(EquilibriumGrad,self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)
        self.register_parameter(name='eta', param=torch.nn.Parameter(torch.tensor(eta), requires_grad=True))
    def _linear_op(self, x):
        """Apply the forward measurement operator to x."""
        return self.linear_op.forward(x)
    def _linear_adjoint(self, x):
        """Apply the adjoint of the measurement operator to x."""
        return self.linear_op.adjoint(x)
    def set_initial_point(self, y):
        """Cache the adjoint of y as the starting iterate."""
        self.initial_point = self._linear_adjoint(y)
    def get_gradient(self, z, y):
        """Data-fidelity gradient minus the learned network term."""
        return self.linear_op.gramian(z) - self._linear_adjoint(y) - self.nonlinear_op(z)
    def forward(self, z, y):
        """One gradient step with the learnable step size, then clamp."""
        z_tplus1 = z - self.eta * self.get_gradient(z, y)
        z_tplus1 = torch.clamp(z_tplus1, self.minval, self.maxval)
        return z_tplus1
class EquilibriumProxGrad(nn.Module):
    """Proximal-gradient fixed-point cell.

    One iteration takes a data-fidelity gradient step with a learnable step
    size `eta`, applies the learned network as a residual proximal step, and
    clamps the result to [minval, maxval].
    """

    def __init__(self, linear_operator, nonlinear_operator, eta, minval=-1, maxval=1):
        super().__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        self.register_parameter(name='eta', param=torch.nn.Parameter(torch.tensor(eta), requires_grad=True))
        # Expose any learnable operator parameters through this module so the
        # optimizer trains them too.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)

    def _linear_op(self, x):
        """Forward measurement operator."""
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        """Adjoint measurement operator."""
        return self.linear_op.adjoint(x)

    def get_gradient(self, z, y):
        """Data-fidelity gradient: gramian(z) - adjoint(y)."""
        return self.linear_op.gramian(z) - self._linear_adjoint(y)

    def forward(self, z, y):
        """Gradient step, learned residual proximal step, then clamp."""
        grad_step = z - self.eta * self.get_gradient(z, y)
        proximal = grad_step + self.nonlinear_op(grad_step)
        return torch.clamp(proximal, self.minval, self.maxval)
class EquilibriumProxGradMRI(nn.Module):
    """Proximal-gradient fixed-point cell for MRI.

    Same update as EquilibriumProxGrad, but here `eta` is a plain (fixed,
    non-learnable) attribute rather than a registered parameter.
    """
    def __init__(self, linear_operator, nonlinear_operator, eta, minval = -1, maxval = 1):
        """
        Args:
            linear_operator: measurement operator with forward/adjoint/gramian.
            nonlinear_operator: learned residual proximal network.
            eta: fixed gradient step size.
            minval/maxval: clamp range applied to each new iterate.
        """
        super(EquilibriumProxGradMRI,self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        self.eta = eta
        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)
    def _linear_op(self, x):
        """Apply the forward measurement operator to x."""
        return self.linear_op.forward(x)
    def _linear_adjoint(self, x):
        """Apply the adjoint of the measurement operator to x."""
        return self.linear_op.adjoint(x)
    def get_gradient(self, z, y):
        """Data-fidelity gradient: gramian(z) - adjoint(y)."""
        return self.linear_op.gramian(z) - self._linear_adjoint(y)
    def forward(self, z, y):
        """Gradient step, learned residual proximal step, then clamp."""
        gradstep = z - self.eta * self.get_gradient(z, y)
        z_tplus1 = gradstep + self.nonlinear_op(gradstep)
        z_tplus1 = torch.clamp(z_tplus1, self.minval, self.maxval)
        return z_tplus1
class ProxPnP(nn.Module):
    """Plug-and-play proximal-gradient cell with a fixed step size.

    Unlike the equilibrium variants, `eta` is a plain attribute (not
    learnable), no operator parameters are registered, and the output is
    not clamped.
    """

    def __init__(self, linear_operator, nonlinear_operator, eta, minval=-1, maxval=1):
        super().__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        self.eta = eta

    def _linear_op(self, x):
        """Forward measurement operator."""
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        """Adjoint measurement operator."""
        return self.linear_op.adjoint(x)

    def get_gradient(self, z, y):
        """Data-fidelity gradient: adjoint(forward(z) - y)."""
        return self.linear_op.adjoint(self.linear_op.forward(z) - y)

    def forward(self, z, y):
        """Gradient step followed by the learned residual denoising step (unclamped)."""
        residual = self.linear_op.adjoint(self.linear_op.forward(z)) - self.linear_op.adjoint(y)
        grad_step = z - self.eta * residual
        return grad_step + self.nonlinear_op(grad_step)
class DouglasRachford(nn.Module):
    """Douglas–Rachford-style iteration with a CG data-term prox and a
    learned residual regularizer.

    NOTE(review): the forward update algebraically simplifies to
    (z + v)/2 + R(v) with v = 2*prox_f(z) - z, which differs from the
    textbook DR step z + prox_g(2*prox_f(z) - z) - prox_f(z); confirm this
    form is intended.
    """
    def __init__(self, linear_operator, nonlinear_operator, eta, max_iters = 10, minval = -1, maxval = 1):
        """
        Args:
            linear_operator: measurement operator with forward/adjoint/gramian.
            nonlinear_operator: learned residual network R.
            eta: prox regularization weight (stored as lambdaval).
            max_iters: CG iterations for the internal prox solve.
            minval/maxval: clamp range applied to each new iterate.
        """
        super(DouglasRachford,self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        self.lambdaval = eta
        self.max_cg_iterations = max_iters
    def _linear_op(self, x):
        """Apply the forward measurement operator to x."""
        return self.linear_op.forward(x)
    def internal_prox(self, x, y):
        """Prox of the data term: CG solve seeded with adjoint(y) + lambda*x."""
        initial_point = self.linear_op.adjoint(y) + self.lambdaval*x
        return conjugate_gradient(initial_point, self.linear_op.gramian, self.lambdaval,
                                  n_iterations=self.max_cg_iterations)
    def get_gradient(self, z, y):
        """Data-fidelity gradient: adjoint(forward(z) - y)."""
        return self.linear_op.adjoint(self.linear_op.forward(z) - y)
    def forward(self, z, y):
        """One DR-style update, clamped to [minval, maxval]."""
        prox_f = self.internal_prox(z, y)
        net_input = 2*prox_f - z  # reflection through the data prox
        z_tplus1 = (z + 2*(self.nonlinear_op(net_input) + net_input)-net_input) / 2.0
        z_tplus1 = torch.clamp(z_tplus1, self.minval, self.maxval)
        return z_tplus1
class EquilibriumADMM(nn.Module):
    """One ADMM iteration: CG x-update, learned residual z-update (denoiser
    output added), dual u-update. forward maps (z, u, y) -> (z_new, u_new)."""
    def __init__(self, linear_operator, denoising_net, max_cg_iterations=20, x_alpha=0.4, eta = 0.1, minval=-1, maxval=1):
        """
        Args:
            linear_operator: measurement operator with forward/adjoint/gramian.
            denoising_net: learned residual denoiser used in the z-update.
            max_cg_iterations: CG steps for the x-update solve.
            x_alpha: quadratic-penalty weight of the x-update.
            eta: dual (u) step size.
            minval/maxval: clamp range for the returned z.
        """
        super(EquilibriumADMM, self).__init__()
        self.linear_op = linear_operator
        self.denoising_net = denoising_net
        self.minval = minval
        self.maxval = maxval
        self.x_alpha = x_alpha
        self.eta = eta
        self.max_cg_iters = max_cg_iterations
        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)
    def _linear_op(self, x):
        """Apply the forward measurement operator to x."""
        return self.linear_op.forward(x)
    def _linear_adjoint(self, x):
        """Apply the adjoint of the measurement operator to x."""
        return self.linear_op.adjoint(x)
    def _x_update(self, z, u, y):
        """CG solve of the penalized data term, seeded with adjoint(y) + alpha*(z - u)."""
        gramian = self.linear_op.gramian
        initial_point = self._linear_adjoint(y) + self.x_alpha*(z-u)
        x_update = conjugate_gradient(initial_point, gramian, self.x_alpha, n_iterations=self.max_cg_iters)
        return x_update, z, u
    def _z_update(self, x, z, u):
        """Residual denoising step: z <- (x + u) + R(x + u); `z` is unused."""
        net_input = x + u
        z_update = net_input + self.denoising_net(net_input)
        return x, z_update, u
    def _u_update(self, x, z, u):
        """Dual ascent: u <- u + eta * (x - z)."""
        u_update = u + self.eta * (x - z)
        return x, z, u_update
    def forward(self, z, u, y):
        """Run one x/z/u ADMM cycle and clamp z to [minval, maxval]."""
        x_new, z, u = self._x_update(z, u, y)
        x_new, z_new, u = self._z_update(x_new, z, u)
        x_new, z_new, u_new = self._u_update(x_new, z_new, u)
        z_new = torch.clamp(z_new, self.minval, self.maxval)
        return z_new, u_new
class EquilibriumADMM2(nn.Module):
    """ADMM iteration identical to EquilibriumADMM except the z-update
    SUBTRACTS the denoiser output (z <- (x+u) - R(x+u))."""
    def __init__(self, linear_operator, denoising_net, max_cg_iterations=20, x_alpha=0.4, eta = 0.1, minval=-1, maxval=1):
        """See EquilibriumADMM for parameter meanings."""
        super(EquilibriumADMM2, self).__init__()
        self.linear_op = linear_operator
        self.denoising_net = denoising_net
        self.minval = minval
        self.maxval = maxval
        self.x_alpha = x_alpha
        self.eta = eta
        self.max_cg_iters = max_cg_iterations
        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)
    def _linear_op(self, x):
        """Apply the forward measurement operator to x."""
        return self.linear_op.forward(x)
    def _linear_adjoint(self, x):
        """Apply the adjoint of the measurement operator to x."""
        return self.linear_op.adjoint(x)
    def _x_update(self, z, u, y):
        """CG solve of the penalized data term, seeded with adjoint(y) + alpha*(z - u)."""
        gramian = self.linear_op.gramian
        initial_point = self._linear_adjoint(y) + self.x_alpha*(z-u)
        x_update = conjugate_gradient(initial_point, gramian, self.x_alpha, n_iterations=self.max_cg_iters)
        return x_update, z, u
    def _z_update(self, x, z, u):
        """Residual denoising step (subtractive): z <- (x + u) - R(x + u)."""
        net_input = x + u
        z_update = net_input - self.denoising_net(net_input)
        return x, z_update, u
    def _u_update(self, x, z, u):
        """Dual ascent: u <- u + eta * (x - z)."""
        u_update = u + self.eta * (x - z)
        return x, z, u_update
    def forward(self, z, u, y):
        """Run one x/z/u ADMM cycle and clamp z to [minval, maxval]."""
        x_new, z, u = self._x_update(z, u, y)
        x_new, z_new, u = self._z_update(x_new, z, u)
        x_new, z_new, u_new = self._u_update(x_new, z_new, u)
        z_new = torch.clamp(z_new, self.minval, self.maxval)
        return z_new, u_new
class EquilibriumADMM_minus(nn.Module):
    """ADMM variant with the roles swapped: the x-update applies the denoiser
    (residual subtracted) to (z - u) and the z-update is the CG data solve."""
    def __init__(self, linear_operator, denoising_net, max_cg_iterations=20, x_alpha=0.4, eta = 0.1, minval=-1, maxval=1):
        """See EquilibriumADMM for parameter meanings."""
        super(EquilibriumADMM_minus, self).__init__()
        self.linear_op = linear_operator
        self.denoising_net = denoising_net
        self.minval = minval
        self.maxval = maxval
        self.x_alpha = x_alpha
        self.eta = eta
        self.max_cg_iters = max_cg_iterations
        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)
    def _linear_op(self, x):
        """Apply the forward measurement operator to x."""
        return self.linear_op.forward(x)
    def _linear_adjoint(self, x):
        """Apply the adjoint of the measurement operator to x."""
        return self.linear_op.adjoint(x)
    def _x_update(self, z, u, y):
        """Denoising step (subtractive) on (z - u); `y` is unused here."""
        net_input = z - u
        x_update = net_input - self.denoising_net(net_input)
        return x_update, z, u
    def _z_update(self, x, u, y):
        """CG solve of the penalized data term, seeded with adjoint(y) + alpha*(x + u)."""
        gramian = self.linear_op.gramian
        initial_point = self._linear_adjoint(y) + self.x_alpha*(x+u)
        z_update = conjugate_gradient(initial_point, gramian, self.x_alpha, n_iterations=self.max_cg_iters)
        return x, z_update, u
    def _u_update(self, x, z, u):
        """Dual ascent: u <- u + eta * (x - z)."""
        u_update = u + self.eta * (x - z)
        return x, z, u_update
    def forward(self, z, u, y):
        """Run one x/z/u cycle and clamp z to [minval, maxval]."""
        x_new, z, u = self._x_update(z, u, y)
        x_new, z_new, u = self._z_update(x_new, u, y)
        x_new, z_new, u_new = self._u_update(x_new, z_new, u)
        z_new = torch.clamp(z_new, self.minval, self.maxval)
        return z_new, u_new
class EquilibriumADMM_plus(nn.Module):
    """Same role-swapped ADMM as EquilibriumADMM_minus, but the x-update ADDS
    the denoiser output (x <- (z - u) + R(z - u))."""
    def __init__(self, linear_operator, denoising_net, max_cg_iterations=20, x_alpha=0.4, eta = 0.1, minval=-1, maxval=1):
        """See EquilibriumADMM for parameter meanings."""
        super(EquilibriumADMM_plus, self).__init__()
        self.linear_op = linear_operator
        self.denoising_net = denoising_net
        self.minval = minval
        self.maxval = maxval
        self.x_alpha = x_alpha
        self.eta = eta
        self.max_cg_iters = max_cg_iterations
        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)
    def _linear_op(self, x):
        """Apply the forward measurement operator to x."""
        return self.linear_op.forward(x)
    def _linear_adjoint(self, x):
        """Apply the adjoint of the measurement operator to x."""
        return self.linear_op.adjoint(x)
    def _x_update(self, z, u, y):
        """Denoising step (additive) on (z - u); `y` is unused here."""
        net_input = z - u
        x_update = net_input + self.denoising_net(net_input)
        return x_update, z, u
    def _z_update(self, x, u, y):
        """CG solve of the penalized data term, seeded with adjoint(y) + alpha*(x + u)."""
        gramian = self.linear_op.gramian
        initial_point = self._linear_adjoint(y) + self.x_alpha*(x+u)
        z_update = conjugate_gradient(initial_point, gramian, self.x_alpha, n_iterations=self.max_cg_iters)
        return x, z_update, u
    def _u_update(self, x, z, u):
        """Dual ascent: u <- u + eta * (x - z)."""
        u_update = u + self.eta * (x - z)
        return x, z, u_update
    def forward(self, z, u, y):
        """Run one x/z/u cycle and clamp z to [minval, maxval]."""
        x_new, z, u = self._x_update(z, u, y)
        x_new, z_new, u = self._z_update(x_new, u, y)
        x_new, z_new, u_new = self._u_update(x_new, z_new, u)
        z_new = torch.clamp(z_new, self.minval, self.maxval)
        return z_new, u_new
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,660
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/networks/twolayer_linear_net.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class LinearNet(nn.Module):
    """Three-layer MLP with ReLU hidden activations and a tanh output.

    The forward pass flattens each example, runs the MLP, then reshapes the
    result back to the input's shape — so output_size must match the
    flattened per-example input size for the reshape to succeed.
    """

    def __init__(self, input_size, bottleneck_size, output_size):
        super().__init__()
        layers = [
            nn.Linear(input_size, bottleneck_size),
            nn.ReLU(),
            nn.Linear(bottleneck_size, bottleneck_size),
            nn.ReLU(),
            nn.Linear(bottleneck_size, output_size),
            nn.Tanh(),
        ]
        self.network = nn.Sequential(*layers)
        self.network.apply(self.init_weights)

    def init_weights(self, m):
        """Small-Gaussian weights and constant 0.01 biases on linear layers."""
        if isinstance(m, nn.Linear):
            torch.nn.init.normal_(m.weight, mean=0.0, std=0.01)
            m.bias.data.fill_(0.01)

    def forward(self, input):
        """Run the MLP on the flattened input and restore the original shape."""
        original_shape = input.shape
        flat_out = self.network(torch.flatten(input, start_dim=1))
        return torch.reshape(flat_out, shape=original_shape)
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,661
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/scripts/denoising/mri_unet_denoise.py
|
import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
import operators.operator as lin_operator
from operators.operator import OperatorPlusNoise
from utils.fastmri_dataloader import singleCoilFastMRIDataloader
from networks.equilibrium_u_net import UnetModel
from solvers.equilibrium_solvers import EquilibriumGrad
from training import denoiser_training
from solvers import new_equilibrium_utils as eq_utils
# Training script: fit a U-Net denoiser on noisy single-coil fastMRI data.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--and_maxiters', default=100)
parser.add_argument('--and_beta', type=float, default=1.0)
parser.add_argument('--and_m', type=int, default=5)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.5)
parser.add_argument('--lr_gamma', type=float, default=0.1)
parser.add_argument('--sched_step', type=int, default=10)
parser.add_argument('--acceleration', type=float, default=8.0)
parser.add_argument('--noise_sigma', type=float, default=0.01)
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0
batch_size = int(args.batch_size)
n_channels = 2  # real + imaginary channels
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 10
save_every_n_epochs = 5
initial_eta = float(args.etainit)
# NOTE(review): dataheight/datawidth, initial_eta and the anderson_* values
# are defined but not used by the denoiser training below — confirm they are
# intentional leftovers from the equilibrium scripts.
dataheight = 320
datawidth = 320
noise_sigma = float(args.noise_sigma)
# modify this for your machine
save_location = args.savepath
load_location = args.savepath
# Probe GPU ids 0-5; get_device_properties raises AssertionError for ids
# that do not exist, which is how unavailable devices are skipped.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
data_location = "/share/data/vision-greg2/users/gilton/singlecoil_curated_clean/"
trainset_size = 2000
total_data = 2194
random.seed(10)  # fixed seed so the train split is reproducible
all_indices = list(range(trainset_size))
train_indices = random.sample(range(total_data), k=trainset_size)
dataset = singleCoilFastMRIDataloader(data_location, data_indices=train_indices)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
)
### Set up solver and problem setting
# Denoising setting: identity forward operator plus additive noise.
forward_operator = lin_operator.Identity().to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
solver = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,
                   drop_prob=0.0, chans=32)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()
# set up loss and train
lossfunction = torch.nn.MSELoss()
# Do train (train and test dataloaders are the same split here)
denoiser_training.train_denoiser(denoising_net=solver, train_dataloader=dataloader, test_dataloader=dataloader,
                                 measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
                                 loss_function=lossfunction, n_epochs=n_epochs,
                                 use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
                                 print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
                                 start_epoch=start_epoch)
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,662
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/solvers/new_equilibrium_utils.py
|
import torch.nn as nn
import torch
import matplotlib
#matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import imageio
import numpy as np
from PIL import Image
def complex_conj(x):
    """Complex conjugate of a channel-stacked complex tensor.

    Expects shape (batch, 2, ...) with channel 0 the real part and channel 1
    the imaginary part; returns the same layout with the imaginary channel
    negated.
    """
    assert x.shape[1] == 2
    real_part = x[:, 0, ...]
    imag_part = x[:, 1, ...]
    return torch.stack((real_part, -imag_part), dim=1)
def torchdotproduct(x, y):
    """Per-example dot product: sum of x*y over all non-batch dims (1..3)."""
    return (x * y).sum(dim=[1, 2, 3])
def single_cg_iteration(x, d, g, b, ATA, regularization_lambda):
    """One conjugate-gradient step for (ATA + lambda*I) x = b.

    Args:
        x: current solution iterate.
        d: current search direction.
        g: current gradient/residual vector.
        b: right-hand side of the system.
        ATA: callable applying the normal-equations operator.
        regularization_lambda: Tikhonov term added to the operator.

    Returns:
        Updated (x, d, g).
    """
    def regATA(input, ATA):
        # Regularized operator: ATA(v) + lambda * v.
        return ATA(input) + regularization_lambda*input
    Qd = regATA(d, ATA)
    dQd = torchdotproduct(d, Qd)
    alpha = -torchdotproduct(g,d) / dQd
    alpha = alpha.view((-1,1,1,1))  # per-example step sizes, broadcastable
    x = x + alpha * d
    g = regATA(x, ATA) - b  # residual recomputed at the new iterate
    gQd = torchdotproduct(g, Qd)
    beta = gQd / dQd
    beta = beta.view((-1,1,1,1))
    d = -g + beta*d
    return x, d, g
# This function solves the system (ATA + lambda*I) x = initial_point, where
# initial_point is typically A^T y. The loop is plain tensor arithmetic, so
# it can be backpropagated through.
def conjugate_gradient(initial_point, ATA, regularization_lambda, n_iterations=10):
    """Run n_iterations of CG on the regularized normal equations."""
    x = torch.zeros_like(initial_point)
    d = initial_point
    g = -d
    for ii in range(n_iterations):
        x, d, g = single_cg_iteration(x, d, g, initial_point, ATA, regularization_lambda)
    return x
def complex_dotproduct(x, y):
    """Per-example sum of channel products of conj(x) and y.

    NOTE(review): with the (real, imag) channel layout this evaluates to
    sum(x_re*y_re - x_im*y_im), i.e. Re(x*y), not the Hermitian inner
    product Re(conj(x)*y) = sum(x_re*y_re + x_im*y_im) — confirm which is
    intended by the MRI CG routines that call this.
    """
    return torchdotproduct(complex_conj(x), y)
def single_cg_iteration_MRI(rTr, x, r, p, ATA, regularization_lambda):
    """One CG step for the regularized system, MoDL-style bookkeeping.

    Args:
        rTr: current per-example residual inner product.
        x: solution iterate; r: residual; p: search direction.
        ATA: callable applying the normal-equations operator.
        regularization_lambda: Tikhonov weight added to the operator.

    Returns:
        (rTr_new, x_new, r_new, p_new).
    """
    batch_size = x.shape[0]
    def regATA(input):
        # Regularized operator: ATA(v) + lambda * v.
        return ATA(input) + regularization_lambda*input
    Ap = regATA(p)
    rTr = rTr.view(batch_size, 1, 1, 1)
    alpha = rTr / complex_dotproduct(p, Ap).view(batch_size, 1, 1, 1)
    x_new = x + alpha * p
    r_new = r - alpha * Ap
    rTr_new = complex_dotproduct(r_new, r_new)
    rTr_new = rTr_new.view(batch_size, 1, 1, 1)
    beta = rTr_new / rTr
    # NOTE(review): textbook CG builds the new direction from r_new; this
    # uses the old residual r — confirm against the MoDL reference code.
    p_new = r + beta * p
    return rTr_new, x_new, r_new, p_new
def conjugate_gradient_MRI(initial_point, ATA, regularization_lambda, n_iterations=10):
    '''Straightforward implementation of MoDL's CG for (ATA + lambda*I) x = b,
    starting from x = 0 with r = p = initial_point (the right-hand side).'''
    x = torch.zeros_like(initial_point)
    r = initial_point
    p = initial_point
    rTr = complex_dotproduct(r, r)
    for ii in range(n_iterations):
        rTr, x, r, p = single_cg_iteration_MRI(rTr, x, r, p, ATA, regularization_lambda)
    return x
def jacobian_vector_product(g, z, v):
    """Vector-Jacobian product v^T (dg/dz) via reverse-mode autograd.

    `g` must have been computed from `z` with grad tracking enabled.
    """
    (vjp,) = torch.autograd.grad(outputs=g, inputs=z, grad_outputs=v)
    return vjp
def conjugate_gradient_equilibriumgrad(b, input_z, f_function, n_iterations=10):
    """CG-style solve involving the Jacobian of g(z) = f_function(z) - z at
    input_z, applied through autograd vector-Jacobian products with a small
    (1e-5) damping term.

    NOTE(review): several details diverge from textbook CG and should be
    confirmed: x starts at b while r is also b (standard CG would use
    r0 = b - A x0); the operator is applied to x_k rather than the search
    direction p_k; and beta = rTr_k / rTr_{k+1} is the reciprocal of the
    usual Fletcher–Reeves ratio.
    """
    initial_guess = b.clone()
    x_k = initial_guess
    r_k = b
    p_k = r_k
    batch_size = b.shape[0]
    g = f_function(input_z) - input_z
    for ii in range(n_iterations):
        # VJP of g at input_z against x_k, plus a small damping term.
        Ap_k = (torch.autograd.grad(outputs=g, inputs=input_z, grad_outputs=x_k, retain_graph=True)[0] + 0.00001 * x_k)
        rTr_k = torchdotproduct(r_k, r_k)
        rTr_k = rTr_k.view(batch_size, 1, 1, 1)
        pAp_k = torchdotproduct(Ap_k, p_k)
        pAp_k = pAp_k.view(batch_size, 1, 1, 1)
        alpha = rTr_k / pAp_k
        x_k = x_k + alpha * p_k
        r_kplus1 = r_k - alpha * Ap_k
        rTr_kplus1 = torchdotproduct(r_kplus1, r_kplus1)
        rTr_kplus1 = rTr_kplus1.view(batch_size, 1, 1, 1)
        beta = rTr_k / rTr_kplus1
        p_k = r_kplus1 + beta * p_k
        r_k = r_kplus1
    return x_k
def anderson(f, x0, m=5, lam=1e-4, max_iter=50, tol=1e-2, beta=1.0):
    """Anderson acceleration for the fixed-point iteration x = f(x).

    Adapted from the Deep Equilibrium tutorial:
    http://implicit-layers-tutorial.org/deep_equilibrium_models/

    Fix: the original called torch.solve, which was removed from modern
    PyTorch; torch.linalg.solve is the supported replacement (note its
    argument order is (A, B) for A X = B). Dead debug locals were dropped.

    Args:
        f: callable mapping a (bsz, d, H, W) tensor to one of the same shape.
        x0: initial iterate, shape (bsz, d, H, W).
        m: history size (number of past iterates mixed).
        lam: Tikhonov regularization added to the Gram matrix.
        max_iter: maximum number of iterations.
        tol: relative-residual stopping tolerance.
        beta: mixing/damping coefficient.

    Returns:
        (x_star, res): final iterate shaped like x0, and the list of
        relative residuals per iteration.
    """
    bsz, d, h, w = x0.shape
    flat = d * h * w
    X = torch.zeros(bsz, m, flat, dtype=x0.dtype, device=x0.device)
    F = torch.zeros(bsz, m, flat, dtype=x0.dtype, device=x0.device)
    X[:, 0], F[:, 0] = x0.reshape(bsz, -1), f(x0).reshape(bsz, -1)
    X[:, 1], F[:, 1] = F[:, 0], f(F[:, 0].reshape(x0.shape)).reshape(bsz, -1)
    # Bordered Gram system whose solution gives the mixing weights alpha
    # (the border row/column enforces sum(alpha) = 1).
    Hmat = torch.zeros(bsz, m + 1, m + 1, dtype=x0.dtype, device=x0.device)
    Hmat[:, 0, 1:] = Hmat[:, 1:, 0] = 1
    y = torch.zeros(bsz, m + 1, 1, dtype=x0.dtype, device=x0.device)
    y[:, 0] = 1
    res = []
    current_k = 0
    for k in range(2, max_iter):
        current_k = k
        n = min(k, m)
        G = F[:, :n] - X[:, :n]
        Hmat[:, 1:n + 1, 1:n + 1] = torch.bmm(G, G.transpose(1, 2)) + lam * torch.eye(
            n, dtype=x0.dtype, device=x0.device)[None]
        # torch.linalg.solve solves A X = B (replacement for removed torch.solve).
        alpha = torch.linalg.solve(Hmat[:, :n + 1, :n + 1], y[:, :n + 1])[:, 1:n + 1, 0]  # (bsz x n)
        X[:, k % m] = beta * (alpha[:, None] @ F[:, :n])[:, 0] + (1 - beta) * (alpha[:, None] @ X[:, :n])[:, 0]
        F[:, k % m] = f(X[:, k % m].reshape(x0.shape)).reshape(bsz, -1)
        res.append((F[:, k % m] - X[:, k % m]).norm().item() / (1e-5 + F[:, k % m].norm().item()))
        if res[-1] < tol:
            break
    return X[:, current_k % m].view_as(x0), res
def andersonexp(f, x0, m=5, lam=1e-4, max_iter=50, tol=1e-2, beta=1.0):
    """Anderson acceleration variant returning only the final residual.

    Identical to `anderson` except the second return value is the last
    relative residual (a float) rather than the full history.

    Fixes: torch.solve (removed from modern PyTorch) replaced with
    torch.linalg.solve, and `res` is initialized so the function no longer
    raises UnboundLocalError when max_iter <= 2.

    Args/Returns: see `anderson`.
    """
    bsz, d, h, w = x0.shape
    flat = d * h * w
    X = torch.zeros(bsz, m, flat, dtype=x0.dtype, device=x0.device)
    F = torch.zeros(bsz, m, flat, dtype=x0.dtype, device=x0.device)
    X[:, 0], F[:, 0] = x0.reshape(bsz, -1), f(x0).reshape(bsz, -1)
    X[:, 1], F[:, 1] = F[:, 0], f(F[:, 0].reshape(x0.shape)).reshape(bsz, -1)
    # Bordered Gram system for the mixing weights (sum(alpha) = 1).
    Hmat = torch.zeros(bsz, m + 1, m + 1, dtype=x0.dtype, device=x0.device)
    Hmat[:, 0, 1:] = Hmat[:, 1:, 0] = 1
    y = torch.zeros(bsz, m + 1, 1, dtype=x0.dtype, device=x0.device)
    y[:, 0] = 1
    res = 0.0  # defined even if the loop body never runs
    current_k = 0
    for k in range(2, max_iter):
        current_k = k
        n = min(k, m)
        G = F[:, :n] - X[:, :n]
        Hmat[:, 1:n + 1, 1:n + 1] = torch.bmm(G, G.transpose(1, 2)) + lam * torch.eye(
            n, dtype=x0.dtype, device=x0.device)[None]
        # torch.linalg.solve solves A X = B (replacement for removed torch.solve).
        alpha = torch.linalg.solve(Hmat[:, :n + 1, :n + 1], y[:, :n + 1])[:, 1:n + 1, 0]  # (bsz x n)
        X[:, k % m] = beta * (alpha[:, None] @ F[:, :n])[:, 0] + (1 - beta) * (alpha[:, None] @ X[:, :n])[:, 0]
        F[:, k % m] = f(X[:, k % m].reshape(x0.shape)).reshape(bsz, -1)
        res = (F[:, k % m] - X[:, k % m]).norm().item() / (1e-5 + F[:, k % m].norm().item())
        if res < tol:
            break
    return X[:, current_k % m].view_as(x0), res
def L2Norm(x):
    """Per-example squared L2 norm over dims 1..3, keeping those dims."""
    return (x ** 2).sum(dim=[1, 2, 3], keepdim=True)
def epsilon2(f, x0, max_iter=50, tol=1e-2, lam=1e-4):
    """Accelerated fixed-point iteration via a vector Aitken/epsilon-style
    extrapolation.

    Each step combines f(x) with the first and second differences of the
    iterates; `lam` guards the division when the second difference vanishes.

    Returns:
        (x, residual): the final iterate and the last relative update size.
        NOTE(review): `residual` is undefined if max_iter <= 0.
    """
    x = x0
    for k in range(max_iter):
        f_x = f(x)
        delta_x = f_x - x          # first difference
        delta_f = f(f_x) - f_x     # next first difference
        delta2_x = delta_f - delta_x  # second difference
        x_new = f_x + (delta_f * L2Norm(delta_x) - delta_x * L2Norm(delta_f)) / (L2Norm(delta2_x) + lam)
        residual = (x_new - x).norm().item() / x_new.norm().item()
        x = x_new
        if (residual < tol):
            break
    return x, residual
def forward_iteration(f, x0, max_iter=50, tol=1e-5):
    """Plain fixed-point iteration x <- f(x) with a relative-residual stop.

    Returns the final iterate and the list of relative residuals.
    """
    current = f(x0)
    history = []
    for _ in range(max_iter):
        previous = current
        current = f(previous)
        history.append((current - previous).norm().item() / (1e-7 + current.norm().item()))
        if history[-1] < tol:
            break
    return current, history
def forward_iteration_plot(f, x0, max_iter=50, tol=1e-5):
    """Debug twin of `forward_iteration` with (mostly commented-out) plotting.

    Note: still opens a matplotlib figure and calls plt.show() at the end,
    so this is not suitable for headless runs.
    """
    f0 = f(x0)
    res = []
    fig = plt.figure()
    for k in range(max_iter):
        x = f0
        f0 = f(x)
        res.append((f0 - x).norm().item() / (1e-7 + f0.norm().item()))
        if (res[-1] < tol):
            break
    plt.show()
    return f0, res
class DEQFixedPoint(nn.Module):
    """Deep-equilibrium layer: solve z = f(z, x) and differentiate implicitly.

    The forward fixed point is found with `solver` under torch.no_grad();
    one extra f call re-attaches the result to the autograd graph, and a
    backward hook uses the same solver for the implicit-function-theorem
    linear system. Pattern follows
    http://implicit-layers-tutorial.org/deep_equilibrium_models/.
    """
    def __init__(self, f, solver, **kwargs):
        """f: iteration map f(z, x); solver: fixed-point routine such as
        `anderson` or `forward_iteration`; kwargs are forwarded to it."""
        super().__init__()
        self.f = f
        self.solver = solver
        self.kwargs = kwargs
    def forward(self, x, initial_point = None):
        """Return the equilibrium of z = f(z, x); gradients flow implicitly."""
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # compute forward pass and re-engage autograd tape
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda z: self.f(z, x), init_point, **self.kwargs)
        z = self.f(z, x)
        # set up Jacobian vector product (without additional forward calls)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)
        def backward_hook(grad):
            # Solve g = J^T g + grad at the equilibrium with the same solver.
            g, self.backward_res = self.solver(lambda y: torch.autograd.grad(f0, z0, y, retain_graph=True)[0] + grad,
                                               grad, **self.kwargs)
            return g
        z.register_hook(backward_hook)
        return z
class DEQFixedPointExp(nn.Module):
    """Exact duplicate of DEQFixedPoint (kept, presumably, for separate
    experiment configurations — consider consolidating)."""
    def __init__(self, f, solver, **kwargs):
        """f: iteration map f(z, x); solver: fixed-point routine; kwargs
        forwarded to the solver."""
        super().__init__()
        self.f = f
        self.solver = solver
        self.kwargs = kwargs
    def forward(self, x, initial_point = None):
        """Return the equilibrium of z = f(z, x); gradients flow implicitly."""
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # compute forward pass and re-engage autograd tape
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda z: self.f(z, x), init_point, **self.kwargs)
        z = self.f(z, x)
        # set up Jacobian vector product (without additional forward calls)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)
        def backward_hook(grad):
            # Solve g = J^T g + grad at the equilibrium with the same solver.
            g, self.backward_res = self.solver(lambda y: torch.autograd.grad(f0, z0, y, retain_graph=True)[0] + grad,
                                               grad, **self.kwargs)
            return g
        z.register_hook(backward_hook)
        return z
class DEQFixedPointTest(nn.Module):
    """Inference-only DEQ layer: runs the fixed-point solver under no_grad
    and returns the equilibrium without re-attaching autograd."""
    def __init__(self, f, solver, **kwargs):
        """f: iteration map f(z, x); solver: fixed-point routine; kwargs
        forwarded to the solver."""
        super().__init__()
        self.f = f
        self.solver = solver
        self.kwargs = kwargs
    def forward(self, x, truth = None, initial_point = None):
        """Return the (non-differentiable) equilibrium; `truth` is accepted
        for interface parity with the plotting variants but unused."""
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # compute forward pass only; no gradient re-engagement
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda z: self.f(z, x), init_point, **self.kwargs)
        return z
def neumann_iteration(f, x0, k=10):
    """Truncated Neumann-style series: x0 + f(x0) + f(f(x0)) + ... (k terms)."""
    total = x0
    term = x0
    for _ in range(k):
        term = f(term)
        total = total + term
    return total
class DEQFixedPointNeumann(nn.Module):
    """DEQ layer whose backward pass uses a truncated Neumann series.

    Forward: solve z = f(z, x) with `solver`, always starting from zeros
    (no initial-point argument here). Backward: approximate the implicit
    gradient with `neumann_k` terms of the series instead of a second
    solver run.
    """
    def __init__(self, f, solver, neumann_k, **kwargs):
        """f: iteration map f(z, x); solver: fixed-point routine;
        neumann_k: number of Neumann terms in the backward pass."""
        super().__init__()
        self.f = f
        self.solver = solver
        self.neumann_k = neumann_k
        self.kwargs = kwargs
    def forward(self, x):
        """Return the equilibrium of z = f(z, x) with Neumann-series backward."""
        # compute forward pass and re-engage autograd tape
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda z: self.f(z, x), torch.zeros_like(x), **self.kwargs)
        z = self.f(z, x)
        # set up Jacobian vector product (without additional forward calls)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)
        def backward_hook(grad):
            # Truncated Neumann series of v -> J^T v, seeded with grad.
            g = neumann_iteration(lambda y: torch.autograd.grad(f0, z0, y, retain_graph=True)[0],
                                  grad, self.neumann_k)
            return g
        z.register_hook(backward_hook)
        return z
def get_equilibrium_point(solver, z, max_iterations=50, tolerance = 0.001):
    """Iterate `solver` from z until the relative update falls below tolerance.

    Fixes: the `tolerance` parameter was previously ignored in favor of a
    hard-coded 1e-3 (its default, so behavior is unchanged for default
    callers), and the return value was unbound when max_iterations <= 0.

    Args:
        solver: callable mapping an iterate to the next iterate.
        z: initial iterate (torch tensor).
        max_iterations: hard cap on the number of solver applications.
        tolerance: relative-change stopping threshold.

    Returns:
        (x, x): the final iterate, duplicated (callers unpack two values).
    """
    new_iterate = z  # defined even when max_iterations <= 0
    old_iterate = z
    for iteration in range(max_iterations):
        new_iterate = solver(old_iterate)
        # Relative change, guarded against a zero-norm iterate.
        res = (new_iterate-old_iterate).norm().item() / (1e-5 + new_iterate.norm().item())
        old_iterate = new_iterate
        if res < tolerance:
            break
    return new_iterate, new_iterate
def get_equilibrium_point_plot(solver, z, truth, max_iterations=50, tolerance=0.001):
    """Run a fixed number of solver steps (debug plotting is disabled).

    ``truth`` and ``tolerance`` are accepted only for interface compatibility;
    the per-iteration plotting that once used them is commented out.
    Returns the final iterate twice, matching the historical signature.
    """
    iterate = z
    for _ in range(max_iterations):
        iterate = solver(iterate)
    return iterate, iterate
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,663
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/training/new_equilibrium_training.py
|
import torch
import numpy as np
from solvers import new_equilibrium_utils as eq_utils
from torch import autograd
def _save_training_checkpoint(single_iterate_solver, optimizer, scheduler, epoch,
                              save_location, use_dataparallel):
    """Write solver/optimizer/scheduler state to ``save_location``.

    Unwraps ``.module`` when the solver is wrapped in ``nn.DataParallel`` so
    the checkpoint can be reloaded without DataParallel.
    """
    net = single_iterate_solver.module if use_dataparallel else single_iterate_solver
    torch.save({'solver_state_dict': net.state_dict(),
                'epoch': epoch,
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict()
                }, save_location)


def train_solver(single_iterate_solver, train_dataloader, test_dataloader,
                 measurement_process, optimizer,
                 save_location, loss_function, n_epochs, forward_iterator, iterator_kwargs,
                 use_dataparallel=False, device='cpu', scheduler=None,
                 print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """Train a deep-equilibrium solver with an Anderson-accelerated forward solve.

    Checkpoints are written every ``save_every_n_epochs`` epochs and again at
    the end of each epoch.  ``test_dataloader`` is accepted but unused here.

    NOTE(review): the checkpoint always calls ``scheduler.state_dict()``, so a
    ``None`` scheduler raises at the first save (unchanged from the original).
    """
    # The passed-in iterator is deliberately overridden: Anderson acceleration
    # is always used.  The parameter is kept for interface compatibility.
    forward_iterator = eq_utils.anderson
    # Bug fix: ``iterator_kwargs`` is a dict of solver options and must be
    # unpacked into keyword arguments.  Passing it positionally made it a
    # third positional argument to DEQFixedPoint(f, solver, **kwargs), which
    # raised a TypeError (see the keyword-style call sites elsewhere in repo).
    deep_eq_module = eq_utils.DEQFixedPoint(single_iterate_solver, forward_iterator,
                                            **iterator_kwargs)
    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_training_checkpoint(single_iterate_solver, optimizer, scheduler,
                                      epoch, save_location, use_dataparallel)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            # Simulate measurements, then reconstruct via the equilibrium model.
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        # End-of-epoch checkpoint (written every epoch).
        _save_training_checkpoint(single_iterate_solver, optimizer, scheduler,
                                  epoch, save_location, use_dataparallel)
def train_solver_noanderson(single_iterate_solver, train_dataloader, test_dataloader,
                            measurement_process, optimizer,
                            save_location, loss_function, n_epochs,
                            use_dataparallel=False, device='cpu', scheduler=None,
                            print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0, max_iters=100):
    """Train a deep-equilibrium solver using plain forward (Picard) iteration.

    Same loop as ``train_solver`` but the fixed point is computed with
    ``eq_utils.forward_iteration`` rather than Anderson acceleration.
    Checkpoints are written to ``save_location`` every ``save_every_n_epochs``
    epochs and again after each epoch.  ``test_dataloader`` is unused.

    NOTE(review): the checkpoint dict always calls ``scheduler.state_dict()``,
    so passing ``scheduler=None`` raises at the first save.
    """
    forward_iterator = eq_utils.forward_iteration
    deep_eq_module = eq_utils.DEQFixedPoint(single_iterate_solver, solver=forward_iterator,
                                            max_iter=max_iters, tol=1e-3)
    for epoch in range(start_epoch, n_epochs):
        # We are lucky to have
        if epoch % save_every_n_epochs == 0:
            # Periodic checkpoint; unwrap .module when wrapped in DataParallel.
            if use_dataparallel:
                torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
            else:
                torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            # Simulate measurements from the clean batch, then reconstruct.
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        # End-of-epoch checkpoint (written every epoch).
        if use_dataparallel:
            torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
        else:
            torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
def train_solver_mnist(single_iterate_solver, train_dataloader, test_dataloader,
                       measurement_process, optimizer,
                       save_location, loss_function, n_epochs,
                       use_dataparallel=False, device='cpu', scheduler=None,
                       print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0, max_iters=100):
    """Train a DEQ solver on batches that arrive as (data, label) tuples.

    Uses an Anderson-accelerated forward solve and a truncated Neumann series
    (``neumann_k=100``) for the backward pass.  Checkpoints are written every
    ``save_every_n_epochs`` epochs and after each epoch.  ``test_dataloader``
    is currently unused.
    """
    # NOTE(review): this iteration schedule is computed but never read below --
    # it looks like a leftover from an unrolled-iteration variant of this trainer.
    n_iterations = [5]*n_epochs
    for ee in range(n_epochs):
        if ee >= 20:
            n_iterations[ee] = 5
        if ee >= 23:
            n_iterations[ee] = 7
        if ee >= 28:
            n_iterations[ee] = 9
        if ee >= 38:
            n_iterations[ee] = 11
        if ee >= 44:
            n_iterations[ee] = 13
        if ee >= 50:
            n_iterations[ee] = 20
        if ee >= 58:
            n_iterations[ee] = 30
    forward_iterator = eq_utils.anderson
    deep_eq_module = eq_utils.DEQFixedPointNeumann(single_iterate_solver, neumann_k=100, solver=forward_iterator,
                                                   m=5, lam=1e-4, max_iter=max_iters, tol=1e-3, beta=1.5)
    for epoch in range(start_epoch, n_epochs):
        # We are lucky to have
        if epoch % save_every_n_epochs == 0:
            # Periodic checkpoint; unwrap .module when wrapped in DataParallel.
            if use_dataparallel:
                torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
            else:
                torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            # Only the first tuple element is used (presumably the image;
            # labels are discarded -- confirm against the dataloader).
            sample_batch = sample_batch[0].to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        # End-of-epoch checkpoint (written every epoch).
        if use_dataparallel:
            torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
        else:
            torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
#####################TEST##########################
# loss_accumulator = []
# mse_loss = torch.nn.MSELoss()
# for ii, sample_batch in enumerate(test_dataloader):
# sample_batch = sample_batch.to(device=device)
# y = measurement_process(sample_batch)
# initial_point = y
# reconstruction = solver(initial_point, iterations=6)
#
# reconstruction = torch.clamp(reconstruction, -1 ,1)
#
# loss = mse_loss(reconstruction, sample_batch)
# loss_logger = loss.cpu().detach().numpy()
# loss_accumulator.append(loss_logger)
#
# loss_array = np.asarray(loss_accumulator)
# loss_mse = np.mean(loss_array)
# PSNR = -10 * np.log10(loss_mse)
# percentiles = np.percentile(loss_array, [25,50,75])
# percentiles = -10.0*np.log10(percentiles)
# print("TEST LOSS: " + str(sum(loss_accumulator) / len(loss_accumulator)), flush=True)
# print("MEAN TEST PSNR: " + str(PSNR), flush=True)
# print("TEST PSNR QUARTILES AND MEDIAN: " + str(percentiles[0]) +
# ", " + str(percentiles[1]) + ", " + str(percentiles[2]), flush=True)
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,664
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/utils/testing_utils.py
|
from PIL import Image
import torch
import matplotlib.pyplot as plt
import numpy as np
import imageio
from PIL import Image
def save_tensor_as_color_img(img_tensor, filename):
    """Write a tensor to ``filename`` as an image.

    Bug fix: the original called ``imageio.save``, which is the legacy writer
    factory and does not accept image data, so the call failed.  The other
    savers in this module use ``imageio.imwrite``; this now matches them.

    NOTE(review): assumes ``img_tensor`` is already in an HxW(xC) layout and
    dtype that imageio accepts -- confirm against callers.
    """
    np_array = img_tensor.cpu().detach().numpy()
    imageio.imwrite(filename, np_array)
def save_batch_as_color_imgs(tensor_batch, batch_size, ii, folder_name, names):
    """Save each image of an NCHW batch in [-1, 1] as an upscaled PNG.

    Files are named ``<folder_name><global_index>_<name>.png`` where the
    global index is ``batch_size * ii + kk`` for position ``kk`` in the batch.
    """
    # Map NCHW values in [-1, 1] to NHWC uint8 values in [0, 255].
    pixels = np.transpose(tensor_batch.cpu().detach().numpy(), (0, 2, 3, 1))
    pixels = ((np.clip(pixels, -1, 1) + 1.0) * 127.5).astype(np.uint8)
    for kk in range(batch_size):
        upscaled = Image.fromarray(pixels[kk, ...]).resize((512, 512), resample=Image.NEAREST)
        filename = folder_name + str(batch_size * ii + kk) + "_" + str(names[kk]) + ".png"
        imageio.imwrite(filename, upscaled)
def save_mri_as_imgs(tensor_batch, batch_size, ii, folder_name, names):
    """Write per-sample channel-magnitude images of a batch as PNGs.

    The channel dimension is collapsed with an L2 norm, every image is
    independently min/max rescaled to [0, 1], upscaled to 512x512 with
    nearest-neighbour interpolation, and saved as
    ``<folder_name><global_index>_<name>.png``.
    """
    # Collapse the channel axis into a per-pixel magnitude image.
    magnitudes = torch.norm(tensor_batch, dim=1)
    # Min/max-normalize each image of the batch independently (in place).
    for bb in range(magnitudes.shape[0]):
        flat = torch.flatten(magnitudes[bb, ...], start_dim=0)
        lo = torch.min(flat)
        span = torch.max(flat - lo)
        magnitudes[bb, ...] = (magnitudes[bb, ...] - lo) / span
    img_array = magnitudes.cpu().detach().numpy()
    for kk in range(batch_size):
        img_number = batch_size * ii + kk
        as_uint8 = (img_array[kk, ...] * 255.0).astype(np.uint8)
        png = Image.fromarray(as_uint8).resize((512, 512), resample=Image.NEAREST)
        filename = folder_name + str(img_number) + "_" + str(names[kk]) + ".png"
        imageio.imwrite(filename, png, format="PNG-PIL")
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,665
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/solvers/broyd_equilibrium_utils.py
|
import torch.nn as nn
import torch
import matplotlib
#matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import imageio
import numpy as np
from PIL import Image
def _safe_norm(v):
if not torch.isfinite(v).all():
return np.inf
return torch.norm(v)
def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
    """Backtracking line search satisfying the Armijo sufficient-decrease condition.

    Adapted from scipy's line-search code.  ``phi`` is the 1-D merit function,
    ``phi0 = phi(0)`` and ``derphi0`` its derivative at 0; ``c1`` is the
    sufficient-decrease constant and ``amin`` the smallest step considered.

    Returns ``(alpha, phi(alpha), n_extra_evals)``; ``alpha`` is ``None`` when
    no acceptable step was found.
    """
    ite = 0
    phi_a0 = phi(alpha0)  # First do an update with step size 1
    if phi_a0 <= phi0 + c1 * alpha0 * derphi0:
        return alpha0, phi_a0, ite
    # Otherwise, compute the minimizer of a quadratic interpolant
    alpha1 = -(derphi0) * alpha0 ** 2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
    phi_a1 = phi(alpha1)
    # Otherwise loop with cubic interpolation until we find an alpha which
    # satisfies the first Wolfe condition (since we are backtracking, we will
    # assume that the value of alpha is not too small and satisfies the second
    # condition.
    while alpha1 > amin:  # we are assuming alpha>0 is a descent direction
        # Coefficients of the cubic interpolant through the last two trials.
        factor = alpha0 ** 2 * alpha1 ** 2 * (alpha1 - alpha0)
        a = alpha0 ** 2 * (phi_a1 - phi0 - derphi0 * alpha1) - \
            alpha1 ** 2 * (phi_a0 - phi0 - derphi0 * alpha0)
        a = a / factor
        b = -alpha0 ** 3 * (phi_a1 - phi0 - derphi0 * alpha1) + \
            alpha1 ** 3 * (phi_a0 - phi0 - derphi0 * alpha0)
        b = b / factor
        alpha2 = (-b + torch.sqrt(torch.abs(b ** 2 - 3 * a * derphi0))) / (3.0 * a)
        phi_a2 = phi(alpha2)
        ite += 1
        if (phi_a2 <= phi0 + c1 * alpha2 * derphi0):
            return alpha2, phi_a2, ite
        # Guard against steps that shrink too slowly or barely change.
        if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2 / alpha1) < 0.96:
            alpha2 = alpha1 / 2.0
        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = phi_a2
    # Failed to find a suitable step length
    return None, phi_a1, ite
def line_search(update, x0, g0, g, nstep=0, on=True):
    """
    `update` is the proposed direction of update.

    Performs an (optional) Armijo line search along ``update`` starting at
    ``x0``, where ``g`` is the residual function and ``g0 = g(x0)``.  Returns
    ``(x_new, g(x_new), x_new - x0, g_new - g0, n_evals)``.  When ``on`` is
    False, or the search fails, a full step ``s = 1`` is taken.

    Code adapted from scipy.
    """
    # Single-element lists act as mutable cells shared with phi() below,
    # caching the most recently evaluated step so it is not recomputed.
    tmp_s = [0]
    tmp_g0 = [g0]
    tmp_phi = [torch.norm(g0) ** 2]
    # NOTE(review): s_norm is computed but never used afterwards.
    s_norm = torch.norm(x0) / torch.norm(update)
    def phi(s, store=True):
        # Merit function ||g(x0 + s*update)||^2 with last-step caching.
        if s == tmp_s[0]:
            return tmp_phi[0]  # If the step size is so small... just return something
        x_est = x0 + s * update
        g0_new = g(x_est)
        phi_new = _safe_norm(g0_new) ** 2
        if store:
            tmp_s[0] = s
            tmp_g0[0] = g0_new
            tmp_phi[0] = phi_new
        return phi_new
    if on:
        s, phi1, ite = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], amin=1e-2)
    if (not on) or s is None:
        s = 1.0
        ite = 0
    x_est = x0 + s * update
    # Reuse the cached residual when phi() already evaluated this exact step.
    if s == tmp_s[0]:
        g0_new = tmp_g0[0]
    else:
        g0_new = g(x_est)
    return x_est, g0_new, x_est - x0, g0_new - g0, ite
def rmatvec(part_Us, part_VTs, x):
    """Apply x^T (-I + U V^T), the transposed low-rank Broyden Jacobian estimate.

    Shapes: x (N, 2d, L'), part_Us (N, 2d, L', k), part_VTs (N, k, 2d, L').
    """
    if part_Us.nelement() == 0:
        # No secant updates stored yet: the estimate is just -I.
        return -x
    xTU = torch.einsum('bij, bijd -> bd', x, part_Us)
    return torch.einsum('bd, bdij -> bij', xTU, part_VTs) - x
def matvec(part_Us, part_VTs, x):
    """Apply (-I + U V^T) x, the low-rank Broyden Jacobian estimate.

    Shapes: x (N, 2d, L'), part_Us (N, 2d, L', k), part_VTs (N, k, 2d, L').
    """
    if part_Us.nelement() == 0:
        # No secant updates stored yet: the estimate is just -I.
        return -x
    VTx = torch.einsum('bdij, bij -> bd', part_VTs, x)
    return torch.einsum('bijd, bd -> bij', part_Us, VTx) - x
def broyden(g, x0, threshold=9, eps=1e-5, ls=False):
    """Root-find ``g`` with a limited-memory Broyden method.

    The inverse Jacobian is approximated as -I + U V^T and refreshed from the
    secant pairs of the most recent steps (at most ``LBFGS_thres`` of them).
    ``threshold`` bounds the outer iterations, ``eps`` is the residual-norm
    target and ``ls`` toggles the Armijo line search.  Returns
    ``(best_x, best_residual_norm)`` with ``best_x`` reshaped to ``x0``'s
    original shape.
    """
    x0_shape = x0.shape
    # Work on flattened iterates of shape (batch, n, 1).
    x0 = x0.reshape((x0.shape[0], -1, 1))
    bsz, total_hsize, n_elem = x0.size()
    dev = x0.device
    x_est = x0  # (bsz, 2d, L')
    gx = g(x_est)  # (bsz, 2d, L')
    nstep = 0
    tnstep = 0
    LBFGS_thres = min(threshold, 27)
    # For fast calculation of inv_jacobian (approximately)
    Us = torch.zeros(bsz, total_hsize, n_elem, LBFGS_thres).to(dev)
    VTs = torch.zeros(bsz, LBFGS_thres, total_hsize, n_elem).to(dev)
    update = gx
    new_objective = init_objective = torch.norm(gx).item()
    prot_break = False
    trace = [init_objective]
    new_trace = [-1]
    # To be used in protective breaks
    protect_thres = 1e6 * n_elem
    lowest = new_objective
    lowest_xest, lowest_gx, lowest_step = x_est, gx, nstep
    while new_objective >= eps and nstep < threshold:
        x_est, gx, delta_x, delta_gx, ite = line_search(update, x_est, gx, g, nstep=nstep, on=ls)
        nstep += 1
        tnstep += (ite + 1)
        new_objective = torch.norm(gx).item()
        trace.append(new_objective)
        try:
            new2_objective = torch.norm(delta_x).item() / (torch.norm(x_est - delta_x).item())  # Relative residual
        except:
            new2_objective = torch.norm(delta_x).item() / (torch.norm(x_est - delta_x).item() + 1e-9)
        new_trace.append(new2_objective)
        # Track the best (lowest-residual) iterate seen so far.
        if new_objective < lowest:
            lowest_xest, lowest_gx = x_est.clone().detach(), gx.clone().detach()
            lowest = new_objective
            lowest_step = nstep
        if new_objective < eps:
            # print(nstep)
            break
        if new_objective < 3 * eps and nstep > 30 and np.max(trace[-30:]) / np.min(trace[-30:]) < 1.3:
            # if there's hardly been any progress in the last 30 steps
            # print(nstep)
            break
        if new_objective > init_objective * protect_thres:
            # Residual blew up far past the starting point: bail out.
            # prot_break = True
            # print(nstep)
            break
        # Rank-one secant update of the inverse-Jacobian factors (U, V^T).
        part_Us, part_VTs = Us[:, :, :, :(nstep - 1)], VTs[:, :(nstep - 1)]
        vT = rmatvec(part_Us, part_VTs, delta_x)
        u = (delta_x - matvec(part_Us, part_VTs, delta_gx)) / torch.einsum('bij, bij -> b', vT, delta_gx)[:, None, None]
        # Zero out NaNs produced by degenerate secant pairs.
        vT[vT != vT] = 0
        u[u != u] = 0
        VTs[:, (nstep - 1) % LBFGS_thres] = vT
        Us[:, :, :, (nstep - 1) % LBFGS_thres] = u
        update = -matvec(Us[:, :, :, :nstep], VTs[:, :nstep], gx)
    # Drop references to the factor buffers before returning.
    Us, VTs = None, None
    lowest_xest = lowest_xest.reshape(x0_shape)
    return lowest_xest, torch.norm(lowest_gx).item()
    # return {"result": lowest_xest,
    #         "nstep": nstep,
    #         "tnstep": tnstep,
    #         "lowest_step": lowest_step,
    #         "diff": torch.norm(lowest_gx).item(),
    #         "diff_detail": torch.norm(lowest_gx, dim=1),
    #         "prot_break": prot_break,
    #         "trace": trace,
    #         "new_trace": new_trace,
    #         "eps": eps,
    #         "threshold": threshold}
def L2Norm(x):
    """Squared L2 norm over all non-batch axes, kept as shape (N, 1, 1, 1)."""
    return torch.sum(x ** 2, dim=[1, 2, 3], keepdim=True)


def epsilon2(f, x0, max_iter=50, tol=1e-2, lam=1e-4):
    """Accelerated fixed-point iteration (regularized vector-epsilon step).

    Each step applies ``f`` twice and extrapolates from the two displacements;
    ``lam`` regularizes the denominator.  Stops when the relative update drops
    below ``tol``.  Returns ``(iterate, last_relative_residual)``.
    """
    x = x0
    for _ in range(max_iter):
        fx = f(x)
        d1 = fx - x          # first displacement
        d2 = f(fx) - fx      # second displacement
        dd = d2 - d1
        accelerated = fx + (d2 * L2Norm(d1) - d1 * L2Norm(d2)) / (L2Norm(dd) + lam)
        residual = (accelerated - x).norm().item() / accelerated.norm().item()
        x = accelerated
        if (residual < tol):
            break
    return x, residual
def forward_iteration(f, x0, max_iter=50, tol=1e-5):
    """Plain Picard iteration of ``f`` from ``x0``.

    Returns ``(last_iterate, residual_history)`` where each residual is the
    relative change between consecutive iterates; stops once it falls below
    ``tol`` or after ``max_iter`` steps.
    """
    current = f(x0)
    history = []
    for _ in range(max_iter):
        previous = current
        current = f(previous)
        history.append((current - previous).norm().item() / (1e-7 + current.norm().item()))
        if (history[-1] < tol):
            break
    return current, history
def forward_iteration_plot(f, x0, max_iter=50, tol=1e-5):
    """Same as :func:`forward_iteration`, with (currently disabled) plotting.

    A matplotlib figure is still created and shown; the per-iteration imshow
    calls are commented out.
    """
    current = f(x0)
    history = []
    fig = plt.figure()  # kept: the original opens (and later shows) a figure
    for _ in range(max_iter):
        previous = current
        current = f(previous)
        history.append((current - previous).norm().item() / (1e-7 + current.norm().item()))
        if (history[-1] < tol):
            break
    plt.show()
    return current, history
class DEQFixedPoint(nn.Module):
    """DEQ layer: Broyden forward solve plus Broyden-based implicit backward.

    ``f(z, x)`` is the update map; the fixed point z* = f(z*, x) is found with
    :func:`broyden`, and gradients are obtained by solving the linear system
    of the implicit function theorem, also via Broyden's method.
    """

    def __init__(self, f, **kwargs):
        super().__init__()
        self.f = f
        # kwargs must contain 'max_iter' (forward Broyden iteration budget).
        self.kwargs = kwargs

    def broyd_output_test(self, z, x, y_shape, input_shape):
        """Residual g(z) = f(z, x) - z, flattened to (N, D, 1) for broyden."""
        reshaped_x = torch.reshape(input=x, shape=y_shape)
        reshaped_z = torch.reshape(input=z, shape=input_shape)
        output = self.f(reshaped_z, reshaped_x) - reshaped_z
        return torch.reshape(output, (output.shape[0], -1)).unsqueeze(-1)

    def internal_g(self, z, x):
        """Fixed-point residual in the original (unflattened) shape."""
        return self.f(z, x) - z

    def forward(self, x, truth=None, initial_point=None):
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # Bug fix: read the shape from ``init_point`` (never None).  The
        # original read ``initial_point.shape`` and raised AttributeError
        # whenever the default (None) initial point was used.
        initial_point_shape = init_point.shape
        g = lambda z: self.broyd_output_test(z, x, x.shape, initial_point_shape)
        # Forward solve outside the tape; gradients come from the hook below.
        with torch.no_grad():
            output_x, self.forward_res = broyden(g, init_point,
                                                 threshold=self.kwargs['max_iter'], eps=1e-8)
        z = self.f(output_x, x)
        # One differentiable application so grad can flow into f's parameters.
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)

        def backward_hook(grad):
            # Solve the implicit linear system for the incoming grad, phrased
            # as a root-finding problem for broyden.
            def internal_function(y):
                input_shape = y.shape
                y = y.reshape(grad.shape)
                broyden_function = grad + torch.autograd.grad(f0, z0, y, retain_graph=True)[0]
                return (broyden_function - y).reshape(input_shape)
            result = broyden(internal_function, grad, threshold=10, eps=1e-7)
            return result[0]

        z.register_hook(backward_hook)
        return z
class DEQFixedPointSimple(nn.Module):
    """Broyden forward solve only -- no implicit backward hook is attached.

    Gradients flow solely through the single differentiable application of
    ``f`` at the computed fixed point.
    """

    def __init__(self, f, **kwargs):
        super().__init__()
        self.f = f
        # kwargs must contain 'max_iter' (forward Broyden iteration budget).
        self.kwargs = kwargs

    def broyd_output_test(self, z, x, y_shape, input_shape):
        """Residual f(z, x) - z, flattened to (N, D, 1) for broyden."""
        reshaped_x = torch.reshape(input=x, shape=y_shape)
        reshaped_z = torch.reshape(input=z, shape=input_shape)
        output = self.f(reshaped_z, reshaped_x) - reshaped_z
        return torch.reshape(output, (output.shape[0], -1)).unsqueeze(-1)

    def internal_g(self, z, x):
        """Fixed-point residual in the original (unflattened) shape."""
        return self.f(z, x) - z

    def forward(self, x, truth=None, initial_point=None):
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # Bug fix: use ``init_point`` (never None) for the shape; the original
        # read ``initial_point.shape`` and crashed when no initial point was
        # supplied.
        initial_point_shape = init_point.shape
        g = lambda z: self.broyd_output_test(z, x, x.shape, initial_point_shape)
        with torch.no_grad():
            output_x, self.forward_res = broyden(g, init_point,
                                                 threshold=self.kwargs['max_iter'], eps=1e-7)
        # One final (differentiable) application of f at the fixed point.
        return self.f(output_x, x)
class DEQFixedPoint2(nn.Module):
    """Broyden forward solve with a caller-supplied solver for the backward pass.

    Fixes over the original:
    * the backward hook referenced ``self.solver`` which was never assigned,
      so any backward pass raised AttributeError; ``solver`` is now taken from
      the keyword arguments (``solver=...``) and the remaining kwargs are
      forwarded to it;
    * the default (None) initial point no longer crashes on ``.shape``.
    """

    def __init__(self, f, **kwargs):
        super().__init__()
        self.f = f
        # Extract the backward solver; None is kept for callers that never run
        # a backward pass (previously this attribute simply did not exist).
        self.solver = kwargs.pop('solver', None)
        self.kwargs = kwargs

    def broyd_output_test(self, z, x, y_shape, input_shape):
        """Residual f(z, x) - z, flattened to (N, D, 1) for broyden."""
        reshaped_x = torch.reshape(input=x, shape=y_shape)
        reshaped_z = torch.reshape(input=z, shape=input_shape)
        output = self.f(reshaped_z, reshaped_x) - reshaped_z
        return torch.reshape(output, (output.shape[0], -1)).unsqueeze(-1)

    def internal_g(self, z, x):
        """Fixed-point residual in the original (unflattened) shape."""
        return self.f(z, x) - z

    def forward(self, x, truth=None, initial_point=None):
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # Bug fix: read the shape from ``init_point`` so the default (None)
        # initial point no longer raises AttributeError.
        initial_point_shape = init_point.shape
        g = lambda z: self.broyd_output_test(z, x, x.shape, initial_point_shape)
        with torch.no_grad():
            output_x, self.forward_res = broyden(g, init_point, threshold=100, eps=1e-7)
        z = self.f(output_x, x)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)

        def backward_hook(grad):
            # Implicit-gradient solve via the user-supplied fixed-point solver.
            g_bwd, self.backward_res = self.solver(
                lambda y: torch.autograd.grad(f0, z0, y, retain_graph=True)[0] + grad,
                grad, **self.kwargs)
            return g_bwd

        z.register_hook(backward_hook)
        return z
class DEQFixedPointTest(nn.Module):
    """Forward-only Broyden fixed-point solve (no gradient hook).

    Bug fix: the unflattened shape is now captured from ``init_point`` and
    *before* the flattening reshape -- the original read
    ``initial_point.shape`` after reshaping, which raised AttributeError
    whenever the default (None) initial point was used.
    """

    def __init__(self, f, solver, **kwargs):
        super().__init__()
        self.f = f
        self.solver = solver  # NOTE(review): unused; kept for signature compatibility
        self.kwargs = kwargs

    def broyd_output_test(self, z, x, y_shape, input_shape):
        """Residual f(z, x) - z, flattened to (N, D, 1) for broyden."""
        reshaped_x = torch.reshape(input=x, shape=y_shape)
        reshaped_z = torch.reshape(input=z, shape=input_shape)
        output = self.f(reshaped_z, reshaped_x) - reshaped_z
        return torch.reshape(output, (output.shape[0], -1)).unsqueeze(-1)

    def forward(self, x, truth=None, initial_point=None):
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # Remember the original shape, then flatten for the Broyden solver.
        initial_point_shape = init_point.shape
        init_point = torch.reshape(init_point, (init_point.shape[0], -1, 1))
        g = lambda z: self.broyd_output_test(z, x, x.shape, initial_point_shape)
        with torch.no_grad():
            output_x, self.forward_res = broyden(g, init_point, threshold=50, eps=1e-7)
        # Restore the caller-facing shape before returning.
        return torch.reshape(output_x, initial_point_shape)
def neumann_iteration(f, x0, k=10):
    """Partial Neumann-series sum: x0 plus k successive applications of ``f``."""
    running_sum = x0
    power = x0
    for _ in range(k):
        power = f(power)
        running_sum = running_sum + power
    return running_sum
class DEQFixedPointNeumann(nn.Module):
    """DEQ layer whose backward pass uses a truncated Neumann series.

    The forward fixed point z* = f(z*, x) is computed with ``solver``; the
    vector-Jacobian product of the implicit function theorem is then
    approximated by ``neumann_k`` terms of a Neumann series instead of a
    second solver run.
    """
    def __init__(self, f, solver, neumann_k, **kwargs):
        super().__init__()
        self.f = f                  # update map z -> f(z, x)
        self.solver = solver        # forward fixed-point solver
        self.neumann_k = neumann_k  # number of Neumann-series terms in backward
        self.kwargs = kwargs        # extra options forwarded to the solver
    def forward(self, x):
        # compute forward pass and re-engage autograd tape
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda z: self.f(z, x), torch.zeros_like(x), **self.kwargs)
        # One differentiable application so gradients reach f's parameters.
        z = self.f(z, x)
        # set up Jacobian vector product (without additional forward calls)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)
        def backward_hook(grad):
            # Approximates grad applied to (I - J_f)^{-1} via the series above.
            g = neumann_iteration(lambda y: torch.autograd.grad(f0, z0, y, retain_graph=True)[0],
                                  grad, self.neumann_k)
            return g
        z.register_hook(backward_hook)
        return z
def get_equilibrium_point(solver, z, max_iterations=50, tolerance=0.001):
    """Iterate ``solver`` from ``z`` until convergence or ``max_iterations``.

    Fix: ``tolerance`` was previously accepted but ignored (the loop always
    ran the full ``max_iterations``); iteration now stops early once the
    relative update falls below ``tolerance``, matching the companion
    implementation in new_equilibrium_utils.  Returns the final iterate twice
    (historical tuple interface).
    """
    current = z
    for _ in range(max_iterations):
        nxt = solver(current)
        # Relative change, guarded against a zero-norm iterate.
        res = (nxt - current).norm().item() / (1e-5 + nxt.norm().item())
        current = nxt
        if res < tolerance:
            break
    return current, current
def get_equilibrium_point_plot(solver, z, truth, max_iterations=50, tolerance=0.001):
    """Apply ``solver`` ``max_iterations`` times and return the result twice.

    ``truth`` and ``tolerance`` are unused; the diagnostic plotting that once
    consumed them remains disabled.
    """
    x = z
    for _ in range(max_iterations):
        x = solver(x)
    return x, x
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,666
|
wwhappylife/deep_equilibrium_inverse
|
refs/heads/main
|
/scripts/denoising/mri_dncnn_denoise.py
|
import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
import operators.operator as lin_operator
from operators.operator import OperatorPlusNoise
from utils.fastmri_dataloader import singleCoilFastMRIDataloader
from networks.normalized_cnn_2 import DnCNN
from solvers.equilibrium_solvers import EquilibriumGrad
from training import denoiser_training
from solvers import new_equilibrium_utils as eq_utils
# ---- Command-line configuration for DnCNN denoiser training on fastMRI data ----
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--and_maxiters', default=100)
parser.add_argument('--and_beta', type=float, default=1.0)
parser.add_argument('--and_m', type=int, default=5)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.5)
parser.add_argument('--lr_gamma', type=float, default=0.1)
parser.add_argument('--sched_step', type=int, default=10)
parser.add_argument('--acceleration', type=float, default=8.0)
parser.add_argument('--noise_sigma', type=float, default=0.01)
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0
# n_channels = 2: two input/output channels for the network -- presumably the
# real/imaginary parts of the complex MRI image; confirm against the dataloader.
batch_size = int(args.batch_size)
n_channels = 2
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 10
save_every_n_epochs = 5
initial_eta = float(args.etainit)
dataheight = 320
datawidth = 320
noise_sigma = float(args.noise_sigma)
# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
# Checkpoints are saved to and (if present) resumed from the same path.
save_location = args.savepath
load_location = args.savepath
# Probe device indices 0..5 and collect the ones that exist.
# NOTE(review): relies on torch.cuda.get_device_properties raising
# AssertionError for invalid indices -- behavior of the torch version in use.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# DataParallel is enabled only when more than one GPU was found.
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
data_location = "/share/data/vision-greg2/users/gilton/singlecoil_curated_clean/"
trainset_size = 2000
total_data = 2194
# Fixed seed so the train split is reproducible across runs.
random.seed(10)
# NOTE(review): all_indices is computed but never used below.
all_indices = list(range(trainset_size))
train_indices = random.sample(range(total_data), k=trainset_size)
dataset = singleCoilFastMRIDataloader(data_location, data_indices=train_indices)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
)
### Set up solver and problem setting
# Denoising setup: the forward operator is the identity plus Gaussian noise.
forward_operator = lin_operator.Identity().to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
solver = DnCNN(in_channels=n_channels, out_channels=n_channels, internal_channels=64,
               num_of_layers=17, lip=1.0)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()
# Resume from an existing checkpoint at the save path, if one exists.
if os.path.exists(load_location):
    if not cpu_only:
        saved_dict = torch.load(load_location)
    else:
        saved_dict = torch.load(load_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])
# set up loss and train
lossfunction = torch.nn.MSELoss()
# forward_iterator = eq_utils.anderson
# deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, m=anderson_m, beta=anderson_beta, lam=1e-6,
#                                         max_iter=max_iters, tol=1e-8)
# NOTE(review): deep_eq_module is built here but not passed to
# train_denoiser below -- it appears to be unused in this script.
forward_iterator = eq_utils.forward_iteration
deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, max_iter=100, tol=1e-8)
# Do train
denoiser_training.train_denoiser(denoising_net=solver, train_dataloader=dataloader, test_dataloader=dataloader,
                                 measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
                                 loss_function=lossfunction, n_epochs=n_epochs,
                                 use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
                                 print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
                                 start_epoch=start_epoch)
|
{"/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py": ["/solvers/equilibrium_solvers.py"], "/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py": ["/operators/singlecoil_mri.py", "/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_unet_denoise.py": ["/solvers/equilibrium_solvers.py"], "/scripts/denoising/mri_dncnn_denoise.py": ["/solvers/equilibrium_solvers.py"]}
|
4,686
|
JefferyQ/boltkit
|
refs/heads/master
|
/boltkit/server/stub.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Server components, including stub server and proxy server.
"""
from itertools import chain
from json import dumps as json_dumps
from logging import getLogger
from select import select
from socket import socket, SOL_SOCKET, SO_REUSEADDR, SHUT_RDWR
from struct import pack as raw_pack, unpack_from as raw_unpack
from threading import Thread
from boltkit.addressing import Address
from boltkit.server.bytetools import h
from boltkit.client import CLIENT, SERVER, BOLT
from boltkit.client.packstream import UINT_16, INT_32, Structure, pack, unpack
from boltkit.server.scripting import Script, ExitCommand
# Process exit codes used by the stub server.
EXIT_OK = 0
EXIT_OFF_SCRIPT = 1   # client deviated from the script
EXIT_TIMEOUT = 2      # no incoming activity within the timeout
EXIT_UNKNOWN = 99     # any unexpected error
log = getLogger("boltkit")
# Server agent string announced for each supported Bolt protocol version.
server_agents = {
    1: "Neo4j/3.0.0",
    2: "Neo4j/3.4.0",
    3: "Neo4j/3.5.0",
    4: "Neo4j/4.0.0",
}
def message_repr(v, message):
    """ Render *message* as a readable line: the tag's symbolic name for
    Bolt version *v* followed by its fields as JSON.
    """
    tag_table = chain(CLIENT[v].items(), SERVER[v].items())
    name = next(key for key, tag in tag_table if tag == message.tag)
    fields = " ".join(json_dumps(field) for field in message.fields)
    return "%s %s" % (name, fields)
class Peer(object):
    """ Book-keeping for one connected socket: the socket itself, its
    remote address and the Bolt version agreed during the handshake.
    """

    def __init__(self, socket, address):
        # A bolt_version of 0 means the handshake has not completed yet.
        self.bolt_version = 0
        self.socket = socket
        self.address = Address(address)
class StubServer(Thread):
    """ A scripted Bolt server for client integration testing.

    The server plays through a pre-recorded :class:`Script` of
    client/server exchanges; any deviation by the client terminates the
    session with a non-zero exit code.
    """

    # Mapping of socket -> Peer for the listener plus every client.
    peers = None
    # Default empty script; replaced in __init__ when a file is given.
    script = Script()

    def __init__(self, script_name=None, listen_addr=None, timeout=None):
        """ Bind to *listen_addr* (default ":17687"), optionally load the
        script in *script_name*, and wait up to *timeout* seconds for
        activity (None waits indefinitely).
        """
        super(StubServer, self).__init__()
        self.address = listen_addr or Address.parse(":17687")
        self.server = socket()
        # Allow fast rebinding of the port between consecutive runs.
        self.server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.server.bind((self.address.host, self.address.port_number))
        self.server.listen(0)
        log.info("Listening for incoming connections on «%s»", self.address)
        self.peers = {}
        if script_name:
            self.script = Script(script_name)
        self.running = True
        self.timeout = timeout
        self.exit_code = 0

    def run(self):
        """ Main loop: multiplex over all known sockets via select()
        until the script completes, a mismatch occurs, or we time out.
        """
        self.peers[self.server] = Peer(self.server, self.address)
        while self.running:
            try:
                read_list, _, _ = select(list(self.peers), [], [], self.timeout)
                if read_list:
                    for sock in read_list:
                        self.read(sock)
                else:
                    # select() returned with nothing readable: timed out.
                    log.error("Timed out after waiting %rs for an incoming "
                              "connection", self.timeout)
                    raise SystemExit(EXIT_TIMEOUT)
            except SystemExit as e:
                self.exit_code = e.args[0]
                self.running = False
            except:
                # NOTE(review): bare except also traps KeyboardInterrupt;
                # every unexpected error is reported as EXIT_UNKNOWN.
                self.exit_code = EXIT_UNKNOWN
                self.running = False
        self.stop()
        log.info("Exiting with code %r", self.exit_code)

    def stop(self):
        """ Shut down and forget all peer sockets, listener included. """
        if not self.peers:
            return
        peers, self.peers, self.running = list(self.peers.items()), {}, False
        for sock, peer in peers:
            log.debug("~~ <CLOSE> \"%s\" %d", *peer.address)
            try:
                sock.shutdown(SHUT_RDWR)
                sock.close()
            except OSError:
                # The remote end may already have closed the socket.
                pass

    def read(self, sock):
        """ Dispatch one readable socket: accept on the listener,
        complete the handshake for fresh peers, or handle a request for
        peers whose Bolt version is already agreed.
        """
        try:
            if sock == self.server:
                self.accept(sock)
            elif self.peers[sock].bolt_version:
                self.handle_request(sock)
            else:
                self.handshake(sock)
        except (KeyError, OSError):
            # Ignore races during shutdown; re-raise while still running.
            if self.running:
                raise

    def accept(self, sock):
        """ Accept an incoming connection and register the new peer. """
        new_sock, address = sock.accept()
        self.peers[new_sock] = Peer(new_sock, address)
        # listen_address = self.peers[sock].address
        serve_address = self.peers[new_sock].address
        log.info("Accepted incoming connection from «%s»", serve_address)

    def handshake(self, sock):
        """ Perform the Bolt handshake: verify the magic preamble, read
        the four versions proposed by the client, and answer with the
        script's version (which must be among those proposed).
        """
        data = sock.recv(4)
        if data == BOLT:
            log.debug("C: <BOLT>")
        else:
            if data:
                log.error("C: <#?@!>")
            self.stop()
            return
        raw_data = sock.recv(16)
        # Four big-endian 32-bit version proposals, in preference order.
        suggested_version_1, = raw_unpack(INT_32, raw_data, 0)
        suggested_version_2, = raw_unpack(INT_32, raw_data, 4)
        suggested_version_3, = raw_unpack(INT_32, raw_data, 8)
        suggested_version_4, = raw_unpack(INT_32, raw_data, 12)
        client_requested_versions = [suggested_version_1, suggested_version_2, suggested_version_3, suggested_version_4]
        log.debug("C: <VERSION> [0x%08x, 0x%08x, 0x%08x, 0x%08x]" % tuple(client_requested_versions))
        v = self.script.bolt_version
        if v not in client_requested_versions:
            raise RuntimeError("Script protocol version %r not offered by client" % v)
        # only single protocol version is currently supported
        response = raw_pack(INT_32, v)
        log.debug("S: <VERSION> 0x%08x" % v)
        self.peers[sock].bolt_version = v
        sock.send(response)

    def handle_request(self, sock):
        """ Read one chunked client message, match it against the script
        and reply with the scripted (or auto-generated) responses.
        """
        v = self.peers[sock].bolt_version
        chunked_data = b""
        message_data = b""
        chunk_size = -1
        debug = []
        # Accumulate chunks until the zero-length terminator chunk.
        while chunk_size != 0:
            chunk_header = sock.recv(2)
            if len(chunk_header) == 0:
                # Connection closed mid-message.
                self.stop()
                return
            chunked_data += chunk_header
            chunk_size, = raw_unpack(UINT_16, chunk_header)
            if chunk_size > 0:
                chunk = sock.recv(chunk_size)
                chunked_data += chunk
                message_data += chunk
            else:
                chunk = b""
            debug.append(" [%s] %s" % (h(chunk_header), h(chunk)))
        request = unpack(message_data)
        if self.script.match_request(request):
            # explicitly matched
            log.debug("C: %s", message_repr(v, request))
        elif self.script.match_auto_request(request):
            # auto matched
            log.debug("C! %s", message_repr(v, request))
        else:
            # not matched
            if self.script.lines:
                expected = message_repr(v, self.script.lines[0].message)
            else:
                expected = "END OF SCRIPT"
            log.debug("C: %s", message_repr(v, request))
            log.error("Message mismatch (expected <%s>, "
                      "received <%s>)", expected, message_repr(v, request))
            self.stop()
            raise SystemExit(EXIT_OFF_SCRIPT)
        responses = self.script.match_responses()
        if not responses and self.script.match_auto_request(request):
            # These are hard-coded and therefore not very future-proof.
            if request.tag in (CLIENT[v].get("HELLO"), CLIENT[v].get("INIT")):
                responses = [Structure(SERVER[v]["SUCCESS"], {"server": server_agents.get(v, "Neo4j/9.99.999")})]
            elif request.tag == CLIENT[v].get("GOODBYE"):
                log.debug("S: <EXIT>")
                self.stop()
                raise SystemExit(EXIT_OK)
            elif request.tag == CLIENT[v]["RUN"]:
                responses = [Structure(SERVER[v]["SUCCESS"], {"fields": []})]
            else:
                responses = [Structure(SERVER[v]["SUCCESS"], {})]
        for response in responses:
            if isinstance(response, Structure):
                data = pack(response)
                self.send_chunk(sock, data)
                # Empty call sends the message terminator chunk.
                self.send_chunk(sock)
                log.debug("S: %s", message_repr(v, Structure(response.tag, *response.fields)))
            elif isinstance(response, ExitCommand):
                self.stop()
                raise SystemExit(EXIT_OK)
            else:
                raise RuntimeError("Unknown response type %r" % (response,))

    def send_chunk(self, sock, data=b""):
        """ Send *data* as one chunk (an empty call sends the zero-length
        terminator) and return a hex trace string.
        """
        header = raw_pack(UINT_16, len(data))
        header_hex = self.send_bytes(sock, header)
        data_hex = self.send_bytes(sock, data)
        return "[%s] %s" % (header_hex, data_hex)

    def send_bytes(self, sock, data):
        """ Send raw bytes, exiting off-script if the peer has gone. """
        try:
            sock.sendall(data)
        except OSError:
            log.error("S: <GONE>")
            raise SystemExit(EXIT_OFF_SCRIPT)
        else:
            return h(data)
def stub_test(script, port=17687):
    """ Decorator for stub tests.

    Wraps a test method so that a :class:`StubServer` playing *script*
    is started on 127.0.0.1:*port* before the test body runs, and is
    always stopped afterwards.  The running server is injected into the
    test as the keyword argument ``server``.
    """
    def f__(f):
        def f_(*args, **kwargs):
            # StubServer's signature is (script_name, listen_addr, timeout);
            # the script must come first and the listen address second,
            # wrapped as an Address so .host/.port_number resolve.
            server = StubServer(script, Address(("127.0.0.1", port)), timeout=5)
            server.start()
            kwargs["server"] = server
            try:
                # Run the wrapped test body directly.  (Using `yield` here
                # would turn the wrapper into a generator, so the test
                # framework would never actually execute the assertions.)
                return f(*args, **kwargs)
            finally:
                # Shut the server down even if the test body raises.
                server.stop()
        f_.__name__ = f.__name__
        f_.__doc__ = f.__doc__
        f_.__dict__.update(f.__dict__)
        return f_
    return f__
|
{"/boltkit/server/stub.py": ["/boltkit/server/scripting.py"], "/test/test_stub_server.py": ["/boltkit/server/stub.py"], "/boltkit/__main__.py": ["/boltkit/server/__init__.py", "/boltkit/server/stub.py"]}
|
4,687
|
JefferyQ/boltkit
|
refs/heads/master
|
/boltkit/server/scripting.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from json import JSONDecoder
from boltkit.client import CLIENT, SERVER, MAX_BOLT_VERSION, Structure
class Item(object):
    """ Base type for entries produced when parsing a stub script. """
class Line(Item):
    """ One scripted exchange line: a message expected from, or to be
    sent to, a given peer ("C" or "S") at a known script line number.
    """

    def __init__(self, protocol_version, line_no, peer, message):
        # Record everything verbatim for later matching and reporting.
        self.message = message
        self.peer = peer
        self.line_no = line_no
        self.protocol_version = protocol_version
class ExitCommand(Item):
    """ Script item representing the <EXIT> command. """
class Script(object):
    """ An ordered sequence of expected client messages and scripted
    server responses, parsed from a .bolt script file.
    """

    def __init__(self, file_name=None):
        # Protocol version the script targets (overridden by "!: BOLT n").
        self.bolt_version = 1
        # Messages that may arrive at any time and are auto-acknowledged.
        self.auto = []
        # Remaining script lines, consumed from the left as they match.
        self.lines = deque()
        if file_name:
            self.append(file_name)

    def __nonzero__(self):
        # Python 2 spelling of __bool__, kept for compatibility.
        return bool(self.lines)

    def __bool__(self):
        return bool(self.lines)

    def __len__(self):
        return len(self.lines)

    def parse_message(self, message):
        """ Parse "TAG json json ..." into a Structure, resolving TAG via
        the client/server tag tables for this script's Bolt version.
        """
        tag, _, data = message.partition(" ")
        v = self.bolt_version
        if tag in CLIENT[v]:
            parsed_tag = CLIENT[v][tag]
        elif tag in SERVER[v]:
            parsed_tag = SERVER[v][tag]
        else:
            raise ValueError("Unknown message type %s" % tag)
        decoder = JSONDecoder()
        parsed = []
        # Decode successive whitespace-separated JSON values until the
        # remainder of the line no longer parses.
        while data:
            data = data.lstrip()
            try:
                decoded, end = decoder.raw_decode(data)
            except ValueError:
                break
            else:
                parsed.append(decoded)
                data = data[end:]
        return Structure(parsed_tag, *parsed)

    def parse_command(self, message):
        """ Parse a "<...>" command; only <EXIT> is recognised. """
        tag, _, data = message.partition(" ")
        if tag == "<EXIT>":
            return ExitCommand()
        else:
            raise ValueError("Unknown command %s" % tag)

    def parse_lines(self, lines):
        """ Yield (line_no, mode, payload) triples from raw script lines,
        skipping blanks and // comments.  A leading "C:"/"S:"/"!:"
        switches mode; bare lines continue in the current mode.
        """
        mode = "C"
        for line_no, line in enumerate(lines, start=1):
            line = line.rstrip()
            if line == "" or line.startswith("//"):
                pass
            elif len(line) >= 2 and line[1] == ":":
                mode = line[0].upper()
                yield line_no, mode, line[2:].lstrip()
            elif mode is not None:
                yield line_no, mode, line.lstrip()

    def append(self, file_name):
        """ Parse *file_name* and append its content to this script. """
        lines = self.lines
        with open(file_name) as f:
            for line_no, mode, line in self.parse_lines(f):
                if mode == "!":
                    # Bang lines configure the script itself.
                    command, _, rest = line.partition(" ")
                    if command == "AUTO":
                        self.auto.append(self.parse_message(rest))
                    if command == "BOLT":
                        self.bolt_version = int(rest)
                        if self.bolt_version < 0 or self.bolt_version > MAX_BOLT_VERSION or CLIENT[self.bolt_version] is None:
                            raise RuntimeError("Protocol version %r in script %r is not available "
                                               "in this version of BoltKit" % (self.bolt_version, file_name))
                elif mode in "CS":
                    # "<...>" payloads are commands; anything else is a message.
                    if line.startswith("<"):
                        lines.append(Line(self.bolt_version, line_no, mode, self.parse_command(line)))
                    else:
                        lines.append(Line(self.bolt_version, line_no, mode, self.parse_message(line)))

    def match_auto_request(self, request):
        """ Return True if *request* matches one of the AUTO messages. """
        for message in self.auto:
            if request.tag == message.tag:
                return True
            elif request == message:
                # NOTE(review): full equality implies equal tags, so this
                # branch looks unreachable; kept verbatim for fidelity.
                return True
        return False

    def match_request(self, request):
        """ Consume the head of the script and return 1 if it is a client
        line equal to *request*; otherwise return 0 without consuming.
        """
        if not self.lines:
            return 0
        line = self.lines[0]
        if line.peer != "C":
            return 0
        if match(line.message, request):
            self.lines.popleft()
            return 1
        else:
            return 0

    def match_responses(self):
        """ Consume and return all server response messages currently at
        the head of the script.
        """
        responses = []
        while self.lines and self.lines[0].peer == "S":
            line = self.lines.popleft()
            if isinstance(line, Line):
                responses.append(line.message)
            elif isinstance(line, ExitCommand):
                # NOTE(review): popped items are Line wrappers (see append),
                # so this branch appears unreachable; kept for fidelity.
                pass
            else:
                raise RuntimeError("Unexpected response %r" % line)
        return responses
def match(expected, actual):
    """ Return True when the received value equals the scripted one. """
    return actual == expected
|
{"/boltkit/server/stub.py": ["/boltkit/server/scripting.py"], "/test/test_stub_server.py": ["/boltkit/server/stub.py"], "/boltkit/__main__.py": ["/boltkit/server/__init__.py", "/boltkit/server/stub.py"]}
|
4,688
|
JefferyQ/boltkit
|
refs/heads/master
|
/boltkit/server/__init__.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from logging import getLogger
from math import ceil
from os import getenv
from threading import Thread
from uuid import uuid4
from xml.etree import ElementTree
import certifi
from docker import DockerClient
from docker.errors import APIError, ImageNotFound
from urllib3 import PoolManager, make_headers
from boltkit.auth import make_auth
from boltkit.client import AddressList, Connection
# Optional TeamCity credentials taken from the environment; used for the
# basic-auth header when downloading snapshot builds (see Neo4jService).
TEAMCITY_USER = getenv("TEAMCITY_USER")
TEAMCITY_PASSWORD = getenv("TEAMCITY_PASSWORD")
log = getLogger("boltkit")
class Neo4jMachine:
    """ A single Neo4j server instance, potentially part of a cluster.
    """

    # Docker container backing this machine (created in __init__).
    container = None
    # IP address on the service network, populated by start().
    ip_address = None
    # Readiness flag: 0 = unknown, 1 = available, -1 = failed.
    ready = 0

    def __init__(self, name, service_name, image, auth, bolt_port, http_port, **config):
        """ Create (but do not start) a Docker container for one server.
        Extra keyword arguments are forwarded as NEO4J_* environment
        configuration.
        """
        self.name = name
        self.service_name = service_name
        self.fq_name = "{}.{}".format(self.name, self.service_name)
        self.image = image
        self.bolt_port = bolt_port
        self.http_port = http_port
        self.addresses = AddressList([("localhost", self.bolt_port)])
        self.auth = auth
        self.docker = DockerClient.from_env(version="auto")
        environment = {}
        if self.auth:
            environment["NEO4J_AUTH"] = "{}/{}".format(self.auth[0], self.auth[1])
        if "enterprise" in image:
            environment["NEO4J_ACCEPT_LICENSE_AGREEMENT"] = "yes"
        # Translate setting keys into Neo4j's environment variable scheme:
        # underscores are doubled and dots become single underscores.
        for key, value in config.items():
            environment["NEO4J_" + key.replace("_", "__").replace(".", "_")] = value
        ports = {
            "7474/tcp": self.http_port,
            "7687/tcp": self.bolt_port,
        }
        def create_container(img):
            return self.docker.containers.create(img,
                                                 detach=True,
                                                 environment=environment,
                                                 hostname=self.fq_name,
                                                 name=self.fq_name,
                                                 network=self.service_name,
                                                 ports=ports)
        try:
            self.container = create_container(self.image)
        except ImageNotFound:
            # Pull the image on demand, then retry once.
            log.info("Downloading Docker image %r", self.image)
            self.docker.images.pull(self.image)
            self.container = create_container(self.image)

    def __hash__(self):
        return hash(self.container)

    def __repr__(self):
        return "%s(fq_name=%r, image=%r, address=%r)" % (self.__class__.__name__, self.fq_name, self.image, self.addresses)

    def start(self):
        """ Start the container and record its service-network IP. """
        log.info("Starting machine %r at «%s»", self.fq_name, self.addresses)
        self.container.start()
        self.container.reload()
        self.ip_address = self.container.attrs["NetworkSettings"]["Networks"][self.service_name]["IPAddress"]

    def await_started(self, timeout):
        """ Try to open a Bolt connection within *timeout* seconds,
        setting `ready` to 1 on success or -1 on container exit, and
        logging diagnostics on failure.
        """
        try:
            Connection.open(*self.addresses, auth=self.auth, timeout=timeout).close()
        except OSError:
            self.container.reload()
            state = self.container.attrs["State"]
            if state["Status"] == "exited":
                self.ready = -1
                log.error("Machine %r exited with code %r" % (self.fq_name, state["ExitCode"]))
                for line in self.container.logs().splitlines():
                    log.error("> %s" % line.decode("utf-8"))
            else:
                log.error("Machine %r did not become available within %rs" % (self.fq_name, timeout))
        else:
            self.ready = 1
            # log.info("Machine %r available", self.name)

    def stop(self):
        """ Stop and remove the container. """
        log.info("Stopping machine %r", self.fq_name)
        self.container.stop()
        self.container.remove(force=True)
class Neo4jService:
    """ A Neo4j database management service.
    """

    # Subclasses supply the concrete default Docker image.
    default_image = NotImplemented

    default_bolt_port = 7687
    default_http_port = 7474

    # TeamCity build used as the source of "snapshot" images.
    snapshot_host = "live.neo4j-build.io"
    snapshot_build_config_id = "Neo4j40_Docker"
    snapshot_build_url = ("https://{}/repository/download/{}/"
                          "lastSuccessful".format(snapshot_host,
                                                  snapshot_build_config_id))

    def __new__(cls, name=None, n_cores=None, **parameters):
        # Dispatch to the cluster subclass when a core count is given,
        # otherwise build a standalone service.
        if n_cores:
            return object.__new__(Neo4jClusterService)
        else:
            return object.__new__(Neo4jStandaloneService)

    def __init__(self, name=None, image=None, auth=None, **parameters):
        """ Prepare (but do not start) the service: resolve the Docker
        image and credentials and set up an HTTPS pool for snapshot
        downloads.
        """
        self.name = name or uuid4().hex[-7:]
        self.docker = DockerClient.from_env(version="auto")
        headers = {}
        if TEAMCITY_USER and TEAMCITY_PASSWORD:
            headers.update(make_headers(
                basic_auth="{}:{}".format(TEAMCITY_USER, TEAMCITY_PASSWORD)))
        self.http = PoolManager(
            cert_reqs="CERT_REQUIRED",
            ca_certs=certifi.where(),
            headers=headers,
        )
        self.image = self._resolve_image(image)
        self.auth = auth or make_auth()
        self.machines = []
        self.routers = []
        self.network = None

    def __enter__(self):
        # Start on entry; make sure a Ctrl+C during startup still cleans up.
        try:
            self.start(timeout=300)
        except KeyboardInterrupt:
            self.stop()
            raise
        else:
            return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def _resolve_image(self, image):
        """ Normalise *image* to a full Docker tag, fetching snapshot
        builds from TeamCity when requested.
        """
        resolved = image or self.default_image
        if ":" not in resolved:
            # NOTE(review): concatenates *image*, not *resolved*; the two
            # only differ if the default image has no tag — confirm.
            resolved = "neo4j:" + image
        if resolved == "neo4j:snapshot":
            return self._pull_snapshot("community")
        elif resolved in ("neo4j:snapshot-enterprise",
                          "neo4j-enterprise:snapshot"):
            return self._pull_snapshot("enterprise")
        else:
            return resolved

    def _resolve_artifact_name(self, edition):
        """ Query TeamCity's ivy metadata for the docker artifact name of
        the latest successful build for *edition*.  Returns None if no
        matching artifact is listed.
        """
        log.info("Resolving snapshot artifact name on «{}»".format(
            self.snapshot_host))
        prefix = "neo4j-{}".format(edition)
        url = "{}/teamcity-ivy.xml".format(self.snapshot_build_url)
        r1 = self.http.request("GET", url)
        root = ElementTree.fromstring(r1.data)
        for e in root.find("publications").findall("artifact"):
            attr = e.attrib
            if attr["type"] == "tar" and attr["name"].startswith(prefix):
                return "{}.{}".format(attr["name"], attr["ext"])

    @classmethod
    def _derive_image_tag(cls, artifact_name):
        """ Convert an artifact file name into the Docker tag its image
        loads under.
        """
        if artifact_name.endswith("-docker-complete.tar"):
            artifact_name = artifact_name[:-20]
        else:
            raise ValueError("Expected artifact name to end with "
                             "'-docker-complete.tar'")
        if artifact_name.startswith("neo4j-enterprise-"):
            return "neo4j-enterprise:{}".format(artifact_name[17:])
        elif artifact_name.startswith("neo4j-community-"):
            return "neo4j:{}".format(artifact_name[16:])
        else:
            raise ValueError("Expected artifact name to start with either "
                             "'neo4j-community-' or 'neo4j-enterprise-'")

    def _pull_snapshot(self, edition):
        """ Ensure the latest snapshot image for *edition* is present
        locally, downloading and docker-loading it if necessary, and
        return its tag.
        """
        artifact = self._resolve_artifact_name(edition)
        derived = self._derive_image_tag(artifact)
        try:
            self.docker.images.get(derived)
        except ImageNotFound:
            log.info("Downloading {} from «{}»".format(
                artifact, self.snapshot_host))
            url = "{}/{}".format(self.snapshot_build_url, artifact)
            r2 = self.http.request("GET", url)
            images = self.docker.images.load(r2.data)
            image = images[0]
            return image.tags[0]
        else:
            return derived

    def _for_each_machine(self, f):
        """ Apply *f* across all machines using one thread each and wait
        for completion.
        """
        threads = []
        for machine in self.machines:
            # NOTE(review): f(machine) is invoked *here* and its return
            # value becomes the thread target — for the lambdas passed in
            # start()/stop() that returns a bound method, but for wait()
            # in await_started() it runs synchronously. Confirm intended.
            thread = Thread(target=f(machine))
            thread.daemon = True
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()

    def start(self, timeout=None):
        """ Create the Docker network, start every machine and, when a
        timeout is supplied, wait for the service to become available.
        """
        log.info("Starting service %r with image %r", self.name, self.image)
        self.network = self.docker.networks.create(self.name)
        self._for_each_machine(lambda machine: machine.start)
        if timeout is not None:
            self.await_started(timeout)

    def await_started(self, timeout):
        """ Wait until every machine reports ready, raising OSError if
        any of them failed.
        """
        def wait(machine):
            machine.await_started(timeout=timeout)
        self._for_each_machine(wait)
        if all(machine.ready == 1 for machine in self.machines):
            log.info("Service %r available", self.name)
        else:
            log.error("Service %r unavailable - some machines failed", self.name)
            raise OSError("Some machines failed")

    def stop(self):
        """ Stop every machine and remove the Docker network. """
        log.info("Stopping service %r", self.name)
        self._for_each_machine(lambda machine: machine.stop)
        self.network.remove()

    @property
    def addresses(self):
        # All Bolt addresses exposed by the routing members.
        return AddressList(chain(*(r.addresses for r in self.routers)))

    @classmethod
    def find_and_stop(cls, service_name):
        """ Stop and remove every container belonging to a previously
        started service called *service_name*, then drop its network.
        """
        docker = DockerClient.from_env(version="auto")
        for container in docker.containers.list(all=True):
            if container.name.endswith(".{}".format(service_name)):
                container.stop()
                container.remove(force=True)
        docker.networks.get(service_name).remove()
class Neo4jStandaloneService(Neo4jService):
    """ A single-machine (non-clustered) Neo4j service. """

    default_image = "neo4j:latest"

    def __init__(self, name=None, bolt_port=None, http_port=None, **parameters):
        super().__init__(name, **parameters)
        only_machine = Neo4jMachine(
            "z",
            self.name,
            self.image,
            auth=self.auth,
            bolt_port=bolt_port or self.default_bolt_port,
            http_port=http_port or self.default_http_port,
        )
        self.machines.append(only_machine)
        # The sole machine also acts as the only router.
        self.routers.extend(self.machines)
class Neo4jClusterService(Neo4jService):
    """ A causal cluster of Neo4j core servers with optional read
    replicas.
    """

    default_image = "neo4j:enterprise"

    # The minimum and maximum number of cores permitted
    min_cores = 3
    max_cores = 7

    # The minimum and maximum number of read replicas permitted
    min_replicas = 0
    max_replicas = 10

    default_bolt_port = 17601
    default_http_port = 17401

    @classmethod
    def _port_range(cls, base_port, count):
        # A contiguous range of *count* ports starting at *base_port*.
        return range(base_port, base_port + count)

    def __init__(self, name=None, bolt_port=None, http_port=None, n_cores=None, n_replicas=None, **parameters):
        """ Build (but do not start) the cluster machines.

        :param bolt_port: base Bolt port; each machine gets a successive port
        :param http_port: base HTTP port; each machine gets a successive port
        :param n_cores: number of core servers (min_cores..max_cores)
        :param n_replicas: number of read replicas (min_replicas..max_replicas)
        :raises ValueError: if either count falls outside its allowed range
        """
        super().__init__(name, n_cores=n_cores, n_replicas=n_replicas, **parameters)
        self.n_cores = n_cores or self.min_cores
        self.n_replicas = n_replicas or self.min_replicas
        # Fixed error-message wording ("must have been" -> "must have between").
        if not self.min_cores <= self.n_cores <= self.max_cores:
            raise ValueError("A cluster must have between {} and {} cores".format(self.min_cores, self.max_cores))
        if not self.min_replicas <= self.n_replicas <= self.max_replicas:
            raise ValueError("A cluster must have between {} and {} read replicas".format(self.min_replicas, self.max_replicas))
        # CORES
        # =====
        # Calculate port numbers for Bolt
        core_bolt_port_range = self._port_range(bolt_port or self.default_bolt_port, self.max_cores)
        # Calculate port numbers for HTTP
        core_http_port_range = self._port_range(http_port or self.default_http_port, self.max_cores)
        # Calculate machine names (single letters from "a")
        core_names = [chr(i) for i in range(97, 97 + self.n_cores)]
        # Discovery endpoints used by every member to find the cores.
        core_addresses = ["{}.{}:5000".format(name, self.name) for name in core_names]
        #
        self.machines.extend(Neo4jMachine(
            core_names[i],
            self.name,
            self.image,
            auth=self.auth,
            bolt_port=core_bolt_port_range[i],
            http_port=core_http_port_range[i],
            **{
                "causal_clustering.initial_discovery_members": ",".join(core_addresses),
                "causal_clustering.minimum_core_cluster_size_at_formation": self.n_cores,
                "causal_clustering.minimum_core_cluster_size_at_runtime": self.min_cores,
                "dbms.connector.bolt.advertised_address": "localhost:{}".format(core_bolt_port_range[i]),
                "dbms.mode": "CORE",
            }
        ) for i in range(self.n_cores or 0))
        # Only core members participate in routing.
        self.routers.extend(self.machines)
        # REPLICAS
        # ========
        # Calculate port numbers for Bolt (next multiple of ten above the cores)
        replica_bolt_port_range = self._port_range(ceil(core_bolt_port_range.stop / 10) * 10, self.max_replicas)
        # Calculate port numbers for HTTP
        replica_http_port_range = self._port_range(ceil(core_http_port_range.stop / 10) * 10, self.max_replicas)
        # Calculate machine names (single digits from "0")
        replica_names = [chr(i) for i in range(48, 48 + self.n_replicas)]
        #
        self.machines.extend(Neo4jMachine(
            replica_names[i],
            self.name,
            self.image,
            auth=self.auth,
            bolt_port=replica_bolt_port_range[i],
            http_port=replica_http_port_range[i],
            **{
                "causal_clustering.initial_discovery_members": ",".join(core_addresses),
                "dbms.connector.bolt.advertised_address": "localhost:{}".format(replica_bolt_port_range[i]),
                "dbms.mode": "READ_REPLICA",
            }
        ) for i in range(self.n_replicas or 0))
|
{"/boltkit/server/stub.py": ["/boltkit/server/scripting.py"], "/test/test_stub_server.py": ["/boltkit/server/stub.py"], "/boltkit/__main__.py": ["/boltkit/server/__init__.py", "/boltkit/server/stub.py"]}
|
4,689
|
JefferyQ/boltkit
|
refs/heads/master
|
/test/test_stub_server.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from boltkit.client import Connection
from boltkit.server.stub import stub_test
class ReturnOneTestCase(TestCase):
    """ Run `RETURN $x` against scripted stub servers for each protocol
    version and check both the returned record and the negotiated
    Bolt version.
    """

    def _check_return_one(self, server, expected_version):
        # Shared auto-commit body: run the query, pull all records, then
        # verify the record list and the negotiated protocol version.
        with Connection.open(*server.addresses, **server.settings) as cx:
            records = []
            cx.run("RETURN $x", {"x": 1})
            cx.pull(-1, records)
            cx.send_all()
            cx.fetch_all()
            self.assertEqual(records, [[1]])
            self.assertEqual(cx.bolt_version, expected_version)

    @stub_test("scripts/v1/return_1_as_x.bolt")
    def test_v1(self, server):
        self._check_return_one(server, 1)

    @stub_test("scripts/v2/return_1_as_x.bolt")
    def test_v2(self, server):
        self._check_return_one(server, 2)

    @stub_test("scripts/v3/return_1_as_x.bolt")
    def test_v3(self, server):
        self._check_return_one(server, 3)

    @stub_test("scripts/v4/return_1_as_x.bolt")
    def test_v4(self, server):
        self._check_return_one(server, 4)

    @stub_test("scripts/v4/return_1_as_x_explicit.bolt")
    def test_v4_explicit(self, server):
        # Explicit-transaction variant: wrap run/pull in begin/commit.
        with Connection.open(*server.addresses, **server.settings) as cx:
            records = []
            cx.begin()
            cx.run("RETURN $x", {"x": 1})
            cx.pull(-1, records)
            cx.commit()
            cx.send_all()
            cx.fetch_all()
            self.assertEqual(records, [[1]])
            self.assertEqual(cx.bolt_version, 4)
|
{"/boltkit/server/stub.py": ["/boltkit/server/scripting.py"], "/test/test_stub_server.py": ["/boltkit/server/stub.py"], "/boltkit/__main__.py": ["/boltkit/server/__init__.py", "/boltkit/server/stub.py"]}
|
4,690
|
JefferyQ/boltkit
|
refs/heads/master
|
/boltkit/__main__.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from logging import INFO, DEBUG
from shlex import quote as shlex_quote
from subprocess import run
from time import sleep
import click
from boltkit.addressing import Address, AddressList
from boltkit.auth import AuthParamType, Auth
from boltkit.client import Connection
from boltkit.dist import Distributor
from boltkit.server import Neo4jService
from boltkit.server.proxy import ProxyServer
from boltkit.server.stub import StubServer
from boltkit.watcher import watch
class AddressParamType(click.ParamType):
    """ Click parameter type for a single 'HOST:PORT' address. """

    name = "addr"

    def __init__(self, default_host=None, default_port=None):
        # Defaults substituted for any component missing from the input.
        self.default_host = default_host
        self.default_port = default_port

    def convert(self, value, param, ctx):
        return Address.parse(value, self.default_host, self.default_port)

    def __repr__(self):
        return 'HOST:PORT'
class AddressListParamType(click.ParamType):
    """ Click parameter type for a whitespace-separated list of
    'HOST:PORT' addresses.
    """

    name = "addr"

    def __init__(self, default_host=None, default_port=None):
        # Defaults substituted for any component missing from the input.
        self.default_host = default_host
        self.default_port = default_port

    def convert(self, value, param, ctx):
        return AddressList.parse(value, self.default_host, self.default_port)

    def __repr__(self):
        return 'HOST:PORT [HOST:PORT...]'
def watch_log(ctx, param, value):
    """ Click callback for -v/--verbose: attach a log watcher, at DEBUG
    level for two or more -v flags, else INFO.
    """
    if not value:
        return
    level = DEBUG if value >= 2 else INFO
    watch("boltkit", level)
# Root click group: every "bolt ..." subcommand below attaches to this
# via @bolt.command.
@click.group()
def bolt():
    pass
@bolt.command(help="""\
Run a Bolt client.
""")
@click.option("-a", "--auth", type=AuthParamType(), envvar="NEO4J_AUTH")
@click.option("-b", "--bolt-version", default=0, type=int)
@click.option("-s", "--server-addr", type=AddressListParamType(), envvar="BOLT_SERVER_ADDR")
@click.option("-t", "--transaction", is_flag=True)
@click.option("-v", "--verbose", count=True, callback=watch_log, expose_value=False, is_eager=True)
@click.argument("cypher", nargs=-1)
def client(cypher, server_addr, auth, transaction, bolt_version):
    # Execute each Cypher statement over one Bolt connection and echo the
    # resulting records, tab-separated, one per line.
    if auth is None:
        # No credentials supplied via option/env: prompt interactively.
        auth = Auth(click.prompt("User", default="neo4j"),
                    click.prompt("Password", hide_input=True))
    # A bolt_version of 0 means "no preference": let the handshake decide.
    if bolt_version:
        bolt_versions = [bolt_version]
    else:
        bolt_versions = None
    try:
        with Connection.open(*server_addr or (), auth=auth, bolt_versions=bolt_versions) as cx:
            records = []
            if transaction:
                cx.begin()
            for statement in cypher:
                cx.run(statement, {})
                # NOTE(review): other call sites use cx.pull(-1, records);
                # confirm the extra -1 argument matches Connection.pull.
                cx.pull(-1, -1, records)
            if transaction:
                cx.commit()
            cx.send_all()
            cx.fetch_all()
            for record in records:
                click.echo("\t".join(map(str, record)))
    except Exception as e:
        click.echo(" ".join(map(str, e.args)))
        exit(1)
@bolt.command(help="""\
Run a Bolt stub server.
The stub server process listens for an incoming client connection and will
attempt to play through a pre-scripted exchange with that client. Any deviation
from that script will result in a non-zero exit code. This utility is primarily
useful for Bolt client integration testing.
""")
@click.option("-l", "--listen-addr", type=AddressParamType(),
              envvar="BOLT_LISTEN_ADDR",
              help="The address on which to listen for incoming connections "
                   "in INTERFACE:PORT format, where INTERFACE may be omitted "
                   "for 'localhost'. If completely omitted, this defaults to "
                   "':17687'. The BOLT_LISTEN_ADDR environment variable may "
                   "be used as an alternative to this option.")
@click.option("-t", "--timeout", type=float,
              help="The number of seconds for which the stub server will wait "
                   "for an incoming connection before automatically "
                   "terminating. If unspecified, the server will wait "
                   "indefinitely.")
@click.option("-v", "--verbose", count=True, callback=watch_log,
              expose_value=False, is_eager=True,
              help="Show more detail about the client-server exchange.")
@click.argument("script")
def stub(script, listen_addr, timeout):
    # Run the stub server thread and block until it finishes, mapping the
    # outcome onto the process exit code.
    stub_server = StubServer(script, listen_addr, timeout=timeout)
    try:
        stub_server.start()
        stub_server.join()
    except KeyboardInterrupt:
        exit(130)
    except Exception as e:
        click.echo(" ".join(map(str, e.args)), err=True)
        exit(1)
    finally:
        # NOTE(review): this finally-clause exit overrides the exit codes
        # raised in the handlers above with the server's own exit code —
        # confirm that precedence is intended.
        exit(stub_server.exit_code)
@bolt.command(help="""\
Run a Bolt proxy server.
""")
@click.option("-l", "--listen-addr", type=AddressParamType(), envvar="BOLT_LISTEN_ADDR")
@click.option("-s", "--server-addr", type=AddressListParamType(), envvar="BOLT_SERVER_ADDR")
@click.option("-v", "--verbose", count=True, callback=watch_log, expose_value=False, is_eager=True)
def proxy(server_addr, listen_addr):
    # Forward traffic between the listen address and the target server(s).
    forwarder = ProxyServer(server_addr, listen_addr)
    forwarder.start()
@bolt.command(help="List available Neo4j releases")
def dist():
    # Print the canonical name of each known release, skipping alias
    # entries (keys that are not simply the upper-cased release name).
    try:
        releases = Distributor().releases
        for key, release in releases.items():
            if key == release.name.upper():
                click.echo(release.name)
    except KeyboardInterrupt:
        exit(130)
    except Exception as e:
        click.echo(" ".join(map(str, e.args)), err=True)
        exit(1)
@bolt.command(help="""\
Download Neo4j.
""")
@click.option("-e", "--enterprise", is_flag=True)
@click.option("-s", "--s3", is_flag=True)
@click.option("-t", "--teamcity", is_flag=True)
@click.option("-v", "--verbose", count=True, callback=watch_log, expose_value=False, is_eager=True)
@click.option("-w", "--windows", is_flag=True)
@click.argument("version")
def get(version, enterprise, s3, teamcity, windows):
    # Download the requested Neo4j package, choosing edition, package
    # format and download source from the flags.
    try:
        source = Distributor()
        edition = "enterprise" if enterprise else "community"
        package_format = "windows.zip" if windows else "unix.tar.gz"
        if s3:
            fetch = source.download_from_s3
        elif teamcity:
            fetch = source.download_from_teamcity
        else:
            fetch = source.download
        fetch(edition, version, package_format)
    except KeyboardInterrupt:
        exit(130)
    except Exception as e:
        click.echo(" ".join(map(str, e.args)), err=True)
        exit(1)
@bolt.command(context_settings={"ignore_unknown_options": True}, help="""\
Run a Neo4j cluster or standalone server in one or more local Docker
containers.
If an additional COMMAND is supplied, this will be executed after startup,
with a shutdown occurring immediately afterwards. If no COMMAND is supplied,
the service will remain available until manually shutdown by Ctrl+C.
A couple of environment variables will also be made available to any COMMAND
passed. These are:
\b
- BOLT_SERVER_ADDR
- NEO4J_AUTH
""")
@click.option("-a", "--auth", type=AuthParamType(), envvar="NEO4J_AUTH",
              help="Credentials with which to bootstrap the service. These "
                   "must be specified as a 'user:password' pair and may "
                   "alternatively be supplied via the NEO4J_AUTH environment "
                   "variable. These credentials will also be exported to any "
                   "COMMAND executed during the service run.")
@click.option("-B", "--bolt-port", type=int,
              help="A port number (standalone) or base port number (cluster) "
                   "for Bolt traffic.")
@click.option("-c", "--n-cores", type=int,
              help="If specified, a cluster with this many cores will be "
                   "created. If omitted, a standalone service will be created "
                   "instead. See also -r for specifying the number of read "
                   "replicas.")
@click.option("-H", "--http-port", type=int,
              help="A port number (standalone) or base port number (cluster) "
                   "for HTTP traffic.")
@click.option("-i", "--image",
              help="The Docker image tag to use for building containers. The "
                   "repository can also be included, but will default to "
                   "'neo4j'. Note that a Neo4j Enterprise Edition image is "
                   "required for building clusters.")
@click.option("-n", "--name",
              help="A Docker network name to which all servers will be "
                   "attached. If omitted, an auto-generated name will be "
                   "used.")
@click.option("-r", "--n-replicas", type=int,
              help="The number of read replicas to include within the "
                   "cluster. This option will only take effect if -c is also "
                   "used.")
@click.option("-v", "--verbose", count=True, callback=watch_log,
              expose_value=False, is_eager=True,
              help="Show more detail about the startup and shutdown process.")
@click.argument("command", nargs=-1, type=click.UNPROCESSED)
def server(command, name, **parameters):
    """Spin up a Neo4j service, optionally run COMMAND against it, then tear
    it down (the Neo4jService context manager handles shutdown)."""
    try:
        with Neo4jService(name, **parameters) as neo4j:
            # Aggregate the addresses of every router in the service.
            addr = AddressList(chain(*(r.addresses for r in neo4j.routers)))
            auth = "{}:{}".format(neo4j.auth.user, neo4j.auth.password)
            if command:
                # One-shot mode: run COMMAND with BOLT_SERVER_ADDR and
                # NEO4J_AUTH in its environment, then fall out and shut down.
                run(" ".join(map(shlex_quote, command)), shell=True, env={
                    "BOLT_SERVER_ADDR": str(addr),
                    "NEO4J_AUTH": auth,
                })
            else:
                # Interactive mode: print connection details and idle until
                # the user interrupts with Ctrl+C.
                click.echo("BOLT_SERVER_ADDR='{}'".format(addr))
                click.echo("NEO4J_AUTH='{}'".format(auth))
                click.echo("Press Ctrl+C to exit")
                while True:
                    sleep(0.1)
    except KeyboardInterrupt:
        exit(130)  # conventional exit status for SIGINT
    except Exception as e:
        click.echo(" ".join(map(str, e.args)), err=True)
        exit(1)
# CLI entry point: dispatch to the `bolt` command group.
if __name__ == "__main__":
    bolt()
|
{"/boltkit/server/stub.py": ["/boltkit/server/scripting.py"], "/test/test_stub_server.py": ["/boltkit/server/stub.py"], "/boltkit/__main__.py": ["/boltkit/server/__init__.py", "/boltkit/server/stub.py"]}
|
4,696
|
codingwithjbear/django-personal-portfolio
|
refs/heads/main
|
/blog/models.py
|
from django.db import models
# Create your models here.
class Blog(models.Model):
    """A dated blog post shown on the blog index and detail pages."""
    title = models.CharField(max_length=200)
    description = models.TextField()
    date = models.DateField()
    def __str__(self):  # display-only; methods need no migration
        return self.title
|
{"/portfolio/views.py": ["/portfolio/models.py"], "/blog/views.py": ["/blog/models.py"], "/portfolio/admin.py": ["/portfolio/models.py"]}
|
4,697
|
codingwithjbear/django-personal-portfolio
|
refs/heads/main
|
/portfolio/views.py
|
from django.shortcuts import render
from .models import Project
# Create your views here.
def home(request):
    """Render the portfolio home page with every Project."""
    all_projects = Project.objects.all()
    # Template iterates the queryset under the key 'projects'.
    context = {'projects': all_projects}
    return render(request, 'portfolio/home.html', context)
|
{"/portfolio/views.py": ["/portfolio/models.py"], "/blog/views.py": ["/blog/models.py"], "/portfolio/admin.py": ["/portfolio/models.py"]}
|
4,698
|
codingwithjbear/django-personal-portfolio
|
refs/heads/main
|
/blog/views.py
|
from django.shortcuts import render, get_object_or_404 # get_Obj... tries to get and show an object or shows the 404 error
from .models import Blog
# Create your views here.
def all_blogs(request):
    """Render the blog index with the five most recent posts.

    Fix: ``Blog.objects.count`` was referenced without calling it, so the
    template received a bound method instead of the number of posts.
    """
    blog_count = Blog.objects.count()
    # '-date' sorts newest first; [:5] caps the page at five posts.
    blogs = Blog.objects.order_by('-date')[:5]
    return render(request, 'blog/all_blogs.html', {'blogs': blogs, 'blogcount': blog_count})
def detail(request, blog_id):
    """Render a single blog post, raising Http404 for an unknown id."""
    post = get_object_or_404(Blog, pk=blog_id)  # pk is the auto 'id' field
    return render(request, 'blog/detail.html', {'blog': post})
|
{"/portfolio/views.py": ["/portfolio/models.py"], "/blog/views.py": ["/blog/models.py"], "/portfolio/admin.py": ["/portfolio/models.py"]}
|
4,699
|
codingwithjbear/django-personal-portfolio
|
refs/heads/main
|
/portfolio/admin.py
|
from django.contrib import admin
# Register your models here.
from .models import Project
admin.site.register(Project)  # expose the Project model in the Django admin site
|
{"/portfolio/views.py": ["/portfolio/models.py"], "/blog/views.py": ["/blog/models.py"], "/portfolio/admin.py": ["/portfolio/models.py"]}
|
4,700
|
codingwithjbear/django-personal-portfolio
|
refs/heads/main
|
/portfolio/models.py
|
from django.db import models
class Project(models.Model):
    """A portfolio entry with an image and an optional external link."""
    title = models.CharField(max_length=100)
    description = models.CharField(max_length=250)
    image = models.ImageField(upload_to='portfolio/images/')  # stored under MEDIA_ROOT/portfolio/images
    url = models.URLField(blank=True)  # blank=True makes the link optional
    def __str__(self):
        return self.title
|
{"/portfolio/views.py": ["/portfolio/models.py"], "/blog/views.py": ["/blog/models.py"], "/portfolio/admin.py": ["/portfolio/models.py"]}
|
4,701
|
codingwithjbear/django-personal-portfolio
|
refs/heads/main
|
/blog/urls.py
|
from django.urls import path, include
from . import views
app_name = 'blog' # namespacing avoids clashes if another app also names a route 'detail'
urlpatterns = [
    path('',views.all_blogs, name='all_blogs'),
    path('<int:blog_id>/',views.detail, name='detail'), # captured integer is passed to the view as blog_id
]
|
{"/portfolio/views.py": ["/portfolio/models.py"], "/blog/views.py": ["/blog/models.py"], "/portfolio/admin.py": ["/portfolio/models.py"]}
|
4,735
|
chenhangjun/Spam_SMS_Classify
|
refs/heads/master
|
/SignUp.py
|
from tkinter import *
from tkinter.messagebox import *
import pymysql
class SignUp(object):
    """Modal sign-up window backed by the InfoDB MySQL database.

    Opens its own Toplevel window, collects an account name and a
    twice-typed password, and inserts a new row into USER on success.
    """
    def __init__(self, master=None):
        # NOTE(review): hard-coded DB credentials; move them to configuration.
        self.db = pymysql.connect("localhost", "chenhangjun", "1030416518", "InfoDB", charset='utf8')
        self.cursor = self.db.cursor()
        self.root = master  # keep a handle on the caller's window
        self.root = Toplevel()
        self.root.title("注册")
        self.root.geometry('%dx%d' % (400, 250))  # fixed window size
        self.username = StringVar()
        self.password1 = StringVar()
        self.password2 = StringVar()
        self.createPage()
    def createPage(self):
        """Lay out the account/password entry grid and the two buttons."""
        self.page = Frame(self.root)
        self.page.pack()
        Label(self.page).grid(row=0, stick=W)
        Label(self.page, text='账号: ').grid(row=1, stick=W, pady=10)
        Entry(self.page, textvariable=self.username).grid(row=1, column=1, stick=E)
        Label(self.page, text='密码: ').grid(row=2, stick=W, pady=10)
        Entry(self.page, textvariable=self.password1, show='*').grid(row=2, column=1, stick=E)
        Label(self.page, text='重复密码: ').grid(row=3, stick=W, pady=10)
        Entry(self.page, textvariable=self.password2, show='*').grid(row=3, column=1, stick=E)
        Button(self.page, text='确定', command=self.confirm).grid(row=4, stick=W, pady=10)
        Button(self.page, text='取消', command=self.pageQuit).grid(row=4, column=1, stick=E)
    def pageQuit(self):
        """Close the DB connection and destroy the window without saving."""
        self.db.close()
        self.root.destroy()
    def confirm(self):
        """Validate the form and insert the new account.

        Shows a message box for every failure mode; on success commits the
        INSERT, closes the connection and destroys the window.
        """
        name = self.username.get()
        pwd1 = self.password1.get()
        pwd2 = self.password2.get()
        if name == '':
            showinfo(title='错误', message='请填写账号!')
        elif pwd1 == '':
            showinfo(title='错误', message='请填写密码!')
        elif pwd2 == '':
            showinfo(title='错误', message='请重复密码!')
        elif pwd1 != pwd2:
            showinfo(title='错误', message='两次密码输入不一致!')
        else:
            # Fix: parameterized queries — the previous %-formatted SQL was
            # open to SQL injection through the user-supplied name/password.
            # NOTE(review): the password is still stored in plain text.
            sql1 = "SELECT * FROM USER WHERE USER_NAME = %s"
            sql2 = "INSERT INTO USER(USER_NAME, PASSWORD) VALUES(%s, %s)"
            self.cursor.execute(sql1, (name,))
            res = self.cursor.fetchall()
            if res != ():
                showinfo(title='错误', message='该账号已存在!')
            else:
                try:
                    self.cursor.execute(sql2, (name, pwd1))
                    self.db.commit()  # persist only after a clean insert
                    showinfo(title='恭喜', message='注册成功!')
                except Exception:  # narrowed from a bare `except:`
                    self.db.rollback()  # undo the partial insert
                    print("error")
                finally:
                    self.db.close()
                    self.root.destroy()
|
{"/NBTest.py": ["/NaiveBayes.py"], "/UIMain.py": ["/Login.py"], "/Login.py": ["/MainPage.py", "/SignUp.py"]}
|
4,736
|
chenhangjun/Spam_SMS_Classify
|
refs/heads/master
|
/TextProcess.py
|
import csv
import numpy as np
import re
from zhon.hanzi import punctuation
import jieba
import pandas as pd
# Load the labelled SMS dataset: column 0 = label, column 1 = message text.
# Fix: file handles are now closed via context managers (they were left open).
with open("dataset.csv", "r") as csvFile:
    rows = [row for row in csv.reader(csvFile)]
data = np.array(rows)
# data.shape == (50000, 2); keep only the message bodies.
text = data[:, 1]
# words collects one jieba token list per message.
words = []
# High-frequency stop words observed in an initial tokenization pass.
stop_words = ['有', '和', '是', '在', '我', '了', '的']
remove_chars = '[0-9’a-zA-Z!"#$%&\'()*\\\\+,-./:;<=>?@?★…‘’[\\]^_`{|}~(\s*)]+'
for i in range(0, len(text)):
    # Strip digits/ASCII (links, phone numbers) and CJK punctuation per line.
    newstr = re.sub(remove_chars, '', text[i])
    text[i] = re.sub("[{}]+".format(punctuation), "", newstr)
    # Tokenize, then drop stop words.
    # Fix: the original removed items from the list while iterating it,
    # which skips the element following each removal.
    words.append([w for w in jieba.lcut(text[i]) if w not in stop_words])
# Write one comma-joined token row per message.
with open('words.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    for word in words:
        writer.writerow([','.join(word)])
|
{"/NBTest.py": ["/NaiveBayes.py"], "/UIMain.py": ["/Login.py"], "/Login.py": ["/MainPage.py", "/SignUp.py"]}
|
4,737
|
chenhangjun/Spam_SMS_Classify
|
refs/heads/master
|
/NBTest.py
|
import numpy as np
import csv
from NaiveBayes import NaiveBayes
import matplotlib.pyplot as plt
# Load the labelled dataset: column 0 = label string, column 1 = message text.
csvFile = open("dataset.csv", "r")
reader = csv.reader(csvFile)
rows = [row for row in reader]
data = np.array(rows)
# Labels
Y_data_str = data[:, 0]
Y_data_int = np.zeros(len(Y_data_str), dtype=int)
# Convert label values from str to int.
for i in range(0, len(Y_data_str)):
    Y_data_int[i] = int(Y_data_str[i])
# Load the pre-tokenized messages produced by TextProcess.py.
csvFile = open("words.csv", "r")
reader = csv.reader(csvFile)
rows = [row for row in reader]
data = np.array(rows)
# data.shape == (50000, 1)
data = data[:,0]
# ==> data.shape == (50000,)
words = []
for obj in data:
    words.append(obj.split(','))
# len(words) == 50000
N1 = int(len(Y_data_int) * 0.8)
# Split the dataset 4:1 into train:test.
Y_train = Y_data_int[0 : N1]
X_train = words[0 : N1]
Y_test = Y_data_int[N1 : len(Y_data_int)]
X_test = words[N1: len(words)]
x_axis = np.zeros(1001)
accuracy = np.zeros(1001)
precision = np.zeros(1001)
# Threshold sweep used to pick `para` (kept for reference):
# str = 0.45
# end = 0.50
# for i in range(0, 1001):
#     para = str + (end - str) * 0.001 * i
#     print("para = %f" %(para))
#     x_axis[i] = para
para = 0.4743
NB = NaiveBayes()
NB.fit(X_train, Y_train, para)
NB.save()
'''
count = 0
PSpam = 0
TSpam = 0
for j in range(0, len(Y_test)):
    tag = NB.predict(X_test[j])
    if Y_test[j] == tag:
        count += 1
    # 1为spam, 0为ham,与标签一致
    if tag == 1:
        PSpam += 1
        if Y_test[j] == 1:
            TSpam += 1
    # else:
    #     print(X_test[j], i, tag)
length = len(Y_test)
print("With Laplacial correction")
print("Threshold is %f" % para)
print("准确率: %.2f%% 查准率: %.2f%%" %(((count / length) * 100),((TSpam / PSpam) * 100)))
'''
'''
accuracy[i] = count / length * 100
if TSpam == 0:
    precision[i] = 0
else:
    precision[i] = TSpam / PSpam * 100
# sub_axix = filter(lambda x: x % 200 == 0, x_axis)
# plt.title('NaiveBayes--Laplacian correction')
plt.title('NaiveBayes--without Laplacian correction')
plt.plot(x_axis, accuracy, color='red', label='accuracy')
plt.plot(x_axis, precision, color='blue', label='precision')
plt.legend() # 显示图例
plt.xlabel('threshold')
plt.ylabel('percentage')
plt.show()
'''
|
{"/NBTest.py": ["/NaiveBayes.py"], "/UIMain.py": ["/Login.py"], "/Login.py": ["/MainPage.py", "/SignUp.py"]}
|
4,738
|
chenhangjun/Spam_SMS_Classify
|
refs/heads/master
|
/UIMain.py
|
from tkinter import *
from Login import *
# Application entry point: show the login window first.
root = Tk()
root.title('登录')
Login(root)
# MainPage(root)  # debugging shortcut: skip login, open the main window directly
root.mainloop()
|
{"/NBTest.py": ["/NaiveBayes.py"], "/UIMain.py": ["/Login.py"], "/Login.py": ["/MainPage.py", "/SignUp.py"]}
|
4,739
|
chenhangjun/Spam_SMS_Classify
|
refs/heads/master
|
/MainPage.py
|
from tkinter import *
import json
import math
from zhon.hanzi import punctuation
import jieba
class MainPage(object):
    """Main spam-SMS classification window.

    Loads the word-frequency model from modeldict.json and classifies the
    text typed into the entry box with a Naive Bayes score.
    """
    def __init__(self, master=None):
        self.content = ''
        self.result = ''
        self.root = master  # keep a handle on the caller's window
        self.root = Tk(className='垃圾短信识别')  # window title
        self.root.geometry('%dx%d' % (600, 400))  # fixed window size
        self.root.resizable(0, 0)
        # Input text box
        self.text_1 = Text(self.root, width=62, height=12)
        # label 3 shows the classification result
        self.label_3 = Label(self.root)
        self.createPage()
        self.ham_map, self.spam_map = self.ReadModel('modeldict.json')
    def createPage(self):
        """Lay out the input box, result label and the classify button."""
        self.page = Frame(self.root)
        self.page.pack()
        Label(self.page).grid(row=0, stick=W)
        '''
        Label(self.page, text='输入短信').grid(row=1, column=1, stick=W)
        Entry(self.page, textvariable=self.content, width=70).grid(row=3, column=1)
        Label(self.page, text='判定结果为:').grid(row=5, stick=W, pady=10)
        Label(self.page, text=self.result).grid(row=5, stick=W, pady=10)
        Button(self.page, text='判定', command=self.Judge).grid(row=5, column=7, stick=W)
        '''
        label_1 = Label(self.root)
        label_1['text'] = '输入短信:'
        label_1.place(x=50, y=50)
        self.text_1.place(x=50, y=90)
        # label 2
        label_2 = Label(self.root)
        label_2['text'] = '判定结果为:'
        label_2.place(x=50, y=320)
        self.label_3.place(x=130, y=320)
        # button
        button = Button(self.root, text='判定', command=self.Judge)
        button.place(x=500, y=310)
    def Judge(self):
        """Clean/tokenize the input text, classify it and show the verdict."""
        import re  # fix: this module never imported re; re.sub raised NameError
        # Fetch the whole input box.
        sentence = self.text_1.get('0.0', 'end')
        # Same cleaning pipeline as TextProcess.py.
        stop_words = ['有', '和', '是', '在', '我', '了', '的']
        remove_chars = '[0-9’a-zA-Z!"#$%&\'()*\\\\+,-./:;<=>?@?★…‘’[\\]^_`{|}~(\s*)]+'
        newstr = re.sub(remove_chars, '', sentence)
        sentence = re.sub("[{}]+".format(punctuation), "", newstr)
        # Fix: filter stop words with a comprehension — the original removed
        # items from the list while iterating it, which skips elements.
        list_words = [w for w in jieba.lcut(sentence) if w not in stop_words]
        tag = self.Predict(list_words)
        # Update the result label.
        if tag == 1:
            self.result = '垃圾邮件'
        else:
            self.result = '正常邮件'
        self.label_3['text'] = self.result
    def Predict(self, text):
        """Return 1 (spam) or 0 (ham) for a list of tokens.

        Laplace-smoothed multinomial Naive Bayes in log space; spam wins
        when its score clears `para` times the combined score.
        """
        # Corpus statistics baked in from the training run
        # (NOTE(review): confirm they match the shipped modeldict.json).
        ham_words_count = 323632
        spam_words_count = 97890
        ham_count = 35978
        spam_count = 4022
        words_set_size = 62133
        para = 0.4743
        ham_probability = ham_count / (ham_count + spam_count)
        spam_probability = spam_count / (ham_count + spam_count)
        ham_pro = 0.0
        spam_pro = 0.0
        for word in text:
            ham_pro += math.log((self.ham_map.get(word, 0) + 1) / (ham_words_count + words_set_size))
            spam_pro += math.log((self.spam_map.get(word, 0) + 1) / (spam_words_count + words_set_size))
        ham_pro += math.log(ham_probability)
        spam_pro += math.log(spam_probability)
        # 1 = spam, 0 = ham, matching the dataset labels.
        tot = spam_pro + ham_pro
        threshold = tot * para
        if spam_pro >= threshold:
            return 1
        else:
            return 0
    def ReadModel(self, filename):
        """Load the trained ham/spam word-frequency maps from JSON."""
        with open(filename) as f:
            dictObj = json.load(f)
        ham_map = dictObj['ham']
        spam_map = dictObj['spam']
        return ham_map, spam_map
|
{"/NBTest.py": ["/NaiveBayes.py"], "/UIMain.py": ["/Login.py"], "/Login.py": ["/MainPage.py", "/SignUp.py"]}
|
4,740
|
chenhangjun/Spam_SMS_Classify
|
refs/heads/master
|
/Login.py
|
from tkinter import *
from tkinter.messagebox import *
from MainPage import *
from SignUp import *
import pymysql
class Login(object):
    """Login window shown at startup; authenticates against the USER table."""
    def __init__(self, master=None):
        # NOTE(review): hard-coded DB credentials; move them to configuration.
        self.db = pymysql.connect("localhost", "chenhangjun", "1030416518", "InfoDB", charset='utf8')
        self.cursor = self.db.cursor()
        self.root = master  # window supplied by the caller
        self.root.geometry('%dx%d' % (300, 180))  # fixed window size
        self.username = StringVar()
        self.password = StringVar()
        self.createPage()
    def createPage(self):
        """Lay out the account/password entries and the login/sign-up buttons."""
        self.page = Frame(self.root)
        self.page.pack()
        Label(self.page).grid(row=0, stick=W)
        Label(self.page, text='账号: ').grid(row=1, stick=W, pady=10)
        Entry(self.page, textvariable=self.username).grid(row=1, column=1, stick=E)
        Label(self.page, text='密码: ').grid(row=2, stick=W, pady=10)
        Entry(self.page, textvariable=self.password, show='*').grid(row=2, column=1, stick=E)
        Button(self.page, text='登录', command=self.loginCheck).grid(row=3, stick=W, pady=10)
        Button(self.page, text='注册', command=self.signUp).grid(row=3, column=1, stick=E)
    def loginCheck(self):
        """Validate the credentials; on success open MainPage and close this window."""
        name = self.username.get()
        pwd1 = self.password.get()
        if name == '':
            showinfo(title='错误', message='请输入账号!')
        elif pwd1 == '':
            showinfo(title='错误', message='请输入密码!')
        else:
            # Fix: parameterized query — the previous %-formatted SQL was
            # open to SQL injection through the user-supplied account name.
            sql = "SELECT PASSWORD FROM USER WHERE USER_NAME = %s"
            try:
                self.cursor.execute(sql, (name,))
                pwd2 = self.cursor.fetchall()  # tuple of rows, e.g. (("pwd",),)
                if pwd2 == ():
                    showinfo(title='错误', message='账号不存在!')
                elif pwd1 == pwd2[0][0]:
                    # NOTE(review): plain-text password comparison; should be
                    # hashed here and in SignUp.
                    self.db.close()
                    MainPage(self.root)
                    self.root.destroy()
                else:
                    showinfo(title='错误', message='密码错误!')
            except Exception:  # narrowed from a bare `except:`
                print("except")
                self.db.close()
    def signUp(self):
        """Open the registration window on top of this one."""
        SignUp(self.root)
|
{"/NBTest.py": ["/NaiveBayes.py"], "/UIMain.py": ["/Login.py"], "/Login.py": ["/MainPage.py", "/SignUp.py"]}
|
4,741
|
chenhangjun/Spam_SMS_Classify
|
refs/heads/master
|
/NaiveBayes.py
|
import math
import json
# 参考:https://www.cnblogs.com/liweiwei1419/p/9870956.html
class NaiveBayes:
    """Multinomial Naive Bayes spam/ham classifier with Laplace smoothing.

    Reference: https://www.cnblogs.com/liweiwei1419/p/9870956.html
    """
    def __init__(self):
        self.ham_count = 0          # number of ham messages seen
        self.spam_count = 0         # number of spam messages seen
        self.ham_words_count = 0    # total word occurrences in ham
        self.spam_words_count = 0   # total word occurrences in spam
        self.ham_words = list()     # every ham token, with repeats
        self.spam_words = list()    # every spam token, with repeats
        self.words_set = set()      # vocabulary across both classes
        self.words_set_size = 0
        self.ham_map = dict()       # per-word frequency in ham
        self.spam_map = dict()      # per-word frequency in spam
        # Class priors P(c)
        self.ham_probability = 0
        self.spam_probability = 0
        self.para = 0               # decision threshold coefficient
    def fit(self, X_train, Y_train, para):
        """Train on token lists X_train with labels Y_train (0=ham, 1=spam)."""
        self.build_words_set(X_train, Y_train)
        self.word_count()
        self.para = para
    def build_words_set(self, X_train, Y_train):
        """Accumulate per-class token lists, counts and the vocabulary."""
        for tokens, label in zip(X_train, Y_train):
            if label == 0:
                # ham message
                self.ham_count += 1
                self.ham_words_count += len(tokens)
                self.ham_words.extend(tokens)
                self.words_set.update(tokens)
            if label == 1:
                # spam message
                self.spam_count += 1
                self.spam_words_count += len(tokens)
                self.spam_words.extend(tokens)
                self.words_set.update(tokens)
        self.words_set_size = len(self.words_set)
    def word_count(self):
        """Build the per-class word-frequency maps and the class priors."""
        for token in self.ham_words:
            self.ham_map[token] = self.ham_map.get(token, 0) + 1
        for token in self.spam_words:
            self.spam_map[token] = self.spam_map.get(token, 0) + 1
        total = self.ham_count + self.spam_count
        self.ham_probability = self.ham_count / total
        self.spam_probability = self.spam_count / total
    def predict(self, sentence_words):
        """Return 1 (spam) or 0 (ham) for a tokenized message.

        Laplace smoothing: P(word|c) = (count(word,c) + 1) /
        (total words in c + vocabulary size), accumulated in log space.
        """
        ham_denom = self.ham_words_count + self.words_set_size
        spam_denom = self.spam_words_count + self.words_set_size
        ham_score = 0.0
        spam_score = 0.0
        for token in sentence_words:
            ham_score += math.log((self.ham_map.get(token, 0) + 1) / ham_denom)
            spam_score += math.log((self.spam_map.get(token, 0) + 1) / spam_denom)
        ham_score += math.log(self.ham_probability)
        spam_score += math.log(self.spam_probability)
        # Spam wins when its score clears `para` times the combined score
        # (both scores are negative log-probabilities).
        threshold = (spam_score + ham_score) * self.para
        return 1 if spam_score >= threshold else 0
    def save(self):
        """Dump both frequency maps to modeldict.json."""
        payload = json.dumps({'ham': self.ham_map, 'spam': self.spam_map})
        with open('modeldict.json', 'w') as fh:
            fh.write(payload)
|
{"/NBTest.py": ["/NaiveBayes.py"], "/UIMain.py": ["/Login.py"], "/Login.py": ["/MainPage.py", "/SignUp.py"]}
|
4,743
|
gboquizo/hackathon_optimizacion_entregas_material
|
refs/heads/master
|
/app/modules/donor.py
|
"""Routes for donor."""
from app import csrf
from app.models import Donor, Product, StockDonor
from flask import Blueprint, request
from flask_login import login_required
from flask import jsonify
# Blueprint Configuration
donor_bp = Blueprint('donor', __name__)
@donor_bp.route('/api/v1/donor', methods=['GET'])
def get_donors():
    """Return every registered donor as JSON."""
    payload = [entry.json() for entry in Donor.query.all()]
    return jsonify({'donors': payload})
@donor_bp.route('/api/v1/donor/stock', methods=['GET'])
def get_stocks():
    """Return the serialized stock entries of every donor."""
    payload = []
    for entry in StockDonor.query.all():
        payload.append(entry.serialize)
    return jsonify({'stocks': payload})
@csrf.exempt
@donor_bp.route('/api/v1/donor/<id>/stock/', methods=['POST'])
def create_donor_stock(id):
    """Create a stock entry for donor `id`.

    Expects a JSON body with `product_id` and `quantity`. Responds 404
    when either the donor or the product does not exist.
    """
    json = request.get_json(force=True)
    donor = Donor.query.filter_by(id=id).first()
    product = Product.query.filter_by(id=json['product_id']).first()
    if donor is None:
        return jsonify({'message': 'Donor does not exists'}), 404
    if product is None:
        # Fix: error message typo ("exits" -> "exists").
        return jsonify({'message': 'Product does not exists'}), 404
    # Fix: use the validated donor as the owner. The previous code created
    # the stock for json['donor_id'], which was never validated and could
    # differ from the donor checked above.
    stock = StockDonor.create(donor.id, json['product_id'], json['quantity'])
    return jsonify({'stock': stock.serialize})
@donor_bp.route('/api/v1/donor/<id>/stock', methods=['GET'])
def get_donor_stock(id):
    """Return all serialized stock entries belonging to donor `id`."""
    entries = StockDonor.query.filter_by(donor_id=id).all()
    return jsonify({'donor_stock': [entry.serialize for entry in entries]})
@csrf.exempt
@donor_bp.route('/api/v1/donor/stock/<id>', methods=['PUT'])
def update_stock(id):
    """Update the quantity of stock entry `id` from the JSON body."""
    stock = StockDonor.query.filter_by(id=id).first()
    # Fix: guard against a missing row (consistent with delete_stock);
    # the previous code raised AttributeError on an unknown id.
    if stock is None:
        return jsonify({'message': 'Stock does not exists'}), 404
    json = request.get_json(force=True)
    stock.quantity = json['quantity']
    stock.update()
    return jsonify({'stock': stock.serialize})
@csrf.exempt
@donor_bp.route('/api/v1/donor/stock/<id>', methods=['DELETE'])
def delete_stock(id):
    """Delete stock entry `id`; respond 404 when no such entry exists."""
    entry = StockDonor.query.filter_by(id=id).first()
    if entry is not None:
        entry.delete()
        return jsonify({'stock': entry.serialize})
    return jsonify({'message': 'Stock does not exists'}), 404
@donor_bp.route('/register', methods=["POST"])
def register():
    """
    # Register user
    call auth method
    register_form = RegisterForm(request.form)
    if request.method == 'POST' and register_form.validate_on_submit():
        existing_user = User.query.filter_by(email=register_form.email.data).first()
        if existing_user is None:
            user = User(
                email=request.form.get('email'),
                password=request.form.get('password'),
                username=request.form.get('username')
            )
            db.session.add(user)
            db.session.commit()
            login_user(user)
            return redirect(url_for('manager.index'))
        flash('A user already exists with that email address')
        return redirect(url_for('auth.register'))
    """
    # NOTE(review): this view references AddressSchema, DonorSchema, ma, db
    # and Address, none of which are imported in this module — it raises
    # NameError when called. They live in app.schemas / app.models / app.
    json_data = request.get_json()
    if not json_data:
        return {"message": "No input data provided"}, 400
    # Register address
    # Validate and deserialize input
    # NOTE(review): AddressSchema(json_data) constructs a schema object; it
    # does not validate/deserialize. Marshmallow expects
    # AddressSchema().load(json_data) here — confirm intended usage.
    try:
        address_data = AddressSchema(json_data)
    except ma.ValidationError as err:
        print(err.messages)
        return err.messages, 422
    address = Address(address_data)
    db.session.add(address)
    db.session.commit()
    # Re-read the committed row to serialize its generated id.
    id_address = AddressSchema().dump(Address.query.get(address.id))
    # register donor
    # Validate and deserialize input
    try:
        donor_data = DonorSchema(json_data)
        donor_data['address'] = id_address
    except ma.ValidationError as err:
        print(err.messages)
        return err.messages, 422
    donor = Donor(donor_data)
    db.session.add(donor)
    db.session.commit()
    id_donor = DonorSchema().dump(donor.query.get(donor.id))
    return {"message": "Donor user registered.", "id": id_donor}, 200
|
{"/app/modules/donor.py": ["/app/__init__.py", "/app/models.py"], "/app/schemas.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/app/modules/auth.py", "/app/modules/admin.py", "/app/modules/dealer.py", "/app/modules/donor.py"], "/app/models.py": ["/app/__init__.py"], "/app/modules/admin.py": ["/app/models.py"], "/app/modules/dealer.py": ["/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/modules/auth.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"]}
|
4,744
|
gboquizo/hackathon_optimizacion_entregas_material
|
refs/heads/master
|
/app/config.py
|
# app/config.py
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig(object):
    """Base configuration shared by every environment."""
    APP_NAME = os.getenv('APP_NAME', 'Delia')
    BCRYPT_LOG_ROUNDS = 4  # cheap hashing outside production
    DEBUG_TB_ENABLED = False
    SECRET_KEY = os.getenv('SECRET_KEY', 'secret_key')  # NOTE(review): weak fallback; set via env in production
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    WTF_CSRF_ENABLED = False
class DevelopmentConfig(BaseConfig):
    """Development: debug toolbar enabled, local SQLite file database."""
    DEBUG_TB_ENABLED = True
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(
        os.path.join(basedir, 'dev.sqlite')
    )
class TestingConfig(BaseConfig):
    """Testing: in-memory SQLite, exceptions propagate to the test client."""
    PRESERVE_CONTEXT_ON_EXCEPTION = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///'  # in-memory database
    TESTING = True
class ProductionConfig(BaseConfig):
    """Production: local PostgreSQL backend, strong hashing, CSRF enabled."""
    DB_NAME = os.getenv('PSQL_DB_NAME', 'example')
    DB_USER = os.getenv('PSQL_DB_USER', 'postgres')
    DB_PASSWD = os.getenv('PSQL_DB_PASSWD', '')
    BCRYPT_LOG_ROUNDS = 13  # full-strength hashing cost
    SQLALCHEMY_DATABASE_URI = 'postgresql://{0}:{1}@localhost/{2}'.format(DB_USER, DB_PASSWD, DB_NAME)
    WTF_CSRF_ENABLED = True
|
{"/app/modules/donor.py": ["/app/__init__.py", "/app/models.py"], "/app/schemas.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/app/modules/auth.py", "/app/modules/admin.py", "/app/modules/dealer.py", "/app/modules/donor.py"], "/app/models.py": ["/app/__init__.py"], "/app/modules/admin.py": ["/app/models.py"], "/app/modules/dealer.py": ["/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/modules/auth.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"]}
|
4,745
|
gboquizo/hackathon_optimizacion_entregas_material
|
refs/heads/master
|
/app/schemas.py
|
"""
Map a database model to json data
Schemas are equivalent to Django serializers.
"""
from app import ma
from .models import *
class DonorSchema(ma.Schema):
    """Serialize Donor rows (user id, address id, updated_at)."""
    class Meta:
        model = Donor
        sqla_session = db.session
        fields = ('user', 'address', 'updated_at')
class AddressSchema(ma.Schema):
    """Serialize Address rows for the JSON API."""
    class Meta:
        # Fix: this schema previously pointed at the Donor model even
        # though it serializes Address fields (Address comes in via the
        # `from .models import *` at the top of this module).
        model = Address
        sqla_session = db.session
        fields = ('state', 'city', 'postal_code', 'street', 'number', 'extra_details_address')
|
{"/app/modules/donor.py": ["/app/__init__.py", "/app/models.py"], "/app/schemas.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/app/modules/auth.py", "/app/modules/admin.py", "/app/modules/dealer.py", "/app/modules/donor.py"], "/app/models.py": ["/app/__init__.py"], "/app/modules/admin.py": ["/app/models.py"], "/app/modules/dealer.py": ["/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/modules/auth.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"]}
|
4,746
|
gboquizo/hackathon_optimizacion_entregas_material
|
refs/heads/master
|
/app/__init__.py
|
# app/__init__.py
import os
from flask import Flask, render_template
from flask_debugtoolbar import DebugToolbarExtension
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_wtf import CSRFProtect
from flask_marshmallow import Marshmallow
# Instantiate the extensions unbound; each is bound to the app in create_app().
db = SQLAlchemy()
ma = Marshmallow()
csrf = CSRFProtect()
login_manager = LoginManager()
migrate = Migrate()
toolbar = DebugToolbarExtension()
def create_app():
    """Application factory: configure the Flask app, bind the extensions,
    register blueprints and error handlers, and create missing tables."""
    app = Flask(__name__)
    # Config class is selected via APP_SETTINGS (development by default).
    app_settings = os.getenv('APP_SETTINGS', 'app.config.DevelopmentConfig')
    app.config.from_object(app_settings)
    # Bind the module-level extension instances to this app.
    login_manager.init_app(app)
    db.init_app(app)
    ma.init_app(app)
    csrf.init_app(app)
    toolbar.init_app(app)
    migrate.init_app(app, db)
    with app.app_context():
        # Blueprints are imported here (not at module top) to avoid
        # circular imports with modules that import from this package.
        from app.modules.auth import auth_bp
        from app.modules.admin import admin_bp
        from app.modules.dealer import dealer_bp
        from app.modules.donor import donor_bp
        app.register_blueprint(auth_bp)
        app.register_blueprint(admin_bp)
        app.register_blueprint(dealer_bp)
        app.register_blueprint(donor_bp)
        # Create any tables missing from the bound database.
        db.create_all()
    # Error handlers
    @app.errorhandler(403)
    def forbidden_page(error):
        return render_template('errors/403.html'), 403
    @app.errorhandler(404)
    def page_not_found(error):
        return render_template('errors/404.html'), 404
    @app.errorhandler(500)
    def server_error_page(error):
        return render_template('errors/500.html'), 500
    # Make `app` and `db` available in the `flask shell` REPL.
    @app.shell_context_processor
    def ctx():
        return {'app': app, 'db': db}
    return app
|
{"/app/modules/donor.py": ["/app/__init__.py", "/app/models.py"], "/app/schemas.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/app/modules/auth.py", "/app/modules/admin.py", "/app/modules/dealer.py", "/app/modules/donor.py"], "/app/models.py": ["/app/__init__.py"], "/app/modules/admin.py": ["/app/models.py"], "/app/modules/dealer.py": ["/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/modules/auth.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"]}
|
4,747
|
gboquizo/hackathon_optimizacion_entregas_material
|
refs/heads/master
|
/app/routes.py
|
import os
from flask import render_template
@app.route('/')
@app.route('/index')
def index():
    # NOTE(review): `app` is neither imported nor defined in this module,
    # so these decorators raise NameError at import time. The rest of the
    # project uses blueprints; this legacy route likely needs one too.
    print(os.getenv('APP_LOCALE'))
    user = {'username': 'Germán'}
    # Placeholder fixture data rendered by the template.
    files = [
        {
            'properties': {'hash': '1234123412341234'},
            'name': 'try.txt'
        },
        {
            'properties': {'hash': '1234123412341234'},
            'name': 'try2.txt'
        }
    ]
    return render_template('index.html', title='Index', user=user, files=files)
|
{"/app/modules/donor.py": ["/app/__init__.py", "/app/models.py"], "/app/schemas.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/app/modules/auth.py", "/app/modules/admin.py", "/app/modules/dealer.py", "/app/modules/donor.py"], "/app/models.py": ["/app/__init__.py"], "/app/modules/admin.py": ["/app/models.py"], "/app/modules/dealer.py": ["/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/modules/auth.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"]}
|
4,748
|
gboquizo/hackathon_optimizacion_entregas_material
|
refs/heads/master
|
/app/models.py
|
# app/models.py
import os
import datetime
import hashlib
import humanfriendly
from werkzeug import generate_password_hash, check_password_hash
from flask_login import UserMixin
from app import db
from sqlalchemy import CheckConstraint
class User(UserMixin, db.Model):
    """Application account; UserMixin supplies the flask-login API."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(50), unique=True)
    email = db.Column(db.String(255), unique=True)
    username = db.Column(db.String(255), unique=True, nullable=True)
    password = db.Column(db.String(255), nullable=False)  # NOTE(review): hashing not visible here — verify caller hashes before storing
    token = db.Column(db.String(255))
    admin = db.Column(db.Boolean, nullable=False, default=False)
    status = db.Column(db.Boolean, nullable=False, default= 1)  # truthy default; presumably "active" — confirm
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class Address(db.Model):
    """Postal address referenced by Donor, Applicant and Dealer rows."""
    __tablename__ = 'address'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    state = db.Column(db.String(100))
    city = db.Column(db.String(100))
    postal_code = db.Column(db.String(100))
    street = db.Column(db.String(100))
    number = db.Column(db.String(10))
    extra_details_address = db.Column(db.String(255))
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class Donor(db.Model):
    """Donor role: links a user account to a pickup address."""
    __tablename__ = 'donor'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    address = db.Column(db.Integer, db.ForeignKey('address.id'), nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class Applicant(db.Model):
    """Applicant role: a user requesting material, with a delivery address."""
    __tablename__ = 'applicant'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    address = db.Column(db.Integer, db.ForeignKey('address.id'), nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class Dealer(db.Model):
    """Dealer/courier role: a user who transports packages, with a base address."""
    __tablename__ = 'dealer'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    address = db.Column(db.Integer, db.ForeignKey('address.id'), nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class ProductType(db.Model):
    """Category of donatable product, referenced by ``Product``."""
    # Fix: this was misspelled ``tablename`` (missing the dunder underscores),
    # so SQLAlchemy would auto-derive the table name instead of using
    # 'product_type', and the ForeignKey('product_type.id') declared in
    # ``Product`` would fail to resolve.
    __tablename__ = 'product_type'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(50), nullable=False)
    description = db.Column(db.String(100))
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class Product(db.Model):
    """Concrete product item belonging to a ``ProductType``."""
    __tablename__ = 'product'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    product_type_id = db.Column(db.Integer, db.ForeignKey('product_type.id'), nullable=False)
    description = db.Column(db.String(100), nullable=False)
    # URL/path to an illustrative image; optional
    image_url = db.Column(db.String(100))
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class StockDonor(db.Model):
    """Inventory row: how many units of a product a donor has available."""
    __tablename__ = 'stock_donor'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    product_id = db.Column(db.Integer, db.ForeignKey('product.id'), nullable=False)
    donor_id = db.Column(db.Integer, db.ForeignKey('donor.id'), nullable=False)
    quantity = db.Column(db.Integer)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)

    @property
    def serialize(self):
        """Return a plain dict of this row, suitable for JSON responses."""
        return {
            'id': self.id,
            'product_id': self.product_id,
            'donor_id': self.donor_id,
            'quantity': self.quantity,
            'created_at': self.created_at,
            'updated_at': self.updated_at,
        }

    @classmethod
    def create(cls, product_id, donor_id, quantity):
        """Create, persist and return a new stock row.

        Fix: a classmethod's first parameter is the class itself (``cls``),
        not ``self``; construct through ``cls`` so subclasses also work.
        """
        new_stock = cls(product_id=product_id, donor_id=donor_id, quantity=quantity)
        new_stock.save()
        return new_stock

    def update(self):
        """Touch ``updated_at`` and persist the change."""
        self.updated_at = datetime.datetime.now()
        self.save()

    def save(self):
        """Add this row to the session and commit."""
        db.session.add(self)
        db.session.commit()

    def delete(self):
        """Delete this row and commit."""
        db.session.delete(self)
        db.session.commit()
class RequestApplicant(db.Model):
    """Material request: an applicant asking for a quantity of a product."""
    __tablename__ = 'request_applicant'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    product_id = db.Column(db.Integer, db.ForeignKey('product.id'), nullable=False)
    applicant_id = db.Column(db.Integer, db.ForeignKey('applicant.id'), nullable=False)
    # NOTE(review): "quantitiy" is a typo for "quantity", but it names both the
    # DB column and the Python attribute — renaming it would break existing
    # callers and the schema, so it is left as-is and only flagged here.
    quantitiy = db.Column(db.Integer)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class Journey(db.Model):
    """Delivery run made by a dealer, from a start to an end coordinate."""
    __tablename__ = 'journey'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    dealer_id = db.Column(db.Integer, db.ForeignKey('dealer.id'), nullable=False)
    initial_lat = db.Column(db.Float)
    initial_long = db.Column(db.Float)
    final_lat = db.Column(db.Float)
    final_long = db.Column(db.Float)
    # aggregate rating for the journey; presumably averaged from package
    # valorations — TODO confirm against the code that writes it
    valoration = db.Column(db.Float)
    # constrained to 1/2/3; the meaning of each state is not defined in this
    # file — NOTE(review): document the state machine where it is set
    status = db.Column(db.Integer, CheckConstraint('status IN (1, 2, 3)'))
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class Package(db.Model):
    """A parcel carried on a journey from one donor to one applicant."""
    __tablename__ = 'package'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    journey_id = db.Column(db.Integer, db.ForeignKey('journey.id'), nullable=False)
    donor_id = db.Column(db.Integer, db.ForeignKey('donor.id'), nullable=False)
    applicant_id = db.Column(db.Integer, db.ForeignKey('applicant.id'), nullable=False)
    # timestamps set when the dealer picks up / delivers the package
    ts_pickup = db.Column(db.DateTime, nullable=True)
    ts_delivery = db.Column(db.DateTime, nullable=True)
    # integer state code, starts at 0; values not constrained or documented
    # here — NOTE(review): define the allowed states where they are written
    status = db.Column(db.Integer, default = 0)
    package_valoration = db.Column(db.Float, nullable=True)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
class PackageContent(db.Model):
    """Line item of a package: one product and the quantity included."""
    __tablename__ = 'package_content'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Fix: this FK carried a copy-pasted ``autoincrement=True`` (meaningless on
    # a non-primary-key column); every sibling FK in this module declares
    # ``nullable=False`` instead, which is what was clearly intended here.
    package_id = db.Column(db.Integer, db.ForeignKey('package.id'), nullable=False)
    product_id = db.Column(db.Integer, db.ForeignKey('product.id'), nullable=False)
    quantity = db.Column(db.Integer, default=0)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, nullable=True)
|
{"/app/modules/donor.py": ["/app/__init__.py", "/app/models.py"], "/app/schemas.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/app/modules/auth.py", "/app/modules/admin.py", "/app/modules/dealer.py", "/app/modules/donor.py"], "/app/models.py": ["/app/__init__.py"], "/app/modules/admin.py": ["/app/models.py"], "/app/modules/dealer.py": ["/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/modules/auth.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"]}
|
4,749
|
gboquizo/hackathon_optimizacion_entregas_material
|
refs/heads/master
|
/app/modules/admin.py
|
# Blueprint Configuration
from flask import Blueprint, render_template
from flask_login import current_user
from werkzeug.exceptions import abort
from app.models import User
admin_bp = Blueprint('admin', __name__)
@admin_bp.route('/admin-panel')
def index():
    """Render the admin panel with the full user list (admins only).

    Fix: the ``User`` model defines a boolean ``admin`` column and no
    ``is_admin()`` method, so the original ``current_user.is_admin()`` call
    would raise ``AttributeError``.  An anonymous ``current_user`` has no
    ``admin`` attribute either, hence the ``is_authenticated`` guard first.
    """
    if not current_user.is_authenticated or not current_user.admin:
        abort(403)  # forbidden for anonymous and non-admin users
    users = User.query.all()
    return render_template('admin/index.html', users=users)
|
{"/app/modules/donor.py": ["/app/__init__.py", "/app/models.py"], "/app/schemas.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/app/modules/auth.py", "/app/modules/admin.py", "/app/modules/dealer.py", "/app/modules/donor.py"], "/app/models.py": ["/app/__init__.py"], "/app/modules/admin.py": ["/app/models.py"], "/app/modules/dealer.py": ["/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/modules/auth.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.