index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
76,525 | asmallleaf/DoorAppTest | refs/heads/master | /app/route/homeapi/sethome.py | from flask import request,jsonify
from app_2_0_0.app.support import dbSupportTool,verifySupportTool
from app_2_0_0.app.support.verifySupportTool import Type
from app_2_0_0.app.route.homeapi.blueprint import homeapi
@homeapi.route('/sethome',methods=['POST','GET'])
def sethome():
    # Bind a user to a home/room: authenticate the token, validate the home
    # request, then copy the room number onto the user row and persist both.
    verify = verifySupportTool.VerifySupportToolFactory().build()
    database = dbSupportTool.dbSupportToolFactory().build()
    verify.setDbSupport(database).setRequest(value=request)
    if verify.verifyToken() is False:
        # NOTE(review): token failures report Type.home here while the user
        # routes report Type.user — confirm which error payload is intended.
        return jsonify(verify.getError(Type.home)),404
    # verifyToken cached the matched user row on the db support object
    userRaw = database.getRaw()
    if verify.verifyHome() is False:
        return jsonify(verify.getError(Type.home)),404
    # verifyHome cached the (updated) room row; persist it first
    roomRaw = database.getRaw()
    database.load()
    userRaw.roomNumber = roomRaw.roomNumber
    database.setRaw(userRaw).load()
    return jsonify(verify.getSuccess(Type.home)),200 | {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,526 | asmallleaf/DoorAppTest | refs/heads/master | /app/support/verifySupportTool.py | from app_2_0_0.app.verify.userVerify import UserVerifyFactory
from app_2_0_0.app.verify.homeVerify import HomeVerifyFactory
from app_2_0_0.app.toolbox.basetool import SupportToolFactory,SupportTool
from app_2_0_0.app.config.config import Test_Config
from app_2_0_0.app.toolbox.iotool import IOtool
from enum import Enum
class Type(Enum):
    # selects which verifier's error/success payload the support tool returns
    user = 0
    home = 1
class VerifySupportToolFactory(SupportToolFactory):
    def build(self):
        # Factory hook: produce a fresh verification support tool per request.
        return VerifySupportTool()
class VerifySupportTool(SupportTool):
    """Request-level verification facade.

    Bridges the Flask request, the user/home verify layers and the db
    support tool; route handlers use it to authenticate and validate input.
    """
    def __init__(self):
        self.verify = UserVerifyFactory().build()  # user-level verifier
        self.home = HomeVerifyFactory.build()      # home-level verifier (build is a classmethod)
        self.request = None    # current Flask request, injected via setRequest
        self.dbSupport = None  # db support tool, injected via setDbSupport
        self.token = None      # token produced by verifyLogin
        self.name = None       # user name extracted by verifyToken
        self.KEY = Test_Config().TOKEN  # secret used to sign/verify tokens
def setDbSupport(self,value):
    # inject the database support tool (fluent)
    self.dbSupport = value
    return self
def setRequest(self,value):
    # inject the current Flask request (fluent)
    self.request = value
    return self
def getError(self,type):
    # return the error payload of the verifier selected by *type*
    if type == Type.user:
        return self.verify.getError()
    elif type == Type.home:
        return self.home.getError()
def getSuccess(self,type):
    # return the success payload of the verifier selected by *type*
    if type == Type.user:
        return self.verify.getSuccess()
    elif type == Type.home:
        return self.home.getSuccess()
def verifyLogin(self):
    """Validate login credentials from the request and issue a token.

    Reads userName/password from form (POST) or query string (GET), runs
    field validation, then checks the password hash against each matching
    user row. On success bumps the row's tokenVersion, caches the row on
    the db support tool and stores a fresh token on self.token.
    Returns True on success, False otherwise.
    """
    name = None
    password = None
    if self.request.method == 'POST':
        name = self.request.form.get('userName')
        password = self.request.form.get('password')
    elif self.request.method == 'GET':
        name = self.request.args.get('userName')
        password = self.request.args.get('password')
    self.verify.setName(name).setPasswd(password)
    self.verify.verifyName()
    self.verify.verifyPasswd()
    if self.verify.ifError():
        return False
    raws = self.dbSupport.findUser(name)
    if raws is None:
        self.verify.raiseVerifyError('LoginFailed','LoginFailed')
        return False
    for raw in raws:
        if self.verify.verifyLogin(raw.password):
            # rotate the token version so previously issued tokens expire
            raw.tokenVersion = self.verify.updateVersion(raw.tokenVersion)
            self.dbSupport.setRaw(raw)
            self.token = self.generateToken(name,raw.tokenVersion)
            self.verify.raiseSuccessInf('success','Login successfully')
            return True
    return False
def verifySignin(self):
    """Validate a registration request and stage a new user row.

    Checks userName/password/password2, rejects duplicate names, then
    stages the user via the db support tool (the row still has to be
    persisted by the caller with load()). Returns True/False.
    """
    name = None
    password = None
    password2 = None
    if self.request.method == 'POST':
        name = self.request.form.get('userName')
        password = self.request.form.get('password')
        password2 = self.request.form.get('password2')
    elif self.request.method == 'GET':
        name = self.request.args.get('userName')
        password = self.request.args.get('password')
        password2 = self.request.args.get('password2')
    self.verify.setName(name).setPasswd(password).setPasswd2(password2)
    self.verify.verifySignIn()
    if self.verify.ifError():
        return False
    raws = self.dbSupport.findUser(name)
    if raws is not None:
        self.verify.raiseVerifyError('repeatedName','repeatedName')
        return False
    self.dbSupport.newUser(name,password)
    self.verify.raiseSuccessInf('success','signin successfully')
    self.verify.raiseSuccessInf('key',str(self.dbSupport.raw.keyPasswd))
    return True
def verifyToken(self):
    """Authenticate the request via its 'token' field.

    Pulls the token from form (POST) or query string (GET), decodes it
    with self.KEY, and checks the embedded version claim against the
    user's stored tokenVersion. On success the matched row is cached on
    the db support object and self.name is set.
    Returns True on success, False otherwise.
    """
    token = None
    if self.request.method == 'POST':
        token = self.request.form.get('token')
    elif self.request.method == 'GET':
        token = self.request.args.get('token')
    if token is not None:
        data = self.verify.checkToken(token, self.KEY)
        if data is None:
            return False
        raws = self.dbSupport.findUser(data['userName'])
        if raws is not None:
            for raw in raws:
                # compare the token's version claim with the stored one
                self.verify.verifyAccuracyWithToken(data,raw.tokenVersion,'version')
                if self.verify.ifError():
                    return False
                self.dbSupport.setRaw(raw)
                self.name = data['userName']
                return True
        self.verify.raiseVerifyError('tokenUnmatched','tokenUnmatched')
    else:
        # BUG FIX: the recorded error argument was misspelled 'toeknEmpty'
        self.verify.raiseVerifyError('tokenEmpty','tokenEmpty')
    return False
def verifyHome(self):
    """Handle the set-home request: join a home, or update its max user count.

    Reads 'maxUserNum' and 'homeNum' from the request. When only homeNum
    is valid, the user joins the room (occupancy incremented); otherwise
    the max-user branch runs. Returns True on success, False on failure.
    """
    maxUserNum = None
    homeNum = None
    if self.request.method == 'POST':
        maxUserNum = self.request.form.get('maxUserNum')
        homeNum = self.request.form.get('homeNum')
    elif self.request.method == 'GET':
        maxUserNum = self.request.args.get('maxUserNum')
        homeNum = self.request.args.get('homeNum')
    self.home.setHomeNum(homeNum).setMaxUserNum(maxUserNum)
    homeResult = self.home.verifyHomeNum()
    maxResult = self.home.verifyMaxNum()
    # NOTE(review): this rejects only when BOTH fields are invalid; a single
    # invalid field falls through — confirm that is intended.
    if homeResult is None and maxResult is None:
        return False
    if maxResult is None:
        raw = self.dbSupport.findHome(homeNum)
        if raw is None:
            return False
        self.home.setUserNum(raw.numOfUsers)
        self.home.setMaxUserNum(raw.maxNumOfUsers)
        if self.home.verifyHome() is None:
            return False
        raw.numOfUsers += 1
        self.dbSupport.setRaw(raw)
        self.verify.raiseSuccessInf('success','set successfully')
    else:
        # NOTE(review): findUser is given self.home (a HomeVerify object),
        # not a user name — looks like a bug; probably self.name was meant.
        raws = self.dbSupport.findUser(self.home)
        for raw in raws:
            # NOTE(review): the users model defines 'roomNumber', not
            # 'roomNum' — this attribute access looks wrong; confirm.
            homeRaw = self.dbSupport.findHome(raw.roomNum)
            # NOTE(review): this sets an ad-hoc 'maxResult' attribute with a
            # boolean, not maxNumOfUsers with the requested number — verify.
            homeRaw.maxResult = maxResult
            self.dbSupport.setRaw(homeRaw)
        self.verify.raiseSuccessInf('success','set successfully')
    return True
def verifyNewHome(self):
    """Validate a request to register a new home.

    Requires a valid 8-digit homeNum and a boardCode that exists in the
    boards table, and that no home with that number exists yet.
    Returns True/False; failures are recorded on the home verifier.
    """
    homeNum = None
    boardCode = None
    if self.request.method == 'POST':
        homeNum = self.request.form.get('homeNum')
        boardCode = self.request.form.get('boardCode')
    elif self.request.method == 'GET':
        homeNum = self.request.args.get('homeNum')
        boardCode = self.request.args.get('boardCode')
    self.home.setHomeNum(homeNum).setBoardCode(boardCode)
    self.home.verifyHomeNum()
    if self.home.ifError() is True:
        return False
    self.home.verifyBoard()
    if self.home.ifError() is True:
        return False
    #hashCode = IOtool.toHash(boardCode)
    raw = self.dbSupport.findBoard(boardCode)
    if raw is None:
        self.home.raiseVerifyError('BoardError','BoardError')
        return False
    raw = self.dbSupport.findHome(homeNum)
    if raw is not None:
        self.home.raiseVerifyError('homeError','homeError')
        return False
    self.home.raiseSuccessInf('success','new home')
    return True
def generateToken(self,name,version):
    # embed the user name and token version; signed with self.KEY, 24h expiry
    body = {'userName': name, 'version': version}
    return self.verify.generateToken(self.KEY, 86400, body) | {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,527 | asmallleaf/DoorAppTest | refs/heads/master | /app/toolbox/iotool.py | from .basetool import BaseTool
from json import JSONEncoder
from passlib.apps import custom_app_context as pwd_context
import datetime
# IOtool is a class designed for Input or Output stream.
# some format conversion methods are provided
class IOtool(BaseTool):
    """Toolbox for input/output format conversion (model-to-dict, hashing)."""
    def _help(self):
        print('this is a toolbox for IO problem')
        print('Till now, has fnc toJson, toHash,class JsonEncoder')
    def _helpfor(self,fnc_name):
        if fnc_name == 'toJson':
            print('this is developed for sqlalchemy, the object should be ')
            print('model class from sqlalchemy')
        elif fnc_name == 'JsonEncoder':
            print('this is a class inherit from JSONEncoder, please inherit it if needed')
            print('has defined the encoder way of datetime class')
        elif fnc_name == 'toHash':
            print('this is a function to translate passwd to Hash')
    @classmethod
    def toJson(cls, obj):
        """Return a jsonify-able dict copy of a SQLAlchemy model instance.

        BUG FIX: the original deleted '_sa_instance_state' from the live
        object's __dict__, corrupting the session-tracked instance (and
        shadowed the builtin name ``dict``). Work on a shallow copy so the
        model instance is left untouched.
        """
        data = dict(obj.__dict__)  # copy: never mutate the model instance
        data.pop('_sa_instance_state', None)
        return data
    @classmethod
    def toHash(cls, passwd):
        """Hash *passwd* with passlib's custom_app_context.

        NOTE(review): pwd_context.encrypt is a deprecated alias of .hash
        in passlib >= 1.7 — consider migrating.
        """
        return pwd_context.encrypt(passwd)
# this is a class that should be collected in IOtool but also can be an isolated class
# it is used to customized the JsonEncode in jsonify package
# the transfer of time stamp is converted into time string
class JsonEncoder(JSONEncoder):
    """JSON encoder that serializes datetime/date values as formatted strings."""
    def default(self, obj):
        # timestamps become 'YYYY-MM-DD HH:MM:SS', dates 'YYYY-MM-DD';
        # anything else defers to the base encoder (which raises TypeError)
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        if isinstance(obj, datetime.date):
            return obj.strftime('%Y-%m-%d')
        return JSONEncoder.default(self, obj)
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,528 | asmallleaf/DoorAppTest | refs/heads/master | /app/route/usrapi/blueprint.py | from flask import Blueprint
# the blueprint of usrapi is generated
# the view functions should import this blueprint in series
usrapi = Blueprint("usrapi",__name__)
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,529 | asmallleaf/DoorAppTest | refs/heads/master | /app/support/dbSupportTool.py | from app_2_0_0.app.toolbox.basetool import SupportTool,SupportToolFactory
from app_2_0_0.app.toolbox.dbtool import DBTool
from app_2_0_0.app.toolbox.iotool import IOtool
from app_2_0_0.app.database.models import db,users,rooms,boards
class dbSupportToolFactory(SupportToolFactory):
    def build(self):
        # Factory hook: produce a db support tool bound to the global db.
        return dbSupportTool()
class dbSupportTool(SupportTool):
    """Database access facade used by the route/verify layers.

    Wraps common queries on the users/rooms/boards models and keeps the
    most recently fetched or staged row in self.raw.
    """
    def __init__(self):
        self.db = db     # flask-sqlalchemy handle (module-level default)
        self.raw = None  # last fetched/staged model row
    def setDatabase(self,value):
        # replace the database handle (fluent)
        self.db = value
        return self
    def getDatabase(self):
        return self.db
def findUser(self,name):
    """Return the list of user rows named *name*, or None when none match."""
    matches = self.db.session.query(users).filter(users.userName == name).all()
    return matches or None
def findHome(self,homeNum):
    """Return the rooms row whose roomNumber equals *homeNum*, or None.

    Query.first() already yields None when nothing matches, so the
    original explicit None-branch was redundant.
    """
    return self.db.session.query(rooms).filter(rooms.roomNumber == homeNum).first()
def findBoard(self,code):
    """Return the boards row whose codes field equals *code*, or None.

    Query.first() already yields None when nothing matches, so the
    original explicit None-branch was redundant.
    """
    return self.db.session.query(boards).filter(boards.codes == code).first()
def newUser(self,name,password):
    # Stage a new user row: hashed password, fresh unique key, version 0.
    # The row is only persisted when load() is called.
    key = self.generateKey()
    hashPasswd = IOtool.toHash(password)
    self.raw = users(userName=name,password=hashPasswd,keyPasswd=key,tokenVersion=0)
    return self
def newRoom(self,roomNum,max,number):
    # Stage a new room row (door closed, given capacity and occupancy).
    self.raw = rooms(doorState=False,maxNumOfUsers=max,roomNumber=roomNum,numOfUsers=number)
    return self
def generateKey(self):
    """Draw random 8-digit keys until one is not already used as a keyPasswd."""
    while True:
        candidate = DBTool.generate_randnum(8)
        clash = self.db.session.query(users).filter(users.keyPasswd == candidate).first()
        if not clash:
            return candidate
def setRaw(self,value):
    # stage *value* as the current row (fluent)
    self.raw = value
    return self
def getRaw(self):
    # return the row cached by the last find*/new*/setRaw call
    return self.raw
def load(self):
    # persist the staged row via the DB toolbox (fluent)
    DBTool.insert(self.raw,self.db)
    return self | {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,530 | asmallleaf/DoorAppTest | refs/heads/master | /app/main/launch.py | from .createapp import createapp
# build the Flask app with the development configuration and run it
app = createapp('development')
if __name__ == '__main__':
    app.run()
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,531 | asmallleaf/DoorAppTest | refs/heads/master | /app/route/homeapi/newhome.py | from flask import request,jsonify
from app_2_0_0.app.support import dbSupportTool,verifySupportTool
from app_2_0_0.app.support.verifySupportTool import Type
from app_2_0_0.app.route.homeapi.sethome import homeapi
@homeapi.route('/newhome',methods=['POST','GET'])
def newhome():
    """Register a new home after validating its number and board code."""
    checker = verifySupportTool.VerifySupportToolFactory().build()
    store = dbSupportTool.dbSupportToolFactory().build()
    checker.setDbSupport(store).setRequest(value=request)
    if checker.verifyNewHome() is False:
        return jsonify(checker.getError(Type.home)),404
    # new rooms start empty with capacity 1
    store.newRoom(roomNum=checker.home.getHomeNum(),max=1,number=0).load()
    return jsonify(checker.getSuccess(Type.home)),200
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,532 | asmallleaf/DoorAppTest | refs/heads/master | /app/toolbox/basetool.py | from abc import abstractmethod
# this is a abstract class. Toolbox class should inherited from it
# _help and _helpfor method should be implemented to provide a clear help comments
class BaseTool():
    """Abstract base for toolbox classes; subclasses implement the help hooks."""
    @abstractmethod
    def _help(self):
        # overview of the toolbox's functions
        print('this is a toolbox and will be enriched in the future\n')
    @abstractmethod
    def _helpfor(self,fnc_name):
        # detailed help for a single function
        print('you can use this to get help() for specific function\n')
# this is a abstract class. ErrorBox class could inherited from it
# it was specific for RuntimeError
class BaseError(RuntimeError):
    """Base runtime error for the toolbox error hierarchy.

    BUG FIX: the original assigned the raw message to self.args;
    BaseException's args setter converts that with tuple(), so a string
    message became a tuple of characters, and __str__ then returned a
    non-string — str(e) raised TypeError. Routing the message through
    super().__init__ fixes both.
    """
    def __init__(self,args):
        super().__init__(args)
    def __str__(self):
        # callers pass a single message argument by convention
        return str(self.args[0])
class SupportToolFactory():
    # abstract factory: subclasses return a concrete SupportTool from build()
    @abstractmethod
    def build(self):
        pass
class SupportTool():
    # abstract base class of the support (service) layer tools
    @abstractmethod
    def _help(self):
        pass
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,533 | asmallleaf/DoorAppTest | refs/heads/master | /app/route/usrapi/logout.py | from app_2_0_0.app.route.usrapi.signin import usrapi
from app_2_0_0.app.support import dbSupportTool,verifySupportTool
from app_2_0_0.app.support.verifySupportTool import Type
from flask import request,jsonify
@usrapi.route('/fdlogout',methods=['POST','GET'])
def fdlogout():
    """Log the user out by bumping the stored token version.

    Any token issued with the old version then fails verifyToken's
    version check.
    """
    verify = verifySupportTool.VerifySupportToolFactory().build()
    database = dbSupportTool.dbSupportToolFactory().build()
    verify.setDbSupport(database).setRequest(value=request)
    if verify.verifyToken() is False:
        return jsonify(verify.getError(Type.user)),404
    raw = database.getRaw()
    # BUG FIX: updateVersion is pure — assign its result back (as the login
    # route does); the original discarded it, so logout never invalidated
    # the outstanding token.
    raw.tokenVersion = verify.verify.updateVersion(raw.tokenVersion)
    database.setRaw(raw).load()
    return jsonify(verify.getSuccess(Type.user)),200
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,534 | asmallleaf/DoorAppTest | refs/heads/master | /app/database/models.py | from sqlalchemy import Column,String,Integer,ForeignKey,TIMESTAMP
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class rooms(db.Model):
    # a home/room; users reference it via users.roomNumber
    id = Column(Integer, autoincrement=True, primary_key=True, nullable=False)
    numOfUsers = Column(Integer)        # current number of registered users
    doorState = Column(Integer,nullable=False)  # door open/closed flag
    maxNumOfUsers = Column(Integer,default=10)  # room capacity
    roomNumber = Column(Integer,nullable=False,unique=True)
class users(db.Model):
    # a user account; password is stored as a passlib hash (see IOtool.toHash)
    id = Column(Integer, autoincrement=True, primary_key=True, nullable=False)
    userName = Column(String(20),nullable=False)
    password = Column(String(255),nullable=False)  # passlib hash, not plaintext
    keyPasswd = Column(Integer,unique=True)        # random 8-digit per-user key
    tokenVersion = Column(Integer)                 # bumped on login to revoke old tokens
    roomNumber = Column(Integer,ForeignKey('rooms.roomNumber'))
class boards(db.Model):
    # registered door-board identifiers; looked up by dbSupportTool.findBoard
    id = Column(Integer, autoincrement=True, primary_key=True, nullable=False)
    codes = Column(String(255),unique=True,nullable=False) | {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,535 | asmallleaf/DoorAppTest | refs/heads/master | /app/verify/homeVerify.py | from app_2_0_0.app.toolbox.verifytool import VerifyFactory,VerifyTool,VerifyError
class HomeVerifyFactory(VerifyFactory):
    # NOTE(review): build is a classmethod here but an instance method on
    # UserVerifyFactory; callers use both forms — consider unifying.
    @classmethod
    def build(cls):
        return HomeVerify()
class HomeVerifyError(VerifyError):
    """VerifyError specialisation for home/room validation failures."""
    # mapping from error tab to its human-readable message
    _MESSAGES = {
        'homeBlank': 'Home number can not be blank',
        'userBlank': 'user number can not be empty',
        'maxUserBlank': 'max user number can not be empty',
        'userError': 'user number is not digit',
        'maxUserError': 'max user number is not digit',
        'homeError': 'home number is invalid',
        'userFull': 'the room is full of person',
        'BoardError': 'Board code is invalid',
    }
    def __init__(self,arg,type):
        super(HomeVerifyError,self).__init__(arg,type)
    def feedback(self):
        """Resolve self.type to its message, cache it on self.msg, return it."""
        self.msg = self._MESSAGES.get(self.type, 'Unknown Error')
        return self.msg
class HomeVerify(VerifyTool):
    """Field validation for home/room requests (number, capacity, board code)."""
    def __init__(self):
        self.error_index = {'state':'error'}    # error payload (jsonify-ready)
        self.success_inf = {'state':'success'}  # success payload (jsonify-ready)
        self.iferror = False   # set once any verification fails
        self.homeNum = None
        self.userNum = None
        self.maxUserNum = None
        self.boardCode = None
def raiseVerifyError(self,args,type):
    # Raise and immediately catch a HomeVerifyError so its message is
    # resolved, recorded in error_index under *type*, and iferror is set.
    try:
        raise HomeVerifyError(args,type)
    except HomeVerifyError as uve:
        temp = uve.feedback()
        self.error_index[type]=temp
        self.iferror=True
        print(temp)
def raiseSuccessInf(self,key,value):
    # add a key/message pair to the success payload
    self.success_inf[key] = value
    return self.success_inf
def verifyHomeNum(self):
    """Validate self.homeNum: present, numeric, exactly 8 digits.

    Returns True on success, None on failure (error recorded via
    raiseVerifyError).

    BUG FIX: the None check must run BEFORE isdigit(); the original
    called self.homeNum.isdigit() first and raised AttributeError when
    the field was missing.
    """
    if self.homeNum is None:
        self.raiseVerifyError('homeBlank','homeBlank')
        return None
    if self.homeNum.isdigit() is False:
        self.raiseVerifyError('homeError','homeError')
        return None
    if len(self.homeNum) != 8:
        self.raiseVerifyError('homeError','homeError')
        return None
    return True
def verifyUserNum(self):
    """Check userNum is present and numeric; True on success, None on failure."""
    if self.userNum is None:
        self.raiseVerifyError('userBlank','userBlank')
        return None
    if not self.userNum.isdigit():
        self.raiseVerifyError('userError','userError')
        return None
    return True
def verifyMaxNum(self):
    """Check maxUserNum is present and numeric; True on success, None on failure."""
    if self.maxUserNum is None:
        self.raiseVerifyError('maxUserBlank','maxUserBlank')
        return None
    if not self.maxUserNum.isdigit():
        self.raiseVerifyError('maxUserError','maxUserError')
        return None
    return True
def verifyHome(self):
    """Fail with 'userFull' when the room is already at capacity."""
    room_full = self.cmpNum(self.userNum,self.maxUserNum,True)
    if room_full:
        self.raiseVerifyError('userFull','userFull')
        return None
    return True
def verifyBoard(self):
    """Fail with 'BoardError' when no board code was supplied."""
    if self.boardCode is not None:
        return True
    self.raiseVerifyError('BoardError','BoardError')
    return None
# fluent getters/setters for the validated fields
def setBoardCode(self,value):
    self.boardCode = value
    return self
def getBoardCode(self):
    return self.boardCode
def setHomeNum(self,value):
    self.homeNum = value
    return self
def getHomeNum(self):
    return self.homeNum
def setUserNum(self,value):
    self.userNum = value
    return self
def getUserNum(self):
    return self.userNum
def setMaxUserNum(self,value):
    self.maxUserNum = value
    return self
def getMaxUserNum(self):
    return self.maxUserNum
def getError(self):
    # error payload dict (jsonify-ready)
    return self.error_index
def ifError(self):
    # True once any verification has failed
    return self.iferror
def getSuccess(self):
    # success payload dict (jsonify-ready)
    return self.success_inf | {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,536 | asmallleaf/DoorAppTest | refs/heads/master | /app/route/usrapi/login.py | from app_2_0_0.app.route.usrapi.blueprint import usrapi
from app_2_0_0.app.support import dbSupportTool,verifySupportTool
from app_2_0_0.app.support.verifySupportTool import Type
from flask import request,jsonify
@usrapi.route('/fdlogin',methods=['POST','GET'])
def fdLogin():
    """Authenticate a user; on success return the payload carrying a token."""
    checker = verifySupportTool.VerifySupportToolFactory().build()
    store = dbSupportTool.dbSupportToolFactory().build()
    checker.setDbSupport(store).setRequest(checker.request or request)
    if checker.verifyLogin() is False:
        return jsonify(checker.getError(Type.user)),404
    store.load()
    payload = checker.getSuccess(Type.user)
    payload['token'] = checker.token.decode('ascii')
    return jsonify(payload),200
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,537 | asmallleaf/DoorAppTest | refs/heads/master | /app/toolbox/verifytool.py | from .basetool import BaseTool,BaseError
import abc
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import TimedJSONWebSignatureSerializer as time_token
from itsdangerous import SignatureExpired,BadSignature
# this is a abstract class, which is defined due to Factory method
# ALl of the class in VerifyTool is suggested to be inherited for a specific concept
class VerifyFactory():
    # abstract factory: subclasses return a concrete VerifyTool from build()
    @abc.abstractmethod
    def build(self):
        pass
# VerifyTool is a toolbox designed for verification
# the original verification is designed to build a user verification level
# most of the comments has been collected in _helpfor function
# the raiseVerifyError is suggested to be overload when inheriting to generate a error list.
class VerifyTool(BaseTool):
    """Generic verification toolbox (field checks, password hashing, tokens).

    Designed to be subclassed per concept (user, home, ...); subclasses
    typically override raiseVerifyError to collect errors into a payload.
    """
    def _help(self):
        print('this is a tool for Verify class, developing')
        print('till now, has fnc isequal, raiserVerifyError, isnone, checkNum, checkNum2')
        print('passwdVerify,generateToken,verifyToken')
        print('class VerifyError')
    def _helpfor(self,fnc_name):
        if fnc_name == 'checkNum'or fnc_name == 'checkNum2':
            print('There are two overloading funcs.\nif min>max, the interval will be x<=min and x>=max')
            print('if min<=max, it will be min<=x<=max\n you also can user x>ordered num or x<num')
        elif fnc_name == 'isequal':
            print('just meaningless')
        elif fnc_name == 'raiseVerifyError':
            print('used to raise error, args is the tab, type is the label in class VerifyError')
            print('when you have customized a label, you need to add it in the class.')
        elif fnc_name == 'isnone':
            print('it is used to check the blank key in a index. using if not')
            print('can detect '',none,0,false.... and return the blank key as a list')
        elif fnc_name == 'passwdVerify':
            print('this is a password verify tool, need input the password and the hash version')
        elif fnc_name == 'generateToken':
            print('this is used to generate token, need the expiration time and additional index if needed')
        elif fnc_name == 'verifyToken':
            print('this is used to verify user token, only need the token serial and return the loaded data '
                  'if succeeds')
    @classmethod
    def isequal(cls,new,old):
        # True when the two values compare equal
        return new == old
    @classmethod
    def raiseVerifyError(cls,args,type):
        # raise/catch a VerifyError and print its resolved message
        try:
            raise VerifyError(args,type)
        except VerifyError as ve:
            print(ve.feedback())
    @classmethod
    def isnone(cls,elments):
        # return the keys of *elments* whose values are falsy ('', None, 0, ...)
        return [key for key,value in elments.items() if not value]
    @classmethod
    def checkNum(cls,items,max_num,min_num):
        """Check that len(items) lies in the interval given by min/max.

        If min_num > max_num the accepted interval is [max_num, min_num].
        Returns (ok, count).

        BUG FIX: the failure path used to return a bare False, which broke
        callers that unpack the result (dbVerify.Verify.check does
        ``result,item_num = checkNum(...)`` and crashed on failure).
        """
        item_num = len(items)
        if min_num>max_num:
            ok = item_num<=min_num and item_num>=max_num
        else:
            ok = item_num>=min_num and item_num<=max_num
        return ok,item_num
    @classmethod
    def checkNum2(cls,items,order_num,is_upper):
        """Compare len(items) against a single bound (>= when is_upper, < otherwise).

        Returns (ok, count); the failure path used to return a bare False
        (see checkNum).
        """
        item_num = len(items)
        if is_upper == True:
            ok = item_num >= order_num
        else:
            ok = item_num < order_num
        return ok,item_num
    @classmethod
    def cmpNum(cls,num,order_num,is_upper):
        # compare a plain number against a bound: >= when is_upper, < otherwise
        if is_upper == True:
            return num >= order_num
        return num < order_num
    # this method will verify password with the hash encoded value
    @classmethod
    def passwdVerify(cls,passwd,hash_passwd):
        return pwd_context.verify(passwd,hash_passwd)
    @classmethod
    def generateToken(cls,secret_key,expiration,index):
        # sign *index* into a timed token expiring after *expiration* seconds
        token_serial = time_token(secret_key,expires_in=expiration)
        return token_serial.dumps(index)
    def updateVersion(self,version):
        # advance the token version, wrapping back to 0 after 255
        num = int(version)
        if num>=255:
            num = 0
        else:
            num += 1
        return num
    @classmethod
    def verifyToken(cls,token_serial,token_key):
        """Decode a timed token; returns (status, data), data is None on failure."""
        token_cmp = time_token(token_key)
        try:
            data = token_cmp.loads(token_serial)
        except SignatureExpired:
            return 'SignatureExpired',None
        except BadSignature:
            return 'BadSignature',None
        return 'Success',data
# the verifyError class is suggested to be inherited since the error type need to be customized
# if inherited, feedback method should be overload and implemented as the following structure
# if self.type == 'error tab':
# self,msg = 'error message'
# the type and message will be returned by raiseVerifyError
class VerifyError(BaseError):
    """Base verification error.

    Subclasses are expected to extend feedback() with their own error tabs;
    the type and message are surfaced by raiseVerifyError.
    """
    msg = None
    # mapping from error tab to its human-readable message
    _MESSAGES = {
        'PasswdUnadmit': 'the passwd is not correct, meet error PasswdUnadmit',
        'NotLogin': 'the user has not logined yet, meet error NotLogin',
        'IllegalArgs': 'there are some arguments that does not meet the requirments',
        'MutilableObjects': 'there is mutilable objects when verify the number of the objects',
    }
    def __init__(self,args,type):
        super(VerifyError,self).__init__(args)
        self.type = type
    def feedback(self):
        """Resolve self.type to its message, cache it on self.msg, return it."""
        self.msg = self._MESSAGES.get(self.type, 'Unknown error happened, Unknown Error in VerifyError')
        return self.msg
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,538 | asmallleaf/DoorAppTest | refs/heads/master | /app/route/homeapi/blueprint.py | from flask import Blueprint
# the blueprint of homeapi is generated
# the view functions should import this blueprint in series
homeapi = Blueprint("homeapi",__name__)
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,539 | asmallleaf/DoorAppTest | refs/heads/master | /app/verify/userVerify.py | from app_2_0_0.app.toolbox.verifytool import VerifyFactory,VerifyTool,VerifyError
# it is a verify level for user information.
# the UserVerify class should be built by this factory
class UserVerifyFactory(VerifyFactory):
    # builds the user-level verifier (instance method, unlike HomeVerifyFactory)
    def build(self):
        return UserVerify()
# it is inherited from VerifyError class and is overlord for user verification
# there is no need to use it, it has been packaged into UserVerify class
# However, the error type need to added if some customized error types is used in UserVerify
class UserVerifyError(VerifyError):
    """Error catalogue for the user verification level.

    Add a branch to feedback() for every customized error tab raised by
    UserVerify.
    """
    def __init__(self,arg,type):
        super(UserVerifyError,self).__init__(arg,type)
    def feedback(self):
        """Map self.type to a human-readable message, cache and return it."""
        if self.type == 'nameLength':
            self.msg = 'user name too long'
        elif self.type == 'nameBlank':
            self.msg = 'name can not be blank'
        elif self.type == 'passwdBlank':
            self.msg = 'password can not be blank'
        elif self.type == 'passwd2Blank':
            self.msg = 'please confirm the password'
        elif self.type == 'differentPasswd':
            self.msg = 'confirm password failed'
        # BUG FIX: this elif was missing — the expiry message used to be a
        # bare assignment that overwrote the 'differentPasswd' message.
        # ('SignatureExpired' is raised by UserVerify.checkToken.)
        elif self.type == 'SignatureExpired':
            self.msg = 'token has been out of time'
        elif self.type == 'BadSignature':
            self.msg = 'the token does not exited or is invalid'
        elif self.type == 'LoginFailed':
            self.msg = 'User name or password is not correct'
        elif self.type == 'tokenUnmatched':
            self.msg = 'The token is invalid or not matched'
        elif self.type == 'repeatedName':
            self.msg = 'The user name has been registered'
        elif self.type == 'tokenEmpty':
            self.msg = 'the token could not be blank'
        else:
            self.msg = 'UnknownProblem'
        return self.msg
# it is the main class of user verify level
# all of the private member has getter and setter
# iferror should not be used without a careful thinking
# error_index is a list of error raised in verify level, it can directly transfer into json file
# passwd2 is the password used to confirm the first passwd, prevent mistyping
class UserVerify(VerifyTool):
    """Field-level validation for user accounts (names, passwords, tokens).

    error_index collects raised errors and can be jsonified directly;
    passwd2 is the confirmation copy of passwd used at sign-up.
    """
    def __init__(self):
        self.error_index = {'state':'error'}    # error payload (jsonify-ready)
        self.success_inf = {'state':'success'}  # success payload (jsonify-ready)
        self.name = None
        self.passwd = None
        self.passwd2 = None
        self.keyPasswd = None
        self.iferror = False  # set once any verification fails
    # the raiseVerifyError function is overload
    # now the error tab and message will be collected into error_index
    # args and type is suggested to be the same though only type is the error tab
    def raiseVerifyError(self,args,type):
        try:
            raise UserVerifyError(args,type)
        except UserVerifyError as uve:
            temp = uve.feedback()
            self.error_index[type]=temp
            self.iferror=True
            print(temp)
    def raiseSuccessInf(self,key,value):
        # add a key/message pair to the success payload
        self.success_inf[key] = value
        return self.success_inf
    # it is used to verify the sign in information
    # user name, password and password 2 should be transferred.
    # it is a package of the field verification methods
    # it can not ensure any of the value be unique
    def verifySignIn(self):
        self.verifyName()
        self.verifyPasswds()
    # it is used to check the validity of token
    # it may be developed further in the future with a banned token list verification
    def checkToken(self,token,key):
        result,data = VerifyTool.verifyToken(token,token_key=key)
        if result == 'SignatureExpired':
            self.raiseVerifyError('SignatureExpired','SignatureExpired')
            return None
        elif result == 'BadSignature':
            self.raiseVerifyError('BadSignature','BadSignature')
            return None
        else:
            return data
    # it is used to verify the login information
    # to keep the generality of verify level, it is not linked to database
    # so it just checks the given password against the stored hash
    def verifyLogin(self,mpasswd):
        if self.passwdVerify(self.passwd,mpasswd):
            return True
        else:
            self.raiseVerifyError('LoginFailed','LoginFailed')
            return False
    # it is used to verify whether the user name is valid or not
    # it just checks the blank or length of the user name
    # the name should not be longer than 20 characters
    # it should provide a method to change the limited size
    def verifyName(self):
        if not self.name:
            self.raiseVerifyError('nameBlank','nameBlank')
            return 'nameBlank'
        if len(self.name)>20:
            self.raiseVerifyError('nameLength','nameLength')
            return 'nameLength'
        else:
            return None
    # it is used to verify the passwords
    # it will check both of the password and password 2
    def verifyPasswds(self):
        if not self.passwd:
            self.raiseVerifyError('passwdBlank','passwdBlank')
            return 'passwdBlank'
        if not self.passwd2:
            self.raiseVerifyError('passwd2Blank','passwd2Blank')
            return 'passwd2Blank'
        if not VerifyTool.isequal(self.passwd2,self.passwd):
            self.raiseVerifyError('differentPasswd','differentPasswd')
            return 'differentPasswd'
        return None
    def verifyPasswd(self):
        # login-time check: only requires the password to be non-blank
        if not self.passwd:
            self.raiseVerifyError('passwdBlank','passwdBlank')
            return 'passwdBlank'
        return None
    # it is used to verify whether the data stored in the token matches the database
    # to keep the generality of verify level, it is not linked to database
    # so it needs two data, extracted from database and token respectively
    # loc is the key of data to check
    def verifyAccuracyWithToken(self,tokenData,cmpData,loc):
        if cmpData != tokenData[loc]:
            self.raiseVerifyError('tokenUnmatched','tokenUnmatched')
            return 'tokenUnmatched'
        else:
            return None
    # the following methods are getters and setters of private members
    def setName(self,value):
        self.name = value
        return self
    def getName(self):
        return self.name
    def setPasswd(self,value):
        self.passwd = value
        return self
    def getPasswd(self):
        return self.passwd
    def setPasswd2(self,value):
        self.passwd2 = value
        return self
    def getPasswd2(self):
        return self.passwd2
    def getError(self):
        return self.error_index
    def ifError(self):
        return self.iferror
    def setKeyPasswd(self,value):
        self.keyPasswd = value
        return self
    def getKeyPasswd(self):
        return self.keyPasswd
    def getSuccess(self):
        return self.success_inf
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,540 | asmallleaf/DoorAppTest | refs/heads/master | /app/config/config.py | from app_2_0_0.secret.config import Dev_config,Test_config
class Dev_Config(Dev_config):
    """Development configuration: local MySQL plus the development secrets."""
    USERNAME = 'root'
    HOST = 'localhost'
    DATABASENAME = 'db_webdoor'
    SQLALCHEMY_DATABASE_URI = 'mysql://'+USERNAME+':'+Dev_config.PASSWORD+\
                              '@'+HOST+'/'+DATABASENAME
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    TOKEN_KEY = Dev_config.TOKEN
class Test_Config(Test_config):
    """Test profile; password/token secrets come from app_2_0_0.secret.config."""
    USERNAME = 'root'
    HOST = 'localhost'
    # NOTE(review): 'db_weboor' looks like a typo of 'db_webdoor' — confirm
    # the intended test database name before changing it.
    DATABASENAME = 'db_weboor'
    # Bug fix: the DSN previously authenticated with Dev_config.PASSWORD; the
    # test profile must use its own secret, consistent with TOKEN_KEY below.
    SQLALCHEMY_DATABASE_URI = 'mysql://'+USERNAME+':'+Test_config.PASSWORD+\
        '@'+HOST+'/'+DATABASENAME
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    TOKEN_KEY = Test_config.TOKEN
# registry mapping profile names to config classes; createapp() selects from
# this dict via app.config.from_object(configs[objectName])
configs = {
    'development': Dev_Config,
    'test': Test_Config
}
76,541 | asmallleaf/DoorAppTest | refs/heads/master | /app/verify/dbVerify.py | from app_2_0_0.app.toolbox.verifytool import VerifyTool as vt
# this is a irregular verify level class inherited from Verify Toolbox
# it should use Factory method and will be redeveloped in the future
# it is used to provide verify methods for data transferred into database
class Verify(vt):
    """Verification helpers for data passed into the database layer.

    Irregular verify-level class inherited straight from the VerifyTool
    toolbox (no factory yet; slated for redevelopment).  Both helpers just
    package a VerifyTool count check together with its error reporting.
    """
    @classmethod
    def check(cls,items,max,min,error_arg,error_tab):
        """Verify the number of entries in *items* lies within [min, max].

        Returns the item count on success; otherwise raises a verify error
        via VerifyTool and returns False.  Not well designed yet — to be
        improved.
        """
        ok, count = cls.checkNum(items,max_num=max,min_num=min)
        if not ok:
            vt.raiseVerifyError(error_arg,error_tab)
            return False
        return count
    @classmethod
    def check2(cls,items,val,upper,error_arg,error_tab):
        """Like check(), but against a single bound *val*.

        *upper* selects the comparison direction (see checkNum2's is_upper).
        Returns the item count on success; otherwise raises a verify error
        and returns False.
        """
        ok, count = cls.checkNum2(items,order_num=val,is_upper=upper)
        if not ok:
            vt.raiseVerifyError(error_arg,error_tab)
            return False
        return count
76,542 | asmallleaf/DoorAppTest | refs/heads/master | /app/main/createapp.py | from flask import Flask
from app_2_0_0.app.config.config import configs
from app_2_0_0.app.database import models
from app_2_0_0.app.route.blueprints import usrapi,homeapi
from app_2_0_0.app.toolbox.iotool import JsonEncoder
# an intent of flask will be created here. However, it is just a method.
# it should be used in launcher to divide the launcher from create process
def createapp(objectName):
    """Create and wire up the Flask application for one config profile.

    objectName selects an entry from ``configs`` ('development' or 'test').
    Kept as a plain factory so the launcher stays separate from app creation.
    """
    flask_app = Flask(__name__)
    # apply the selected configuration profile
    flask_app.config.from_object(configs[objectName])
    # custom encoder used by jsonify
    flask_app.json_encoder = JsonEncoder
    # bind the SQLAlchemy model classes to this app instance
    models.db.init_app(flask_app)
    # mount the API blueprints
    flask_app.register_blueprint(homeapi)
    flask_app.register_blueprint(usrapi)

    # placeholder root route; slated for removal
    @flask_app.route('/')
    def welcome():
        return '<h>hello world<\h>'

    return flask_app
76,543 | asmallleaf/DoorAppTest | refs/heads/master | /app/toolbox/dbtool.py | import string
from .basetool import BaseTool
import random
# DBToo is a toolbox class designed for database in flask.
# flask_sqlalchemy is suggested to be used as the database package
# help provide some comments on the methods
class DBTool(BaseTool):
    """Database helper toolbox for flask_sqlalchemy sessions.

    Wraps the session add/delete+commit pattern and provides small
    string/number helpers used by the app.
    """
    def _help(self):
        print('this is tool of database,include insert, drop')
        print('it has insert, drop, string2Bool, generate_token(not recommended),generate_randnum')
        print('use _helpfor to check some specific information')
    def _helpfor(self,fnc_name):
        if fnc_name == 'insert' or fnc_name == 'drop':
            print('to use them, a raw in the database and db need to be provided')
            print('like insert(raw,db), db is the instance of database')
        elif fnc_name == 'generate_token':
            print('it could generate a random number in the size of 10')
            print('it has not been developed completely, so it is not suggested to use')
        elif fnc_name == 'string2Bool':
            print('it is a function to convert string value of True, true, TRUE')
            print('into boolean value. Any other stirng value will return false')
        elif fnc_name == 'generate_randnum':
            print('it is a function to generate random number, position is the size of number')
    @classmethod
    def insert(cls,raw,db):
        """Add *raw* to db's session and commit immediately."""
        db.session.add(raw)
        db.session.commit()
    @classmethod
    def drop(cls,raw,db):
        """Delete *raw* from db's session and commit immediately."""
        db.session.delete(raw)
        db.session.commit()
    @classmethod
    def generate_token(cls,table,db,size):
        """Return a random integer with exactly *size* decimal digits.

        *table* and *db* are currently unused (uniqueness against the table
        is not checked yet) — hence "not recommended" in the help text.
        """
        low = int(pow(10.0, size - 1))
        high = int(pow(10.0, size)) - 1
        return random.randint(low, high)
    @classmethod
    def generate_randnum(cls,position):
        """Return a random digit string of length *position*.

        Bug fix: random.sample draws without replacement, so digits could
        never repeat and any position > 10 raised ValueError; random.choices
        draws each digit independently.
        """
        return ''.join(random.choices(string.digits, k=position))
    @classmethod
    def string2Bool(cls,str):
        """Map 'True'/'TRUE'/'true' to True; any other string to False."""
        return (str == 'True' or str == 'TRUE' or str == 'true')
76,544 | asmallleaf/DoorAppTest | refs/heads/master | /app/route/blueprints.py | from app_2_0_0.app.route.usrapi.logout import usrapi
from app_2_0_0.app.route.homeapi.newhome import homeapi
# ALl of the blueprints will be collected here
# the source of each blueprint should be updated if there is a new api built
# the createapp will import the webapi and usrapi from this file
# re-export the blueprint objects under stable names; createapp() imports
# usrapi and homeapi from this module
usrapi = usrapi
homeapi = homeapi
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,545 | asmallleaf/DoorAppTest | refs/heads/master | /app/support/factory.py | from app_2_0_0.app.support.verifySupportTool import VerifySupportToolFactory as VSF
from app_2_0_0.app.support.dbSupportTool import dbSupportToolFactory as DBSF
from enum import Enum
class supportNames(Enum):
    """Keys identifying the support-tool kinds that Supports.create builds."""
    verify = 0     # verification support tool
    database = 1   # database support tool
class Supports():
    """Facade that builds support-tool instances from supportNames keys."""
    @classmethod
    def create(cls,name):
        """Build and return the support tool identified by *name*.

        *name* is a supportNames member; returns None for anything else.
        """
        if name == supportNames['verify']:
            # bug fix: build() is an instance method on the factory classes
            # (see VerifySupportToolFactory), so the factory must be
            # instantiated first — VSF.build() raised TypeError before
            return VSF().build()
        elif name == supportNames['database']:
            return DBSF().build()
    @classmethod
    def createBundle(cls,args):
        """Build one support tool per entry of *args* and return them all.

        Bug fix: the original returned from inside the loop, producing only
        the tool for the first argument instead of a bundle; now a list of
        tools (same order as *args*) is returned.
        """
        return [cls.create(arg) for arg in args]
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
76,574 | grape0919/AutoRegister | refs/heads/master | /view/PrograssDialog.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'PrograssDialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtWidgets
from PyQt5.QtCore import QRect, QMetaObject, Qt
class Ui_Form(QtWidgets.QWidget):
    """Small frameless, always-on-top progress popup with an indeterminate bar."""
    def setupUi(self):
        """Build the widget tree on this widget; call once after construction."""
        self.resize(364, 67)
        self.progressBar = QtWidgets.QProgressBar(self)
        self.progressBar.setGeometry(QRect(10, 30, 341, 23))
        # maximum 0 with value -1 puts the bar into Qt's "busy" (indeterminate)
        # mode, so no percentage text is meaningful
        self.progressBar.setMaximum(0)
        self.progressBar.setProperty("value", -1)
        self.progressBar.setTextVisible(False)
        self.progLabel = QtWidgets.QLabel(self)
        self.progLabel.setGeometry(QRect(10, 10, 341, 16))
        self.retranslateUi()
        QMetaObject.connectSlotsByName(self)
        # borderless popup that stays above the main window
        flags = Qt.WindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
        self.setWindowFlags(flags)
    def retranslateUi(self):
        """Set the user-visible placeholder strings; callers overwrite progLabel."""
        self.setWindowTitle("Form")
        self.progLabel.setText("PrograssText")
if __name__ == "__main__":
    # Manual smoke test: show the progress popup on its own.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    # Bug fix: Ui_Form is itself a QWidget and setupUi() takes no argument;
    # the old code built a separate QWidget and passed it to setupUi, which
    # raised TypeError and never showed the Ui_Form instance.
    ui = Ui_Form()
    ui.setupUi()
    ui.show()
    sys.exit(app.exec_())
| {"/parsing/ProdCrawler.py": ["/config/Configuration.py"], "/login/AutoLogin.py": ["/config/Configuration.py"], "/Process.py": ["/AutoRegister.py", "/view/PrograssDialog.py", "/login/AutoLogin.py", "/parsing/ProdCrawler.py"], "/parsing/LozenParser.py": ["/data/CarriageData.py"], "/AutoRegister.py": ["/view/main.py", "/data/CarriageData.py", "/parsing/LozenParser.py", "/view/PrograssDialog.py"]} |
76,575 | grape0919/AutoRegister | refs/heads/master | /data/CarriageData.py |
from logging import error
import requests
import json
from log.Logger import Logger
from PyQt5.QtWidgets import QMessageBox
class Data:
    """Value object for one carriage (delivery) record.

    Class attributes double as documented placeholders/defaults; instances
    overwrite them per record.  Korean placeholder strings are the field
    labels used by the original author.
    """
    checkValue = "0"
    carriageNumber = "0"        # waybill number (was accidentally defined twice)
    # UPLOAD_SER_NO = "0"       # required by the upstream API
    WH_CD = '00002'             # warehouse code (required)
    IO_DATE = 'YYYYMMDD'        # shipping date
    CUST_DES = '거래처 명'
    CUST = '거래처 코드'
    phoneNumber = '000-0000-0000'
    address = '주소'
    PROD_DES = '품목'
    PROD_CD = '품목 코드'       # product code (required)
    QTY = '수량'                # quantity (required)
    def toArray(self):
        """Return the display-ordered field list used by the table view."""
        return [self.carriageNumber, self.IO_DATE, self.CUST_DES,
                self.phoneNumber, self.PROD_DES, self.QTY, self.address]
    def __str__(self):
        # bug fix: str(self.toArray) stringified the bound method object
        # instead of calling it; now the field list itself is rendered
        return "CarriageData : " + str(self.toArray())
    def __repr__(self):
        return (str(self.toArray()))
class Register:
    """Posts sale records to the ecount ERP OpenAPI for a logged-in session."""
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    # endpoint templates; __init__ bakes ZONE and SESSION_ID into them
    registrationUrl = 'https://oapi{ZONE}.ecounterp.com/OAPI/V2/Sale/SaveSale?SESSION_ID={SESSION_ID}'
    inquiryUrl = 'https://oapi{ZONE}.ecounterp.com/OAPI/V2/InventoryBasic/GetBasicProduct?SESSION_ID={SESSION_ID}'
    def __init__(self, ZONE, SESSION_ID):
        """Bind the API zone and session id into the endpoint URLs."""
        Logger.info("CarriageRegister.init")
        self.ZONE = ZONE
        self.SESSION_ID = SESSION_ID
        self.registrationUrl = self.registrationUrl.format(ZONE=self.ZONE, SESSION_ID=self.SESSION_ID)
        self.inquiryUrl = self.inquiryUrl.format(ZONE=self.ZONE, SESSION_ID=self.SESSION_ID)
    def registration(self, data):
        """POST one carriage record (possibly several product lines) as a sale.

        *data* is a CarriageData.Data whose PROD_CD / PROD_DES / QTY are
        parallel sequences.  Returns (True, success_count) or
        (False, error_message).
        """
        Logger.info("CarriageData Registraion")
        print("!@#!@# data : ", data)
        # the request body is assembled as text rather than via json.dumps;
        # NOTE(review): the trailing comma after "U_MEMO5" makes this invalid
        # strict JSON — the ecount endpoint apparently tolerates it, confirm
        # before tightening.
        post = """{
    "SaleList": ["""
        # one SaleList entry per product line on the waybill
        for i in range(len(data.PROD_CD)):
            post += """
        {{
            "Line": "0",
            "BulkDatas": {{
                "IO_DATE": "{IO_DATE}",
                "UPLOAD_SER_NO": "",
                "CUST": "{CUST}",
                "CUST_DES": "{CUST_DES2}",
                "WH_CD": "00002",
                "PROD_CD": "{PROD_CD}",
                "PROD_DES": "{PROD_DES}",
                "QTY": "{QTY}",
                "U_MEMO3": "{CUST_DES1} / {PHONE}",
                "U_MEMO4": "{ADDRESS}",
                "U_MEMO5": "{ECT}",
            }}
        }}
        """.format(IO_DATE=data.IO_DATE, CUST=data.CUST, CUST_DES2=data.CUST_DES if str(data.CUST) != "TRA2008008" else "택배발송",# UPLOAD_SER_NO=data.UPLOAD_SER_NO
            CUST_DES1 = data.CUST_DES, PROD_CD=data.PROD_CD[i], PROD_DES=data.PROD_DES[i], QTY=data.QTY[i], PHONE=data.phoneNumber
            , ADDRESS=data.address, ECT="")
            if(i != len(data.PROD_CD)-1):
                post += """,
                """
        post += """]
}"""
        post = post.encode("utf-8")
        Logger.debug("post: " + str(post))
        response = requests.post(self.registrationUrl, data=post, headers=self.headers)
        Logger.debug("response : " + response.text)
        status = response.json()["Status"]
        success_cnt = ""
        fail_cnt = ""
        error_msg = ""
        if(status == "200"):
            success_cnt = response.json()["Data"]["SuccessCnt"]
            fail_cnt = response.json()["Data"]["FailCnt"]
            # NOTE(review): this assumes FailCnt arrives as the integer 0;
            # if the API returns "0" as a string the comparison fails —
            # confirm against a real response.
            if(fail_cnt == 0):
                return (True, success_cnt)
            else:
                return (False, response.json()["Data"]["ResultDetails"][0]["TotalError"])
        else:
            error_msg = response.json()["Error"]["Message"]
            return (False, error_msg)
    def registrationList(self, dataList):
        """Register every record in *dataList*, then show one summary dialog.

        Failed records are collected and listed in the failure dialog.
        """
        Logger.info("CarriageData List Registraion")
        Logger.debug("regist Data List : " + str(dataList))
        check_resp = []
        c = False  # NOTE(review): unused
        for d in dataList:
            a = self.registration(d)
            if not a[0]:
                check_resp.append(str(a[1])+"data : "+str(d.CUST_DES)+" : " +str(d.PROD_DES))
        msg = QMessageBox()
        if len(check_resp) > 0:
            msg.setWindowTitle("판매 등록 실패")
            msg.setIcon(QMessageBox.Critical)
            msg.setText("판매 등록에 실패 했습니다. 아래 리스트를 확인해주세요.\n"+"\n".join(check_resp))
        else :
            msg.setWindowTitle("판매 등록 성공")
            msg.setIcon(QMessageBox.Information)
            msg.setText("판매 등록에 성공 했습니다.")
        msg.setDefaultButton(QMessageBox.Escape)
        msg.exec_()
    def inquiryProduct(self, prodNm):
        # stub; product lookup is done by the Crawler instead
        return 0
if __name__ == "__main__" :
    # Ad-hoc manual test against a live ecount session.  Requires a valid
    # ZONE / SESSION_ID pair and performs a real network call; the PROD_CD
    # placeholder string means registration() will iterate its characters —
    # intended only as a rough smoke test.
    ZONE = "CC"
    SESSION_ID = "36363532357c50415243454c:CC-AN16HBmxKKJ49"
    reg = Register(ZONE, SESSION_ID)
    data = Data()
    data.UPLOAD_SER_NO = "0"
    data.WH_CD = '00002'
    data.carriageNumber = "0"
    data.IO_DATE = '20200816'
    data.CUST_DES = '테스트고객정보'
    data.phoneNumber = '000-0000-0000'
    data.address = '주소'
    data.item = '품목'
    data.QTY = '5'
    # print(data.__dict__)
    # bug fix: Data has no toJson() (the call raised AttributeError);
    # log the record's string form instead
    Logger.info(str(data))
    reg.registration(data)
    # reg.registration(data)
76,576 | grape0919/AutoRegister | refs/heads/master | /parsing/ProdCrawler.py | import requests
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from pathlib import Path
from config.Configuration import Config
import pandas as pd
import time
from log.Logger import Logger
class Crawler:
    """Downloads the ecount customer and product master lists as Excel files.

    Drives a headless Chrome through the ecount web UI, exports both lists,
    loads them with pandas, and exposes code-lookup series plus a keyword
    search index.  NOTE(review): the XPaths and menu positions are tied to a
    specific ecount UI layout and will break silently when it changes.
    """
    # download target directory: <project root>\data
    downloadPath = Path(__file__)
    downloadPath = str(Path(downloadPath).parent.parent) + "\\data"
    customDataFileName = "ESA001M.xls"   # customer master export
    prodDataFileName = "ESA009M.xls"     # product master export
    customData = None   # pandas Series: customer name -> customer code
    prodData = None     # pandas Series: product name -> product code
    def __init__(self):
        self.config = Config()
    def run(self):
        """Download both exports, build the lookup tables, return True/False.

        Side effects: sets customData, prodData, searchData and searchDict,
        and deletes the downloaded Excel files afterwards.
        """
        # try:
        Logger.debug("downloadPath : " + self.downloadPath)
        options = webdriver.ChromeOptions()
        options.add_argument("headless")
        options.add_argument("disable-gpu")
        options.add_argument("lang=ko_KR")  # Korean locale
        options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
        # route Chrome's downloads into our data directory without prompting
        options.add_experimental_option("prefs", {
            "download.default_directory": self.downloadPath,
            "download.prompt_for_download": False,
            "download.directory_upgrade": True,
            "safebrowsing.enabled": True
        })
        # NOTE(review): chrome_options= is deprecated in newer Selenium;
        # works with the pinned driver here
        driver = webdriver.Chrome("./lib/chromedriver.exe", chrome_options=options)
        driver.get("https://logincc.ecounterp.com/ECERP/LOGIN/ERPLogin?vrqa=mMQ%2Bk8KPqxYEwADSAix%2FmA%3D%3D&vrqb=5456564d5d47535b5b465b4d504d5b4c0f1053535c400f4c5d025d450a4c06545c175d4d005609405a40555b584c4044&vrqc=1")
        driver.implicitly_wait(5)
        # log in with company code / id / password from the config file
        driver.find_element_by_xpath("/html/body/div[6]/form/div[1]/div/div[2]/div[1]/div[1]/input").send_keys(self.config.ecountComCode)
        driver.find_element_by_xpath("/html/body/div[6]/form/div[1]/div/div[2]/div[1]/div[2]/input").send_keys(self.config.ecountId)
        driver.find_element_by_xpath("/html/body/div[6]/form/div[1]/div/div[2]/div[1]/div[3]/input[1]").send_keys(self.config.ecountPwd)
        driver.find_element_by_id("save").click()
        driver.implicitly_wait(1)
        try:
            # dismiss the "remember login info?" popup if it appears
            driver.find_element_by_xpath("/html/body/div[7]/div[2]/div/div[3]/div/button[2]").click()
        except:
            Logger.warn("로그인 정보 등록 되어있음")
        driver.implicitly_wait(5)
        # navigate: Inventory1 -> basic registration -> product registration
        Logger.debug("재고1 클릭")
        driver.find_element_by_xpath("/html/body/div[7]/div/div[2]/div[2]/div[1]/ul/li[4]/a").click()
        driver.implicitly_wait(1)
        Logger.debug("기초등록 클릭")
        driver.find_element_by_xpath("/html/body/div[7]/div/div[2]/div[2]/div[2]/ul[4]/li[1]/a").click()
        time.sleep(3)
        Logger.debug("품목등록 클릭")
        driver.find_element_by_xpath("/html/body/div[7]/div/div[2]/div[2]/div[3]/ul/li[4]/a").click()
        driver.implicitly_wait(5)
        time.sleep(3)
        # export the product list to Excel (triggers the ESA009M.xls download)
        Logger.debug("엑셀 클릭")
        excelElement = driver.find_element_by_xpath("/html/body/div[8]/div/div[4]/div[3]/div/div[1]/div[8]/div/button")
        driver.execute_script("arguments[0].click();", excelElement)
        driver.implicitly_wait(5)
        # then the customer list (ESA001M.xls)
        Logger.debug("거래처 등록 클릭")
        driver.find_element_by_xpath("/html/body/div[7]/div/div[2]/div[2]/div[3]/ul/li[1]/a").click()
        driver.implicitly_wait(5)
        time.sleep(8)
        Logger.debug("엑셀 클릭")
        excelElement = driver.find_element_by_xpath("/html/body/div[8]/div/div[4]/div[3]/div/div[1]/div[6]/div/button[1]")
        driver.execute_script("arguments[0].click();", excelElement)
        driver.implicitly_wait(5)
        time.sleep(5)
        driver.close()
        # confirm both files actually landed in the download directory
        customDataFilePath = Path(self.downloadPath).joinpath(self.customDataFileName)
        Logger.debug(customDataFilePath)
        check_file = customDataFilePath.is_file()
        Logger.debug(check_file)
        prodDataFilePath = Path(self.downloadPath).joinpath(self.prodDataFileName)
        Logger.debug(prodDataFilePath)
        check_file = check_file and prodDataFilePath.is_file()
        Logger.debug(check_file)
        if check_file:
            Logger.info("read excel")
            # customer sheet: name -> code
            df = pd.read_excel(customDataFilePath,
                sheet_name='거래처등록',
                header=1,
                index_col='거래처명',
                dtype={'거래처코드':str})
            Logger.debug("df.A : " + str(df['거래처코드']))
            self.customData = df['거래처코드']
            # product sheet: name -> code, plus free-text search keywords
            df = pd.read_excel(prodDataFilePath,
                sheet_name='품목등록',
                header=1,
                index_col='품목명',
                dtype={'품목코드':str})
            Logger.debug("df.A : " + str(df['품목코드']))
            self.prodData = df['품목코드']
            print("!@#!@# prodData : ", self.prodData)
            # build the keyword index: every space-separated word in the
            # search column maps to its product (code in searchData,
            # product name in searchDict)
            tempSearchKey = df['검색창내용']
            tempV = []
            tempI = []
            self.searchDict = {}
            for idx, value in tempSearchKey.items():
                if(type(value) == type('')):
                    for key in value.split(' '):
                        tempV.append(self.prodData[idx])
                        tempI.append(key)
                        self.searchDict[key] = idx
                        # self.prodData.append(pd.Series([self.prodData[idx]], index=[key]))
            self.searchData = pd.Series(tempV,index=tempI)
            # clean up the downloaded exports
            customDataFilePath.unlink()
            prodDataFilePath.unlink()
            if type(self.customData) == type(None) or type(self.prodData) == type(None):
                Logger.error("품목, 거래처 목록을 다운로드 중 문제가 발생하였습니다.")
                return False
            else :
                return True
        else :
            # NOTE(review): at least one file is missing here, so one of these
            # unlink() calls raises FileNotFoundError before the error is
            # logged — confirm and guard if that path is ever hit.
            customDataFilePath.unlink()
            prodDataFilePath.unlink()
            Logger.error("다운로드 실패 : " + self.customDataFileName + ", " + self.prodDataFileName)
            return False
        # except:
        #     Logger.error("품목, 거래처 목록을 다운로드 중 문제가 발생하였습니다.")
        #     return False
    def run2(self):
        # stub that skips the crawl — presumably for development/testing;
        # confirm before relying on it
        return True
76,577 | grape0919/AutoRegister | refs/heads/master | /login/AutoLogin.py | import configparser
import os
import time
import requests
import json
import sys
from config.Configuration import Config
from log.Logger import Logger
class AutoLogin:
    """Logs into both the Lozen courier portal and the ecount ERP OpenAPI.

    Lozen login keeps its cookie in ``login_session`` and caches two tokens
    from the 'Ξ'-delimited response body; ecount login resolves the API zone
    for the company code and then obtains a SESSION_ID.
    """
    headers_common = {'Content-Length':'32','Accept':'*/*','X-Requested-With':'XMLHttpRequest','User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36','Content-Type':'application/x-www-form-urlencoded; charset=UTF-8','Origin':'http://203.247.141.92:8080','Referer':'http://203.247.141.92:8080/SmartLogen/login','Accept-Encoding':'gzip, deflate','Accept-Language':'ko,ko-KR;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6','Connection':'close'}
    def __init__(self):
        self.login_session = requests.Session()
        self.config = Config()
    def lozenLogin(self):
        """POST the Lozen credentials and cache the two login tokens."""
        Logger.info("lozenLogin")
        url = 'http://203.247.141.92:8080/SmartLogen/UserLogin'
        post = {'userid':self.config.lozenId,'userpw':self.config.lozenPwd}
        try:
            response = self.login_session.post(url,data=post,headers=self.headers_common)
        # narrowed from a bare except: only network-level failures should
        # trigger the "connection problem" exit
        except requests.RequestException:
            Logger.error("lozen 로그인 중 네트워크 연결에 문제가 있습니다. ")
            sys.exit()
        Logger.debug("response" + response.text)
        # the response body is 'Ξ'-delimited; fields 1 and 3 are the tokens
        login_data = response.text.split('Ξ')
        self.lozenLoginData1 = login_data[1]
        self.lozenLoginData2 = login_data[3]
    def ecountLogin(self):
        """Resolve the ecount zone, then log into the OpenAPI for a session id."""
        Logger.info("ecountLogin")
        url = 'https://oapi.ecounterp.com/OAPI/V2/Zone'
        Logger.debug("COM_CODE : " + self.config.ecountComCode)
        headers = {'Content-Type': 'application/json; charset=utf-8'}
        post = {'COM_CODE': self.config.ecountComCode}
        try:
            response = requests.post(url, data=json.dumps(post), headers=headers)
        except requests.RequestException:
            Logger.error("ecount 로그인 중 네트워크 연결에 문제가 있습니다. ")
            sys.exit()
        Logger.debug("response" + response.text)
        Logger.debug("Data : " + response.json()["Data"]["ZONE"])
        self.ZONE = response.json()["Data"]["ZONE"]
        url = 'https://oapi{ZONE}.ecounterp.com/OAPI/V2/OAPILogin'.format(ZONE=self.ZONE)
        post = {'COM_CODE': self.config.ecountComCode, 'USER_ID':self.config.ecountId, 'API_CERT_KEY':self.config.ecountApiKey, 'LAN_TYPE':'ko-KR', 'ZONE':self.ZONE}
        # consistency fix: guard the second request like the first so a
        # network failure is reported instead of surfacing a raw exception
        try:
            response = requests.post(url, data=json.dumps(post), headers=headers)
        except requests.RequestException:
            Logger.error("ecount 로그인 중 네트워크 연결에 문제가 있습니다. ")
            sys.exit()
        self.SESSION_ID = response.json()["Data"]["Datas"]["SESSION_ID"]
    def run(self):
        """Run both logins.

        Returns (True, lozenLoginData1, lozenLoginData2, ZONE, SESSION_ID)
        on success, or the 1-tuple (False,) when any token came back empty.
        """
        Logger.info("run")
        self.lozenLogin()
        self.ecountLogin()
        if self.lozenLoginData1 == "" or self.lozenLoginData2 == "" or self.ZONE == "" or self.SESSION_ID == "" :
            # bug fix: '(False)' is just the boolean False, so callers doing
            # result[0] crashed with TypeError; return a real 1-tuple
            returnVal = (False,)
        else :
            returnVal = (True, self.lozenLoginData1, self.lozenLoginData2, self.ZONE, self.SESSION_ID)
        return returnVal
76,578 | grape0919/AutoRegister | refs/heads/master | /config/Configuration.py | import configparser
import os
from log.Logger import Logger
class Config:
    """Loads Lozen and ecount credentials from config\\config.properties.

    Missing sections or a missing file are logged, leaving the corresponding
    attributes as empty strings.
    """
    configFilePath = os.getcwd() + "\\config\\config.properties"
    # section and key names inside the properties file
    lozenHeader = "Login.lozen"
    lozenIdKey = "login.lozen.id"
    lozenPwdKey = "login.lozen.pwd"
    ecountHeader = "Login.ecount"
    ecountIdKey = "login.ecount.id"
    ecountPwdKey = "login.ecount.pwd"
    ecountComKey = "login.ecount.comcode"
    ecountApiKeyKey = "ecount.api.key"
    # defaults; overwritten per-instance when the file is readable
    lozenId = ""
    lozenPwd = ""
    lozenLoginData1 = ""
    lozenLoginData2 = ""
    ecountId = ""
    ecountPwd = ""
    ecountComCode = ""
    ecountApiKey = ""
    def __init__(self):
        """Read both credential sections from the properties file."""
        config = configparser.ConfigParser()
        try :
            config.read(self.configFilePath)
            if (self.lozenHeader in config):
                self.lozenId = config[self.lozenHeader][self.lozenIdKey]
                self.lozenPwd = config[self.lozenHeader][self.lozenPwdKey]
            else:
                Logger.error("LOZEN 로그인 정보 불러오기 실패 : " + self.configFilePath + " 설정을 불러오는데 실패했습니다.")
            Logger.info("ecount login")
            # bug fix: removed the stray config.read(self.ecountId) — it
            # re-read the parser using the (empty) id string as a filename,
            # a no-op at best and wrong if ecountId ever held a value
            if (self.ecountHeader in config):
                self.ecountId = config[self.ecountHeader][self.ecountIdKey]
                self.ecountPwd = config[self.ecountHeader][self.ecountPwdKey]
                self.ecountComCode = config[self.ecountHeader][self.ecountComKey]
                self.ecountApiKey = config[self.ecountHeader][self.ecountApiKeyKey]
                Logger.debug("apikey: " + self.ecountApiKey)
            else:
                Logger.error("ECOUNT 로그인 정보 불러오기 실패 : " + self.configFilePath + " 설정을 불러오는데 실패했습니다.")
        except :
            Logger.error("로그인 정보 불러오기 실패 : " + self.configFilePath + " 설정을 불러오는데 실패했습니다.")
76,579 | grape0919/AutoRegister | refs/heads/master | /Process.py | import sys
from log.Logger import Logger
from AutoRegister import WindowClass
from PyQt5.QtWidgets import QApplication, QMessageBox
from PyQt5.QtCore import Qt
from view.PrograssDialog import Ui_Form
from login.AutoLogin import AutoLogin
from parsing.ProdCrawler import Crawler
if __name__ == "__main__" :
    # Application entry point: create the Qt app and main window, perform the
    # portal logins and the master-data download behind a progress popup,
    # then hand the loaded lookup tables to the window and start the loop.
    # QApplication drives the Qt event loop
    app = QApplication(sys.argv)
    # instantiate the main window
    myWindow = WindowClass()
    ## login
    Logger.info("login start")
    pgDialog = Ui_Form()
    pgDialog.setupUi()
    pgDialog.progLabel.setText("로그인 및 거래처/품목데이터 로딩중..")
    pgDialog.show()
    # pgDialog.start()
    #### log into Lozen and ecount
    loginProcess = AutoLogin()
    ecountDataCrawler = Crawler()
    resultLogin = loginProcess.run()
    # NOTE(review): if AutoLogin.run returns a bare False on failure,
    # resultLogin[0] raises TypeError before the error dialog — verify the
    # failure path returns an indexable value.
    if resultLogin[0]:
        myWindow.lozenLoginSession = loginProcess.login_session
        myWindow.lozenLoginData1 = resultLogin[1]
        myWindow.lozenLoginData2 = resultLogin[2]
        myWindow.ZONE = resultLogin[3]
        myWindow.SESSION_ID = resultLogin[4]
        Logger.debug("### resultLogin : " + str(resultLogin))
        Logger.info("login end")
    else :
        pgDialog.close()
        msg = QMessageBox()
        msg.setWindowTitle("Error")
        msg.setIcon(QMessageBox.Critical)
        msg.setText("로그인에 실패하여 프로그램을 사용할 수 없습니다.\n config/config.properties 파일의 로그인 정보를 확인해주세요.")
        msg.setDefaultButton(QMessageBox.Escape)
        sys.exit(msg.exec_())
    # retry loop: keep attempting the master-data download until it succeeds
    # or the user cancels
    while(not ecountDataCrawler.run()):
        #msg.setDefaultButton(QMessageBox.Escape)
        msg = QMessageBox()
        msg.setWindowTitle("Error")
        msg.setIcon(QMessageBox.Critical)
        msg.setText("이카운트 거래처, 품목데이터를 가져오는데 실패하였습니다. \n 재시도해도 같은 문제 발생 시,\n"+
            "ghdry2563@gmail.com 으로 문의주세요.")
        msg.addButton('재시도', QMessageBox.YesRole)
        msg.addButton('취소', QMessageBox.RejectRole)
        flags = Qt.WindowFlags(Qt.WindowStaysOnTopHint)
        msg.setWindowFlags(flags)
        result = msg.exec_()
        # button index 0 = retry, 1 = cancel
        if result == 0:
            continue
        elif result == 1:
            sys.exit()
    pgDialog.close()
    # hand the downloaded lookup tables to the main window
    myWindow.prodSearchData = ecountDataCrawler.searchData
    myWindow.prodSearchDict = ecountDataCrawler.searchDict
    myWindow.prodCodeData = ecountDataCrawler.prodData
    myWindow.customCodeData = ecountDataCrawler.customData
    # populate views
    # myWindow.reflash()
    # show the main window
    myWindow.show()
    # enter the Qt event loop
    app.exec_()
76,580 | grape0919/AutoRegister | refs/heads/master | /parsing/LozenParser.py | import requests
import re
from data.CarriageData import Data
from log.Logger import Logger
class Parser:
    """Fetches and parses Lozen courier shipment records into Data objects.

    The SmartLogen endpoints answer plain text delimited by 'Ξ' (fields)
    and '≡' (records); all field positions below are empirical.
    """
    def __init__(self, loginData1, loginData2, session):
        # loginData1/2: branch and trade codes captured at login;
        # session: the authenticated requests.Session from AutoLogin
        self.loginData1 = loginData1
        self.loginData2 = loginData2
        self.session = session
    def parse(self, fromDate, toDate):
        """Return a list of Data records for shipments in [fromDate, toDate].

        Performs one list request plus one detail request per waybill.
        Returns None when the login data cannot be used to build the query.
        """
        headers_common = {'Content-Length':'32','Accept':'*/*','X-Requested-With':'XMLHttpRequest','User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36','Content-Type':'application/x-www-form-urlencoded; charset=UTF-8','Origin':'http://203.247.141.92:8080','Referer':'http://203.247.141.92:8080/SmartLogen/login','Accept-Encoding':'gzip, deflate','Accept-Language':'ko,ko-KR;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6','Connection':'close'}
        url = 'http://203.247.141.92:8080/SmartLogen/OrderRecordSelect'
        try:
            post = {'branchCd':self.loginData1,'tradeCd':self.loginData2,'fromDate':fromDate,'toDate':toDate ,'personNm':'','ziphaGb':'A','delieverGb':'A','unsongjangGb':'F'}
        except:
            # NOTE(review): Logger.error is given multiple positional
            # arguments here; if it accepts a single message this line
            # itself raises TypeError — confirm Logger's signature.
            Logger.error("로젠 로그인 설정이 잘못 되었습니다. ID : ", self.loginData1, " PASSWORD : ", self.loginData2)
            return None
        response = self.session.post(url,data=post,headers=headers_common)
        main_list = response.text.split('≡')
        dataList = []
        sangho_name = ''
        phone = ''
        address = ''
        unsong_address = ''  # NOTE(review): never assigned below
        first_counter = 0    # NOTE(review): never used below
        # last list entry is empty trailing text, hence [:-1]
        for main_data_temp in main_list[:-1]:
            # field 3 of a list row is the waybill number
            main_id = main_data_temp.split('Ξ')[3]
            url = 'http://203.247.141.92:8080/SmartLogen/SlipInfoSelect'
            post = {'waybillNo':main_data_temp.split('Ξ')[3].replace('-',''),'UserID':self.loginData2}
            response = self.session.post(url,data=post,headers=headers_common)
            main_data=response.text
            # detail fields: 0 = customer name, 2 = address, 3 = phone,
            # 12 = date (dashes stripped), 28 = free-text product list
            date_str = main_data.split('Ξ')[12].replace('-','')
            sangho_name = main_data.split('Ξ')[0]
            phone = main_data.split('Ξ')[3]
            address = main_data.split('Ξ')[2]#+' '+main_data.split('Ξ')[1]
            # split the product text on ',' or '.' into "name [count]" chunks
            splited_prods = re.split('[\,\.]',main_data.split('Ξ')[28])
            prod_datas = ""
            prod_eas = ""
            print("@!#!@# splited_prods : ", splited_prods)
            for prod in splited_prods:
                pd = prod.strip().split(' ')
                prod_datas += str(pd[0]) + "\n"
                if(len(pd) != 2):
                    # no explicit count -> assume quantity 1
                    prod_eas += str(1) + "\n"
                else:
                    try:
                        # counts may be suffixed with '개' (pieces)
                        ea = int(str(pd[1].replace('개','')))
                        prod_eas += str(pd[1].replace('개','')) + "\n"
                    except:
                        # second token is not a count: treat it as another
                        # product name.  NOTE(review): two quantity lines are
                        # appended here for one extra name, which can desync
                        # names from counts — confirm intended behavior.
                        prod_datas += pd[1] + "\n"
                        prod_eas += str(1) + "\n" + str(1) + "\n"
            # assemble the record; PROD_DES / QTY are newline-joined lists
            data = Data()
            data.carriageNumber = main_id
            data.IO_DATE = date_str
            data.CUST_DES = sangho_name
            data.phoneNumber = phone
            data.PROD_DES = prod_datas.strip()
            data.QTY = prod_eas.strip()
            data.address = address
            dataList.append(data)
            print("!@#!@# data : ", data)
        return dataList
76,581 | grape0919/AutoRegister | refs/heads/master | /view/main.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5.QtWidgets import QMainWindow, QWidget, QVBoxLayout, QLabel, QHBoxLayout, QDateEdit, QPushButton, QTableWidget, QAbstractItemView, QProgressBar
from PyQt5.QtCore import QSize, QRect, QMetaObject
from PyQt5.QtGui import QFont
import datetime
import static.staticValues as staticValues
class Ui_MainWindow(QMainWindow):
def setupUi(self, MainWindow):
MainWindow.resize(960, 550)
MainWindow.setMinimumSize(QSize(960, 550))
MainWindow.setMaximumSize(QSize(960, 550))
#메인 화면 색상py
self.setStyleSheet("color: black;"
"background-color: white")
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setMinimumSize(QSize(960, 550))
self.centralwidget.setMaximumSize(QSize(960, 550))
self.layoutWidget = QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QRect(10, 11, 942, 510))
self.verticalLayout = QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.inquiryLabel = QLabel(self.layoutWidget)
font = QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.inquiryLabel.setFont(font)
self.verticalLayout.addWidget(self.inquiryLabel)
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setContentsMargins(20, -1, 500, -1)
self.fromDateEdit = QDateEdit(self.layoutWidget)
self.horizontalLayout.addWidget(self.fromDateEdit)
self.dashLabel = QLabel(self.layoutWidget)
self.dashLabel.setFont(font)
self.horizontalLayout.addWidget(self.dashLabel)
self.toDateEdit = QDateEdit(self.layoutWidget)
self.horizontalLayout.addWidget(self.toDateEdit)
self.inquiryButton = QPushButton(self.layoutWidget)
self.inquiryButton.setMinimumSize(QSize(100, 30))
self.inquiryButton.setMaximumSize(QSize(100, 30))
self.horizontalLayout.addWidget(self.inquiryButton)
self.horizontalLayout.setStretch(0, 4)
self.horizontalLayout.setStretch(2, 4)
self.horizontalLayout.setStretch(3, 1)
self.verticalLayout.addLayout(self.horizontalLayout)
self.tableView = QTableWidget(self.layoutWidget)
self.tableView.setMaximumSize(QSize(940, 400))
self.tableView.setColumnCount(8)
self.tableView.setHorizontalHeaderLabels(["","운송장번호", "날짜", "상호", "전화번호", "상품", "수량", "주소"])
self.tableView.setColumnWidth(0, 10)
self.tableView.setColumnWidth(1, 80)
self.tableView.setColumnWidth(2, 80)
self.tableView.setColumnWidth(3, 150)
self.tableView.setColumnWidth(4, 120)
self.tableView.setColumnWidth(6, 40)
self.tableView.setColumnWidth(7, 250)
self.tableView.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tableView.setSelectionMode(QAbstractItemView.SingleSelection)
self.tableView.setSelectionBehavior(QAbstractItemView.SelectRows)
# self.tableView = QTableView(self.layoutWidget)
# self.tableView.setMinimumSize(QSize(940, 400))
# self.tableView.setMaximumSize(QSize(940, 400))
# self.tableView.setSelectionBehavior(QAbstractItemView.SelectRows) # row 전체를 선택하도록
# self.tableView.setSelectionMode(QAbstractItemView.SingleSelection) #
# self.tableView.setEditTriggers(QAbstractItemView.NoEditTriggers) # 셀 내용을 수정 불가하도록
self.verticalLayout.addWidget(self.tableView)
self.registButton = QPushButton(self.layoutWidget)
self.registButton.setEnabled(True)
self.registButton.setMinimumSize(QSize(100, 30))
self.registButton.setMaximumSize(QSize(100, 30))
self.verticalLayout.addWidget(self.registButton)
self.progressBar = QProgressBar()
#UI스타일
self.inquiryButton.setStyleSheet(staticValues.buttonStyleSheet)
self.inquiryButton.setFont(staticValues.buttonFont)
self.registButton.setStyleSheet(staticValues.buttonStyleSheet)
self.registButton.setFont(staticValues.buttonFont)
#오늘 날짜 세팅
nowDate = datetime.datetime.now()
self.fromDateEdit.setDate(nowDate)
self.toDateEdit.setDate(nowDate)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install the (Korean) display strings on the window and its widgets."""
    MainWindow.setWindowTitle("이카운트 ERP 자동등록 프로그램")
    # Apply each caption in the same order the original assignments ran.
    captions = (
        (self.inquiryLabel, "조회 기간"),
        (self.dashLabel, "-"),
        (self.inquiryButton, "조회"),
        (self.registButton, "등록"),
    )
    for widget, caption in captions:
        widget.setText(caption)
if __name__ == "__main__":
    import sys

    # Stand-alone preview of the generated UI: build the window, attach the
    # widgets, and hand control to the Qt event loop.
    application = QApplication(sys.argv)
    preview_window = QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(preview_window)
    preview_window.show()
    sys.exit(application.exec_())
| {"/parsing/ProdCrawler.py": ["/config/Configuration.py"], "/login/AutoLogin.py": ["/config/Configuration.py"], "/Process.py": ["/AutoRegister.py", "/view/PrograssDialog.py", "/login/AutoLogin.py", "/parsing/ProdCrawler.py"], "/parsing/LozenParser.py": ["/data/CarriageData.py"], "/AutoRegister.py": ["/view/main.py", "/data/CarriageData.py", "/parsing/LozenParser.py", "/view/PrograssDialog.py"]} |
76,582 | grape0919/AutoRegister | refs/heads/master | /AutoRegister.py |
import sys
from PyQt5.QtWidgets import QCheckBox, QTableWidgetItem
from PyQt5.QtCore import Qt
from view.main import Ui_MainWindow
from data.CarriageData import Register
from parsing.LozenParser import Parser
from view.PrograssDialog import Ui_Form
from log.Logger import Logger
class WindowClass(Ui_MainWindow) :
    """Main window: crawls Lozen carriage data for a date range, shows it in a
    table with per-row checkboxes, and registers the checked rows to the ERP.

    NOTE(review): the login/session attributes below (and ``lozenLoginSession``,
    which is read in reflash() but never assigned in this class) are presumably
    populated by the login flow before reflash() runs — confirm against the
    caller (Process.py).
    """
    lozenLoginData1 = ""
    lozenLoginData2 = ""
    ZONE = ""
    SESSION_ID = ""
    is_progressing = False
    # Name -> ERP code lookup tables; expected to be filled in externally
    # before spreadData() runs — TODO confirm.
    prodCodeData = None
    customCodeData = None
    prodSearchData = None
    prodSearchDict = None

    def __init__(self) :
        super(WindowClass, self).__init__()
        self.setupUi(self)
        # Inquiry ("조회") button click event
        self.inquiryButton.clicked.connect(self.clickInquiryButton)
        self.registButton.clicked.connect(self.clickRegistrationButton)

    def progressing(self):
        """Show a crawling-progress dialog.

        NOTE(review): appears unfinished — the dialog is built and labelled
        but never entered into an event loop, and is_progressing is set but
        never cleared here.
        """
        Logger.info("startProgressing")
        self.is_progressing = True
        pgDialog = Ui_Form()
        pgDialog.setupUi()
        pgDialog.progLabel.setText("데이터 크롤링 중..")
        # process = False
        Logger.info("stopProgressing")

    def reflash(self):
        """Crawl carriage data between the two date edits and display it."""
        Logger.info("reflash")
        #TODO: start progress
        # pg = Thread(target=self.progressing)
        # pg.start()
        # Crawling
        crawler = Parser(self.lozenLoginData1, self.lozenLoginData2, self.lozenLoginSession)
        # QDateEdit yields YYYY-MM-DD; the crawler wants YYYYMMDD.
        ddate = str(self.fromDateEdit.date().toPyDate())
        fromDate = "".join(ddate.split("-"))
        ddate = str(self.toDateEdit.date().toPyDate())
        toDate = "".join(ddate.split("-"))
        self.dataList = crawler.parse(fromDate, toDate)
        self.spreadData(self.dataList)
        #TODO: STOP PROGRESS
        # self.is_progressing = False

    def spreadData(self, datas):
        """Populate the table widget with the crawled carriage rows.

        Side effects on each data object: the customer name is resolved to an
        ERP code (CUST), multi-line product names are split and resolved into
        code lists (PROD_CD / PROD_DES), and QTY is split into a list.
        """
        Logger.info("spreadData")
        # self.model = QStandardItemModel()
        # self.model.setColumnCount(5)
        # self.model.setHorizontalHeaderLabels(["","주문번호","운송장번호", "날짜", "상호", "전화번호", "상품", "수량", "주소"])
        self.tableView.setColumnCount(8)
        self.tableView.setRowCount(len(datas))
        self.tableView.clicked.connect(self.clickTable)
        # Build the rows to spread the crawled data onto the table view.
        rowSet = []  # (row index, newline count of the product cell)
        tempDate = ""
        upload_count = 0
        for index, data in enumerate(datas):
            # Column 0: checkbox item/widget pair used to mark rows for registration.
            item = self.MyQTableWidgetItemCheckBox()
            self.tableView.setItem(index, 0, item)
            chbox = self.MyCheckBox(item)
            self.tableView.setCellWidget(index, 0, chbox)
            for i,d in enumerate(data.toArray()):
                # if(i == 0):
                #     if tempDate != data.IO_DATE:
                #         tempDate = data.IO_DATE
                #         upload_count=1
                #     else :
                #         upload_count+=1
                #     data.UPLOAD_SER_NO = tempDate+"_"+str(upload_count)
                #     d = data.UPLOAD_SER_NO
                if(i == 2):
                    # Customer name -> ERP customer code.
                    try:
                        code = self.customCodeData[d]
                    except KeyError:
                        code = "TRA2008008" # fallback: parcel-service code
                    data.CUST = code
                elif(i == 4):
                    # Product cell: one product per line.
                    rowSet.append((index, d.count('\n')))
                    data.PROD_DES = data.PROD_DES.split('\n')
                    code = []
                    for idx, prodNm in enumerate(data.PROD_DES):
                        try:
                            code.append(self.prodCodeData[prodNm])
                        except KeyError:
                            print("!@#!@# 품목 데이터 없음 : ", prodNm)
                            # Fall back to the search table, then a default code.
                            try:
                                code.append(self.prodSearchData[prodNm])
                                data.PROD_DES[idx] = self.prodSearchDict[prodNm]
                            except:
                                print("!@#!@# 품목 검색 데이터 없음 : ", prodNm)
                                code.append("ECO14_05_04")
                    print("!@#!@# 품목코드 : ", code)
                    data.PROD_CD = code
                    data.QTY = data.QTY.split('\n')
                item = QTableWidgetItem(d)
                self.tableView.setItem(index, i+1, item)
        # Adjust row heights to the number of product lines.
        for row in rowSet:
            self.tableView.setRowHeight(row[0], 40+(row[1]*20))

    def clickTable(self):
        """Table click handler (currently logging only)."""
        Logger.info("click table view")

    def clickInquiryButton(self):
        """'조회' (inquiry) button handler: re-crawl and refresh the table."""
        Logger.info("pressed InquiryButton")
        self.reflash()

    def clickRegistrationButton(self):
        """'등록' (register) button handler: send every checked row to the ERP."""
        Logger.info("pressed RegistryButton")
        register = Register(self.ZONE, self.SESSION_ID)
        # model = self.tableView.model()
        # print("model : ", model)
        data = []
        print("!@#!@# rowCount : " , self.tableView.rowCount())
        if self.tableView.rowCount() > 0:
            for row in range(self.tableView.rowCount()):
                print("!@#!@# check . ", row, ":", self.tableView.item(row, 0).text())
                # "0" == unchecked (see MyCheckBox); skip unchecked rows.
                if(self.tableView.item(row, 0).text() == "0"):
                    print("!@#!@# continue")
                    continue
                else :
                    print("!@#!@# self.dataList[row] : ", self.dataList[row])
                    self.dataList[row].checkValue = "2"
                    data.append(self.dataList[row])
        register.registrationList(data)

    class MyCheckBox(QCheckBox):
        """Checkbox widget kept in sync with its companion table item."""

        def __init__(self, item):
            """ :param item: QTableWidgetItem instance """
            super().__init__()
            self.item = item
            self.mycheckvalue = 0
            # 0 --> unchecked, 2 --> checked
            self.stateChanged.connect(self.__checkbox_change)
            self.stateChanged.connect(self.item.my_setdata)
            # The checked state is mirrored into the item's data so rows can
            # be sorted by checked state.

        def __checkbox_change(self, checkvalue):
            # print("myclass...check change... ", checkvalue)
            self.mycheckvalue = checkvalue
            Logger.debug("checkbox row= " + str(self.get_row()))
            Logger.debug("checkValue : " + str(self.mycheckvalue))
            Logger.debug("self " + str(self.objectName))
            # item = QTableWidgetItem("True")
            # self.item.setItem(self.get_row(), "True")

        def get_row(self):
            # Row of the companion item (tracks the item if rows are sorted).
            return self.item.row()

    class MyQTableWidgetItemCheckBox(QTableWidgetItem):
        """
        Placed in the same cell as the checkbox widget.
        Implements sorting by checked state via user-defined item data.
        """

        def __init__(self):
            super().__init__()
            self.setData(Qt.UserRole, 0)

        def __lt__(self, other):
            # print(type(self.data(Qt.UserRole)))
            # Compare by the stored checked-state value so sorting groups
            # checked/unchecked rows.
            return self.data(Qt.UserRole) < other.data(Qt.UserRole)

        def my_setdata(self, value):
            Logger.debug("my setdata " + str(value))
            self.setData(Qt.UserRole, value)
            Logger.debug("row " + str(self.row()))
            Logger.debug("self.data : " + str(self.data(Qt.UserRole)))

        def text(self):
            # "0"/"2" string form of the checked state; read by
            # clickRegistrationButton to decide which rows to register.
            return str(self.data(Qt.UserRole))
if __name__ == "__main__" :
    # BUG FIX: QApplication was never imported in this module (only QCheckBox
    # and QTableWidgetItem are imported from PyQt5.QtWidgets above), so the
    # original raised NameError here. Import it locally for the script path.
    from PyQt5.QtWidgets import QApplication

    # QApplication drives the Qt event loop.
    app = QApplication(sys.argv)
    # Build the main window and load the initial data.
    myWindow = WindowClass()
    myWindow.reflash()
    # Show the UI.
    myWindow.show()
    # Enter the event loop (blocks until the window closes).
    app.exec_()
76,583 | BryanAke/drfme_test | refs/heads/master | /drfme_test/urls.py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from widgetapp.api import ExtendedMongoRouter, WidgetViewSet, SpecialWidgetViewSet, ThingViewSet, VehicleViewSet, TruckViewSet, CarViewSet, SemiViewSet
# Router wiring: each viewset is exposed under /api/<prefix>/.
router = ExtendedMongoRouter()
router.register(r'widgets', WidgetViewSet)
router.register(r'specialwidgets', SpecialWidgetViewSet)
router.register(r'things', ThingViewSet)
router.register(r'vehicles', VehicleViewSet)
router.register(r'trucks', TruckViewSet)
# BUG FIX: the URL prefix was misspelled 'sermis' in the original.
router.register(r'semis', SemiViewSet)
router.register(r'cars', CarViewSet)

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'drfme_test.views.home', name='home'),
    url(r'^api/', include(router.urls)),
    url(r'^admin/', include(admin.site.urls)),
)
| {"/drfme_test/urls.py": ["/widgetapp/api.py"]} |
76,584 | BryanAke/drfme_test | refs/heads/master | /widgetapp/models.py | from mongoengine import (Document, EmbeddedDocument, StringField, ListField, ReferenceField, MapField,
ValidationError, EmbeddedDocumentField, IntField, DynamicField, DictField, FloatField)
# Create your models here.
class Widget(Document):
    """
    Model for pretend data: a named node with a parent chain and an inline
    list of typed relationships to other widgets.
    """
    name = StringField()
    description = StringField()
    parent = ReferenceField('Widget')
    ancestors = ListField(ReferenceField('Widget'))
    relationships = ListField(EmbeddedDocumentField('Relationship'))

    meta = {
        'allow_inheritance': True
    }

    def addRelationship(self, reltype, widget):
        """Append a typed edge from this widget to *widget*."""
        rel = Relationship()
        rel.subject_rel = self
        rel.predicate = reltype
        rel.object_rel = widget
        self.relationships.append(rel)

    def set_parent(self, newparent):
        """Set the parent and record the edge in both directions."""
        self.parent = newparent
        self.addRelationship("parent", newparent)
        newparent.addRelationship("child", self)

    def set_ancestors(self):
        """Rebuild the ancestor list: parent followed by the parent's chain."""
        if self.parent is None:
            self.ancestors = []
        else:
            self.ancestors = [self.parent] + list(self.parent.ancestors)
class Relationship(EmbeddedDocument):
    """Typed edge between two widgets, stored inline on the subject widget."""
    subject_rel = ReferenceField("Widget")   # source of the relationship
    predicate = StringField()                # relationship type, e.g. "parent"/"child"
    object_rel = ReferenceField("Widget")    # target of the relationship
class SpecialField(DynamicField):
    """Dynamic field whose descriptor renders int values as a money string."""

    def __get__(self, instance, owner):
        # Class-level access returns the field descriptor itself.
        if instance is None:
            return self
        stored = instance._data.get(self.name)
        # Integers are dressed up; everything else passes through untouched.
        if isinstance(stored, int):
            return str(stored) + " MILLION DOLLARS!"
        return stored
class SpecialWidget(Widget):
    """
    Model that inherits some of Widget's stuff and adds some data.
    """
    some_value = IntField()
    dyn_value = SpecialField()  # ints come back as "... MILLION DOLLARS!" (see SpecialField)
    components = ListField(EmbeddedDocumentField("Component"))
class Component(EmbeddedDocument):
    """Named part of a SpecialWidget that points at a Thing."""
    name = StringField()
    some_thing = ReferenceField("Thing")
class ThingProps(EmbeddedDocument):
    """Embedded value bag attached to Thing."""
    value_one = StringField()
    value_two = StringField()
    value_three = IntField()
class Thing(Document):
    """
    A thing referenced by Components.
    """
    name = StringField()
    some_values = EmbeddedDocumentField("ThingProps")
    testmap = DictField()  # free-form key/value data
class Vehicle(Document):
    """
    Vehicle or something
    """
    meta = {
        'allow_inheritance': True  # Car/Truck/Semi share this collection
    }
    name = StringField()
    weight = IntField()
    # Plain string here; Truck redeclares this as a ReferenceField —
    # NOTE(review): confirm mongoengine permits that override.
    manufacturer = StringField()
class Car(Vehicle):
    """Passenger vehicle with a single fuel-economy number."""
    mpg = IntField()
class Mileage(EmbeddedDocument):
    """Loaded/unloaded fuel-economy pair embedded on Truck."""
    loaded = IntField()
    unloaded = IntField()
    meta = {
        'allow_inheritance': True
    }
class StupidMileage(Mileage):
    """Test subclass that redeclares the parent's int fields with other types."""
    loaded = ReferenceField(Widget)
    unloaded = StringField()
    helium = IntField()
class Truck(Vehicle):
    """Vehicle whose mpg is an embedded Mileage and whose manufacturer is a Widget reference."""
    mpg = EmbeddedDocumentField(Mileage)
    manufacturer = ReferenceField(Widget)
class Semi(Truck):
    """Truck subclass that adds a cargo volume."""
    volume = IntField()
76,585 | BryanAke/drfme_test | refs/heads/master | /widgetapp/filters.py | __author__ = 'bake3'
from rest_framework import filters as drf_filters
from rest_framework.settings import api_settings
from rest_framework.exceptions import APIException
from mongoengine.queryset.transform import MATCH_OPERATORS
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.db.models.sql.constants import LOOKUP_SEP # noqa
class InvalidFilterError(APIException):
    """
    Raised when the end user attempts to use a filter on a field
    that has not been whitelisted in the API.
    """
    default_detail = "A field you specified does not allow filtering."
    status_code = 400  # rendered as HTTP 400 Bad Request by DRF
class StripCacheFilter(drf_filters.BaseFilterBackend):
    """Filter backend that strips '_cache' keys out of mutating request bodies.

    The queryset itself is never filtered; this backend exists purely for the
    side effect on request.DATA.
    """

    def recursive_strip(self, data):
        """Remove every '_cache' key from *data*, recursing into dicts and lists.

        Dicts are mutated in place; lists are rebuilt. Returns the cleaned value.
        """
        if isinstance(data, dict):
            data.pop('_cache', None)
            for key in data:
                data[key] = self.recursive_strip(data[key])
            return data
        if isinstance(data, list):
            return [self.recursive_strip(e) for e in data]
        return data

    def filter_queryset(self, request, queryset, view):
        """Strip cached payload data on POST/PUT/PATCH; pass the queryset through.

        Fix: removed the dead locals ``request_params`` and ``d`` from the
        original — they were assigned and never used.
        """
        if request.method in ("POST", "PUT", "PATCH"):
            self.recursive_strip(request.DATA)
        return queryset
class MongoProjectionFilterBackend(drf_filters.BaseFilterBackend):
    """
    Expose MongoEngine's .only() so clients can limit which fields a
    QuerySet returns, via a comma-separated ``fields`` query parameter.
    """
    projection_param = "fields"

    def get_projection(self, request):
        """Return the requested field names as a list, or None if absent."""
        raw = request.query_params.get(self.projection_param)
        if not raw:
            return None
        return [name.strip() for name in raw.split(',')]

    def check_fields(self, parameters, doc_fields):
        """Drop any requested name that is not a real document field.

        This prevents projecting fields outside the schema; revisit if
        schemaless projection is ever needed.
        """
        return [name for name in parameters if name in doc_fields]

    def filter_queryset(self, request, queryset, view):
        """Apply .only(...) when a projection was requested; otherwise no-op."""
        requested = self.get_projection(request)
        if not requested:
            return queryset
        valid = self.check_fields(requested, queryset._document._fields)
        return queryset.only(*valid)
class MongoProjectionViewsetMixin(object):
    """Viewset mixin that narrows the serializer to the requested projection.

    Works together with MongoProjectionFilterBackend: the backend limits the
    DB query, this mixin limits the serialized output.
    """
    # BUG FIX: the original did ``api_settings.DEFAULT_FILTER_BACKENDS.append(...)``,
    # which (a) mutates the global default-backends list and (b) assigns None to
    # filter_backends, because list.append returns None. Build a new list instead.
    filter_backends = list(api_settings.DEFAULT_FILTER_BACKENDS) + [MongoProjectionFilterBackend]
    projection_param = "fields"

    def get_serializer_class(self):
        """Return the serializer class, narrowed to the requested fields if any."""
        # Defer to super() so other get_serializer_class magic still applies.
        serializer_class = super(MongoProjectionViewsetMixin, self).get_serializer_class()
        if self.request and self.request.query_params.get(self.projection_param, False):
            # Keep only names that are real model fields.
            projection = self.request.query_params.get(self.projection_param).split(',')
            projection = [field for field in projection if field in serializer_class.Meta.model._fields]
            if projection:
                # Throwaway subclass whose Meta restricts the serialized fields.
                class serializer_klass(serializer_class):
                    class Meta(serializer_class.Meta):
                        fields = projection
                        exclude = None
                return serializer_klass
        return serializer_class
class MongoFilterBackend(drf_filters.BaseFilterBackend):
    """Query-parameter filtering for MongoEngine querysets.

    Fields must be whitelisted in ``view.filter_fields``; operators must be
    MongoEngine MATCH_OPERATORS. Unknown fields or operators in the query
    string are silently skipped, but filtering on a known, non-whitelisted
    field raises InvalidFilterError (HTTP 400).
    """

    # NOTE(review): the mutable default arguments (filters={}) are never
    # mutated here, but are still an anti-pattern worth cleaning up.
    def build_filters(self, queryset=None, filters={}, view=None):
        """
        Given a dictionary of filters, create the necessary ORM-level filters.
        Valid values are derived from MongoEngine's Match Operators, which
        mirror MongoDB's operations.
        This filter is designed to deal with filters that can be passed to
        MongoEngine directly after formatting. It will ignore filters that refer to fields it doesn't
        recognize, or operators that MongoEngine doesn't recognize.
        InvalidFilterError will be thrown if the request contains a filter on a non-whitelisted field.
        """
        qs_filters = {}
        qs_fields = queryset._document._fields
        for filter_expr, value in filters.items():
            # Split "field__op" into its pieces; default op is 'exact'.
            filter_bits = filter_expr.split(LOOKUP_SEP)
            field_name = filter_bits.pop(0)
            filter_type = filter_bits.pop(0) if len(filter_bits) else 'exact'
            if not field_name in qs_fields:
                # It's not a field we know about. Move along citizen.
                continue
            if field_name in qs_fields and not len(filter_bits) and filter_type not in MATCH_OPERATORS:
                # Valid field, but we don't understand the filter.
                continue
            if len(filter_bits):
                # They passed us too many filter bits, probably for recursive lookups or something. (e.g. ?parent__name__startswith=Foo - we currently can't do that.
                continue
            # May raise InvalidFilterError for non-whitelisted fields.
            lookup_bits = self.check_filtering(field_name, queryset, filter_type, filter_bits, view.filter_fields)
            value = self.filter_value_to_python(value, filters, filter_expr, filter_type)
            db_field_name = LOOKUP_SEP.join(lookup_bits)
            qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
            qs_filters[qs_filter] = value
        return qs_filters

    def check_filtering(self, field_name, queryset, filter_type='exact', filter_bits=[], filtering={}):
        """
        Given a field name, an optional filter type and an optional list of
        additional relations, determine if a field can be filtered on.
        If a filter does not meet the needed conditions, it should raise an
        ``InvalidFilterError``.
        If the filter meets the conditions, a list of attribute names (not
        field names) will be returned.
        """
        fields = queryset._document._fields
        if not field_name in filtering:
            raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)
        # Check to see if it's a relational lookup and if that's allowed.
        if len(filter_bits):
            #currently not working (recursive querying necessitates additional queries, and lots of mess.
            #ref_obj = fields[field_name].document_type_obj
            #ref_fields = ref_obj._fields
            #return [fields[field_name].name]
            raise InvalidFilterError("Recursive filtering of reference fields under development.")
        return [fields[field_name].name]

    def filter_value_to_python(self, value, filters, filter_expr, filter_type):
        """
        Turn the string ``value`` into a python object.
        """
        # Simple values
        if value in ['true', 'True']:
            value = True
        elif value in ['false', 'False']:
            value = False
        elif value in ['nil', 'none', 'None']:
            value = None
        # Split on ',' if not empty string and either an in or range filter.
        if filter_type in ('in', 'range') and len(value):
            if hasattr(filters, 'getlist'):
                # QueryDict-style input: merge repeated params, then split each.
                value = []
                for part in filters.getlist(filter_expr):
                    value.extend(part.split(','))
            else:
                value = value.split(',')
        return value

    def filter_queryset(self, request, queryset, view):
        """Build ORM filters from the query string and apply them."""
        request_filters = request.query_params
        applicable_filters = self.build_filters(queryset=queryset, filters=request_filters, view=view)
        return queryset.filter(**applicable_filters)
| {"/drfme_test/urls.py": ["/widgetapp/api.py"]} |
76,586 | BryanAke/drfme_test | refs/heads/master | /widgetapp/api.py | from rest_framework_extensions.routers import ExtendedDefaultRouter
from rest_framework_mongoengine.routers import MongoRouterMixin
from rest_framework_mongoengine.serializers import DocumentSerializer, PolymorphicDocumentSerializer, ChainableDocumentSerializer
from rest_framework_mongoengine.fields import ReferenceField, HyperlinkedDocumentIdentityField
from rest_framework.fields import CharField
from rest_framework_mongoengine.viewsets import ModelViewSet
from rest_framework_extensions.mixins import NestedViewSetMixin, PaginateByMaxMixin
from models import Widget, SpecialWidget, Thing, Vehicle, Truck, Car, Semi
#######
#Serializers
#######
class WidgetSerializer(PolymorphicDocumentSerializer):
    """Polymorphic serializer for Widget and its subclasses."""
    class Meta:
        model = Widget
        extra_kwargs = {
            'parent': {
                'required': False  # root widgets have no parent
            }
        }
class SpecialWidgetSerializer(DocumentSerializer):
    """Plain (non-polymorphic) serializer for SpecialWidget documents."""
    class Meta:
        model = SpecialWidget
class ThingSerializer(DocumentSerializer):
    """Serializer for Thing documents."""
    class Meta:
        model = Thing
class VehicleSerializer(ChainableDocumentSerializer):
    """Chainable base serializer for the Vehicle hierarchy."""
    _cls = CharField(source='_class_name', required=False, allow_null=True)  # exposes the document class name
    href = HyperlinkedDocumentIdentityField()
    class Meta:
        model = Vehicle
class TruckSerializer(ChainableDocumentSerializer):
    """Chainable serializer for Truck documents."""
    _cls = CharField(source='_class_name', required=False, allow_null=True)
    href = HyperlinkedDocumentIdentityField()
    class Meta:
        model = Truck
class SemiSerializer(ChainableDocumentSerializer):
    """Chainable serializer for Semi documents."""
    _cls = CharField(source='_class_name', required=False, allow_null=True)
    href = HyperlinkedDocumentIdentityField()
    class Meta:
        model = Semi
class CarSerializer(ChainableDocumentSerializer):
    """Chainable serializer for Car documents."""
    _cls = CharField(source='_class_name', required=False, allow_null=True)
    href = HyperlinkedDocumentIdentityField()
    class Meta:
        model = Car
# Register subclass serializers on their parents so the chainable base can
# dispatch to the right serializer for each document class.
VehicleSerializer.register_serializer(TruckSerializer)
VehicleSerializer.register_serializer(CarSerializer)
TruckSerializer.register_serializer(SemiSerializer)
#######
#ViewSets
#######
class WidgetViewSet(ModelViewSet):
    """CRUD endpoints for Widget documents."""
    serializer_class = WidgetSerializer
    model = Widget
    queryset = Widget.objects
    def get_queryset(self):
        # no_dereference keeps ReferenceFields as raw references instead of
        # resolving them with extra queries.
        qs = super(WidgetViewSet, self).get_queryset()
        return qs.no_dereference()
class SpecialWidgetViewSet(ModelViewSet):
    """CRUD endpoints for SpecialWidget documents."""
    #_auto_dereference = True
    serializer_class = SpecialWidgetSerializer
    model = SpecialWidget
    queryset = SpecialWidget.objects
class ThingViewSet(ModelViewSet):
    """CRUD endpoints for Thing documents."""
    #_auto_dereference = True
    serializer_class = ThingSerializer
    model = Thing
    queryset = Thing.objects
class VehicleViewSet(ModelViewSet):
    """CRUD endpoints for the Vehicle hierarchy (polymorphic via _cls)."""
    serializer_class = VehicleSerializer
    model = Vehicle
    queryset = Vehicle.objects
class TruckViewSet(ModelViewSet):
    """CRUD endpoints for Truck documents."""
    serializer_class = TruckSerializer
    model = Truck
    queryset = Truck.objects
class SemiViewSet(ModelViewSet):
    """CRUD endpoints for Semi documents."""
    # BUG FIX: the original assigned TruckSerializer here, leaving the
    # declared SemiSerializer unused and dropping Semi-specific fields
    # (e.g. 'volume') from the endpoint.
    serializer_class = SemiSerializer
    model = Semi
    queryset = Semi.objects
class CarViewSet(ModelViewSet):
    """CRUD endpoints for Car documents."""
    serializer_class = CarSerializer
    model = Car
    queryset = Car.objects
#######
#Router
#######
class ExtendedMongoRouter(MongoRouterMixin, ExtendedDefaultRouter):
    """Router combining mongo-aware registration with drf-extensions routing."""
    pass
76,587 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py | """ Closeness Centrality Characteristic Function module
GroupClosenessCentrality(GroupCentralityMeasure):
closeness version of the characteristic function.
"""
from GAME.CHARACTERISTIC_FUNCTIONS.group_centrality_measure import GroupCentralityMeasure
class GroupClosenessCentrality(GroupCentralityMeasure):
    """ Closeness Centrality Characteristic Function class
    <<Inheriting from group_centrality_measure>>
    Implementation of the characteristic function with the Closeness Measure.
    The entire behaviour is inherited from GroupCentralityMeasure; this
    subclass exists so the closeness variant can be selected by name
    (see centrality_type_dictionary.TYPE_TO_CLASS).
    Attributes:
        no attributes are needed.
    """
76,588 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /ALGORITHM/TOOLS/mathematical_tools.py | """ Mathematical module
Module used purely as a mathematical library.
No class are needed.
"""
def fast_binomial(first_newton_parameter, second_newton_parameter):
    """ Newton Binomial Coefficient calculation

    Compute C(n, k) with the multiplicative formula (after Andrew Dalke),
    using the symmetry C(n, k) == C(n, n - k) to shorten the loop.

    Args:
        first_newton_parameter (int): n, the upper parameter.
        second_newton_parameter (int): k, the lower parameter.

    Returns:
        int: C(n, k), or 0 when k is outside [0, n].
    """
    n = first_newton_parameter
    k = second_newton_parameter
    # Out-of-range k yields an empty coefficient by convention.
    if k < 0 or k > n:
        return 0
    # Work with the smaller of k and n - k: fewer multiplications.
    k = min(k, n - k)
    numerator = 1
    denominator = 1
    for step in range(1, k + 1):
        numerator *= n - step + 1
        denominator *= step
    # The product is always exactly divisible.
    return numerator // denominator
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,589 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py | """ Type Dictionary
This dictionary contains the characteristic functions type
matching a module name to a class name.
"""
from GAME.CHARACTERISTIC_FUNCTIONS.group_degree_centrality \
import GroupDegreeCentrality
from GAME.CHARACTERISTIC_FUNCTIONS.group_betweenness_centrality \
import GroupBetweennessCentrality
from GAME.CHARACTERISTIC_FUNCTIONS.group_closeness_centrality \
import GroupClosenessCentrality
# Maps a characteristic-function module name to its implementing class so a
# game can be configured by name.
TYPE_TO_CLASS = {
    'group_degree_centrality': GroupDegreeCentrality,
    'group_betweenness_centrality': GroupBetweennessCentrality,
    'group_closeness_centrality': GroupClosenessCentrality
}
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,590 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /ALGORITHM/INIT/matrix_uploader.py | """ Uploader of the User's Matrix
MatrixUploader:
this class is used to take the user's input (adjacency matrix)
and save it into the program.
"""
import numpy as np
from ALGORITHM.TOOLS.utility_tools import word_checker
class MatrixUploader:
    """ Matrix .txt to Matrix Class

    Load the user's adjacency matrix into the program, either from an edge
    list or from a matrix file, and validate that it represents an
    undirected (symmetric), unweighted (binary) graph. Self-loops on the
    diagonal are cleared.

    Fixes vs. the original: the edge-list file handle is now closed (with
    statement), the leftover debug dump to 'prova.txt' was removed, and the
    diagonal is cleared with np.fill_diagonal instead of an O(n^2) loop.

    Attributes:
        matrix: the validated adjacency matrix (numpy int array).
    """

    def __init__(self):
        """ Classic __init__ python function

        Initialization of the instance: reads the matrix interactively.

        Args:
            self: the instance of the class itself.

        Returns:
            no return is needed.
        """
        self.matrix = self.input_definition()

    @staticmethod
    def input_definition():
        """ Initial function creating the matrix in the software

        Asks the user whether to load an edge list or a matrix file, reads
        it, and returns the validated adjacency matrix. A wrong path or
        filename for the matrix branch re-prompts the user. Exits the
        program when the matrix is not symmetric or not binary.

        Args:
            no arguments are required.

        Returns:
            matrix (numpy.ndarray): the adjacency matrix loaded from the file.
        """
        loaded_matrix = []
        if word_checker(input("Please, select if you want to upload an edge list or a matrix file\n"),
                        ["matrix", "list"]) == "list":
            # Edge-list branch: file lives under EXAMPLES/EDGES_LIST_EXAMPLES.
            # FIX: use a context manager so the handle is always closed.
            with open('EXAMPLES/EDGES_LIST_EXAMPLES/' + input("Please, enter the INPUT file name\n")) as file:
                # First pass: the largest endpoint id fixes the matrix size.
                max_length = 0
                for row in file:
                    numbers = [int(i) for i in row.split() if i.isdigit()]
                    max_length = max(max_length, numbers[0], numbers[1])
                # Second pass: set both directions of every edge
                # (ids in the file are 1-based).
                file.seek(0)
                loaded_matrix = np.zeros((max_length, max_length), dtype=int)
                for row in file:
                    res = [int(i) for i in row.split() if i.isdigit()]
                    loaded_matrix[res[0] - 1][res[1] - 1] = 1
                    loaded_matrix[res[1] - 1][res[0] - 1] = 1
            # FIX: removed leftover debug dump np.savetxt('prova.txt', ...).
        else:
            # Matrix branch: keep asking until a readable file is given.
            while True:
                try:
                    loaded_matrix = np.loadtxt('EXAMPLES/MATRIX_EXAMPLES/' + input("Please, enter the file name\n"),
                                               dtype=int)
                except IOError:
                    print("You might have put a wrong file name or path")
                    continue
                else:
                    break
        print("The Matrix is the following:")
        print(loaded_matrix)
        # The graph must be undirected: the matrix has to be symmetric.
        if not np.allclose(loaded_matrix, loaded_matrix.T):
            print("The matrix you have chosen is not suitable for this algorithm.")
            exit(0)
        # The graph must be unweighted: the matrix has to be binary.
        if not np.array_equal(loaded_matrix, loaded_matrix.astype(bool)):
            print("The matrix you have chosen is not suitable for this algorithm.")
            exit(1)
        # Clear self-loops in one vectorized call instead of a nested loop.
        np.fill_diagonal(loaded_matrix, 0)
        print("But we will use this one (without self-loops):")
        print(loaded_matrix)
        return loaded_matrix

    def get_matrix(self):
        """ Getter of the parameter matrix

        Args:
            self: the instance of the class.

        Returns:
            matrix (numpy.ndarray): the adjacency matrix loaded from the file.
        """
        return self.matrix
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,591 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py | # -*- coding: utf-8 -*-
""" Exponential Time Algorithm
This algorithm allows the computation of the central nodes in a network using
the classical Shapley Value algorithm with a chosen characteristic function.
"""
from itertools import combinations
from math import factorial
import numpy as np
def classical_algorithm(game):
    """ Classical Shapley Calculus Algorithm

    Computes the Shapley Value for each node in the network using the
    characteristic function chosen as the way of obtaining the value that
    a coalition assumes.

    For each node i, the marginal contribution v(S U {i}) - v(S) is
    accumulated over every coalition S not containing i, weighted by the
    classical Shapley coefficient s! * (n - s - 1)! / n!, and the
    empty-coalition term v({i}) / n is added separately (v(empty) = 0 in
    these games, so it reduces to the s = 0 weight 0!(n-1)!/n! = 1/n).

    Args:
        game (Game): the game characterized by a matrix,
            a characteristic function and the input parameters for the SEMI algorithm.

    Returns:
        shapley_value (np.ndarray): vector with the Shapley Value of each node.
            Previously the result was only printed; it is now also returned
            (backward-compatible: existing callers ignore the return value).
    """
    # Shapley Vector Initialization
    shapley_value = np.zeros(game.length)
    # Create a list of all the nodes
    node_list = list(range(0, game.length))
    # Create a list of all the nodes that will be manipulated for permutation
    permutable_nodes = list(range(0, game.length))
    # For each node in the network
    for node in range(0, game.length):
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print("NODE EVALUATED: ", node)
        # Remove the node selected in order to obtain all the coalition not containing
        # the node selected in the cycle
        permutable_nodes.remove(node)
        # Initialize the total marginal contribution
        total_marginal_contribution = 0
        # For each coalition cardinality (s) going from 1 to |V| - 1
        for coalition_cardinality in range(1, game.length):
            print("\tCOALITION CARDINALITY: ", coalition_cardinality)
            # For each subset of all the nodes (except for the node selected)
            # of size coalition cardinality s
            for permutation_tuple in combinations(permutable_nodes, coalition_cardinality):
                # Cast the tuple to list
                permutation = list(permutation_tuple)
                # − v(S) + v(S U {i})
                marginal_contribution = \
                    - game.characteristic_function.get_coalition_value(game,
                                                                       node_list,
                                                                       permutation) + \
                    game.characteristic_function.get_coalition_value(game,
                                                                     node_list,
                                                                     permutation + [node])
                print("\t\tMARGINAL CONTRIBUTION: ", marginal_contribution)
                # Weight the result by ((s!) * (n - s - 1)!) / (n!) with s
                # coalition size and n the number of all the vertex
                weighted_marginal_contribution = \
                    marginal_contribution * \
                    ((factorial(len(permutation)) *
                      factorial(game.length - len(permutation) - 1))
                     / (factorial(game.length)))
                print("\t\tWEIGHTED MARGINAL CONTRIBUTION: ", weighted_marginal_contribution)
                # Add this value to the marginal contribution of that node
                total_marginal_contribution += weighted_marginal_contribution
                print("\t\tTOTAL MARGINAL CONTRIBUTION: ", total_marginal_contribution)
        # Added contribution of V({i}) - V(0)
        # Note that V(0) = 0 in these games
        total_marginal_contribution += \
            (1 / game.length) * \
            game.characteristic_function.get_coalition_value(game,
                                                             node_list,
                                                             [node])
        # Reintroduce in list the node deleted before to cycle over the next node
        permutable_nodes.append(node)
        # Update the Shapley Value Vector
        shapley_value[node] = total_marginal_contribution
        print("\t\tTOTAL MARGINAL CONTRIBUTION: ", total_marginal_contribution)
    print("FINAL SHAPLEY VALUES: \n", shapley_value)
    print("\nSHAPLEY VALUES SUM: ", np.sum(shapley_value))
    return shapley_value
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,592 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /GAME/games_manager.py | """ Manager of the games
GamesManager:
this class contains the core of the program in which all the actions are taken.
An example is the import of the user's matrix, the graph visualization or the choice of the
characteristic function.
"""
from ALGORITHM.IMPLEMENTATIONS.EXPONENTIAL_IMPLEMENTATION.exponential_algorithm \
import classical_algorithm
from ALGORITHM.IMPLEMENTATIONS.POLYNOMIAL_IMPLEMENTATION.polynomial_algorithm \
import semi_algorithm
from ALGORITHM.INIT.graph_builder import GraphBuilder
from ALGORITHM.INIT.matrix_uploader import MatrixUploader
from ALGORITHM.TOOLS.utility_tools import word_checker
from GAME.game import Game
class GamesManager:
    """ Creator and manager of the game

    Builds a Game from the user's adjacency matrix, shows the graph as
    visual feedback, and dispatches the requested Shapley-value algorithm.

    Attributes:
        game (Game): the game assembled at construction time.
    """

    def __init__(self):
        """ Initialization of the game

        Imports the adjacency matrix, plots it as an undirected graph,
        then builds the Game with the characteristic function the user picks.

        Args:
            no args are needed.

        Returns:
            no return is needed.
        """
        # Load the adjacency matrix from the user's file
        adjacency_matrix = MatrixUploader().get_matrix()
        # Visual feedback: show the matrix as an undirected graph
        builder = GraphBuilder(adjacency_matrix)
        builder.graph_construction()
        # Ask the user which characteristic function to play with
        chosen_function = word_checker(
            input("Select the characteristic function:\n"
                  " - \tgroup_degree_centrality\n"
                  " - \tgroup_betweenness_centrality\n"
                  " - \tgroup_closeness_centrality\n"),
            ["group_degree_centrality",
             "group_betweenness_centrality",
             "group_closeness_centrality"])
        self.game = Game(adjacency_matrix, chosen_function)

    def centrality_algorithm(self, choice):
        """ Application of the chosen algorithm

        Two variants are available for comparison:
            - "polynomial": the SEMI value application.
            - "exponential": the classical Shapley Value algorithm.

        Args:
            self: the instance itself, used to hand the game to the algorithm.
            choice (string): the user's choice of algorithm complexity.

        Returns:
            no return is needed.
        """
        if choice == "polynomial":
            measure = self.game.characteristic_function.centrality_measure_selection()
            semi_algorithm(self.game, measure)
        if choice == "exponential":
            classical_algorithm(self.game)
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,593 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /ALGORITHM/INIT/graph_builder.py | """ Graph Visualizer
GraphBuilder:
This class is used in order to have a feedback on the graph structure.
The screen prints out the adjacency matrix as an undirected graph.
"""
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
class GraphBuilder:
    """ Graph builder and plotter

    Renders the adjacency matrix as an undirected graph on screen,
    purely for visual feedback.

    Attributes:
        no attributes are required.
    """

    def __init__(self, matrix):
        """ Classical __init__ python method

        Stores the adjacency matrix for later plotting.

        Args:
            self: the instance of the class itself.
            matrix (matrix): the adjacency matrix previously uploaded.

        Returns:
            no return is needed.
        """
        self.matrix = matrix

    def graph_construction(self):
        """ [OPTIONAL] Function creating the graph visualized

        Builds an undirected, unweighted graph from the stored matrix and
        shows it with a circular layout. Used only as graphic feedback,
        so skipping this call speeds up the program.

        Args:
            self: the instance of the class itself.

        Returns:
            no return is required.
        """
        node_count = self.matrix.shape[0]
        # Coordinates of every 1-cell; each (row, col) pair is one edge.
        rows, cols = np.where(self.matrix == 1)
        edge_list = list(zip(rows.tolist(), cols.tolist()))
        # Printing all starting nodes, all ending nodes and all edges
        # F means FROM, T means TO
        print("F nodes:", np.arange(self.matrix.shape[0]))
        print("T nodes:", np.arange(self.matrix.shape[1]))
        print("Edges:")
        for edge in edge_list:
            print(edge)
        # Undirected unweighted graph with the nodes and edges computed above
        undirected_graph = nx.Graph()
        undirected_graph.add_nodes_from(range(node_count))
        undirected_graph.add_edges_from(edge_list)
        # Circular layout keeps the feedback readable for small graphs
        layout = nx.drawing.layout.circular_layout(undirected_graph)
        nx.draw_networkx(undirected_graph, pos=layout)
        plt.axis('off')
        plt.show()
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,594 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py | """ Degree Centrality Characteristic Function module
GroupDegreeCentrality(GroupCentralityMeasure):
degree version of the characteristic function.
"""
import numpy as np
from ALGORITHM.TOOLS.mathematical_tools import fast_binomial
from ALGORITHM.TOOLS.utility_tools import word_checker
from GAME.CHARACTERISTIC_FUNCTIONS.group_centrality_measure import GroupCentralityMeasure
class GroupDegreeCentrality(GroupCentralityMeasure):
    """ Degree Centrality Characteristic Function class
    <<Inheriting from group_centrality_measure>>
    Implementation of the characteristic function with the Degree Measure.
    Attributes:
        no attributes are needed.
        NOTE(review): `self.matrix` and `self.nodes_number` are read below but
        set elsewhere — presumably by GroupCentralityMeasure; confirm against
        the parent class.
    """
    def data_preparation(self):
        """ Data Preparation for the polynomial algorithm
        The method is based on the preparation of the data
        that will be the input of the polynomial algorithm.
        Args:
            self: the instance of the class itself.
        Returns:
            degree ([int]): vector with the degree of each node.
                The degree is the value of the cell, the position is the node number - 1.
            degree_class ([int]): vector with the value of the node with the degree
                equals to the position - 1.
            neutral ([int][int]): matrix representing the neutral relation
                on the row the nodes and on the columns the cardinality k of the groups.
            positive ([int][int]): matrix representing the positive relation
                on the row the nodes and on the columns the cardinality k of the groups.
            positive_relation ([int][int]): matrix representing the positive relation between
                nodes (expressed by rows) and cardinality groups (expressed by columns).
            negative_relation ([int][int]): matrix representing the negative relation between
                nodes (expressed by rows) and cardinality groups (expressed by columns).
            neutral_relation ([int][int]): matrix representing the neutral relation between
                nodes (expressed by rows) and cardinality groups (expressed by columns).
        """
        # DEGREE
        # Initialization
        # Degree has length equal to the number of nodes
        degree = np.zeros(self.nodes_number, dtype=int)
        # For each 1 in the row of the matrix,
        # just add one to the correspondent position in the degree vector
        # Note that there can't be 1 on the diagonal thanks to the previous matrix adaption
        for row in range(0, self.nodes_number):
            for column in range(0, self.nodes_number):
                degree[row] += self.matrix[row, column]
        print("Degree Vector")
        print(degree)
        # DEGREE CLASS
        # Initialization
        # Degree Class vector has length equal to the maximum degree
        degree_class = np.zeros(max(degree), dtype=int)
        # For each node, put the degree if the cell is empty
        # In order to have the array position equals to the degree - 1
        # and the cell value correspondent to the node with that degree
        # NOTE(review): a node of degree 0 would index degree_class[-1]
        # (Python wrap-around) and silently claim the last slot — presumably
        # inputs never contain isolated nodes; TODO confirm.
        for row in range(0, self.nodes_number):
            if degree_class[degree[row] - 1] == 0:
                degree_class[degree[row] - 1] = row + 1
        print("Degree Class")
        print(degree_class)
        # N & R
        # ***************************************************
        # Function cN_G(v, k)
        #   if |V| - 1 - deg(v) < k then 0
        #   else (|V| - 1 - deg(v) k)
        # Function cR_G(v, k)
        #   if |V| = k then 0
        #   else (|V| k) - cN_G(v, k) - (|V| - 1 k - 1)
        # ***************************************************
        # Initialization
        # Neutral and Positive matrix has size
        # number of nodes |V| x the max cardinality of the coalition k
        # (dtype 'object' keeps Python ints, so large binomials never overflow)
        neutral_init = (self.nodes_number, self.nodes_number + 1)
        neutral = np.zeros(neutral_init, dtype='object')
        positive_init = (self.nodes_number, self.nodes_number + 1)
        positive = np.zeros(positive_init, dtype='object')
        # For each cardinality of the group possible, going from 0 to the max cardinality |V|
        for k in range(0, self.nodes_number + 1):
            for row in range(0, self.nodes_number):
                # NEUTRAL MATRIX
                # Initialize subtracting |V| (node numbers) - 1
                neutral_control_variable = self.nodes_number - 1
                for column in range(0, self.nodes_number):
                    if self.matrix[row, column] == 1:
                        # For each node linked to the node of the corresponding node, subtract 1
                        # (so the variable ends up as |V| - 1 - deg(row))
                        neutral_control_variable = neutral_control_variable - 1
                # If the quantity is greater or equal to k, apply the Newton Binomial Coefficient
                if neutral_control_variable >= k:
                    neutral[row][k] = fast_binomial(neutral_control_variable, k)
                # Otherwise, just set it to 0
                else:
                    neutral[row][k] = 0
                # POSITIVE MATRIX
                # If the cardinality k equals the number of the nodes |V|,
                # just set the cell value to 0
                if k == self.nodes_number:
                    positive[row][k] = 0
                # Otherwise set it to the Newton Binomial of (|V| k) -
                # the Newton Binomial of (|V|-1 k-1) -
                # the correspondent value of the Neutral Matrix
                else:
                    positive[row][k] = fast_binomial(self.nodes_number, k) - \
                                       fast_binomial(self.nodes_number - 1, k - 1) - \
                                       neutral[row][k]
        print("Neutral Matrix")
        print(neutral)
        print("Positive Matrix")
        print(positive)
        # R, -R and N
        # ***************************************************
        # for v in V do
        #   for u in E(v) do |R_(Teta)deg(u)({v})| <- |R_(Teta)deg(u)({v})| + 1
        #   for u not in E(v) do |N_(Teta)deg(u)({v})| <- |N_(Teta)deg(u)({v})| + 1
        #   |-R_(Teta)deg(u)({v})| <- 1
        # ***************************************************
        # Initialization
        # All the matrix has size
        # number of nodes |V| x max degree of the nodes
        positive_relation_init = (self.nodes_number, max(degree))
        negative_relation_init = (self.nodes_number, max(degree))
        neutral_relation_init = (self.nodes_number, max(degree))
        positive_relation = np.zeros(positive_relation_init, np.int64)
        negative_relation = np.zeros(negative_relation_init, np.int64)
        neutral_relation = np.zeros(neutral_relation_init, np.int64)
        # For each node
        for row in range(0, self.nodes_number):
            for column in range(0, self.nodes_number):
                # If there is a link with another node,
                # add 1 to the value of the cell corresponding to the node selected
                # and group cardinality of the node which the selected node is linked to
                if self.matrix[row][column] == 1:
                    positive_relation[row][degree[column] - 1] += 1
                # If there is not a link with another node (different from me),
                # add 1 to the value of the cell corresponding to the node selected
                # and group cardinality of the node which the selected node is not linked to
                if (self.matrix[row][column] == 0) & (row != column):
                    neutral_relation[row][degree[column] - 1] += 1
            # Put 1 in the cell corresponding to the node index as a row
            # and the group cardinality of the node as a column
            negative_relation[row][degree[row] - 1] = 1
        print("Positive Relation Matrix")
        print(positive_relation)
        print("Negative Relation Matrix")
        print(negative_relation)
        print("Neutral Relation Matrix")
        print(neutral_relation)
        return \
            degree, \
            degree_class, \
            neutral, \
            positive, \
            positive_relation, \
            negative_relation, \
            neutral_relation
    def centrality_measure(self, l_degree, coalition_cardinality, centrality_measure_choice):
        """ Centrality Measure Application
        Method used to apply the chosen Centrality Measure
        of the Degree characteristic function.
        There are four types of Degree with different calculation and returns:
            - Degree
            - Weighted Degree
            - Impact Factor
            - Normalized Degree
        Args:
            l_degree (int): degree of the group i'm considering.
            coalition_cardinality (int): the cardinality of the coalition C
                where the node is in.
            centrality_measure_choice (string): the centrality measure chosen.
        Returns:
            (int,int): it is always returned a couple of int, the first used for the
                SEMI-Algorithm f() function, and the second for the g() SEMI function.
                Be aware that in case there is a wrong choice, the Degree version is returned
                (any unmatched choice falls through to the final else below).
        """
        # Degree of Everett and Borgatti (1999)
        # f(v) = 1
        # g(|C|) = 1
        if centrality_measure_choice == "degree":
            return 1, 1
        # Weighted Degree of Newman (2004)
        # f(v) = 1/deg(Teta_l)
        # g(|C|) = 1
        if centrality_measure_choice == "weighted":
            return 1 / l_degree, 1
        # Impact Factor of Bollen, Sompel, Smith, and Luce (2005)
        # f = 1
        # g = 1/|C|
        # if |C| is 0, return 1
        if centrality_measure_choice == "impact":
            if coalition_cardinality == 0:
                return 1, 1
            else:
                return 1, 1 / coalition_cardinality
        # Normalised Degree of Everett and Borgatti (1999)
        # f = 1
        # g = 1/(|V|-|C|)
        # if |C| = |V|, return 1
        if centrality_measure_choice == "normalised":
            if self.nodes_number == coalition_cardinality:
                return 1, 1
            else:
                return 1, 1 / (self.nodes_number - coalition_cardinality)
        # Classic Degree version is chosen
        else:
            return 1, 1
    def centrality_measure_selection(self):
        """ Centrality Measure Selection
        Method used to return all possible choices of the centrality
        measure related to the characteristic function chosen.
        Args:
            self: the instance itself.
        Returns:
            string: the choice of the user on the centrality measure possibilities.
        """
        return word_checker(input("Select the centrality measure:\n"
                                  " - \tDegree\n"
                                  " - \tWeighted Degree\n"
                                  " - \tImpact Factor\n"
                                  " - \tNormalised Degree\n"),
                            ["degree",
                             "weighted",
                             "impact",
                             "normalised"])
    def get_coalition_value(self, game, node_list, permutation_list):
        """ Getter of the coalition value
        Method used to return the correspondent value of the coalition
        using the degree as characteristic function.
        Args:
            game (Game): the game characterized by a matrix,
                a characteristic function and the input parameters for the SEMI algorithm.
            node_list ([int]): list with all the node in the network.
            permutation_list ([int]): coalition for which i want the characteristic value.
        Returns:
            group_degree (int): the value of the coalition, i.e. the number of
                nodes outside the coalition adjacent to at least one member.
        """
        # Initialization
        group_degree = 0
        print("\t\tPERMUTATION CONSIDERED: ", permutation_list)
        # Creating the list with the nodes that must be checked if they are linked
        # or not to the nodes in permutation_list
        remaining_node_list = list(set(node_list) - set(permutation_list))
        print("\t\tON THE LIST: ", node_list)
        print("\t\tTERMINAL NODE TO BE CHECKED: ", remaining_node_list)
        # For each node to be checked (if it is a terminal of an edge)
        for checked_link_node in remaining_node_list:
            print("\t\t\tCOLUMN: ", checked_link_node)
            # For each node in the coalition
            for permutated_node in permutation_list:
                print("\t\t\t\tROW: ", permutated_node)
                # If the checked node is linked, add 1 to the degree and break.
                # This guarantees that we are not counting more times the same node
                # if more arrows are going there.
                # Moreover, we guarantee that nor the self cycles or the edge between
                # two nodes in the same coalition are counted.
                if game.matrix[permutated_node][checked_link_node] == 1:
                    group_degree += 1
                    print("\t\t\t\tBREAKING WITH DEGREE ", group_degree)
                    # Breaking the inner loop, but currently staying in the outer one
                    break
        print("\t\tGROUP DEGREE: ", group_degree)
        return group_degree
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,595 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /ALGORITHM/TOOLS/edge_list_to_adjancency_matrix.py | """ Utility module
Script used to create adjacency matrix file .txt from edge list.
The edge matrix is so ready for the core of the program.
It must be run with the following command:
$ python edge_list_to_adjacency_matrix.py
CAVEAT: this may generate a physical version of the file with huge dimensions
depending on the number of nodes in the edge list uploaded.
The version of the file implements an edge list upload avoiding the file creation
(if not needed). Although this, the RAM usage will be still expensive.
External Libraries Required:
- numpy.py
No class are needed.
"""
import numpy as np
def main():
    """ Convert an edge list file into an adjacency matrix file.

    Reads an edge list from EXAMPLES/EDGES_LIST_EXAMPLES/<input name>,
    builds the symmetric 0/1 adjacency matrix, prints it, and saves it via
    numpy into EXAMPLES/MATRIX_EXAMPLES/<output name>.

    Improvements over the original:
        - both files are opened with context managers (the input file was
          previously never closed);
        - blank lines or lines with fewer than two numbers are skipped
          instead of raising IndexError.

    Returns:
        no return is needed.
    """
    input_name = input("Please, enter the INPUT file name\n")
    # Open the file in the EXAMPLES/EDGES_LIST_EXAMPLES/ folder of the project
    with open('EXAMPLES/EDGES_LIST_EXAMPLES/' + input_name) as file:
        # Initialize the max length of the file
        max_length = 0
        # Find the max to set the matrix cardinality
        for row in file:
            numbers = [int(i) for i in row.split() if i.isdigit()]
            if len(numbers) < 2:
                # Skip blank or malformed lines
                continue
            # Set the max to the max between the previous value and the two integers
            max_length = max(max_length, numbers[0], numbers[1])
        # Come back to file beginning
        file.seek(0)
        # Initialize the matrix
        matrix = np.zeros((max_length + 1, max_length + 1), dtype=int)
        # For each row
        for row in file:
            # Results of the file line
            res = [int(i) for i in row.split() if i.isdigit()]
            if len(res) < 2:
                continue
            # Set first direction arc
            matrix[res[0]][res[1]] = 1
            # Set the symmetric direction arc
            matrix[res[1]][res[0]] = 1
    print(matrix)
    # Saving the file in a binary one in the EXAMPLES/MATRIX_EXAMPLES directory
    output_path = input("Please, enter the OUTPUT file name\n")
    with open('EXAMPLES/MATRIX_EXAMPLES/' + output_path, 'wb') as f:
        np.savetxt(f, matrix)
    print("File created")


if __name__ == "__main__":
    main()
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,596 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /ALGORITHM/TOOLS/utility_tools.py | """ Utility module
Module used as a library of functions with an utility scope.
No class are needed.
"""
def word_checker(user_input, possible_choices):
    """ Word in String Checker

    Normalises the user's input to lower case and returns the first
    element of the possible choices that contains it as a substring,
    giving a canonical string to use downstream.

    Args:
        user_input (string): user's input string.
        possible_choices ([string]): all possible choices that must be compared.

    Returns:
        possible_element (int): the element of possible choices that
            are similar to the user input.
            The default, if no match occurs, is the first element of the
            possible choices list.
    """
    needle = user_input.lower()
    matches = (choice for choice in possible_choices if needle in choice)
    # next() with a default implements the "first match or fallback" rule
    return next(matches, possible_choices[0])
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,597 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py | """ Betweenness Centrality Characteristic Function module
GroupBetweennessCentrality(GroupCentralityMeasure):
betweenness version of the characteristic function.
"""
from GAME.CHARACTERISTIC_FUNCTIONS.group_centrality_measure import GroupCentralityMeasure
class GroupBetweennessCentrality(GroupCentralityMeasure):
    """ Betweenness Centrality Characteristic Function class
    <<Inheriting from group_centrality_measure>>
    Implementation of the characteristic function with the Betweenness Measure.
    NOTE(review): this class currently defines no methods of its own — it
    inherits everything from GroupCentralityMeasure; presumably the
    betweenness-specific overrides (data_preparation, centrality_measure,
    get_coalition_value, ...) are still to be implemented.
    Attributes:
        no attributes are needed.
    """
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,598 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py | # -*- coding: utf-8 -*-
""" Polynomial Time SEMI Algorithm
This algorithm allows the computation of the central nodes of a network
with a polynomial time complexity, differently from the classic exponential version.
"""
import numpy as np
from ALGORITHM.TOOLS.mathematical_tools import fast_binomial
def shapley_beta_function(game):
    """ Shapley Beta Function Application

    Weighting (beta) function of the Shapley semivalue: every coalition
    cardinality receives the same constant weight 1 / |V|, where |V| is
    the number of rows of the game's adjacency matrix.

    Args:
        game (Game): the game characterized by a matrix,
            a characteristic function and the input parameters for the SEMI algorithm.

    Returns:
        float: the constant weight 1 / |V|.
    """
    node_count = len(game.matrix)
    return 1 / node_count
def semi_algorithm(game, centrality_measure_choice):
    """ Algorithm Application
    Method that applies the algorithm with different important part:
    - Definition of the centrality measure within the characteristic function.
    - Calculation of the marginal contribution:
        - MC1: first marginal contribution part, based on positive and neutral relation
        - MC2: second marginal contribution part, based on positive and negative relation
        - MC3: first marginal contribution part, based on
        the sum of positive and neutral relation
    - Calculation of the weighted general marginal contribution
    - Update of the Shapley Values
    Note:
        the many print statements are verbose debug/trace output and dominate
        runtime on non-trivial graphs; results are accumulated in a local
        vector and only printed, never returned.
    Args:
        game (Game): the game characterized by a matrix,
        a characteristic function and the input parameters for the SEMI algorithm.
        centrality_measure_choice (string): the centrality measure chosen by the user.
    Returns:
        no return is needed.
    """
    # Initialization
    # Shapley Value vector has size equals to the number of nodes.
    shapley_value = np.zeros(game.length)
    # For each node considered in order to find its shapley value
    for evaluated_node in range(0, game.length):
        # For each possible coalition size, starting from the empty and going to |V| - 1
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print("NODE EVALUATED: ", evaluated_node)
        for k in range(0, game.length):
            # Initialize each time the marginal contribution of that node to 0
            marginal_contribution = 0
            # For each l possible value that that the partition of set can assume
            print("\tK: ", k)
            for l_cardinality in range(1, max(game.item) + 1):
                print("\t\tL: ", l_cardinality)
                # If there is not an item belonging to the partition of size l,
                # just continue the cycle
                # avoiding all operations for that l and jumping to the l + 1 partition cardinality
                if game.item_class[l_cardinality - 1] == 0:
                    print("\t\t\tNO CORRESPONDENCE")
                    continue
                # Otherwise, continue with the marginal contribution computation
                # MC1 - FIRST PART COMPUTATION
                # ***************************************************
                # ***[R(v, teta) and N(C, teta)]***
                # |N#k^(-1)(Teta_l)| <- cN_G(teta in Teta_l, k);
                # MC[1] <- g(k+1) * f(Teta_l) * |N#k^(-1)(Teta_l)|
                # MCk <- MCk + |R_Teta_l({v})| * MC[1]
                # ***************************************************
                # Definition of f and g parameters
                (centrality_measure_f_parameter, centrality_measure_g_of_k_plus_1_parameter) = \
                    game.characteristic_function.centrality_measure(l_cardinality,
                                                                    k + 1,
                                                                    centrality_measure_choice)
                # Definition of the set of coalitions of size k
                # to which item ϑ is neutrally related
                neutral_contribution = game.neutral[game.item_class[l_cardinality - 1] - 1][k]
                print("\t\t\t#1 - SET OF COALITIONS OF SIZE K TO WHICH TETA IS NEUTRALLY RELATED: ",
                      neutral_contribution)
                print("\t\t\t#1 - f CONTRIBUTION: ", centrality_measure_f_parameter)
                print("\t\t\t#1 - g CONTRIBUTION: ", centrality_measure_g_of_k_plus_1_parameter)
                # Definition of the first type marginal contribution, by the product of
                # the f and g parameters and the neutral matrix contribution
                marginal_contribution_first_type = \
                    centrality_measure_f_parameter * \
                    centrality_measure_g_of_k_plus_1_parameter * \
                    neutral_contribution
                print("\t\t\t#1 - FIRST MARGINAL CONTRIBUTION: ", marginal_contribution_first_type)
                print("\t\t\t#1 - SET OF ITEMS IN GROUP TETA_L THAT IS POSITIVELY RELATED TO NODE v: ",
                      game.positive_relation[evaluated_node][l_cardinality - 1])
                print("\t\t\t#1 - OLD GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                # First type marginal contribution addition to the general marginal contribution,
                # weighted by the value that the set of items in group Teta_l
                # is positively related to C has
                marginal_contribution = \
                    marginal_contribution + \
                    game.positive_relation[evaluated_node][l_cardinality - 1] * \
                    marginal_contribution_first_type
                print("\t\t\t#1 - NEW GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                print("\t\t\t----------")
                # MC2 - SECOND COMPUTATION
                # ***************************************************
                # ***[~R(v, teta) and R(C, teta)]***
                # |R#k^(-1)(Teta_l)| ← cR_G(teta in Teta_l, k);
                # MC[2] ← g(k) * f(Teta_l) * |R#k^(-1)(Teta_l)|
                # MCk ← MCk + |-R_Teta_l({v})| * MC[2]
                # ***************************************************
                # Same f, but g is now evaluated at k (not k + 1).
                (centrality_measure_f_parameter, centrality_measure_g_of_k_parameter) = \
                    game.characteristic_function.centrality_measure(l_cardinality,
                                                                    k,
                                                                    centrality_measure_choice)
                # Definition of the set of coalitions of size k
                # to which item _teta is positively related
                positive_contribution = game.positive[game.item_class[l_cardinality - 1] - 1][k]
                print("\t\t\t#2 - SET OF COALITIONS OF SIZE K TO WHICH TETA IS POSITIVELY RELATED: ",
                      positive_contribution)
                print("\t\t\t#2 - f CONTRIBUTION: ", centrality_measure_f_parameter)
                print("\t\t\t#2 - g CONTRIBUTION: ", centrality_measure_g_of_k_parameter)
                # Definition of the second type marginal contribution, by the product of
                # the f and g parameters and the positive matrix contribution
                marginal_contribution_second_type = \
                    centrality_measure_f_parameter * \
                    centrality_measure_g_of_k_parameter * \
                    positive_contribution
                print("\t\t\t#2 - SECOND MARGINAL CONTRIBUTION: ", marginal_contribution_second_type)
                print("\t\t\t#2 - SET OF ITEMS IN GROUP TETA_L THAT IS NEGATIVELY RELATED TO NODE v: ",
                      game.negative_relation[evaluated_node][l_cardinality - 1])
                print("\t\t\t#2 - OLD GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                # Second type marginal contribution subtraction
                # to the general marginal contribution,
                # weighted by the value that the set of items in group Teta_l
                # is negatively related to C has
                marginal_contribution = \
                    marginal_contribution - \
                    game.negative_relation[evaluated_node][l_cardinality - 1] * \
                    marginal_contribution_second_type
                print("\t\t\t#2 - NEW GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                print("\t\t\t----------")
                # MC3 - THIRD COMPUTATION
                # ***************************************************
                # ***[R(v, teta) or N(v, teta), and R(C, teta)]***
                # MC[3] <- (g(k+1) - g(k)) * f(Teta_l) * |R#k^(-1)(Teta_l)|
                # MCk <- MCk + |R_Teta_l({v}) ∪ N_Teta_l({v})| * MC[3]
                # ***************************************************
                # Reuses f, g(k+1) and g(k) computed in MC1/MC2 above.
                print("\t\t\t#3 - SET OF COALITIONS OF SIZE K TO WHICH TETA IS POSITIVELY RELATED: ",
                      positive_contribution)
                print("\t\t\t#3 - f CONTRIBUTION: ", centrality_measure_f_parameter)
                print("\t\t\t#3 - g CONTRIBUTION: ",
                      centrality_measure_g_of_k_plus_1_parameter - centrality_measure_g_of_k_parameter)
                # Definition of the third type marginal contribution, by the product of
                # the f parameter, the difference between the g parameter for k + 1 and k,
                # and the positive matrix contribution
                marginal_contribution_third_type = \
                    centrality_measure_f_parameter * \
                    (centrality_measure_g_of_k_plus_1_parameter -
                     centrality_measure_g_of_k_parameter) * \
                    positive_contribution
                print("\t\t\t#3 - THIRD MARGINAL CONTRIBUTION: ", marginal_contribution_third_type)
                print("\t\t\t#3 - SET OF ITEMS IN GROUP TETA_L THAT IS POSITIVELY AND NEUTRALLY RELATED TO NODE v: ",
                      game.positive_relation[evaluated_node][l_cardinality - 1] +
                      game.neutral_relation[evaluated_node][l_cardinality - 1])
                print("\t\t\t#3 - OLD GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                # Third type marginal contribution addiction to the general marginal contribution,
                # weighted by the value that the set of items in group Teta_l
                # is negatively related to C has summed to the value that the set of items
                # in group Teta_l is positively related to C
                marginal_contribution = \
                    marginal_contribution + \
                    (game.positive_relation[evaluated_node][l_cardinality - 1] +
                     game.neutral_relation[evaluated_node][l_cardinality - 1]) * \
                    marginal_contribution_third_type
                print("\t\t\t#3 - NEW GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
            # MC - END STEP
            # ***************************************************
            # MCk <- (Beta(k) / (|V|−1 k)) * MCk
            # phi_v <- phi_v + MCk
            # ***************************************************
            # Final computation of the marginal contribution as the value previously calculated
            # weighted by the division between the Shapley Beta function and the Newton binomial
            # coefficient of |V| - 1 and k
            print("\t\tEND STEP")
            print("\t\tWEIGHT FUNCTION: ", (shapley_beta_function(game) /
                                            fast_binomial(game.length - 1, k)))
            marginal_contribution *= (shapley_beta_function(game) /
                                      fast_binomial(game.length - 1, k))
            print("\t\tWEIGHTED MARGINAL CONTRIBUTION: ", marginal_contribution)
            # Update of the Shapley Value of the node evaluated with the sum of the previous value
            # and the weighted marginal contribution
            shapley_value[evaluated_node] += marginal_contribution
            print("\t\tSHAPLEY VALUES:\n\t\t", shapley_value)
    print("SHAPLEY VALUES SUM: ", np.sum(shapley_value))
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,599 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py | """ Interface of the characteristic functions
GroupCentralityMeasure:
it is the interface and the parent of all characteristic functions type derived and used.
This is done in order to have a scalable and extensible program.
"""
class GroupCentralityMeasure:
    """Common superclass (interface) for every group centrality measure.

    Concrete measures subclass this and override the methods below while
    respecting the same return conventions, keeping the program scalable
    and extensible. To plug in your own characteristic function, derive
    from this class and override at least ``data_preparation``.
    """

    def __init__(self, matrix):
        """Store the adjacency matrix and its node count.

        Args:
            matrix (matrix): the adjacency matrix previously uploaded.
        """
        self.matrix = matrix
        self.nodes_number = len(matrix)

    def data_preparation(self):
        """Prepare the inputs of the SEMI Algorithm.

        Returns:
            None: this is only the super-method; subclasses do the work.
        """
        return None

    def centrality_measure(self, node, coalition_cardinality, centrality_measure_choice):
        """Apply the centrality measure of the chosen characteristic function.

        Args:
            node (int): the node which apply the centrality measure to.
            coalition_cardinality (int): the cardinality of the coalition C
                where the node is in.
            centrality_measure_choice (string): the centrality measure chosen.

        Returns:
            None: this is only the super-method; subclasses do the work.
        """
        return None

    def centrality_measure_selection(self):
        """Return the centrality-measure choices the characteristic function supports.

        Returns:
            None: this is only the super-method; subclasses do the work.
        """
        return None

    def get_coalition_value(self, game, node_list, permutation_list):
        """Return the value of a coalition under the chosen characteristic function.

        Args:
            game (Game): the game characterized by a matrix,
                a characteristic function and the SEMI algorithm inputs.
            node_list ([int]): list with all the node in the network.
            permutation_list ([int]): coalition whose characteristic value is wanted.

        Returns:
            None: this is only the super-method; subclasses do the work.
        """
        return None
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,600 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /runner.py | """ Runner script that starts the project
<<This module has the starting and fundamental class used to start the program.>>
It must be run with the following command:
$ python runner.py
External Libraries Required:
- networkx.py
- matplotlib.py
- numpy.py
No class are present
"""
from ALGORITHM.TOOLS.utility_tools import word_checker
from GAME.games_manager import GamesManager
def main():
    """Program entry point.

    Creates the games manager, asks the user which algorithm complexity to
    run, validates the answer and launches the centrality computation.

    Returns:
        no return is needed.
    """
    # Initialization of the manager that owns the whole computation.
    manager = GamesManager()
    # Ask for (and validate) the algorithm flavour, then run it.
    prompt = ("Select the algorithm complexity:\n"
              " - \tPolynomial\n"
              " - \tExponential\n")
    valid_choices = ["polynomial",
                     "exponential"]
    complexity = word_checker(input(prompt), valid_choices)
    manager.centrality_algorithm(complexity)
if __name__ == "__main__":
    main()
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,601 | melmello/Game-Theoric-Network-Centrality | refs/heads/master | /GAME/game.py | """ Game module
Game:
this class is the game built with
a specified characteristic function type and an adjacency matrix.
"""
from GAME.CHARACTERISTIC_FUNCTIONS.centrality_type_dictionary import TYPE_TO_CLASS
class Game:
    """Coalitional game built from an adjacency matrix.

    Holds the matrix, its size, the characteristic-function instance
    resolved from ``TYPE_TO_CLASS`` and all the pre-computed inputs the
    SEMI algorithm needs.

    Attributes:
        characteristic_function (group_centrality_measure):
            the type of the characteristic function used.
    """

    def __init__(self, matrix, characteristic_function):
        """Build the game and pre-compute the SEMI algorithm inputs.

        Args:
            matrix (matrix): the adjacency matrix previously uploaded.
            characteristic_function (group_centrality_measure):
                the characteristic function type (a key of TYPE_TO_CLASS).
        """
        self.matrix = matrix
        self.length = len(matrix)
        # Resolve the concrete measure class from its type key.
        self.characteristic_function = TYPE_TO_CLASS[characteristic_function](matrix)
        # data_preparation returns the seven structures the SEMI loop reads.
        prepared_inputs = self.characteristic_function.data_preparation()
        (self.item,
         self.item_class,
         self.neutral,
         self.positive,
         self.positive_relation,
         self.negative_relation,
         self.neutral_relation) = prepared_inputs
| {"/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_closeness_centrality.py"], "/ALGORITHM/INIT/matrix_uploader.py": ["/ALGORITHM/TOOLS/utility_tools.py"], "/GAME/games_manager.py": ["/ALGORITHM/IMPLEMENTATIONS/EXPONENTIAL_IMPLEMENTATION/exponential_algorithm.py", "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py", "/ALGORITHM/INIT/graph_builder.py", "/ALGORITHM/INIT/matrix_uploader.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/game.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_degree_centrality.py": ["/ALGORITHM/TOOLS/mathematical_tools.py", "/ALGORITHM/TOOLS/utility_tools.py", "/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/GAME/CHARACTERISTIC_FUNCTIONS/group_betweenness_centrality.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/group_centrality_measure.py"], "/ALGORITHM/IMPLEMENTATIONS/POLYNOMIAL_IMPLEMENTATION/polynomial_algorithm.py": ["/ALGORITHM/TOOLS/mathematical_tools.py"], "/runner.py": ["/ALGORITHM/TOOLS/utility_tools.py", "/GAME/games_manager.py"], "/GAME/game.py": ["/GAME/CHARACTERISTIC_FUNCTIONS/centrality_type_dictionary.py"]} |
76,602 | joe-doe/bubble-up-priorities | refs/heads/master | /src/controllers/routes.py | from flask import (
render_template,
request
)
def initialize(app, mongo_instance):
    """Register every template-rendering route on the given Flask app.

    Args:
        app: the Flask application the routes are attached to.
        mongo_instance: Database wrapper; only the /add_user endpoint uses it.

    Returns:
        no return is needed (registration happens through the decorators).
    """
    # NOTE: each nested function name doubles as the Flask endpoint name,
    # so none of them can be renamed without affecting url_for lookups.
    @app.route('/index')
    def index():
        return render_template('index.html')
    @app.route('/main')
    def main():
        return render_template('main.html')
    ##########
    # events #
    ##########
    @app.route('/events')
    def events():
        return render_template('events.html')
    @app.route('/event_add')
    def event_add():
        return render_template('event_add.html')
    ############
    # promises #
    ############
    @app.route('/promises')
    def promises():
        return render_template('promises.html')
    @app.route('/promise_add')
    def promise_add():
        return render_template('promise_add.html')
    #############
    # whishlist #
    #############
    # NOTE(review): 'whishlist' misspells 'wishlist', but the URL, template
    # and function names all share the spelling, so it is kept as-is.
    @app.route('/whishlist')
    def whishlist():
        return render_template('whishlist.html')
    @app.route('/whishlist_add')
    def whishlist_add():
        return render_template('whishlist_add.html')
    ############
    # calendar #
    ############
    @app.route('/calendar')
    def calendar():
        return render_template('calendar.html')
    ##############
    # categories #
    ##############
    @app.route('/categories')
    def categories():
        return render_template('categories.html')
    @app.route('/category_add')
    def category_add():
        return render_template('category_add.html')
    #########
    # users #
    #########
    @app.route('/users')
    def users():
        return render_template('users.html')
    @app.route('/user_add')
    def user_add():
        return render_template('user_add.html')
    @app.route('/add_user', methods=['POST'])
    def add_user():
        # Persist the posted JSON document as a new user; echo the new id.
        user = request.get_json()
        db = mongo_instance.get_mongo_db()
        u = db.user.insert(user)
        return "{}".format(u)
| {"/bubup.py": ["/src/model/database.py"]} |
76,603 | joe-doe/bubble-up-priorities | refs/heads/master | /bubup.py | from flask import Flask
from flask_restplus import Api
import os.path
from src.model.database import Database
from src.controllers import (
routes,
api
)
# Flask application serving templates and static assets from src/view.
app = Flask(__name__,
            template_folder='src/view/pages',
            static_folder='src/view/static')
# Prefer a developer-local config module when local_config.py exists on disk.
config_module = 'local_config' if os.path.isfile('local_config.py') else 'config'
app.config.from_object(config_module)
# Flask-RESTPlus API wrapper (also exposes the generated Swagger UI).
restplus_api = Api(app,
                   version='1.0',
                   title='bubup !'
                   )
ns = restplus_api.namespace(name='api', description='WOW bubup !')
# Shared MongoDB connection wrapper (see src/model/database.py).
mongo_instance = Database(app)
# Register the REST resources and the plain template routes.
api.initialize(ns, restplus_api, mongo_instance)
routes.initialize(app, mongo_instance)
if __name__ == '__main__':
    # for x in app.url_map.iter_rules():
    #     print x
    # Development server settings; host is a hard-coded VM address.
    app.run(threaded=True,
            debug=True,
            use_reloader=False,
            host='192.168.56.101',
            port=5001)
| {"/bubup.py": ["/src/model/database.py"]} |
76,604 | joe-doe/bubble-up-priorities | refs/heads/master | /config.py | SECRET_KEY = 'development key'
# Whether the app should use a local environment (read via app.config).
RUN_LOCAL = False
# Connection string consumed by src/model/database.py's MongoClient.
MONGODB_URI = 'mongodb://localhost:27017/'
| {"/bubup.py": ["/src/model/database.py"]} |
76,605 | joe-doe/bubble-up-priorities | refs/heads/master | /src/controllers/api.py | from flask_restplus import Resource
from flask import request
from bson.objectid import ObjectId
def initialize(ns, api, mongo_instance):
    """Register the REST resources on the given flask_restplus namespace.

    Args:
        ns: flask_restplus namespace the resources are routed under (/api).
        api: the flask_restplus Api instance (kept for interface symmetry).
        mongo_instance: Database wrapper exposing get_mongo_db().

    Returns:
        no return is needed (registration happens through the decorators).
    """
    def get_db():
        # Shortcut to the shared MongoDB database handle.
        return mongo_instance.get_mongo_db()
    @ns.route('/events/<action>')
    class Events(Resource):
        def get(self, action):
            """Return every stored event, ObjectId serialized as string 'id'."""
            if action == 'get':
                events = []
                records = list(get_db().events.find({}))
                for record in records:
                    event = record
                    # ObjectId is not JSON serializable: expose it as 'id'.
                    event['id'] = str(record['_id'])
                    del(event['_id'])
                    events.append(event)
                return events
            else:
                return {"status": "invalid action"}
        def post(self, action):
            """Add, delete or update one event depending on <action>."""
            return_value = {"status": "OK"}
            if action == 'add':
                data = request.get_json()
                get_db().events.insert(data)
            elif action == 'delete':
                # BUG FIX: the original branch only looked the document up
                # (find) and printed it with Python-2-only `print ev` syntax;
                # it never deleted anything. Delete by ObjectId, mirroring
                # how the 'update' branch addresses documents.
                data = request.get_json()
                get_db().events.remove({'_id': ObjectId(data['_id'])})
            elif action == 'update':
                data = request.get_json()
                object_id = ObjectId(data['_id'])
                data['_id'] = object_id
                get_db().events.update({'_id': object_id}, data)
            else:
                return_value = {"status": "invalid action"}
            return "data: {}".format(return_value)
    @ns.route('/promises')
    class Promises(Resource):
        def get(self):
            """Return all promises without their ObjectIds."""
            return list(get_db().promises.find({}, {'_id': False}))
        def post(self):
            """Store the posted JSON document as a new promise."""
            data = request.get_json()
            get_db().promises.insert(data)
            return "data: {}".format(data)
    @ns.route('/whishlist')
    class Whishlist(Resource):
        def get(self):
            """Return all wishlist entries without their ObjectIds."""
            return list(get_db().whishlist.find({}, {'_id': False}))
        def post(self):
            """Store the posted JSON document as a new wishlist entry."""
            data = request.get_json()
            get_db().whishlist.insert(data)
            return "data: {}".format(data)
| {"/bubup.py": ["/src/model/database.py"]} |
76,606 | joe-doe/bubble-up-priorities | refs/heads/master | /src/model/database.py | from pymongo import (
MongoClient,
errors
)
class Database(object):
    """Thin wrapper around a pymongo client for the app's MongoDB database.

    Connection failures are reported on stdout; the getters then return the
    class-level ``None`` defaults instead of raising.
    """
    # Class-level defaults keep the getters safe when the connection failed.
    mongo_client = None
    mongo_db = None

    def __init__(self, app):
        """Connect to MongoDB using the app's MONGODB_URI setting.

        Args:
            app: Flask application whose config supplies MONGODB_URI.
        """
        # Connection to MongoDB
        try:
            self.mongo_client = MongoClient(app.config.get('MONGODB_URI'))
            self.mongo_db = self.mongo_client.heroku_mongodb
            print("Connected successfully!!!")
        except errors.ConnectionFailure as e:
            # FIX: `except X, e` and the bare print *statements* were
            # Python-2-only syntax; the rest of the project already uses the
            # print() function. Behavior is unchanged on both interpreters.
            print("Could not connect to MongoDB: %s" % e)

    def get_client(self):
        """Return the MongoClient (None if the connection failed)."""
        return self.mongo_client

    def get_mongo_db(self):
        """Return the database handle (None if the connection failed)."""
        return self.mongo_db
| {"/bubup.py": ["/src/model/database.py"]} |
76,607 | shyamnarayan2001/AS1 | refs/heads/master | /server/algorithms/api/feature.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ALTIMETRIK CONFIDENTIAL
# __________________
#
# Copyright (c) 2016 - 2017 Altimetrik India Pvt. Ltd.
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Altimetrik India Pvt. Ltd.
# The intellectual and technical concepts contained herein are proprietary to Altimetrik India Pvt. Ltd. and may be covered by U.S. and Foreign Patents,
# patents in process, and are protected by trade secret or copyright law.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Altimetrik India Pvt. Ltd.
"""
==========================
Features
==========================
"""
# Import libraries
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import numpy as np
import math
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import SelectPercentile, f_regression
print(__doc__)
class FeatureElemination(object):
    """Feature-selection helpers (chi2, correlation and F-test filters).

    Each method takes a DataFrame whose last column is the target and returns
    a DataFrame restricted to the selected features plus that target.
    NOTE: the (misspelled) class name is kept because callers import it.
    """

    @staticmethod
    def chi2test(dataframe):
        """Keep features whose chi-squared p-value against the target is < 0.06.

        FIX: the original definition was missing ``self``, so calling it on an
        instance shifted the arguments and broke; declaring it a @staticmethod
        keeps ``FeatureElemination.chi2test(df)`` working and makes
        ``instance.chi2test(df)`` work too (backward compatible).

        Args:
            dataframe (pandas.DataFrame): features first, target last column.

        Returns:
            pandas.DataFrame: selected feature columns plus the target.
        """
        p_val = 0.06
        num_col = len(dataframe.columns)
        # Convert Dataframe into array format
        array = dataframe.values
        # Divide the data into feature matrix and target vector
        X = array[:, 0:num_col - 1]
        y = array[:, num_col - 1]
        # Apply Chi2 test
        test = SelectKBest(score_func=chi2, k=4)
        fit = test.fit(X, y)
        # Get P value
        p_value = fit.pvalues_
        # Select columns whose p value is less than the cut-off
        l = []
        for i in range(len(p_value)):
            if p_value[i] < p_val:
                l.extend([i])
            else:
                print(i,'th Column dropped')
        # Always keep the target column (last one)
        l.extend([num_col - 1])
        df = dataframe[dataframe.columns[l]]
        return(df)

    def cor_cov(self, dataframe):
        """Keep features whose correlation with the target exceeds 0.20.

        Args:
            dataframe (pandas.DataFrame): features first, target last column.

        Returns:
            pandas.DataFrame: selected feature columns plus the target.
        """
        co_val = .20
        # Apply Correlation and make dataframe
        cor_df = dataframe.corr()
        # Find Row and column number of correlation dataframe
        num_row = len(cor_df.index)
        num_col = len(cor_df.columns)
        # Drop last row of correlation dataframe (the target's own row)
        d = cor_df.drop(cor_df.index[[num_row - 1]])
        # Select the last column: each feature's correlation with the target
        n = d[d.columns[num_col - 1]]
        # Keep positions whose correlation value is greater than the cut-off
        # NOTE(review): n[i] relies on integer-label/positional indexing
        # agreeing, which assumes default (or integer) column labels.
        l = []
        for i in range(0, len(d.index)):
            if n[i] > co_val:
                l.extend([i])
        # Always keep the target column (last one)
        l.extend([num_col - 1])
        # Make dataframe according to that (based on correlation value)
        final_df = dataframe[dataframe.columns[l]]
        return(final_df)

    def f_test_by_percentile(self, dataframe):
        """Keep the top-30%-scoring features under an F-regression test.

        Args:
            dataframe (pandas.DataFrame): features first, target last column.

        Returns:
            pandas.DataFrame: selected feature columns plus the target.
        """
        p = 30
        num_col = len(dataframe.columns)
        # NOTE(review): range(0, num_col-2) silently drops the second-to-last
        # column from the candidate features — confirm this is intended.
        X = dataframe[dataframe.columns[range(0, num_col - 2)]]
        y = dataframe[dataframe.columns[num_col - 1]]
        selectF_regression = SelectPercentile(f_regression, percentile=p).fit(X, y)
        f_regression_selected = selectF_regression.get_support()
        f_regression_selected_features = [f for i, f in enumerate(X.columns) if f_regression_selected[i]]
        X_sel = X[f_regression_selected_features]
        df = X_sel.join(y)
        return(df)

    def f_test_by_p_value(self, dataframe):
        """Keep features whose F-test p-value is below 0.06.

        Args:
            dataframe (pandas.DataFrame): features first, target last column.

        Returns:
            pandas.DataFrame: selected feature columns plus the target.
        """
        p = 30
        p_val = .06
        num_col = len(dataframe.columns)
        # NOTE(review): same range(0, num_col-2) caveat as above.
        X = dataframe[dataframe.columns[range(0, num_col - 2)]]
        y = dataframe[dataframe.columns[num_col - 1]]
        selectF_regression = SelectPercentile(f_regression, percentile=p).fit(X, y)
        arr = selectF_regression.pvalues_
        l = []
        for i in range(0, len(arr)):
            if arr[i] < p_val:
                l.extend([i])
        # Always keep the target column (last one)
        l.extend([num_col - 1])
        final_df = dataframe[dataframe.columns[l]]
        return(final_df)
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
76,608 | shyamnarayan2001/AS1 | refs/heads/master | /server/headers/api/dataManipulation.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ALTIMETRIK CONFIDENTIAL
# __________________
#
# Copyright (c) 2016 - 2017 Altimetrik India Pvt. Ltd.
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Altimetrik India Pvt. Ltd.
# The intellectual and technical concepts contained herein are proprietary to Altimetrik India Pvt. Ltd. and may be covered by U.S. and Foreign Patents,
# patents in process, and are protected by trade secret or copyright law.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Altimetrik India Pvt. Ltd.
"""
====================
Data Manipulation
====================
"""
# Import libraries
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import pandas as pd
import numpy as np
# import pymysql
from sqlalchemy import create_engine
from pywebhdfs.webhdfs import PyWebHdfsClient
import re
print(__doc__)
# --------- Changing the format of messages in log file -----------
# logging.basicConfig(filename='log.log' , format='%(levelname)s:%(asctime)s- %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p' , level=logging.DEBUG)
# logger = logging.getLogger()
class InputData(object):
    """Operations for loading a dataset into ``self.df``.

    Each loader returns the string 'fine' on success and an error message
    (or the raised exception object, for mysql/hdfs) on failure instead of
    raising, keeping the original best-effort contract.
    """

    def __init__(self):
        # The loaded dataset; empty until a loader succeeds.
        self.df = pd.DataFrame()

    def CSV(self, filePath, header):
        """Load a CSV file into self.df.

        Args:
            filePath: path or file-like object accepted by pandas.read_csv.
            header: 'n' when the file has no header row (columns are then
                renamed 1..N); any other value keeps the file's own header.

        Returns:
            'fine' on success, otherwise a user-facing error message.
        """
        flag = 'fine'
        try:
            if header == 'n':
                self.df = pd.read_csv(filePath, header=None)
                no_of_cols = len(self.df.columns)
                # Synthesize 1-based numeric column names.
                self.df.columns = np.arange(1, no_of_cols + 1)
            else:
                self.df = pd.read_csv(filePath)
        except Exception:
            flag = 'Unable to process your data. Might be in the wrong format.'
        return (flag)

    def excel(self, filePath, header):
        """Load an Excel file into self.df (same contract as CSV)."""
        flag = 'fine'
        try:
            if header == 'n':
                self.df = pd.read_excel(filePath, header=None)
                no_of_cols = len(self.df.columns)
                self.df.columns = np.arange(1, no_of_cols + 1)
            else:
                self.df = pd.read_excel(filePath)
        except Exception:
            flag = 'Unable to process your data. Might be in the wrong format.'
        return (flag)

    def mysql(self, host, user, password, db, tableName, port=3306):
        """Connect to a MySQL database.

        FIX: the original ignored every argument and used a hard-coded
        root:root@localhost connection string; the parameters are used now.

        Returns:
            'fine' on success, otherwise the raised exception (original contract).
        """
        flag = 'fine'
        try:
            engine = create_engine(
                'mysql+mysqlconnector://%s:%s@%s:%s/%s' % (user, password, host, port, db))
            conn = engine.raw_connection()
            # NOTE(review): the original left the actual read commented out,
            # so only the connection is exercised; confirm whether the table
            # should be loaded, e.g.
            # self.df = pd.read_sql("SELECT * FROM %s" % tableName, conn)
            conn.close()
        except Exception as exception:
            flag = exception
        return (flag)

    def hdfsConnect(self, host, port, dir):
        """Load every *.json file under ``dir`` on HDFS into self.df.

        Args:
            host: WebHDFS host name.
            port: WebHDFS port (string).
            dir: directory path on HDFS (expected to end with '/').

        Returns:
            'fine' on success, an error message or exception otherwise.
        """
        flag = 'fine'
        try:
            hdfs = PyWebHdfsClient(host=host, port=port)
            path = dir
            dir_files = hdfs.list_dir(path)
            file_name = dir_files["FileStatuses"]
            dataArray = []
            for item in file_name["FileStatus"]:
                a = item["pathSuffix"]
                if re.search(r"json", a):
                    # FIX: read the matched file itself; the original read the
                    # hard-coded 'newdata.json' once per match (debug leftover
                    # — the correct line was present but commented out).
                    data = hdfs.read_file(path + a)
                    dataArray.append(data)
            try:
                # FIX: the original loop concatenated each parsed frame twice,
                # duplicating every file's rows; append each frame exactly once.
                frames = []
                for raw in dataArray:
                    my_json = raw.decode('utf8').replace("'", '"')
                    frames.append(pd.read_json(my_json))
                if frames:
                    self.df = pd.concat(frames)
            except Exception:
                flag = 'All json files have different forms of data. Unable to process.'
                return (flag)
        except Exception as exception:
            flag = exception
        return (flag)
class ManipulateData(object):
    """Manipulation tasks on the loaded dataset (column selection, encoding)."""

    def fetchColumns(self, df):
        """Return the dataframe's column labels as a plain list."""
        col = df.columns
        header = col.tolist()
        return (header)

    def dataPreprocessing(self, identifier, features, target, df):
        """Build a new frame ordered as [identifier, *features, *target].

        Args:
            identifier: column name to use as identifier, or the literal
                string 'no identifier' to synthesize a 1..N 'Identifier' column.
            features (list): feature column names.
            target (list): target column names.
            df (pandas.DataFrame): source data.

        Returns:
            pandas.DataFrame: the re-ordered frame.
        """
        updated_df = pd.DataFrame()
        # Set the identifier
        if identifier == 'no identifier':
            no_of_rows = (df.shape[0])
            updated_df['Identifier'] = np.arange(1, no_of_rows + 1)
        else:
            updated_df[identifier] = df[identifier]
        # Copy features and targets (the original re-assigned the whole list
        # once per element inside a loop; a single assignment is equivalent).
        updated_df[features] = df[features]
        updated_df[target] = df[target]
        return (updated_df)

    def DataCleaning(self, df, target):
        """One-hot encode categorical features and label-encode the targets.

        Assumes the first column is the identifier and the target columns are
        the last ``len(target)`` columns of ``df``.

        Args:
            df (pandas.DataFrame): identifier, features, then target columns.
            target (list): target column names (used only for its length).

        Returns:
            pandas.DataFrame: identifier + numeric features + dummies + targets.
        """
        no_of_targets = len(target)
        dg = pd.DataFrame()
        dx = pd.DataFrame()
        dz = pd.DataFrame()
        # Identifier column (assumed first).
        dx[df.columns.values[0]] = df[df.columns.values[0]]
        # Intial feature list
        feature_list = df.columns.values.tolist()
        # Removing Identifier
        del(feature_list[0])
        # Removing Targets (they sit at the end; repeated deletion walks them)
        for i in range(0, no_of_targets):
            del(feature_list[df.columns.values.size - no_of_targets - 1])
        # Removing Numeric Columns
        numeric_columns = df._get_numeric_data().columns
        # FIX: list.remove(Index) raises ValueError because remove() expects a
        # single element equal to one in the list; drop each numeric column
        # individually instead.
        feature_list = [c for c in feature_list if c not in numeric_columns]
        dw = df[numeric_columns]
        # One-hot encode the remaining (categorical) feature columns.
        dy = pd.get_dummies(df[feature_list], columns=feature_list)
        for i in range(0, no_of_targets):
            # Target columns are placed at the end of dataframe
            column_index = df.columns.values.size - i - 1
            D = {df.columns.values[column_index]
                 : df[df.columns.values.tolist()[column_index]]}
            df_target = pd.DataFrame.from_dict(D)
            # target variable encoding using label encoding
            from sklearn.preprocessing import LabelEncoder
            le = LabelEncoder()
            for col in df_target.columns.values:
                # Encoding only categorical variables
                if df_target[col].dtypes == 'object':
                    # Using whole data to form an exhaustive list of levels
                    data = df_target[col]
                    le.fit(data.values)
                    df_target[col] = le.transform(df_target[col])
            # NOTE(review): `col` leaks from the loop above (single-column
            # frame), and dz[col] receives the whole one-column frame —
            # confirm this matches the expected downstream layout.
            dz[col] = df_target
        dg = pd.concat([dx, dw, dy, dz], axis=1)
        return(dg)

    def arrangeDf(self, modelName, identifier, features, target):
        """Reorder the pickled input frame of ``modelName`` to features+target.

        Reads InputDataFrame//<modelName>.pkl, keeps only the requested
        feature columns (plus the target unless it is 'no target') and
        overwrites the pickle in place.
        """
        path = 'InputDataFrame//' + modelName + '.pkl'
        df = pd.read_pickle(path)
        modified_df = pd.DataFrame()
        for i in features:
            modified_df[i] = df[i]
        if (target != "no target"):
            modified_df[target] = df[target]
        modified_df.to_pickle(path)
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
76,609 | shyamnarayan2001/AS1 | refs/heads/master | /server/algorithms/api/migrations/0005_run.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-04 13:25
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the ``Run`` table used to
    # track the status of training / test / predict jobs.

    dependencies = [
        ('api', '0004_auto_20170802_1103'),
    ]

    operations = [
        migrations.CreateModel(
            name='Run',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                # Defaults to the row's creation time; blank=True lets forms omit it.
                ('time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('status', models.CharField(max_length=50)),
                ('runType', models.CharField(max_length=50)),
                ('viewed', models.CharField(max_length=50)),
            ],
        ),
    ]
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
76,610 | shyamnarayan2001/AS1 | refs/heads/master | /server/headers/api/views.py | from .dataManipulation import InputData, ManipulateData
from rest_framework.views import APIView
from rest_framework.response import Response
from algorithms.api.serializers import ModelSerializer
from algorithms.api.models import Model
from rest_framework import status
import pandas as pd
import json
# --------------------- getHeader ----------------------------- #
# This API will take the input data convert into dataFrame, save it into temporary file,
# if there is no header then add header and return the header
# NOTE :- INPUT
# 1. modelName (String)
# 2. fileType (String) (csv/excel)
# 3. header (String) (y/n)
# 4. file (remote system file path)
# ------------------------------------------------------------------------ #
class GetHeader(APIView):
    """Parse an uploaded csv/excel training file, cache it as
    InputDataFrame/<modelName>.pkl and return its column header list.

    Expected POST fields: modelName, fileType ('csv'/'excel'),
    header ('y'/'n') and the uploaded 'file'.
    """
    def post(self, request, format=None):
        try:
            inputData = InputData()
            fileType = request.data['fileType']
            if not fileType:
                # Behaviour preserved: plain-string body with HTTP 200.
                return Response("No filetype recieved")
            # DEDUP FIX: the csv and excel branches were copy-pasted;
            # dispatch to the matching loader instead.
            loaders = {'csv': inputData.CSV, 'excel': inputData.excel}
            if fileType in loaders:
                file_remote = request.FILES['file']
                header = request.data['header']
                flag = loaders[fileType](file_remote, header)
                if flag != 'fine':
                    response_error = {
                        "error" : str(flag)
                    }
                    return Response(response_error, status=status.HTTP_404_NOT_FOUND)
            df = inputData.df
            # Fetch the column name for display of identifier, features and target
            dataManipulation = ManipulateData()
            header = dataManipulation.fetchColumns(df)
            # Saving the data for future use
            modelName = request.data['modelName']
            path = 'InputDataFrame//' + modelName + '.pkl'
            df.to_pickle(path)
            return Response(header)
        except Exception as exc:
            response_error = {
                "error" : str(exc)
            }
            return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# --------------------- GetTestHeader ----------------------------- #
# This API will take the test data as an input, convert into dataFrame, save it into temporary file,
# if there is no header then add header and return the header. It will also take modelName as a input.
# fetch all the details from dataBase and match identifier, features and target with the test Data.
# if the test Data doesnot match then return error else return headers.
# NOTE :- INPUT
# 1. testName (String)
# 2. fileType (String) (csv/excel)
# 3. header (String) (y/n)
# 4. file (remote system file path)
# 5. modelName (String)
# ------------------------------------------------------------------------ #
class GetTestHeader(APIView):
    """Parse an uploaded test file, verify its columns cover the stored
    model's identifier/features/target, cache it as
    InputDataFrame/<testName>.pkl and return the matched metadata.

    Expected POST fields: testName, fileType ('csv'/'excel'),
    header ('y'/'n'), the uploaded 'file' and modelName.
    """
    def post(self, request, format=None):
        try:
            inputData = InputData()
            fileType = request.data['fileType']
            if not fileType:
                # NOTE(review): returned with HTTP 200, not an error status.
                return Response("No filetype recieved")
            if fileType == 'csv':
                file_remote = request.FILES['file']
                header = request.data['header']
                flag = inputData.CSV(file_remote, header)
                if flag != 'fine':
                    response_error = {
                        "error" : str(flag)
                    }
                    return Response(response_error, status=status.HTTP_404_NOT_FOUND)
            if fileType == 'excel':
                file_remote = request.FILES['file']
                header = request.data['header']
                flag = inputData.excel(file_remote, header)
                if flag != 'fine':
                    response_error = {
                        "error" : str(flag)
                    }
                    return Response(response_error, status=status.HTTP_404_NOT_FOUND)
            testDf = inputData.df
            # get all the details from database where model name is the user input modelName
            modelName = request.data["modelName"]
            try:
                model = Model.objects.get(modelName=modelName)
                serializer = ModelSerializer(model)
                identifier = serializer.data['identifier']
                features = serializer.data['features']
                target = serializer.data['target']
                # features are persisted as one comma-separated string
                features = features.split(',')
                # Fetch the column name form input testData to check wheathere it maches with the selected model
                dataManipulation = ManipulateData()
                header = dataManipulation.fetchColumns(testDf)
                # every stored feature column must be present in the test file
                for i in features:
                    if (i in header):
                        pass
                    else:
                        response_error = {
                            "error" : "Test data header mismatch with your selected model"
                        }
                        return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # the stored target column must be present as well
                if (target in header):
                    pass
                else:
                    response_error = {
                        "error" : "Test data header mismatch with your selected model"
                    }
                    return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # identifier is only checked when the model has one ("n" = none)
                if (identifier != "n"):
                    if (identifier in header):
                        pass
                    else:
                        response_error = {
                            "error" : "Test data header mismatch with your selected model"
                        }
                        return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # cleaning & saving the data for future use
                testName = request.data['testName']
                path = 'InputDataFrame//' + testName + '.pkl'
                testDf.to_pickle(path)
                dataManipulation.arrangeDf(testName, identifier, features, target)
                response = {
                    "identifier" : identifier,
                    "features" : features,
                    "target" : target,
                    "header" : header
                }
                return Response(response)
            except Model.DoesNotExist:
                response_error = {
                    "error" : "Model Does Not Exist"
                }
                return Response(response_error, status=status.HTTP_404_NOT_FOUND)
        except Exception as exc:
            response_error = {
                "error" : str(exc)
            }
            return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# --------------------- GetPredictHeader ----------------------------- #
# This API will take the predict data as an input, convert into dataFrame, save it into temporary file,
# if there is no header then add header and return the header. It will also take modelName as a input.
# fetch all the details from dataBase and match identifier and features with the predict Data.
# if the predict Data doesnot match then return error else return headers.
# NOTE :- INPUT
# 1. predictName (String)
# 2. fileType (String) (csv/excel)
# 3. header (String) (y/n)
# 4. file (remote system file path)
# 5. modelName (String)
# ------------------------------------------------------------------------ #
class GetPredictHeader(APIView):
    """Parse an uploaded prediction file, verify its columns cover the stored
    model's identifier/features (no target needed for prediction), cache it
    as InputDataFrame/<predictName>.pkl and return the matched metadata.

    Expected POST fields: predictName, fileType ('csv'/'excel'),
    header ('y'/'n'), the uploaded 'file' and modelName.
    """
    def post(self, request, format=None):
        try:
            inputData = InputData()
            fileType = request.data['fileType']
            if not fileType:
                # NOTE(review): returned with HTTP 200, not an error status.
                return Response("No filetype recieved")
            if fileType == 'csv':
                file_remote = request.FILES['file']
                header = request.data['header']
                flag = inputData.CSV(file_remote, header)
                if flag != 'fine':
                    response_error = {
                        "error" : str(flag)
                    }
                    return Response(response_error, status=status.HTTP_404_NOT_FOUND)
            if fileType == 'excel':
                file_remote = request.FILES['file']
                header = request.data['header']
                flag = inputData.excel(file_remote, header)
                if flag != 'fine':
                    response_error = {
                        "error" : str(flag)
                    }
                    return Response(response_error, status=status.HTTP_404_NOT_FOUND)
            predictDf = inputData.df
            # get all the details from database where model name is the user input modelName
            modelName = request.data["modelName"]
            try:
                model = Model.objects.get(modelName=modelName)
                serializer = ModelSerializer(model)
                identifier = serializer.data['identifier']
                features = serializer.data['features']
                # features are persisted as one comma-separated string
                features = features.split(',')
                # Fetch the column name form input testData to check wheathere it maches with the selected model
                dataManipulation = ManipulateData()
                header = dataManipulation.fetchColumns(predictDf)
                # every stored feature column must be present in the file
                for i in features:
                    if (i in header):
                        pass
                    else:
                        response_error = {
                            "error" : "Test data header mismatch with your selected model"
                        }
                        return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # identifier is only checked when the model has one ("n" = none)
                if (identifier != "n"):
                    if (identifier in header):
                        pass
                    else:
                        response_error = {
                            "error" : "Test data header mismatch with your selected model"
                        }
                        return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # cleaning & saving the data for future use
                predictName = request.data['predictName']
                path = 'InputDataFrame//' + predictName + '.pkl'
                predictDf.to_pickle(path)
                # sentinel: prediction data carries no target column
                target = "no target"
                dataManipulation.arrangeDf(predictName, identifier, features, target)
                response = {
                    "identifier" : identifier,
                    "features" : features,
                    "header" : header
                }
                return Response(response)
            except Model.DoesNotExist:
                response_error = {
                    "error" : "Model Does Not Exist"
                }
                return Response(response_error, status=status.HTTP_404_NOT_FOUND)
        except Exception as exc:
            response_error = {
                "error" : str(exc)
            }
            return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# --------------------- GetHDFSInput ----------------------------- #
# This API will take the predict data as an input, convert into dataFrame, save it into temporary file,
# if there is no header then add header and return the header. It will also take modelName as a input.
# fetch all the details from dataBase and match identifier and features with the predict Data.
# if the predict Data doesnot match then return error else return headers.
# NOTE :- INPUT
# 1. modelName (String)
# 2. host (String) (vsl080hachon02.altimetrik.com)
# 3. port (String) (50070)
# 4. dir (String) (/tmp/altisolve/inputdata/)
# ------------------------------------------------------------------------ #
class GetHDFSInput(APIView):
    """Read training data from an HDFS directory, cache it as
    InputDataFrame/<modelName>.pkl and return its column header list.

    Expected POST fields: modelName, host, port and dir.
    """
    def post(self, request, format=None):
        try:
            loader = InputData()
            outcome = loader.hdfsConnect(
                request.data['host'], request.data['port'], request.data['dir'])
            if outcome != 'fine':
                return Response({"error" : str(outcome)},
                                status=status.HTTP_404_NOT_FOUND)
            frame = loader.df
            # Column names are what the UI needs to pick identifier/features/target.
            column_names = ManipulateData().fetchColumns(frame)
            # Cache the frame for the later training steps.
            cache_path = 'InputDataFrame//' + request.data['modelName'] + '.pkl'
            frame.to_pickle(cache_path)
            return Response(column_names)
        except Exception as exc:
            return Response({"error" : str(exc)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# --------------------- GetTestHDFSInput ----------------------------- #
# This API will take the test data as an input, convert into dataFrame, save it into temporary file,
# if there is no header then add header and return the header. It will also take modelName as a input.
# fetch all the details from dataBase and match identifier, features and target with the test Data.
# if the test Data doesnot match then return error else return headers.
# NOTE :- INPUT
# 1. testName (String)
# 2. host (String) (vsl080hachon02.altimetrik.com)
# 3. port (String) (50070)
# 4. dir (String) (/tmp/altisolve/inputdata/)
# 5. modelName (String)
# ------------------------------------------------------------------------ #
class GetTestHDFSInput(APIView):
    """Read test data from an HDFS directory, verify its columns cover the
    stored model's identifier/features/target, cache it as
    InputDataFrame/<testName>.pkl and return the matched metadata.

    Expected POST fields: testName, host, port, dir and modelName.
    """
    def post(self, request, format=None):
        try:
            inputData = InputData()
            host = request.data['host']
            port = request.data['port']
            dir = request.data['dir']
            flag = inputData.hdfsConnect(host, port, dir)
            if flag != 'fine':
                response_error = {
                    "error" : str(flag)
                }
                return Response(response_error, status=status.HTTP_404_NOT_FOUND)
            testDf = inputData.df
            # get all the details from database where model name is the user input modelName
            modelName = request.data["modelName"]
            try:
                model = Model.objects.get(modelName=modelName)
                serializer = ModelSerializer(model)
                identifier = serializer.data['identifier']
                features = serializer.data['features']
                target = serializer.data['target']
                # features are persisted as one comma-separated string
                features = features.split(',')
                # Fetch the column name form input testData to check wheathere it maches with the selected model
                dataManipulation = ManipulateData()
                header = dataManipulation.fetchColumns(testDf)
                # every stored feature column must be present in the test data
                for i in features:
                    if (i in header):
                        pass
                    else:
                        response_error = {
                            "error" : "Test data header mismatch with your selected model"
                        }
                        return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # the stored target column must be present as well
                if (target in header):
                    pass
                else:
                    response_error = {
                        "error" : "Test data header mismatch with your selected model"
                    }
                    return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # identifier is only checked when the model has one ("n" = none)
                if (identifier != "n"):
                    if (identifier in header):
                        pass
                    else:
                        response_error = {
                            "error" : "Test data header mismatch with your selected model"
                        }
                        return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # cleaning & saving the data for future use
                testName = request.data['testName']
                path = 'InputDataFrame//' + testName + '.pkl'
                testDf.to_pickle(path)
                dataManipulation.arrangeDf(testName, identifier, features, target)
                response = {
                    "identifier" : identifier,
                    "features" : features,
                    "target" : target,
                    "header" : header
                }
                return Response(response)
            except Model.DoesNotExist:
                response_error = {
                    "error" : "Model Does Not Exist"
                }
                return Response(response_error, status=status.HTTP_404_NOT_FOUND)
        except Exception as exc:
            response_error = {
                "error" : str(exc)
            }
            return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# --------------------- GetPredictHDFSInput ----------------------------- #
# This API will take the predict data as an input, convert into dataFrame, save it into temporary file,
# if there is no header then add header and return the header. It will also take modelName as a input.
# fetch all the details from dataBase and match identifier and features with the predict Data.
# if the predict Data doesnot match then return error else return headers.
# NOTE :- INPUT
# 1. predictName (String)
# 2. host (String) (vsl080hachon02.altimetrik.com)
# 3. port (String) (50070)
# 4. dir (String) (/tmp/altisolve/inputdata/)
# 5. modelName (String)
# ------------------------------------------------------------------------ #
class GetPredictHDFSInput(APIView):
    """Read prediction data from an HDFS directory, verify its columns cover
    the stored model's identifier/features (no target for prediction), cache
    it as InputDataFrame/<predictName>.pkl and return the matched metadata.

    Expected POST fields: predictName, host, port, dir and modelName.
    """
    def post(self, request, format=None):
        try:
            inputData = InputData()
            host = request.data['host']
            port = request.data['port']
            dir = request.data['dir']
            flag = inputData.hdfsConnect(host, port, dir)
            if flag != 'fine':
                response_error = {
                    "error" : str(flag)
                }
                return Response(response_error, status=status.HTTP_404_NOT_FOUND)
            predictDf = inputData.df
            # get all the details from database where model name is the user input modelName
            modelName = request.data["modelName"]
            try:
                model = Model.objects.get(modelName=modelName)
                serializer = ModelSerializer(model)
                identifier = serializer.data['identifier']
                features = serializer.data['features']
                # features are persisted as one comma-separated string
                features = features.split(',')
                # Fetch the column name form input testData to check wheathere it maches with the selected model
                dataManipulation = ManipulateData()
                header = dataManipulation.fetchColumns(predictDf)
                # every stored feature column must be present in the data
                for i in features:
                    if (i in header):
                        pass
                    else:
                        response_error = {
                            "error" : "Test data header mismatch with your selected model"
                        }
                        return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # identifier is only checked when the model has one ("n" = none)
                if (identifier != "n"):
                    if (identifier in header):
                        pass
                    else:
                        response_error = {
                            "error" : "Test data header mismatch with your selected model"
                        }
                        return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                # cleaning & saving the data for future use
                predictName = request.data['predictName']
                path = 'InputDataFrame//' + predictName + '.pkl'
                predictDf.to_pickle(path)
                # sentinel: prediction data carries no target column
                target = "no target"
                dataManipulation.arrangeDf(predictName, identifier, features, target)
                response = {
                    "identifier" : identifier,
                    "features" : features,
                    "header" : header
                }
                return Response(response)
            except Model.DoesNotExist:
                response_error = {
                    "error" : "Model Does Not Exist"
                }
                return Response(response_error, status=status.HTTP_404_NOT_FOUND)
        except Exception as exc:
            response_error = {
                "error" : str(exc)
            }
            return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
76,611 | shyamnarayan2001/AS1 | refs/heads/master | /server/runStatus/api/serializers.py | from rest_framework import serializers
from .models import Run
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
76,612 | shyamnarayan2001/AS1 | refs/heads/master | /server/config/urls.py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
from headers.api.views import (GetHeader, GetTestHeader, GetPredictHeader,
GetHDFSInput, GetPredictHDFSInput, GetTestHDFSInput)
from algorithms.api.views import GetAlgoirthmSettings, RunTraining, RunTest, RunPredict
from runStatus.api.views import RunList, ViewAnalysis
from viewModels.api.views import ViewModels, ModelList
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Header-inspection endpoints: local upload and HDFS variants for
    # training, test and prediction data.
    url(r'^api/header/$', GetHeader.as_view()),
    url(r'^api/HDFStrainingHeader/$', GetHDFSInput.as_view()),
    url(r'^api/TestHDFSInput/$', GetTestHDFSInput.as_view()),
    url(r'^api/PredictHDFSInput/$', GetPredictHDFSInput.as_view()),
    # Model building / execution endpoints.
    url(r'^api/algorithmSettings/$', GetAlgoirthmSettings.as_view()),
    url(r'^api/runTraining/$', RunTraining.as_view()),
    # Run monitoring and result browsing.
    url(r'^api/runStatus/$', RunList.as_view()),
    url(r'^api/showGraphs/$', ViewAnalysis.as_view()),
    url(r'^api/viewModels/$', ViewModels.as_view()),
    url(r'^api/getAllModels/$', ModelList.as_view()),
    # Test & predict flows.
    url(r'^api/testHeader/$', GetTestHeader.as_view()),
    url(r'^api/runTest/$', RunTest.as_view()),
    url(r'^api/predictHeader/$', GetPredictHeader.as_view()),
    url(r'^api/runPredict/$', RunPredict.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
76,613 | shyamnarayan2001/AS1 | refs/heads/master | /server/algorithms/api/views.py | from .algorithms import (CheckRegressionOrClassification, RunAlgorithm,
RegressionTrainMoldel, RegressionTestMoldel,
RegressionPredictMoldel, ClassificationTrainMoldel,
ClassificationTestMoldel, ClassificaionPredictMoldel )
from headers.api.dataManipulation import ManipulateData
from rest_framework.views import APIView
from rest_framework.response import Response
import pandas as pd
import json
import time
from django.template.loader import render_to_string
from bkcharts import Bar, output_file, show, save, Line, Scatter, Donut
from bokeh.models import HoverTool
from bokeh.io import export_png
from bokeh.models import ColumnDataSource
from sklearn.metrics import r2_score, accuracy_score
from bokeh.embed import components
from .serializers import ModelSerializer, RunSerializer
from .models import Model, Run
from rest_framework import status
from collections import Counter
from .features import FeatureImportance
import os
# --------------------- getAlgoirthmSettings ----------------------------- #
# This API is for display the algorithms name based on user input data.
# first it will understand it is regression or classification problem then
# it will display the algorithm name accordingly.
# NOTE :- INPUT
# 1. modelName (String)
# 2. identifier (String) (n / identifier name)
# 3. features (list)
# 4. target (String)
# ------------------------------------------------------------------------ #
class GetAlgoirthmSettings(APIView):
    """Decide whether the cached model data poses a regression or a
    classification problem and return the matching algorithm names.

    Expected POST fields: modelName, identifier ('n' or a column name),
    features (list) and target.
    """
    def post(self, request, format=None):
        try:
            model_name = request.data["modelName"]
            row_identifier = request.data["identifier"]
            feature_columns = request.data["features"]
            target_column = request.data["target"]
            # Rearrange the cached frame to [identifier, features..., target].
            ManipulateData().arrangeDf(model_name, row_identifier,
                                       feature_columns, target_column)
            checker = CheckRegressionOrClassification()
            problem_kind = checker.regression_or_classification(model_name)
            if problem_kind == 'Regression':
                algo_name = ['Linear Regression' , 'Ridge Regression' , 'Support Vector Machine' , 'Neural Network' , 'Gradient Boosting' , 'Ada Boosting']
            else:
                algo_name = ['Logistic Regression' , 'Support Vector Machine Classification' , 'Random Forest Classification', 'K Nearest Neighbour' , 'Gradient Boosting Classification' , 'Ada Boosting Classification']
            return Response(algo_name)
        except Exception as exc:
            return Response({"error" : str(exc)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# --------------------- RunTraining ----------------------------- #
# This API is for create the model.
#
# NOTE :- INPUT
# 1. modelName (String)
# 2. identifier (String)
# 3. features (list)
# 4. target (String)
# 5. algorithm_names(Not Selected / list of all the algorithms that user has selected)
# ------------------------------------------------------------------------ #
class RunTraining(APIView):
    """Train models on the cached data for *modelName*.

    Runs every selected algorithm in parallel threads, records timings,
    persists the model metadata (Model) and job status (Run) in sqlite,
    exports the feature-importance / accuracy / timing / alpha plots as
    PNG files, then archives the input pickle.

    Expected POST fields: modelName, identifier, features (list), target,
    algorithm_names ('Not Selected' or a list of algorithm names).
    """
    def post(self, request, format=None):
        try:
            # ------------ Saving the details in sqlite Run for show -----------------------------
            modelName = request.data["modelName"]
            serializer_data = {
                "name" : modelName,
                "status" : "Running",
                "runType" : "Training",
                "viewed" : "no"
            }
            # Replace any previous Training row for this model name.
            try:
                model = Run.objects.get(name = modelName, runType = "Training")
                model.delete()
            except Run.DoesNotExist:
                pass
            serializer = RunSerializer(data = serializer_data)
            if serializer.is_valid():
                serializer.save()
            else:
                return Response(serializer.errors)
            # NOTE :- for testing purpose algo_name field is hardcoded.
            algo_name = request.data["algorithm_names"]
            # algo_name = ['Linear Regression' , 'Ridge Regression' , 'Support Vector Machine' , 'Neural Network' , 'Gradient Boosting' , 'Ada Boosting']
            # algo_name = ['Support Vector Machine']
            # algo_name = 'Not Selected'
            identifier = request.data["identifier"]
            # NOTE :- for testing purpose features field is hardcoded.
            features = request.data["features"]
            target = request.data["target"]
            reg_or_classification = CheckRegressionOrClassification()
            path = 'InputDataFrame//' + modelName + '.pkl'
            df = pd.read_pickle(path)
            # 'Not Selected' sentinel: pick the full algorithm list for the
            # detected problem type.
            if algo_name == 'Not Selected' :
                # NOTE - Arrange the data according to the user Input
                dataManipulation = ManipulateData()
                dataManipulation.arrangeDf(modelName, identifier, features, target)
                res = reg_or_classification.regression_or_classification(modelName)
                if res == 'Regression':
                    algo_name = ['Linear Regression' , 'Ridge Regression' , 'Support Vector Machine' , 'Neural Network' , 'Gradient Boosting' , 'Ada Boosting']
                else:
                    algo_name = ['Logistic Regression' , 'Support Vector Machine Classification' , 'Random Forest Classification', 'K Nearest Neighbour' , 'Gradient Boosting Classification' , 'Ada Boosting Classification']
            res = reg_or_classification.regression_or_classification(modelName)
            if res == 'Regression':
                algoObject = RegressionTrainMoldel(df, modelName)
            else:
                algoObject = ClassificationTrainMoldel(df, modelName)
            algoObject.split_data()
            threads = []
            # NOTE(review): DataFrame.set_value was removed in pandas 1.0 —
            # this code requires an old pandas release.
            time_spend = pd.DataFrame(columns = ['Algorithm Name','Start Time','End Time','Execution Time'] )
            iteration = 0
            # NOTE - Run the algorithm in multithreading mode
            for algo in algo_name:
                start_time = time.time()
                thread = RunAlgorithm( algo , algoObject )
                time_spend.set_value(iteration, 'Algorithm Name', algo)
                thread.start()
                time_spend.set_value(iteration, 'Start Time', start_time )
                threads.append([iteration , thread])
                iteration = iteration + 1
            # Wait for every training thread and record its end time.
            for t in threads:
                t[1].join()
                end_time = time.time()
                time_spend.set_value(t[0], 'End Time', end_time)
            time_spend['Execution Time'] = time_spend['End Time'] - time_spend['Start Time']
            # -------------------- Save the details in sqlite ---------------------------------
            # Persist features / algorithm names as comma-separated strings.
            str_features = ""
            for i in features :
                str_features += i + ","
            str_features_space = str_features.strip()
            str_features = str_features_space.rstrip(',')
            str_algo_name = ""
            for i in algo_name :
                str_algo_name += i + ","
            str_algo_name_space = str_algo_name.strip()
            str_algo_name = str_algo_name_space.rstrip(',')
            # Best algorithm = the (name, score) pair with the highest score.
            myList = algoObject.score
            maximum = max(myList, key=lambda x: x[1])
            serializer_data = {
                "modelName" : modelName,
                "identifier" : identifier,
                "features" : str_features,
                "target" : target,
                "algorithm_names" : str_algo_name,
                "max_algorithm_score" : maximum[0],
                "typeOfData" : res
            }
            try:
                model = (Model.objects.get(modelName = modelName))
                model.delete()
            except Model.DoesNotExist:
                pass
            serializer = ModelSerializer(data = serializer_data)
            if serializer.is_valid():
                serializer.save()
            else:
                return Response(serializer.errors)
            # ------------------ Features Importance -----------------------------------------
            res = reg_or_classification.regression_or_classification(modelName)
            featureImportance = FeatureImportance(df)
            if res == 'Regression':
                featureDF = featureImportance.regressionFeatureImportance()
            else:
                featureDF = featureImportance.classificationFeatureImportance()
            #-------------------- PLOTS ----------------------------------------------------------
            print (featureDF)
            ## Feature Importance Plot................
            bar = Bar(featureDF, label='feature', values='F', title="Feature Importance",legend=False, color = 'blue')
            path = 'Output//Graph//Train//' + modelName + '_FeatureImportance.png'
            export_png(bar, filename= path)
            ## Plot Algorithm vs score................
            plot_df_score = pd.DataFrame()
            score = algoObject.score
            s = list(zip(*score))
            plot_df_score['Alogrithm Name'] = s[0]
            plot_df_score['Score'] = s[1]
            source = ColumnDataSource(plot_df_score)
            print (plot_df_score)
            # NOTE(review): the intent of this score transform (scale by 180,
            # keep the absolute fractional part) is unclear — confirm with the
            # original author before touching it.
            plot_df_score.loc[:,'Score'] *= 180
            print (plot_df_score)
            plot_df_score.loc[:,'Score'] -= plot_df_score.loc[:,'Score'].astype(int)
            plot_df_score.loc[:,'Score'] = abs( plot_df_score.loc[:,'Score'])
            print (plot_df_score)
            bar = Bar(plot_df_score, label='Alogrithm Name', values='Score', title="Algorithm vs Accuracy Plot",legend=False)
            path = 'Output//Graph//Train//' + modelName + '_AlgoScore.png'
            export_png(bar, filename= path)
            path = modelName + '_AlgoScore.png'
            ## Plot Algorithm vs Time................
            source = ColumnDataSource(time_spend)
            bar = Bar(time_spend, label='Algorithm Name', values='Execution Time', title="Algorithm vs Execution Time Plot",legend=False)
            path = 'Output//Graph//Train//' + modelName + '_AlgoTime.png'
            export_png(bar, filename= path)
            ## Plot alpha_score which contains all the accuracy score with respect to alpha value
            plot_alpha_score = pd.DataFrame()
            score = algoObject.alpha_score
            # alpha scores only exist for algorithms that sweep alpha (e.g. Ridge)
            if len(score) != 0 :
                s = list(zip(*score))
                plot_alpha_score['Alpha Value'] = s[0]
                plot_alpha_score['Alpha Score'] = s[1]
                source = ColumnDataSource(plot_df_score)
                scatter = Scatter(plot_alpha_score, x='Alpha Value', y='Alpha Score', color='navy',
                                  title="Accuracy Score With Respect To Alpha Value", xlabel="Alpha Value",
                                  ylabel="Alpha Score")
                path = 'Output//Graph//Train//' + modelName + '_AlphaScore.png'
                export_png(scatter, filename= path)
                path = modelName + '_AlphaScore.png'
            # ------------------------ saving the target field ----------------------------------
            # Archive the training frame under Model_Data for later test runs.
            path = 'InputDataFrame//' + modelName + '.pkl'
            df = pd.read_pickle(path)
            path = 'InputDataFrame//Model_Data//' + modelName + '_Train.pkl'
            df.to_pickle(path)
            # ------------------------ delete the plk data file --------------------------------
            path = 'InputDataFrame//' + modelName + '.pkl'
            os.remove(path)
            # ------------ Updating the details in sqlite Run for show -----------------------------
            serializer_data = {
                "name" : modelName,
                "status" : "Success",
                "runType" : "Training",
                "viewed" : "no"
            }
            try:
                model = Run.objects.get(name = modelName, runType = "Training")
                model.delete()
            except Run.DoesNotExist:
                pass
            serializer = RunSerializer(data = serializer_data)
            if serializer.is_valid():
                serializer.save()
            else:
                return Response(serializer.errors)
            return Response('Success')
        except Exception as exc:
            # ------------ Updating the details in sqlite Run for show -----------------------------
            # Any failure flips this model's Run row to "Error".
            serializer_data = {
                "name" : modelName,
                "status" : "Error",
                "runType" : "Training",
                "viewed" : "no"
            }
            try:
                model = Run.objects.get(name = modelName, runType = "Training")
                model.delete()
            except Run.DoesNotExist:
                pass
            serializer = RunSerializer(data = serializer_data)
            if serializer.is_valid():
                serializer.save()
            else:
                return Response(serializer.errors)
            response_error = {
                "error" : str(exc)
            }
            return Response(response_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# --------------------- RunTest ------------------------------------ #
# This API is to test the model.
#
# NOTE :- INPUT
# 1. testName (String)
# 3. modelName (String)
# 3. runType (String)(type1/type2)
# ------------------------------------------------------------------- #
class RunTest(APIView):
    """Evaluate a previously trained model against a fresh test data set.

    POST payload: testName, modelName, runType ('type1'/'type2').
    type1 runs only the model's best recorded algorithm; type2 runs every
    algorithm and majority-votes the per-row predictions.  An accuracy donut
    chart is written to Output//Graph//Test// and progress is tracked in the
    Run table (Running -> Success/Error).
    """

    def _record_run(self, testName, run_status):
        """Upsert the Run row for this test.

        Returns a Response carrying serializer errors on failure, else None.
        """
        try:
            Run.objects.get(name=testName, runType="Test").delete()
        except Run.DoesNotExist:
            pass
        serializer = RunSerializer(data={
            "name": testName,
            "status": run_status,
            "runType": "Test",
            "viewed": "no",
        })
        if serializer.is_valid():
            serializer.save()
            return None
        return Response(serializer.errors)

    def post(self, request, format=None):
        # Read inputs with .get BEFORE the try so the except handler can
        # always reference testName (the original raised a NameError when
        # "testName" was missing from the payload).
        modelName = request.data.get("modelName", "")
        testName = request.data.get("testName", "")
        runType = request.data.get("runType", "")
        try:
            error = self._record_run(testName, "Running")
            if error is not None:
                return error
            # Fetch the test data that the upload step pickled earlier.
            path = 'InputDataFrame//' + testName + '.pkl'
            df = pd.read_pickle(path)
            # Fetch the type of data (regression/classification) from the data base.
            queryset = Model.objects.get(modelName=modelName)
            serializer = ModelSerializer(queryset)
            typeOfData = serializer.data['typeOfData']
            if typeOfData == 'Regression':
                testObject = RegressionTestMoldel(df, modelName)
                testObject.split_data()
                predictObject = RegressionPredictMoldel(testObject.X_test, modelName)
                algo_list = ['Linear Regression' , 'Ridge Regression' , 'Support Vector Machine' , 'Neural Network' , 'Gradient Boosting' , 'Ada Boosting']
            else:
                testObject = ClassificationTestMoldel(df, modelName)
                testObject.split_data()
                predictObject = ClassificaionPredictMoldel(testObject.X_test, modelName)
                algo_list = ['Logistic Regression' , 'Support Vector Machine Classification' , 'Random Forest Classification', 'K Nearest Neighbour' , 'Gradient Boosting Classification' , 'Ada Boosting Classification']
            algoName = serializer.data['max_algorithm_score']
            if runType == 'type1':
                # Run only the best recorded algorithm.
                thread = RunAlgorithm(algoName, testObject)
                thread.start()
                thread.join()
                # Pick the best [name, score] pair BY SCORE.  The original
                # used max() over the pairs (which compares algorithm names
                # lexicographically) and then discarded the result by
                # overwriting it with a hard-coded 0.84 debug value.
                accuracyScore = max(testObject.score, key=lambda pair: pair[1])[1]
            else:
                # Run every algorithm concurrently and majority-vote per row.
                threads = []
                for algo in algo_list:
                    thread = RunAlgorithm(algo, predictObject)
                    thread.start()
                    threads.append(thread)
                for t in threads:
                    t.join()
                predict_df = predictObject.predition
                predict_array = predict_df.values
                output_df = pd.DataFrame(columns=['actual_output', 'predicted_output'])
                for i in range(0, len(df)):
                    votes = Counter(predict_array[i])
                    top = max(votes.values())
                    winners = [value for value, count in votes.items() if count == top]
                    # DataFrame.set_value was removed in pandas 1.0; .at is
                    # the supported scalar setter.
                    output_df.at[i, 'actual_output'] = df.iloc[i, -1]
                    if len(winners) > 1:
                        # Tie: fall back to the model's best algorithm.
                        output_df.at[i, 'predicted_output'] = predict_df[algoName][i]
                    else:
                        output_df.at[i, 'predicted_output'] = winners[0]
                y_actual = output_df.iloc[:, 0]
                y_predicted = output_df.iloc[:, 1]
                if typeOfData == 'Regression':
                    accuracyScore = r2_score(y_actual, y_predicted)
                else:
                    accuracyScore = accuracy_score(y_actual.astype(int), y_predicted.astype(int))
            # Render the accuracy/error donut chart (common to both branches).
            error_rate = 1 - accuracyScore
            data = pd.Series([accuracyScore, error_rate], index=['Accuracy Score', 'Error'])
            pie_chart = Donut(data, color=["blue", "orange"])
            path = 'Output//Graph//Test//' + testName + '_ModelAccuracy.png'
            export_png(pie_chart, filename=path)
            # Delete the pickled input now that the run is complete.
            os.remove('InputDataFrame//' + testName + '.pkl')
            error = self._record_run(testName, "Success")
            if error is not None:
                return error
            return Response('Success')
        except Exception as exc:
            error = self._record_run(testName, "Error")
            if error is not None:
                return error
            return Response({"error": str(exc)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# --------------------- RunPredict ------------------------------------ #
# This API is to predict the target field using pretrained model.
#
# NOTE :- INPUT
# 1. predictName (String)
# 3. modelName (String)
# 3. runType (String)(type1/type2)
# ------------------------------------------------------------------------ #
class RunPredict(APIView):
    """Predict the target column of new data with a pre-trained model.

    POST payload: predictName, modelName, runType ('type1'/'type2').
    type1 uses only the model's best recorded algorithm; type2 runs every
    algorithm and majority-votes per row.  Writes the prediction CSV to
    Output//Predict// and tracks progress in the Run table.
    """

    def _record_run(self, predictName, run_status):
        """Upsert the Run row for this prediction job.

        Returns a Response carrying serializer errors on failure, else None.
        """
        try:
            Run.objects.get(name=predictName, runType="Predict").delete()
        except Run.DoesNotExist:
            pass
        serializer = RunSerializer(data={
            "name": predictName,
            "status": run_status,
            "runType": "Predict",
            "viewed": "no",
        })
        if serializer.is_valid():
            serializer.save()
            return None
        return Response(serializer.errors)

    def post(self, request, format=None):
        # Read inputs with .get BEFORE the try so the except handler can
        # always reference predictName (the original raised a NameError when
        # "predictName" was missing from the payload).
        modelName = request.data.get("modelName", "")
        predictName = request.data.get("predictName", "")
        runType = request.data.get("runType", "")
        try:
            error = self._record_run(predictName, "Running")
            if error is not None:
                return error
            # Fetch the data to predict on, and the model's training frame
            # (only its last column name is needed, for the output header).
            df = pd.read_pickle('InputDataFrame//' + predictName + '.pkl')
            model_df = pd.read_pickle('InputDataFrame//Model_Data//' + modelName + '_Train.pkl')
            # Fetch the type of data (regression/classification) from the data base.
            queryset = Model.objects.get(modelName=modelName)
            serializer = ModelSerializer(queryset)
            typeOfData = serializer.data['typeOfData']
            if typeOfData == 'Regression':
                predictObject = RegressionPredictMoldel(df, modelName)
                algo_list = ['Linear Regression' , 'Ridge Regression' , 'Neural Network' , 'Gradient Boosting' , 'Ada Boosting']
            else:
                predictObject = ClassificaionPredictMoldel(df, modelName)
                algo_list = ['Logistic Regression' , 'Support Vector Machine Classification' , 'Random Forest Classification', 'K Nearest Neighbour' , 'Gradient Boosting Classification' , 'Ada Boosting Classification']
            algoName = serializer.data['max_algorithm_score']
            if runType == 'type1':
                # Only the best recorded algorithm.
                thread = RunAlgorithm(algoName, predictObject)
                thread.start()
                thread.join()
                predicted = predictObject.predition.iloc[:, 0]
            else:
                # Every algorithm concurrently, then majority vote per row.
                threads = []
                for algo in algo_list:
                    thread = RunAlgorithm(algo, predictObject)
                    thread.start()
                    threads.append(thread)
                for t in threads:
                    t.join()
                predict_df = predictObject.predition
                predict_array = predict_df.values
                output_df = pd.DataFrame(columns=['predicted_output'])
                for i in range(0, len(df)):
                    votes = Counter(predict_array[i])
                    top = max(votes.values())
                    winners = [value for value, count in votes.items() if count == top]
                    # DataFrame.set_value was removed in pandas 1.0; .at is
                    # the supported scalar setter.  Ties fall back to the
                    # model's best algorithm.
                    if len(winners) > 1:
                        output_df.at[i, 'predicted_output'] = predict_df[algoName][i]
                    else:
                        output_df.at[i, 'predicted_output'] = winners[0]
                predicted = output_df.iloc[:, 0]
            # Create the predicted CSV: input columns plus the target column,
            # named after the training frame's last column.
            out_df = pd.read_pickle('InputDataFrame//' + predictName + '.pkl')
            out_df[model_df.columns.values[-1]] = predicted
            out_df.to_csv('Output//Predict//' + predictName + '_predict.csv')
            # Delete the pickled input now that the run is complete.
            os.remove('InputDataFrame//' + predictName + '.pkl')
            error = self._record_run(predictName, "Success")
            if error is not None:
                return error
            return Response('Success')
        except Exception as exc:
            error = self._record_run(predictName, "Error")
            if error is not None:
                return error
            return Response({"error": str(exc)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# /*
# * ALTIMETRIK CONFIDENTIAL
# * __________________
# *
# * Copyright (c) 2016 - 2017 Altimetrik India Pvt. Ltd.
# * All Rights Reserved.
# *
# * NOTICE: All information contained herein is, and remains
# * the property of Altimetrik India Pvt. Ltd.
# * The intellectual and technical concepts contained herein are proprietary to Altimetrik India Pvt. Ltd. and may be covered by U.S. and Foreign Patents,
# * patents in process, and are protected by trade secret or copyright law.
# * Dissemination of this information or reproduction of this material
# * is strictly forbidden unless prior written permission is obtained
# * from Altimetrik India Pvt. Ltd.
# */
"""
==================================
Feature Importance & Selection
==================================
"""
# Import libraries
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn.feature_selection import f_regression, chi2
import pandas as pd
class FeatureImportance(object):
    """Rank the features of *df* against its last (target) column.

    Both public methods return a DataFrame of the 50 strongest features,
    sorted by test statistic descending, with columns feature / F / pval.
    """

    def __init__(self, df):
        self.df = df

    def _rank(self, stat_func):
        """Apply *stat_func(X, y)* -> (statistic, p-values) and tabulate the top 50."""
        X = self.df.iloc[:, :-1]
        y = self.df.iloc[:, -1]
        statistic, pval = stat_func(X, y)
        table = pd.DataFrame()
        table['feature'] = self.df.columns[0:-1]
        table['F'] = statistic
        table['pval'] = pval
        ranked = table.sort_values(['F'], ascending=[False])
        return ranked[0:50]

    def regressionFeatureImportance(self):
        """Rank features with the univariate F-test (continuous target)."""
        return self._rank(f_regression)

    def classificationFeatureImportance(self):
        """Rank features with the chi-squared test (categorical target)."""
        return self._rank(chi2)
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
from django.db import models
# from datetime import datetime
from django.utils import timezone
class Model(models.Model):
    # Metadata for one trained model (one row per modelName).
    modelName = models.CharField(max_length=50)  # also used as the prefix of the pickled model files
    date = models.DateTimeField(default=timezone.now, blank=True)  # creation timestamp
    identifier = models.CharField(max_length=50)  # identifier column of the training data
    features = models.CharField(max_length=5000)  # comma-separated feature column names (split on ',' by readers)
    target = models.CharField(max_length=50)  # target column name
    algorithm_names = models.CharField(max_length=1000)  # comma-separated algorithm names trained for this model
    max_algorithm_score = models.CharField(max_length=100)  # name of the best-scoring algorithm
    typeOfData = models.CharField(max_length=100)  # 'Regression' or classification; selects the pipeline
class Run(models.Model):
    # One row per submitted job (training / test / predict).
    name = models.CharField(max_length=50)  # job name (model, test or predict name)
    time = models.DateTimeField(default=timezone.now, blank=True)  # submission time
    status = models.CharField(max_length=50)  # Running / Success / Error
    runType = models.CharField(max_length=50)  # Training / Test / Predict
    viewed = models.CharField(max_length=50)  # "no" until the results are opened, then "yes"
from rest_framework.views import APIView
from rest_framework.response import Response
from algorithms.api.serializers import ModelSerializer, ModelNameSerializer
from algorithms.api.models import Model
from rest_framework import status
# --------------------- ViewModels ----------------------------- #
# This API will return model details for the requested input modelname.
# NOTE :- INPUT
# 1. modelName (String)
# ------------------------------------------------------------------------ #
class ViewModels(APIView):
    """POST {"modelName"}: return identifier, features, target and
    algorithm names stored for that model."""

    def post(self, request, format=None):
        try:
            model_name = request.data["modelName"]
            record = ModelSerializer(Model.objects.get(modelName=model_name)).data
            payload = {
                "identifier": record['identifier'],
                # features / algorithm_names are stored comma-separated
                "features": record['features'].split(','),
                "target": record['target'],
                "algorithm_names": record['algorithm_names'].split(','),
            }
            return Response(payload)
        except Exception as exc:
            return Response({"error": str(exc)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# --------------------- ModelList ----------------------------- #
# This API will return all the names of pre build models.
# ------------------------------------------------------------------------ #
class ModelList(APIView):
    """GET: list the names of every pre-built model."""

    def get(self, request, format=None):
        try:
            names = ModelNameSerializer(Model.objects.all(), many=True)
            return Response(names.data)
        except Exception as exc:
            return Response({"error": str(exc)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
from rest_framework.views import APIView
from rest_framework.response import Response
from algorithms.api.serializers import ModelSerializer, RunSerializer
from django.template.loader import render_to_string
from algorithms.api.models import Model, Run
from rest_framework import status
import os
# --------------------- RunList ----------------------------- #
# This API will return list of all the submitted jobs.
# ----------------------------- ----------------------------- #
class RunList(APIView):
    """GET: return every submitted job recorded in the Run table."""

    def get(self, request, format=None):
        jobs = Run.objects.all()
        return Response(RunSerializer(jobs, many=True).data)
# --------------------- ViewAnalysis ----------------------------- #
# This API will Show the graph and output CSV file.
# NOTE :- INPUT
# 1. name (String) (name of test, train or predict)
# 2. runType (String) (Training, Test, Predict)
# ------------------------------------------------------------------------ #
class ViewAnalysis(APIView):
    """POST {"name", "runType"}: return the output artifact paths for a
    finished run (graphs for Training/Test, CSV path for Predict) and mark
    its Run row as viewed.

    Fixes over the original:
    * a local variable named `status` shadowed the rest_framework `status`
      module, so the except handler crashed instead of returning a 500;
    * when the Run row did not exist, `serializer_data` was never assigned
      and the re-save below raised a NameError;
    * an unknown runType left `json_response` undefined (NameError).
    """

    def post(self, request, format=None):
        try:
            name = request.data["name"]
            runType = request.data["runType"]
            if runType == "Training":
                json_response = {
                    "Accuracy Plot": '/Output/Graph/Train/' + name + '_AlgoScore.png',
                    "Time Plot": '/Output/Graph/Train/' + name + '_AlgoTime.png',
                    "Feature Importance": '/Output/Graph/Train/' + name + '_FeatureImportance.png',
                    # Built unconditionally: plain string concatenation never
                    # raises, so the original try/except around it was inert.
                    "Alpha Accuracy Plot": '/Output/Graph/Train/' + name + '_AlphaScore.png',
                }
            elif runType == "Test":
                json_response = {
                    "Accuracy Plot": '/Output/Graph/Test/' + name + '_ModelAccuracy.png',
                }
            elif runType == "Predict":
                json_response = {
                    "Prediction Path": '/Output/Predict/' + name + '_predict.csv',
                }
            else:
                # Caught by the outer handler and reported as a 500, like any
                # other failure in this view.
                raise ValueError('unknown runType: ' + str(runType))
            # Re-write the Run row with viewed="yes", preserving its original
            # time and status.  If there is no row, there is nothing to flip.
            try:
                run = Run.objects.get(name=name, runType=runType)
                old = RunSerializer(run).data
                run.delete()
                serializer = RunSerializer(data={
                    "name": name,
                    "time": old['time'],
                    "status": old['status'],
                    "runType": runType,
                    "viewed": "yes",
                })
                if serializer.is_valid():
                    serializer.save()
                else:
                    return Response(serializer.errors)
            except Run.DoesNotExist:
                pass
            return Response(json_response)
        except Exception as exc:
            return Response({"error": str(exc)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ALTIMETRIK CONFIDENTIAL
# __________________
#
# Copyright (c) 2016 - 2017 Altimetrik India Pvt. Ltd.
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Altimetrik India Pvt. Ltd.
# The intellectual and technical concepts contained herein are proprietary to Altimetrik India Pvt. Ltd. and may be covered by U.S. and Foreign Patents,
# patents in process, and are protected by trade secret or copyright law.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Altimetrik India Pvt. Ltd.
"""
====================================
Algorithms
====================================
"""
# Import libraries
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import pickle
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn import ensemble
import threading
import math
import time
from sklearn.svm import LinearSVR
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
print(__doc__)
# ------------- Train Regression Model ----------------------------
class RegressionTrainMoldel(object):
    """Train a suite of regression algorithms on one data set.

    The LAST column of *dataframe* is the target; everything before it is a
    feature.  Each train method fits one estimator, pickles it under
    'Model/<modelName>_<suffix>.pkl' and appends [label, R^2 on the held-out
    split] to self.score.
    """

    def __init__(self, dataframe, modelName, test_size=0.33):
        self.df = dataframe
        self.modelName = modelName
        self.X_train = None
        self.X_test = None
        self.Y_train = None
        self.Y_test = None
        # Fraction of rows held out for scoring (default 33%).
        self.test_size = test_size
        self.score = []          # [algorithm label, R^2] pairs
        self.alpha_score = []    # ridge sweep: [alpha, R^2] pairs
        self.C_score = []        # SVM sweep: [C, R^2] pairs
        self.Rsquare_value = []  # neural-net sweep: [alpha, R^2] pairs
        self.C_value = []
        self.alpha_value = []

    def _finish(self, model, suffix, label):
        """Pickle *model* and record its held-out R^2 under *label*.

        Uses a `with` block so the pickle file handle is always closed.
        """
        with open('Model/' + self.modelName + '_' + suffix + '.pkl', 'wb') as fh:
            pickle.dump(model, fh)
        self.score.append([label, model.score(self.X_test, self.Y_test)])

    def _best_hyperparam(self, pairs):
        """Return (pairs as ndarray, hyper-parameter of the highest-scoring pair)."""
        pairs = np.array(pairs)
        max_score = max(pairs[:, 1])
        index = int(np.where(pairs[:, 1] == max_score)[0][0])
        return pairs, pairs[index][0]

    def split_data(self):
        """Split self.df into train/test arrays; the last column is the target."""
        array = self.df.values
        col_length = len(self.df.columns) - 1
        X = array[:, 0:col_length]
        Y = array[:, col_length]
        seed = 7  # fixed seed keeps the split reproducible across runs
        self.X_train, self.X_test, self.Y_train, self.Y_test = model_selection.train_test_split(
            X, Y, test_size=self.test_size, random_state=seed)

    def linearRegression(self):
        """Plain least-squares fit."""
        model = LinearRegression()
        model.fit(self.X_train, self.Y_train)
        self._finish(model, 'lineraRegression', 'Linear Regression')

    def neuralNetwork(self):
        """MLP regressor: sweep alpha over 0.0001..0.001 and keep the alpha
        just before the first drop in test R^2."""
        alpha_range = np.arange(.0001, .001, .0001)
        num_alpha = len(alpha_range)
        for i in range(0, num_alpha):
            model = MLPRegressor(alpha=alpha_range[:][i], hidden_layer_sizes=(
                1000, 3), activation='relu', solver='lbfgs', random_state=9)
            model.fit(self.X_train, self.Y_train)
            self.Rsquare_value.append([alpha_range[:][i], model.score(self.X_test, self.Y_test)])
        self.Rsquare_value = np.array(self.Rsquare_value)
        # Walk the sweep until the score first decreases; the alpha before
        # that point wins.  (The original mixed a list and a numpy scalar in
        # self.alpha_value and crashed on len() of the scalar.)
        chosen = None
        for i in range(1, num_alpha):
            if self.Rsquare_value[:, 1][i - 1] > self.Rsquare_value[:, 1][i]:
                break
            chosen = self.Rsquare_value[:, 0][i - 1]
        self.alpha_value = self.Rsquare_value[:, 0][0] if chosen is None else chosen
        # Fit the final model with the chosen alpha.
        model = MLPRegressor(alpha=self.alpha_value, hidden_layer_sizes=(
            1000, 3), activation='relu', solver='lbfgs', random_state=9)
        model.fit(self.X_train, self.Y_train)
        self._finish(model, 'neuralNetwork', 'Neural Network')

    def ridgeRegression(self):
        """Ridge regression: sweep alpha over 0.01..10 and keep the best."""
        for alpha in np.arange(0.01, 10.01, 0.01):
            model = Ridge(alpha=alpha)
            model.fit(self.X_train, self.Y_train)
            self.alpha_score.append([alpha, model.score(self.X_test, self.Y_test)])
        self.alpha_score, alpha = self._best_hyperparam(self.alpha_score)
        model = Ridge(alpha=alpha)
        model.fit(self.X_train, self.Y_train)
        self._finish(model, 'ridgeRegression', 'Ridge Regression')

    def supportVectorMachine(self):
        """Linear SVR: sweep C over 100..900 and keep the best."""
        for C in np.arange(100, 1000, 100):
            model = LinearSVR(C=C)
            model.fit(self.X_train, self.Y_train)
            self.C_score.append([C, model.score(self.X_test, self.Y_test)])
        self.C_score, C = self._best_hyperparam(self.C_score)
        model = LinearSVR(C=C)
        model.fit(self.X_train, self.Y_train)
        self._finish(model, 'supportVectorMachine', 'Support Vector Machine')

    def gradientBoosting(self, n_weekLearner=300, decisionTreeDepth=4, loss='ls', seed=9):
        """Gradient-boosted regression trees."""
        model = ensemble.GradientBoostingRegressor(n_estimators=n_weekLearner,
                                                   max_depth=decisionTreeDepth,
                                                   loss=loss,
                                                   random_state=seed)
        model.fit(self.X_train, self.Y_train)
        self._finish(model, 'gradientBoosting', 'Gradient Boosting')

    def adaBoosting(self, decisionTreeDepth=4, n_weekLearner=300, seed=9):
        """AdaBoost regressor.

        decisionTreeDepth is kept for interface compatibility; it was unused
        in the original implementation as well.
        """
        model = AdaBoostRegressor(n_estimators=n_weekLearner,
                                  random_state=seed,
                                  loss='linear')
        model.fit(self.X_train, self.Y_train)
        self._finish(model, 'adaBoosting', 'Ada Boosting')
# ------------- Test Regression Model ----------------------------
class RegressionTestMoldel(object):
    """Score pre-trained regression models against a labelled test frame.

    The LAST column of *dataframe* is the target.  Each method loads
    'Model/<modelName>_<suffix>.pkl' and appends [label, R^2] to self.score.
    """

    def __init__(self, dataframe, modelName):
        self.df = dataframe
        self.modelName = modelName
        self.X_test = None
        self.Y_test = None
        self.score = []  # [algorithm label, R^2] pairs

    def split_data(self):
        """Split self.df into features (all but last column) and target arrays."""
        array = self.df.values
        col_length = len(self.df.columns) - 1
        self.X_test = array[:, 0:col_length]
        self.Y_test = array[:, col_length]

    def _score_model(self, suffix, label):
        """Load the pickled estimator and record its R^2 on the test split.

        Fix: the original opened the pickle file and never closed it (handle
        leak); a `with` block closes it deterministically.
        """
        path = 'Model/' + self.modelName + '_' + suffix + '.pkl'
        with open(path, 'rb') as model_pkl:
            model = pickle.load(model_pkl)
        self.score.append([label, model.score(self.X_test, self.Y_test)])

    def linearRegression(self):
        self._score_model('lineraRegression', 'Linear Regression')

    def neuralNetwork(self):
        # Label normalised from 'neuralNetwork' to 'Neural Network' for
        # consistency with the Train class and the views' algo lists.
        self._score_model('neuralNetwork', 'Neural Network')

    def ridgeRegression(self):
        self._score_model('ridgeRegression', 'Ridge Regression')

    def supportVectorMachine(self):
        self._score_model('supportVectorMachine', 'Support Vector Machine')

    def gradientBoosting(self):
        self._score_model('gradientBoosting', 'Gradient Boosting')

    def adaBoosting(self):
        self._score_model('adaBoosting', 'Ada Boosting')
# ------------- Predict Regression Model ----------------------------
class RegressionPredictMoldel(object):
    """Run pre-trained regression models over a feature-only frame.

    Each predict method loads 'Model/<modelName>_<suffix>.pkl' and stores its
    predictions as a column of self.predition (sic - attribute name kept for
    callers such as RunTest/RunPredict).
    """

    def __init__(self, dataframe, modelName):
        self.df = dataframe
        self.modelName = modelName
        self.predition = pd.DataFrame()

    def _predict(self, suffix, label):
        """Load the pickled estimator and record its predictions under *label*.

        Fix: the original opened the pickle file and never closed it (handle
        leak); a `with` block closes it deterministically.
        """
        path = 'Model/' + self.modelName + '_' + suffix + '.pkl'
        with open(path, 'rb') as model_pkl:
            model = pickle.load(model_pkl)
        self.predition[label] = model.predict(self.df)

    def linearRegression(self):
        self._predict('lineraRegression', 'Linear Regression')

    def neuralNetwork(self):
        self._predict('neuralNetwork', 'Neural Network')

    def ridgeRegression(self):
        self._predict('ridgeRegression', 'Ridge Regression')

    def supportVectorMachine(self):
        self._predict('supportVectorMachine', 'Support Vector Machine')

    def gradientBoosting(self):
        self._predict('gradientBoosting', 'Gradient Boosting')

    def adaBoosting(self):
        self._predict('adaBoosting', 'Ada Boosting')
# ------------- Train Classification Model ----------------------------
class ClassificationTrainMoldel(object):
    """Train several classification models on one dataframe.

    Each ``*Classifier`` method fits a model on the train split, pickles it
    to 'Model/<modelName>_<suffix>.pkl' and records its accuracy on the test
    split in ``self.score``.

    NOTE: the class-name spelling 'Moldel' is kept for API compatibility.
    """

    def __init__(self, dataframe, modelName, test_size=0.33):
        self.df = dataframe
        self.modelName = modelName
        self.X_train = None
        self.X_test = None
        self.Y_train = None
        self.Y_test = None
        # Fraction of rows held out for testing (default: 33%).
        self.test_size = test_size
        self.score = []          # [algorithm name, accuracy] pairs
        self.alpha_score = []
        self.alpha_value = []
        self.C_score = []        # [C, accuracy] pairs from the SVC grid search
        self.Rsquare_value = []
        self.C_value = []

    def _dump_model(self, model, suffix):
        """Pickle *model* to 'Model/<modelName>_<suffix>.pkl'.

        The context manager closes the handle even if pickling raises
        (the previous open/close pattern could leak it on error).
        """
        with open('Model/' + self.modelName + '_' + suffix + '.pkl', 'wb') as model_pkl:
            pickle.dump(model, model_pkl)

    def split_data(self):
        """Split self.df into train/test feature and target arrays.

        The last dataframe column is taken as the dependent variable.
        """
        array = self.df.values
        col_length = len(self.df.columns) - 1
        X = array[:, 0:col_length]
        Y = array[:, col_length]
        seed = 7  # fixed seed for a reproducible split
        self.X_train, self.X_test, self.Y_train, self.Y_test = model_selection.train_test_split(
            X, Y, test_size=self.test_size, random_state=seed)

    # ---------- KNeighborsClassifier -------------
    def kNeighborsClassifier(self, n_neighbors=20):
        model = KNeighborsClassifier(n_neighbors=n_neighbors)
        model.fit(self.X_train, self.Y_train)
        self._dump_model(model, 'kNeighborsClassifier')
        # model.score = mean accuracy on the held-out test set
        score = model.score(self.X_test, self.Y_test)
        self.score.append(['K Nearest Neighbour', score])

    # ---------- RandomForestClassifier -------------
    def randomForestClassifier(self, n_estimator=20, seed=9, n_jobs=-1, max_depth=10):
        model = ensemble.RandomForestClassifier(n_estimators=n_estimator,
                                                random_state=seed,
                                                n_jobs=n_jobs,
                                                max_depth=max_depth)
        model.fit(self.X_train, self.Y_train)
        self._dump_model(model, 'randomForestClassifier')
        score = model.score(self.X_test, self.Y_test)
        self.score.append(['Random Forest Classification', score])

    # ---------- GradientBoostingClassifier -------------
    def gradientBoostingClassifier(self, seed=9, n_weekLearner=300):
        model = ensemble.GradientBoostingClassifier(n_estimators=n_weekLearner,
                                                    random_state=seed)
        model.fit(self.X_train, self.Y_train)
        self._dump_model(model, 'gradientBoostingClassifier')
        score = model.score(self.X_test, self.Y_test)
        self.score.append(['Gradient Boosting Classification', score])

    # ---------- LogisticRegression -------------
    def logisticRegression(self, seed=9):
        model = LogisticRegression(multi_class='ovr',
                                   random_state=seed)
        model.fit(self.X_train, self.Y_train)
        self._dump_model(model, 'logisticRegression')
        score = model.score(self.X_test, self.Y_test)
        self.score.append(['Logistic Regression', score])

    def adaBoostClassifier(self, decisionTreeDepth=4, n_weekLearner=300, seed=9):
        # NOTE(review): decisionTreeDepth is accepted but never used; wiring it
        # to a base estimator would change trained-model behaviour, so it is
        # deliberately left as-is for compatibility.
        model = AdaBoostClassifier(n_estimators=n_weekLearner,
                                   random_state=seed)
        model.fit(self.X_train, self.Y_train)
        self._dump_model(model, 'adaBoostClassifier')
        score = model.score(self.X_test, self.Y_test)
        self.score.append(['Ada Boosting Classification', score])

    def supportVectorMachine_Classifier(self):
        # Grid-search C over [100, 1000) in steps of 100.
        C_range = np.arange(100, 1000, 100)
        for C_candidate in C_range:
            model = LinearSVC(C=C_candidate)
            model.fit(self.X_train, self.Y_train)
            score = model.score(self.X_test, self.Y_test)
            self.C_score.append([C_candidate, score])
        # C_score now holds one [C, accuracy] row per candidate.
        self.C_score = np.array(self.C_score)
        # Select the first C that achieves the best test accuracy.
        max_score = max(self.C_score[:, 1])
        index_list = np.where(self.C_score[:, 1] == max_score)
        index = int(index_list[0][0])
        C = self.C_score[index][0]
        # Refit with the selected C and persist that model.
        model = LinearSVC(C=C)
        model.fit(self.X_train, self.Y_train)
        self._dump_model(model, 'supportVectorMachine_Classifier')
        score = model.score(self.X_test, self.Y_test)
        self.score.append(['Support Vector Machine Classification', score])
# ------------- Test Classification Model ----------------------------
class ClassificationTestMoldel(object):
    """Evaluate pre-trained classification models on a fresh labelled dataset.

    Each ``*Classifier`` method loads a pickled model trained under the same
    *modelName* and appends [algorithm name, accuracy] to ``self.score``.

    NOTE: the class-name spelling 'Moldel' is kept for API compatibility.
    """

    def __init__(self, dataframe, modelName):
        self.df = dataframe        # labelled evaluation data
        self.modelName = modelName # prefix of the pickled model files
        self.X_test = None
        self.Y_test = None
        self.score = []            # [algorithm name, accuracy] pairs

    def split_data(self):
        """Split self.df into features (all but last column) and target (last column)."""
        array = self.df.values
        col_length = len(self.df.columns) - 1
        self.X_test = array[:, 0:col_length]
        self.Y_test = array[:, col_length]

    def _score_model(self, suffix, label):
        """Load 'Model/<modelName>_<suffix>.pkl' and record its accuracy.

        Uses a context manager so the pickle file handle is always closed
        (the previous implementation leaked it).
        """
        filename = 'Model/' + self.modelName + '_' + suffix + '.pkl'
        with open(filename, 'rb') as model_pkl:
            model = pickle.load(model_pkl)
        score = model.score(self.X_test, self.Y_test)
        self.score.append([label, score])

    def kNeighborsClassifier(self):
        self._score_model('kNeighborsClassifier', 'K Nearest Neighbour')

    def randomForestClassifier(self):
        self._score_model('randomForestClassifier', 'Random Forest Classification')

    def gradientBoostingClassifier(self):
        self._score_model('gradientBoostingClassifier', 'Gradient Boosting Classification')

    def logisticRegression(self):
        self._score_model('logisticRegression', 'Logistic Regression')

    def adaBoostClassifier(self):
        self._score_model('adaBoostClassifier', 'Ada Boosting Classification')

    def supportVectorMachine_Classifier(self):
        self._score_model('supportVectorMachine_Classifier', 'Support Vector Machine Classification')
# ------------- Predict Classificaion Model ----------------------------
class ClassificaionPredictMoldel(object):
    """Generate predictions from pre-trained classification models.

    Each method loads a pickled model trained under the same *modelName*
    and stores its predictions as a new column of ``self.predition``.

    NOTE: the spellings 'Classificaion', 'Moldel' and 'predition' are kept
    as-is because external callers depend on these names.
    """

    def __init__(self, dataframe, modelName):
        self.df = dataframe            # feature matrix to predict on
        self.modelName = modelName     # prefix of the pickled model files
        self.predition = pd.DataFrame()  # one column per algorithm

    def _load_and_predict(self, suffix, column):
        """Load 'Model/<modelName>_<suffix>.pkl' and store predictions in *column*.

        Uses a context manager so the pickle file handle is always closed
        (the previous implementation leaked it).
        """
        filename = 'Model/' + self.modelName + '_' + suffix + '.pkl'
        with open(filename, 'rb') as model_pkl:
            model = pickle.load(model_pkl)
        self.predition[column] = model.predict(self.df)

    def kNeighborsClassifier(self):
        self._load_and_predict('kNeighborsClassifier', 'K Nearest Neighbour')

    def randomForestClassifier(self):
        self._load_and_predict('randomForestClassifier', 'Random Forest Classification')

    def gradientBoostingClassifier(self):
        self._load_and_predict('gradientBoostingClassifier', 'Gradient Boosting Classification')

    def logisticRegression(self):
        self._load_and_predict('logisticRegression', 'Logistic Regression')

    def adaBoostClassifier(self):
        self._load_and_predict('adaBoostClassifier', 'Ada Boosting Classification')

    def supportVectorMachine_Classifier(self):
        self._load_and_predict('supportVectorMachine_Classifier', 'Support Vector Machine Classification')
class CheckRegressionOrClassification(object):
    """Heuristically decide whether a saved dataset poses a regression or a
    classification problem."""

    def regression_or_classification(self, modelName):
        """Return "Regression" or "Classification" for the pickled dataframe
        at 'InputDataFrame/<modelName>.pkl'.

        Heuristic: count the distinct values in the last (target) column.
        If there are more distinct values than 2% of the row count, the
        target is treated as continuous (regression).
        """
        path = 'InputDataFrame//' + modelName + '.pkl'
        df = pd.read_pickle(path)
        r, c = df.shape
        # Number of distinct values in the target (last) column.
        grps = len(df.iloc[:, c - 1].unique())
        prcnt = 0.02
        # Cutoff is 2% of the number of rows. (The old comment claimed 20%,
        # which did not match the code; also removed unused locals.)
        cutoffRange = math.floor(prcnt * r)
        if grps > cutoffRange:
            return ("Regression")
        else:
            return ("Classification")
class RunAlgorithm (threading.Thread):
    """Thread that runs one named algorithm method on a train/test/predict object.

    *algoObject* is any object exposing the method mapped from *algoName*
    (e.g. the *Moldel classes above). Unknown names are silently ignored,
    matching the original if/elif chain.
    """

    # Maps the user-facing algorithm name to the method to invoke.
    # (Replaces an inconsistent if/if/elif chain with a dispatch table.)
    _DISPATCH = {
        'Linear Regression': 'linearRegression',
        'Neural Network': 'neuralNetwork',
        'Ridge Regression': 'ridgeRegression',
        'Support Vector Machine': 'supportVectorMachine',
        'Gradient Boosting': 'gradientBoosting',
        'Ada Boosting': 'adaBoosting',
        'Logistic Regression': 'logisticRegression',
        'Support Vector Machine Classification': 'supportVectorMachine_Classifier',
        'Random Forest Classification': 'randomForestClassifier',
        'K Nearest Neighbour': 'kNeighborsClassifier',
        'Gradient Boosting Classification': 'gradientBoostingClassifier',
        'Ada Boosting Classification': 'adaBoostClassifier',
    }

    def __init__(self, algoName, algoObject):
        threading.Thread.__init__(self)
        self.algoName = algoName      # user-facing algorithm label
        self.algoObject = algoObject  # object exposing the algorithm methods

    def run(self):
        """Invoke the method matching self.algoName, if any."""
        method_name = self._DISPATCH.get(self.algoName)
        if method_name is not None:
            getattr(self.algoObject, method_name)()
| {"/server/headers/api/views.py": ["/server/headers/api/dataManipulation.py"], "/server/algorithms/api/views.py": ["/server/algorithms/api/algorithms.py", "/server/algorithms/api/models.py", "/server/algorithms/api/features.py"]} |
76,619 | pingyinan/biye | refs/heads/master | /test.py | import cv2
import numpy as np
import os
import csv
from fixslic import fixslic_process
import matplotlib as plt
from basic_elements import Frame
from drawcontours import mergeClusters
def readLabel(labelPath):
    """Read a CSV superpixel-label file into a list of rows of ints."""
    with open(labelPath, "r") as handle:
        raw_rows = [row for row in csv.reader(handle)]
    # Convert every cell of every row from string to int.
    return [[int(cell) for cell in row] for row in raw_rows]
if __name__ == '__main__':
    # Experiment driver: load an image and its SLIC superpixel labels, run the
    # edge-vote / merge pipeline and display intermediate results.
    # Paths are hard-coded to the author's machine.
    root = "C:\\Users\\PYN\\Desktop\\xueweilunwen\\MyProject"
    labelRoot = os.path.join(root, "output", "desk", "label")
    imgRoot = os.path.join(root, "output", "desk", "data")
    imgName = ['00136.jpg', '00001.jpg']
    labelName = ['slic_c_2000136.csv', 'slic_c_2000001.csv']
    frames = []
    for i in range(2):
        # NOTE(review): imgPath/labelPath are computed from the loop index but
        # never used below -- both iterations load the same hard-coded files.
        imgPath = os.path.join(imgRoot,imgName[i])
        labelPath = os.path.join(labelRoot,labelName[i])
        img = cv2.imread("C:\\Users\\PYN\\Desktop\\xueweilunwen\\MyProject\\data\\images\\val\\42049.jpg")
        labels = readLabel("C:\\Users\\PYN\\Desktop\\xueweilunwen\\MyProject\\output\\label\\slic42049.csv")
        frame = Frame(img, labels)
        # If the HSI Euclidean distance of two boundary points Pa/Pb is below
        # threshold1, they vote to merge clusters A and B; edge (a, b) is
        # mergeable when (supporting points / total points) > threshold2.
        # fix = fixslic_process(frame)
        # # fix.updatelabels_245(10)
        # frame = fix.updatelabels_process(4, 8)
        # fix = fixslic_process(frame)
        # frame = fix.updatelabels_process(4, 4)
        frame.isEdgeSupportMerge(5)
        # # input()
        frame.sortedges(10, 0.4)
        # Show boundary pixels: red = boundary, green = votes for merge.
        cv2.namedWindow("sort_point")
        cv2.imshow("sort_point", frame.img_sort_point)
        def Mousecallback(event, x, y, flags, param):
            # Debug helper: left-click prints the HSI value under the cursor.
            if event == cv2.EVENT_FLAG_LBUTTON:
                print((x, y), frame.img_hsi[y, x])
        cv2.setMouseCallback("sort_point", Mousecallback)
        # NOTE(review): waitKey returns an int, so comparing to the string 'q'
        # is always False -- this just waits for any key.
        if cv2.waitKey(0) == 'q':
            pass
        merge_plan = frame.checkMergePlan()
        cv2.imshow("plan", merge_plan)
        frame.calculateLTY()
        frame.mergeClusters()
        # merge_result = frame.drawBlocks()
        # cv2.imshow("merge_result", merge_result)
        # cv2.waitKey(0)
        cv2.waitKey(0)
        frames.append(frame)
    print("initial done!")
    # NOTE(review): Frame does not visibly set img_contours anywhere in this
    # chunk -- confirm it exists before running this line.
    new_img = np.concatenate((frames[0].img_contours, frames[1].img_contours),axis=1)# stitch the two images side by side
    merge_img = mergeClusters(frames[0], 5, 99999)# visualize which clusters could merge
    cv2.imshow("merge",merge_img)
    cv2.imwrite("output\\desk\\merge\\00001.jpg",merge_img)
    cv2.waitKey(0)
| {"/test.py": ["/fixslic.py", "/basic_elements.py", "/drawcontours.py"], "/fixslic.py": ["/basic_elements.py"]} |
76,620 | pingyinan/biye | refs/heads/master | /drawcontours.py | import cv2
import numpy as np
# Debug visualization of the merge decision (green = merge candidate, red = kept boundary)
def mergeClusters(frame, threshold1, threshold2):
    """Paint merge candidates onto a copy of frame.img and return it.

    For every boundary pixel, if a neighbour from a different cluster is
    closer than *threshold1* in HSI space AND the HSI values at the two
    cluster centroids differ by less than *threshold2*, the pixel is painted
    green (merge candidate); otherwise remaining boundary pixels (more than
    one foreign neighbour) are painted red.
    """
    height, width, channel = frame.img.shape
    label = frame.labels
    img_hsi = frame.img_hsi
    img_hsi = np.array(img_hsi,dtype=float)
    img_countours = frame.img.copy()
    # 8-connected neighbourhood offsets.
    dx = [-1, -1, 0, 1, 1, 1, 0, -1]
    dy = [0, -1, -1, -1, 0, 1, 1, 1]
    for j in range(height):
        for k in range(width):
            nop = 0  # count of neighbours belonging to a different cluster
            for i in range(8):
                x = k + dx[i]
                y = j + dy[i]
                if x > 0 and x < width and y > 0 and y < height:
                    if label[j][k] != label[y][x]:
                        nop = nop + 1
                        dist = np.linalg.norm(img_hsi[j][k] - img_hsi[y][x])
                        if dist < threshold1:
                            # Compare HSI at the two superpixel centroids.
                            cent1 = frame.sp_position[label[j][k]]
                            cent2 = frame.sp_position[label[y][x]]
                            centerdist = np.linalg.norm(img_hsi[cent1[0]][cent1[1]] - img_hsi[cent2[0]][cent2[1]])
                            if centerdist < threshold2:
                                img_countours[j][k] = [0, 255, 0]
                                # Reset nop so this pixel is not re-painted red below.
                                nop = 0
                                continue
            if nop > 1:
                img_countours[j][k] = [0, 0, 255]
    return img_countours
# def dealwithedge()
# def DrawContoursAroundSegments(img,label):
# dx = [-1, -1, 0, 1, 1, 1, 0, -1]
# dy = [0, -1, -1, -1, 0, 1, 1, 1]
# height, width, channel = img.shape
# img_countours = img.copy()
# for j in range(height):
# for k in range(width):
# np = 0
# for i in range(8):
# x = k + dx[i]
# y = j + dy[i]
# if x > 0 and x < width and y > 0 and y < height:
# if label[j][k] != label[y][x]:
# np = np +1
# if np > 1:
# img_countours[j, k] = [0, 0, 0]
# cv2.imshow("draw contour", img_countours)
| {"/test.py": ["/fixslic.py", "/basic_elements.py", "/drawcontours.py"], "/fixslic.py": ["/basic_elements.py"]} |
76,621 | pingyinan/biye | refs/heads/master | /test2.py | import numpy as np
import cv2

# Experiment script: mean-shift filter an image, then flood-fill a region.
# NOTE(review): `np` is used below but `import numpy as np` is not visible in
# this chunk -- confirm it is imported at the top of the file.
image = cv2.imread("C:\\Users\\PYN\\Desktop\\xueweilunwen\\MyProject\\output\\desk\\data\\00001.jpg")# read the input image (hard-coded path)
# gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)# convert to grayscale
# cv2.imshow("Image",gray)# display the image
# cv2.waitKey()
#
# # Canny edge detection
# canny = cv2.Canny(gray,30,150)
# cv2.imshow("Canny",canny)
# cv2.waitKey()
# Mean-shift parameters: spatial window, colour window, pyramid depth.
spatialRad = 50
colorRad = 50
maxPryLevel = 1
def fill_color_demo(image):
    # Flood-fill from seed (30, 30) with yellow, using a fixed colour range.
    copyIma = image.copy()
    h, w = image.shape[:2]
    print(h, w)
    # Mask must be 2 pixels larger than the image (OpenCV floodFill contract).
    mask = np.zeros([h+2, w+2], np.uint8)
    cv2.floodFill(copyIma, mask, (30, 30), (0, 255, 255), (100, 100, 100), (50, 50, 50), cv2.FLOODFILL_FIXED_RANGE)
    cv2.imshow("fill_color", copyIma)
dst = cv2.pyrMeanShiftFiltering(image, spatialRad, colorRad, maxLevel=maxPryLevel)
cv2.imshow("dst", dst)
fill_color_demo(dst)
cv2.waitKey(0)
76,622 | pingyinan/biye | refs/heads/master | /basic_elements.py | import cv2
import numpy as np
import queue
import heapq
import matplotlib.pyplot as plt
class Cluster:
    """One superpixel: its label index, centroid, colour histogram and the
    bookkeeping needed for merging."""

    def __init__(self, index, pos, hist):
        self.index = index       # superpixel label
        self.pos = pos           # centroid as (row, col)
        self.hsi_hist = hist     # normalized 513-bin HSI histogram
        self.edges = []          # (c1, c2) edge keys this cluster touches
        self.liantongshu = 0     # connectivity degree (# mergeable edges)
        self.pixels = []         # (row, col) pixels belonging to this cluster
# Merging starts from the cluster with the highest connectivity degree
class Merge:
    """A merged block of clusters: its id, member clusters and outline edges."""

    def __init__(self, index):
        self.index = index    # block id
        self.clusters = []    # Cluster objects absorbed into this block
        self.edges = []       # edge keys forming the block's current outline
class Frame:
    """One image plus its superpixel segmentation and merge machinery.

    Holds the BGR image, its HSI conversion, a 513-bin colour histogram per
    superpixel, the boundary edges between neighbouring superpixels, and the
    voting / flood-fill logic that merges superpixels into blocks.
    """

    def __init__(self, img, labels):
        # labels: per-pixel superpixel index (list of lists, same shape as img)
        N_superpixels = max(np.amax(np.array(labels), axis=1)) + 1 # The number of superpixels
        self.sp_number = N_superpixels
        self.img = img
        self.labels = labels
        img_hsi = self.rgb2hsi(img)
        self.img_hsi = img_hsi
        # Quantize each HSI channel into 8 levels (value // 32) and combine
        # into a single histogram-bin index in [0, 512].
        # NOTE(review): np.int was removed in NumPy >= 1.24 -- this needs int/np.int_.
        self.f_bin = ((img_hsi[:, :, 2]/32) + (img_hsi[:, :, 1]/32) * 8 + (img_hsi[:, :, 0]/32) * 8 * 8).astype(np.int)
        self.f_bin[self.f_bin > 512] = 512
        self.intial_clusters()
        self.findEdges()

    def updatesp(self):
        """Recompute per-superpixel area, centroid and normalized colour histogram."""
        sp_area = np.zeros((1, self.sp_number), dtype=np.int)
        sp_position = np.zeros((self.sp_number, 2), dtype=int)  # [y (0<y<height), x (0<x<width)] per superpixel
        sp_hist = np.zeros((self.sp_number, 513), dtype=float)
        for i in range(self.img.shape[0]):
            for j in range(self.img.shape[1]):
                label = self.labels[i][j]
                sp_area[0, label] += 1
                sp_position[label][0] += i
                sp_position[label][1] += j
                sp_hist[label][int(self.f_bin[i][j])] += 1
        for i in range(self.sp_number):
            area = sp_area[0, i]
            # Averages: centroid in pixel coords, histogram normalized by area.
            sp_position[i, :] = sp_position[i, :]/area
            sp_hist[i, :] = sp_hist[i, :]/area
        self.sp_area = sp_area
        self.sp_position = sp_position
        self.sp_hist = sp_hist

    # Initialize clusters. Connectivity degrees are filled in later by
    # calculateLTY, which must run after findEdges/sortedges.
    def intial_clusters(self):
        self.updatesp()
        self.clusters = []
        for i in range(self.sp_number):
            self.clusters.append(self.make_cluster(i, (self.sp_position[i][0], self.sp_position[i][1]), self.sp_hist[i]))

    def make_cluster(self, index, pos, hist):
        """Factory for Cluster objects (hook for subclassing)."""
        return Cluster(index, pos, hist)

    # Find every boundary edge between adjacent clusters and record which
    # boundary pixels belong to it.
    def findEdges(self):
        self.edges = {}
        height, width, channel = self.img.shape
        # 8-connected neighbourhood offsets.
        dx = [-1, -1, 0, 1, 1, 1, 0, -1]
        dy = [0, -1, -1, -1, 0, 1, 1, 1]
        for j in range(height):
            for k in range(width):
                self.clusters[self.labels[j][k]].pixels.append((j, k))
                for i in range(8):
                    x = k + dx[i]
                    y = j + dy[i]
                    if x > 0 and x < width and y > 0 and y < height:
                        if self.labels[j][k] != self.labels[y][x]:
                            # Edge key is the sorted pair of cluster indices.
                            c1 = min(self.labels[j][k], self.labels[y][x])
                            c2 = max(self.labels[j][k], self.labels[y][x])
                            self.edges.setdefault((c1, c2), []).append((j, k))
        for edge in self.edges:
            # De-duplicate: a pixel can touch the same edge via several neighbours.
            self.edges[edge] = list(set(self.edges[edge]))

    def isEdgeSupportMerge(self, threshold1):
        """Let boundary pixels vote on merging; paint votes into img_sort_point.

        NOTE(review): this method rebinds self.isEdgeSupportMerge to a dict,
        shadowing the method itself -- it can only be called once per Frame.
        """
        self.isEdgeSupportMerge = {}  # per-edge count of points voting for a merge
        img_hsi = self.img_hsi
        img_hsi = np.array(img_hsi, dtype=float)
        self.img_sort_point = self.img.copy()
        height, width, channel = self.img.shape
        # dx = [-1, -1, 0, 1, 1, 1, 0, -1]
        # dy = [0, -1, -1, -1, 0, 1, 1, 1]
        # Sample neighbours two pixels away to reach across the boundary line.
        dx = [-2, -2, 0, 2, 2, 2, 0, -2]
        dy = [0, -2, -2, -2, 0, 2, 2, 2]
        for edge, edge_points in self.edges.items():
            for point in edge_points:
                h, w = point
                self.img_sort_point[h, w] = [0, 0, 255]  # red: boundary pixel
                for i in range(8):
                    x = w + dx[i]
                    y = h + dy[i]
                    if x > 0 and x < width and y > 0 and y < height:
                        # Only compare pixel pairs straddling exactly this edge.
                        if (self.labels[h][w] == edge[0] and self.labels[y][x] == edge[1]) or (self.labels[h][w] == edge[1] and self.labels[y][x] == edge[0]):
                            a = img_hsi[h][w] - img_hsi[y][x]
                            dist = np.linalg.norm(a)
                            # dist = np.sum(np.maximum(a, -a))
                            if dist < threshold1:
                                self.img_sort_point[h, w] = [0, 255, 0]  # green: votes for merge
                                if edge in self.isEdgeSupportMerge:
                                    self.isEdgeSupportMerge[edge] += 1.0
                                else:
                                    self.isEdgeSupportMerge[edge] = 1.0
                                # One vote per boundary pixel.
                                break
        return 0

    def sortedges(self, threshold2, threshold3):
        """Turn raw vote counts into boolean merge decisions per edge.

        threshold2: minimum edge length in pixels (shorter edges are rejected);
        threshold3: minimum fraction of supporting points.
        Also registers every edge on both of its clusters.
        """
        for edge, value in self.isEdgeSupportMerge.items():  # edges with zero supporting points are absent from isEdgeSupportMerge
            length = len(self.edges[edge])
            if len(self.edges[edge]) < threshold2:  # edge too short: reject
                self.isEdgeSupportMerge[edge] = False
            else:
                if value / length > threshold3:
                    self.isEdgeSupportMerge[edge] = True
                else:
                    self.isEdgeSupportMerge[edge] = False
        for edge in self.edges.keys():
            if edge not in self.isEdgeSupportMerge.keys():
                self.isEdgeSupportMerge[edge] = False
            c1, c2 = edge
            self.clusters[c1].edges.append(edge)
            self.clusters[c2].edges.append(edge)

    def calculateLTY(self):
        """Compute every cluster's connectivity degree ('liantongshu')."""
        for edge in self.isEdgeSupportMerge.keys():  # count mergeable edges per cluster
            if self.isEdgeSupportMerge[edge]:
                self.clusters[edge[0]].liantongshu += 1
                self.clusters[edge[1]].liantongshu += 1

    def update(self,threshold1):
        # NOTE(review): findEdges() takes no argument, so calling update()
        # would raise TypeError -- verify before use.
        self.intial_clusters()
        self.findEdges(threshold1)

    def drawEdges(self):
        """Return a copy of the image with all edge pixels painted black."""
        img_countours = self.img.copy()
        for edge, points in self.edges.items():
            for point in points:
                img_countours[point[0], point[1]] = [0, 0, 0]
        return img_countours

    def checkMergePlan(self):
        """Return (and cache as self.mergeplan) the image with mergeable edges
        in green and rejected edges in red."""
        img_merge = self.img.copy()
        for edge, value in self.isEdgeSupportMerge.items():
            if value:
                for point in self.edges[edge]:
                    h = point[0]
                    w = point[1]
                    img_merge[h, w] = [0, 255, 0]
            else:
                for point in self.edges[edge]:
                    h = point[0]
                    w = point[1]
                    img_merge[h, w] = [0, 0, 255]
        self.mergeplan = img_merge
        return img_merge

    def printstatus(self):
        """Show the image and print basic frame statistics (debug helper)."""
        cv2.imshow("picture",self.img)
        print("****** The information of img ******")
        h, w, c = self.img.shape
        print("height:{0} weight:{1} channel:{2}" .format(h, w, c))
        print("Superpixels'number:",self.sp_number)
        # NOTE(review): waitKey returns an int; comparing to 'q' is always False.
        if cv2.waitKey(0) == 'q':
            pass

    def rgb2hsi(self, rgb_img):
        """Convert a BGR uint8 image to an HSI image scaled to [0, 255].

        NOTE(review): the result is written back into a uint8 copy, so the
        float H/S/I values are truncated to integers -- presumably intended
        for the 0-255 histogram binning; confirm.
        """
        height, width, channel = rgb_img.shape
        b, g, r = cv2.split(rgb_img)
        b = b / 255.0
        g = g / 255.0
        r = r / 255.0
        hsi_img = rgb_img.copy()
        # hsi_img = np.array(hsi_img, dtype=np.float)
        for i in range(height):
            for j in range(width):
                # Standard geometric HSI conversion.
                num = r[i][j] - 0.5 * (g[i][j] + b[i][j])
                den = np.sqrt((r[i][j] - g[i][j]) ** 2 + (r[i][j] - b[i][j]) * (g[i][j] - b[i][j]))
                if den == 0:
                    H = 0
                else:
                    theta = float(np.arccos(num / den))
                    if g[i][j] >= b[i][j]:
                        H = theta
                    else:
                        H = 2 * np.pi - theta
                    H = H / (2 * np.pi)
                sum = r[i][j] + g[i][j] + b[i][j]
                if sum == 0:
                    S = 0
                else:
                    S = 1 - 3 * (min(min(r[i][j], g[i][j]), b[i][j])) / sum
                I = (r[i][j] + g[i][j] + b[i][j]) / 3.0
                hsi_img[i][j][0] = H*255
                hsi_img[i][j][1] = S*255
                hsi_img[i][j][2] = I*255
                # normally the range of H is [0,2*pi],S、I is [0,1],now we normalize them to [0,1] together
        return hsi_img

    def DrawContoursAroundSegments(self):
        """Return a copy of the image with superpixel contours painted black."""
        dx = [-1, -1, 0, 1, 1, 1, 0, -1]
        dy = [0, -1, -1, -1, 0, 1, 1, 1]
        height, width, channel = self.img.shape
        img_countours = self.img.copy()
        for j in range(height):
            for k in range(width):
                # NOTE(review): local `np` shadows the numpy module inside this
                # method (harmless here, but fragile).
                np = 0
                for i in range(8):
                    x = k + dx[i]
                    y = j + dy[i]
                    if x > 0 and x < width and y > 0 and y < height:
                        if self.labels[j][k] != self.labels[y][x]:
                            np = np + 1
                if np > 1:
                    img_countours[j, k] = [0, 0, 0]
        return img_countours

    def mergeClusters(self):
        """Group clusters into blocks by flood-filling along mergeable edges."""
        sortedClusters = sorted(self.clusters, key= lambda x:x.liantongshu, reverse=True)
        hasMerged = [-1]*self.sp_number  # block id per cluster, -1 = unassigned
        self.blocks = []
        n_merge = -1
        for cluster in sortedClusters:
            if hasMerged[cluster.index] == -1:  # Seed a new block: grow outward from the highest-connectivity unassigned cluster; the next unassigned top cluster necessarily starts another block.
                n_merge += 1
                hasMerged[cluster.index] = n_merge
                newMerge = Merge(n_merge)
                newMerge.clusters.append(cluster)
                newMerge.edges = newMerge.edges + cluster.edges
                q = queue.Queue()
                q.put(cluster)
                while not q.empty():  # grow outward from the seed (queue => breadth-first; original comment said DFS)
                    expandPoint = q.get()
                    for edge in expandPoint.edges:
                        if self.isEdgeSupportMerge[edge]:  # only cross edges that voted for a merge
                            neighbor = int()
                            c1, c2 = edge
                            if c1 == expandPoint.index:
                                neighbor = c2
                            else:
                                neighbor = c1
                            clusterUnderCheck = self.clusters[neighbor]
                            if hasMerged[neighbor] == -1:  # skip clusters already absorbed elsewhere
                                if self.mergeStrategy2(newMerge, clusterUnderCheck, edge):  # histogram test agrees to merge
                                    hasMerged[neighbor] = newMerge.index  # assign cluster to this block
                                    newMerge.clusters.append(clusterUnderCheck)  # absorb the cluster
                                    for margin in clusterUnderCheck.edges:  # a duplicated edge is now interior: remove it from the block outline
                                        if margin in newMerge.edges:
                                            newMerge.edges.remove(margin)
                                        else:
                                            newMerge.edges.append(margin)
                                    q.put(clusterUnderCheck)  # newly absorbed cluster keeps expanding
                    self.drawMergeProcess(newMerge, expandPoint)
                self.blocks.append(newMerge)
        self.drawMergeResult()

    def drawMergeResult(self):
        """Show the final block outlines in blue (debug display)."""
        draw_img = self.img.copy()
        for block in self.blocks:
            for edge in block.edges:
                for h, w in self.edges[edge]:
                    draw_img[h, w] = [255, 0, 0]
        cv2.imshow("merge result", draw_img)

    def drawMergeProcess(self, newMerge, expandPoint):
        """Interactive debug display of one expansion step.

        Left-click plots the clicked cluster's histogram; right-click clears
        the selection.
        """
        draw_img = self.mergeplan.copy()
        list = self.blocks.copy()
        list.append(newMerge)
        for block in list:
            for edge in block.edges:
                for h, w in self.edges[edge]:
                    draw_img[h, w][0] = 255
        for edge in expandPoint.edges:
            for h, w in self.edges[edge]:
                draw_img[h, w][0] = 255
        self.choosen = []
        def Mousecallback(event, x, y, flags, param):
            color = ['red', 'gold', 'darkgreen', 'blue','gray','darksalmon','olivedrab',
                     'lightseagreen','darkorchid','navy','m','rosybrown','firebrick',
                     'chartreuse','royalblue','plum','silver']
            scale = np.arange(513)
            if event == cv2.EVENT_FLAG_LBUTTON:
                label = self.labels[y][x]
                cluster = self.clusters[label]
                self.choosen.append(cluster)
                print("label:", cluster.index)
                print("edges:", cluster.edges)
                print("liantongshu" ,cluster.liantongshu)
                plt.figure()
                plt.title("label:{}".format(cluster.index))
                for i in range(len(self.choosen)):
                    hsi_hist = self.choosen[i].hsi_hist
                    plt.plot(scale, hsi_hist, color=color[i])
                plt.show()
            if event == cv2.EVENT_FLAG_RBUTTON:
                self.choosen = []
        cv2.namedWindow("merge process")
        cv2.setMouseCallback("merge process", Mousecallback)
        cv2.imshow("merge process", draw_img)
        cv2.waitKey(10)

    # Decide whether the cluster adjacent via *edge* may join the merge.
    def mergeStrategy(self, Merge, cluster, edge):
        """Simple strategy: merge when the dominant histogram bins of the two
        clusters sharing *edge* are within 32 bins of each other."""
        c1, c2 = edge
        neighbor = int()
        if c1 == cluster.index:
            neighbor = c2
        else:
            neighbor = c1
        clusterB = self.clusters[neighbor]
        maxindexB = np.argmax(clusterB.hsi_hist, axis=0)
        maxindexA = np.argmax(cluster.hsi_hist, axis=0)
        if abs(maxindexA - maxindexB) < 32:
            return True
        else:
            return False
        # return True

    def mergeStrategy2(self, Merge, clusterUnderCheck, edge):
        """Symmetric histogram-peak comparison of the two clusters on *edge*.

        Both directions must agree: the normalized histogram difference over
        the dominant-peak neighbourhoods must stay below 0.6.
        """
        c1, c2 = edge
        # NOTE(review): leftover debug print for a specific cluster pair.
        if (c1 == 209 and c2 == 249) or (c1 == 249 and c2 == 209):
            print(c1, c2)
        if c1 == clusterUnderCheck.index:
            expandpoint = c2
        else:
            expandpoint = c1
        expandCluster = self.clusters[expandpoint]
        scale = 16      # half-width of each peak neighbourhood (bins)
        point_num = 5   # max number of histogram peaks to compare
        def cmpTwocluster(scale, point_num, expandCluster, clusterUnderCheck):
            # Pick up to point_num well-separated dominant bins of expandCluster.
            max_index_list = heapq.nlargest(len(expandCluster.hsi_hist), range(len(expandCluster.hsi_hist)), expandCluster.hsi_hist.take)
            point = []
            k = 0
            while point_num:
                mid = max_index_list[k]
                if len(point) == 0:
                    point.append(mid)
                else:
                    # Stop once peaks drop below 10% of the dominant one.
                    if expandCluster.hsi_hist[mid] < 0.1 * expandCluster.hsi_hist[point[0]]:
                        break
                    is_choose = True
                    for cp in point:
                        if abs(mid - cp) < scale:
                            is_choose = False
                            break
                    if is_choose:
                        point.append(mid)
                        point_num -= 1
                k += 1
            # Collect the bin window around every chosen peak.
            choosen_bin = []
            for cp in point:
                start = cp - scale
                end = cp + scale + 1
                if start < 0:
                    start = 0
                if end > 513:
                    end = 513
                choosen_bin = choosen_bin + [n for n in range(start,end)]
            choosen_bin = list(set(choosen_bin))
            den = np.sum(expandCluster.hsi_hist[choosen_bin])
            hist_diff = abs(np.sum(expandCluster.hsi_hist[choosen_bin] - clusterUnderCheck.hsi_hist[choosen_bin]))
            result = hist_diff/den
            if result < 0.6:
                return True
            else:
                return False
        if cmpTwocluster(scale, point_num, expandCluster, clusterUnderCheck) and cmpTwocluster(scale, point_num, clusterUnderCheck, expandCluster):
            return True
        else:
            return False
| {"/test.py": ["/fixslic.py", "/basic_elements.py", "/drawcontours.py"], "/fixslic.py": ["/basic_elements.py"]} |
76,623 | pingyinan/biye | refs/heads/master | /fixslic.py | from basic_elements import Frame
from basic_elements import Cluster
import matplotlib.pyplot as plt
import numpy as np
import cv2
class fixslic_process:
    def __init__(self, frame):
        """Wrap a Frame whose superpixel labels will be iteratively refined."""
        self.frame = frame
    # Refine the segmentation by moving boundary pixels one at a time
    def startfixprocess(self, iter):
        """Run *iter* rounds of per-pixel boundary refinement."""
        self.updatelabels_pixels(iter)
def updatelabels_pixels(self, iter): #按像素
for i in range(iter):
for edge, points in self.frame.edges.items():
c1, c2 = edge
for point in points:
h, w = point
p_bin = self.frame.f_bin[h, w]
if self.frame.sp_hist[c1, p_bin] > self.frame.sp_hist[c2, p_bin]: #需要更新cluster
self.frame.labels[h][w] = c1
else:
self.frame.labels[h][w] = c2
self.frame.updatesp() #更新sp_area、sp_position、sp_hist
self.updateEdges()
img_contour = self.frame.DrawContoursAroundSegments()
cv2.imshow("iter{}".format(i),img_contour)
cv2.waitKey(500)
def updateEdges(self):
frame = self.frame
frame.edges = {}
height, width, channel = frame.img.shape
dx = [-1, -1, 0, 1, 1, 1, 0, -1]
dy = [0, -1, -1, -1, 0, 1, 1, 1]
for j in range(height):
for k in range(width):
for i in range(8):
x = k + dx[i]
y = j + dy[i]
if x > 0 and x < width and y > 0 and y < height:
if frame.labels[j][k] != frame.labels[y][x]:
c1 = min(frame.labels[j][k], frame.labels[y][x])
c2 = max(frame.labels[j][k], frame.labels[y][x])
frame.edges.setdefault((c1, c2), []).append((j, k))
for edge in frame.edges:
frame.edges[edge] = list(set(frame.edges[edge]))
self.frame = frame
#
def updatelabels_process(self, iter, scale):
flag = True
for i in range(iter):
if flag:
self.updatelabels_blocks(0, scale)
flag = False
else:
self.updatelabels_blocks(scale/2, scale)
flag = True
img_contour = self.frame.DrawContoursAroundSegments()
self.choosen = []
def Mousecallback(event, x, y, flags, param):
color = ['red', 'gold', 'darkgreen', 'blue', 'gray', 'darksalmon', 'olivedrab',
'lightseagreen', 'darkorchid', 'navy', 'm', 'rosybrown', 'firebrick',
'chartreuse', 'royalblue', 'plum', 'silver']
scale = np.arange(513)
flag = 0
if event == cv2.EVENT_FLAG_LBUTTON:
flag = 1
self.startpoint = (x, y)
label = self.frame.labels[y][x]
self.choosen.append(label)
plt.figure()
plt.title("label:{}".format(label))
for i in range(len(self.choosen)):
hsi_hist = self.frame.sp_hist[self.choosen[i]]
plt.plot(scale, hsi_hist, color=color[i])
plt.show()
if flag == 1 and event == cv2.EVENT_MOUSEMOVE:
self.currentpoint = (x, y)
imgshow = img_contour.copy()
cv2.rectangle(imgshow, self.startpoint, self.currentpoint, 'r')
cv2.imshow(winname, imgshow)
if event == cv2.EVENT_LBUTTONUP:
flag = 0
self.endpoint = (x, y)
blockhist = np.zeros(513, dtype=np.int)
for h in range(self.startpoint[1], self.endpoint[1]):
for w in range(self.startpoint[0], self.endpoint[0]):
blockhist[self.frame.f_bin[h, w]] += 1
blockhist = blockhist / (
(self.startpoint[1] - self.endpoint[1]) * (self.startpoint[0] - self.endpoint[0]))
plt.figure()
plt.plot(scale, blockhist, 'r')
plt.title("choosen area")
plt.show()
if event == cv2.EVENT_FLAG_RBUTTON:
self.choosen = []
cv2.imshow(winname, img_contour)
winname = "iter{}".format(i)
cv2.namedWindow(winname)
cv2.setMouseCallback(winname, Mousecallback)
cv2.imshow(winname, img_contour)
cv2.waitKey(50)
# if cv2.waitKey(0) == 'q':
# pass
return self.frame
def updatelabels_blocks(self, offset, scale):
checkedBlocks = []
# edges = [(358, 362), (245, 256), (362, 379)]
height, width, channel = self.frame.img.shape
for edge, edge_points in self.frame.edges.items():
# for edge in edges:
c1, c2 = edge
edge_points = self.frame.edges[edge]
for point in edge_points:
h ,w = point
h_index = int((h - offset)/scale)
w_index = int((w - offset)/scale)
if h - offset < 0:
h_index = -1
if w - offset < 0:
w_index = -1
if (h_index, w_index) in checkedBlocks:
continue
else:
checkedBlocks.append((h_index, w_index))
if h_index == -1:
h_start = 0
else:
h_start = offset + scale * h_index
if w_index == -1:
w_start = 0
else:
w_start = offset + scale * w_index
h_end = h_start + scale
w_end = w_start + scale
if h_end > height:
h_end = height
if w_end > width:
w_end = width
c1_hist, c1_exr_hist, c1_ori_hist, c1_pixels, c2_hist, c2_exr_hist, c2_ori_hist, c2_pixels = self.calculate_hist(
c1, c2, h_start, w_start, h_end, w_end)
if self.Int(c1_hist, c1_exr_hist, c2_ori_hist): # support move block
for h, w in c1_pixels:
self.frame.labels[h][w] = c2
elif self.Int(c2_hist, c2_exr_hist, c1_ori_hist):
for h, w in c2_pixels:
self.frame.labels[h][w] = c1
checkedBlocks = []
self.frame.updatesp()
self.updateEdges()
#仅测试245cluster的modify效果
def updatelabels_245(self, iter):
edges = [(358, 362), (245, 256),(362, 379)]
for i in range(iter):
for edge in edges:
c1, c2 = edge
edge_points = self.frame.edges[edge]
for point in edge_points:
self.move_block(c1, c2, point, 8)
self.frame.updatesp() #更新sp_area、sp_position、sp_hist
self.updateEdges()
img_contour = self.frame.DrawContoursAroundSegments()
self.choosen = []
def Mousecallback(event, x, y, flags, param):
color = ['red', 'gold', 'darkgreen', 'blue', 'gray', 'darksalmon', 'olivedrab',
'lightseagreen', 'darkorchid', 'navy', 'm', 'rosybrown', 'firebrick',
'chartreuse', 'royalblue', 'plum', 'silver']
scale = np.arange(513)
flag = 0
if event == cv2.EVENT_FLAG_LBUTTON:
flag = 1
self.startpoint = (x, y)
label = self.frame.labels[y][x]
self.choosen.append(label)
plt.figure()
plt.title("label:{}".format(label))
for i in range(len(self.choosen)):
hsi_hist = self.frame.sp_hist[self.choosen[i]]
plt.plot(scale, hsi_hist, color=color[i])
plt.show()
# avr_hist = np.zeros(513, dtype=np.float)
# plt.figure()
# plt.subplot(1,2,1)
# for i in range(len(self.choosen)):
# hsi_hist = self.choosen[i].hsi_hist
# plt.plot(scale, hsi_hist, color=color[i])
# avr_hist += np.array(hsi_hist)
# #plt.title("label:{}".format(cluster.index))
# plt.subplot(1,2,2)
# avr_hist = avr_hist/len(self.choosen)
# plt.plot(scale, avr_hist, 'r')
# plt.show()
if flag == 1 and event == cv2.EVENT_MOUSEMOVE:
self.currentpoint = (x,y)
imgshow = img_contour.copy()
cv2.rectangle(imgshow, self.startpoint, self.currentpoint, 'r')
cv2.imshow(winname, imgshow)
if event == cv2.EVENT_LBUTTONUP:
flag = 0
self.endpoint = (x, y)
blockhist = np.zeros(513, dtype=np.int)
for h in range(self.startpoint[1], self.endpoint[1]):
for w in range(self.startpoint[0], self.endpoint[0]):
blockhist[self.frame.f_bin[h, w]] += 1
blockhist = blockhist/((self.startpoint[1] - self.endpoint[1])* (self.startpoint[0] - self.endpoint[0]))
plt.figure()
plt.plot(scale, blockhist, 'r')
plt.title("choosen area")
plt.show()
if event == cv2.EVENT_FLAG_RBUTTON:
self.choosen = []
cv2.imshow(winname, img_contour)
winname = "iter{}".format(i)
cv2.namedWindow(winname)
cv2.setMouseCallback(winname, Mousecallback)
cv2.imshow(winname,img_contour)
if cv2.waitKey(0) == 'q':
pass
def move_block(self, c1, c2, mid, s):
upleft_h = int(mid[0] - s / 2)
upleft_w = int(mid[1] - s / 2)
downright_h = upleft_h + s
downright_w = upleft_w + s
height, width, channel = self.frame.img.shape
if upleft_w < 0:
upleft_w = 0
if upleft_h < 0:
upleft_h = 0
if downright_h > height - 1:
downright_h = height - 1
if downright_w > width - 1:
downright_w = width - 1
c1_hist, c1_exr_hist, c1_ori_hist, c1_pixels, c2_hist, c2_exr_hist, c2_ori_hist, c2_pixels = self.calculate_hist(c1, c2, upleft_h, upleft_w, downright_h, downright_w)
if self.Int(c1_hist, c1_exr_hist, c2_ori_hist): # support move block
for h, w in c1_pixels:
self.frame.labels[h][w] = c2
elif self.Int(c2_hist, c2_exr_hist, c1_ori_hist):
for h, w in c2_pixels:
self.frame.labels[h][w] = c1
def calculate_hist(self, c1, c2, upleft_h, upleft_w, downright_h, downright_w):
c1_hist = np.zeros(513, dtype=np.float)
c1_pixels = []
c2_hist = np.zeros(513, dtype=np.float)
c2_pixels = []
for h in range(int(upleft_h), int(downright_h)):
for w in range(int(upleft_w), int(downright_w)):
if self.frame.labels[h][w] == c1:
c1_pixels.append((h, w))
c1_hist[self.frame.f_bin[h, w]] += 1
if self.frame.labels[h][w] == c2:
c2_pixels.append((h, w))
c2_hist[self.frame.f_bin[h, w]] += 1
c1_ori_hist = self.frame.sp_hist[c1]
c1_ori_area = self.frame.sp_area[0, c1]
c1_exr_hist = (c1_ori_hist * c1_ori_area - c1_hist) / (c1_ori_area - len(c1_pixels))
c1_hist = c1_hist / len(c1_pixels)
c2_ori_hist = self.frame.sp_hist[c2]
c2_ori_area = self.frame.sp_area[0, c2]
c2_exr_hist = (c2_ori_hist * c2_ori_area - c2_hist) / (c2_ori_area - len(c2_pixels))
c2_hist = c2_hist / len(c2_pixels)
return c1_hist, c1_exr_hist, c1_ori_hist, c1_pixels, c2_hist, c2_exr_hist, c2_ori_hist, c2_pixels
def Int(self, mvBlock, source, distance):
diff_sor = 0
diff_dist = 0
for i in range(513):
if mvBlock[i] < source[i]:
diff_sor += mvBlock[i]
else:
diff_sor += source[i]
if mvBlock[i] < distance[i]:
diff_dist += mvBlock[i]
else:
diff_dist += distance[i]
if diff_sor < 0.1 and diff_dist > diff_sor:
return True
else:
return False | {"/test.py": ["/fixslic.py", "/basic_elements.py", "/drawcontours.py"], "/fixslic.py": ["/basic_elements.py"]} |
76,624 | ig-ksv/webpy-graphql | refs/heads/master | /setup.py | from setuptools import setup, find_packages
# Runtime dependencies required by the package.
required_packages = [
    'web.py',
    'werkzeug',
    'graphql-server-core>=1.0.dev',
    'graphql-core>=1.0',
    'six',
    'paste'
]

# Read the long description with a context manager so the file handle is
# closed deterministically (the original open('README.rst').read() leaked it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='WebPy-GraphQL',
    version='1.2.1',
    description='Adds GraphQL support to your WebPy application',
    long_description=long_description,
    url='https://github.com/Igor-britecore/webpy-graphql',
    author='Igor Kozintsev',
    author_email='ig.kozintsev@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: Implementation :: PyPy',
        'License :: OSI Approved :: MIT License',
    ],
    keywords='api graphql protocol rest webpy grapene',
    packages=find_packages(exclude=['tests']),
    install_requires=required_packages,
    include_package_data=True,
    zip_safe=False,
    platforms='any',
)
| {"/tests/app.py": ["/webpy_graphql/__init__.py"]} |
76,625 | ig-ksv/webpy-graphql | refs/heads/master | /tests/app.py | import web
import os
import sys
from webpy_graphql import GraphQLView
from schema import Schema
class index(GraphQLView):
    """web.py handler mounted at /graphql; options come from GraphQLMeta."""

    class GraphQLMeta:
        # GraphQL schema served by this endpoint (from tests/schema.py).
        schema = Schema
def create_app(**kwargs):
    """Build the web.py test application.

    Any keyword arguments (e.g. ``graphiql=True``, ``batch=True``,
    ``context=...``) are copied onto ``index.GraphQLMeta`` before the
    application object is constructed.
    """
    # dict.iteritems() only exists on Python 2; items() works on 2 and 3.
    for key, value in kwargs.items():
        setattr(index.GraphQLMeta, key, value)
    urls = ('/graphql', 'index')
    return web.application(urls, globals())
def is_test():
    """Return True iff the TEST_ENV environment variable selects this suite.

    The original returned an implicit None when TEST_ENV was unset; using
    ``os.environ.get`` makes the result an explicit bool in every case
    (still falsy when the variable is absent).
    """
    return os.environ.get('TEST_ENV') == 'webpy-graphql'
if __name__ == "__main__":
    # Manual run: serve /graphql with the GraphiQL UI enabled.
    app = create_app(graphiql=True)
    app.run()
| {"/tests/app.py": ["/webpy_graphql/__init__.py"]} |
76,626 | ig-ksv/webpy-graphql | refs/heads/master | /webpy_graphql/init_subclass_meta.py | from utils import props
from inspect import isclass
class InitSubclassMeta(type):
    """Metaclass that flattens a nested ``GraphQLMeta`` declaration.

    Attributes declared on a class-level ``GraphQLMeta`` (either a class or
    a plain dict) are copied onto the class itself at creation time.
    Raises ``Exception`` when ``GraphQLMeta`` is neither.
    """

    def __init__(self, classname, baseclasses, attrs):
        super(InitSubclassMeta, self).__init__(classname, baseclasses, attrs)
        _Meta = getattr(self, "GraphQLMeta", None)
        _meta_props = {}
        if _Meta:
            if isinstance(_Meta, dict):
                _meta_props = _Meta
            elif isclass(_Meta):
                _meta_props = props(_Meta)
            else:
                raise Exception("Meta have to be either a class or a dict. Received {}".format(_Meta))
        # The original did ``attrs = attrs.update(**_meta_props)``, a double
        # no-op: dict.update returns None, and rebinding the local ``attrs``
        # after the class object exists cannot affect it. Apply the options
        # directly to the class instead.
        for key, value in _meta_props.items():
            setattr(self, key, value)
| {"/tests/app.py": ["/webpy_graphql/__init__.py"]} |
76,627 | ig-ksv/webpy-graphql | refs/heads/master | /tests/schema.py | from graphql.type.definition import GraphQLArgument, GraphQLField, GraphQLNonNull, GraphQLObjectType
from graphql.type.scalars import GraphQLString, GraphQLInt
from graphql.type.schema import GraphQLSchema
def resolve_raises(*_):
    """Resolver that always fails; used to test GraphQL error reporting."""
    raise Exception("Throws!")
# Root query type exercised by the test suite: a plain field, fields with
# arguments (with and without defaults), a context echo, and an
# always-failing resolver.
QueryRootType = GraphQLObjectType(
    name='QueryRoot',
    fields={
        'thrower': GraphQLField(GraphQLNonNull(GraphQLString), resolver=resolve_raises),
        'context': GraphQLField(
            type=GraphQLNonNull(GraphQLString),
            resolver=lambda self, info, **kwargs: info.context),
        'test': GraphQLField(
            type=GraphQLNonNull(GraphQLString),
            resolver=lambda self, info: 'Hello World'
        ),
        'test_args': GraphQLField(
            type=GraphQLNonNull(GraphQLString),
            args={'name': GraphQLArgument(GraphQLString)},
            resolver=lambda self, info, **kwargs: 'Hello {}'.format(kwargs.get("name"))
        ),
        'test_def_args': GraphQLField(
            type=GraphQLString,
            args={'name': GraphQLArgument(GraphQLString),},
            resolver=lambda self, info, name="World": 'Hello {}'.format(name)
        )
    }
)

# Mutation root: writeTest simply resolves back to the query root type.
MutationRootType = GraphQLObjectType(
    name='MutationRoot',
    fields={
        'writeTest': GraphQLField(
            type=QueryRootType,
            resolver=lambda *_: QueryRootType
        )
    }
)

# Schema object consumed by tests/app.py.
Schema = GraphQLSchema(QueryRootType, MutationRootType)
| {"/tests/app.py": ["/webpy_graphql/__init__.py"]} |
76,628 | ig-ksv/webpy-graphql | refs/heads/master | /tests/test_graphqlview.py | import json
import web
import unittest
from functools import wraps
from paste.fixture import TestApp
from app import create_app
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
def j(**kwargs):
    """Serialize keyword arguments to a JSON object string.

    Named functions instead of the original name-bound lambdas (PEP 8 E731);
    call sites are unchanged.
    """
    return json.dumps(kwargs)


def jl(**kwargs):
    """Serialize keyword arguments to a single-element JSON list string."""
    return json.dumps([kwargs])
def _set_params(**params):
    """Decorator factory: rebuild the app under test with the given
    GraphQLMeta overrides before running the wrapped test method."""
    def decorator(func):
        @wraps(func)
        def wrapper(self):
            # Replace the fixture built in setUp with a reconfigured app.
            app = create_app(**params)
            self.middleware = []
            self.testApp = TestApp(app.wsgifunc(*self.middleware))
            func(self)
        return wrapper
    return decorator
class WebPyGraphqlTests(unittest.TestCase):
    """End-to-end tests for the /graphql endpoint via paste's TestApp:
    GET/POST queries, variables, operation names, batching, error
    reporting, pretty-printing and GraphiQL rendering."""

    def setUp(self):
        # Default app (no GraphQLMeta overrides) wrapped as a WSGI test client.
        app = create_app()
        self.middleware = []
        self.testApp = TestApp(app.wsgifunc(*self.middleware))

    def tearDown(self):
        # Reset GraphQLMeta options mutated by @_set_params-decorated tests.
        create_app(batch=None,
                   graphiql=False,
                   graphiql_temp_title=None,
                   context=None)

    def test_main_page(self):
        r = self.testApp.get('/graphql', params={'query': '{test}'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test":"Hello World"}}')

    def test_with_operation_name(self):
        r = self.testApp.get('/graphql',
                             params={'query': '''
                             query helloYou { test_args(name: "You"), ...shared }
                             query helloWorld { test_args(name: "World"), ...shared }
                             query helloDolly { test_args(name: "Dolly"), ...shared }
                             fragment shared on QueryRoot {
                               shared: test_args(name: "Everyone")
                             }
                             ''',
                                     'operationName': 'helloWorld'
                                     })
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('data'),
                         {"test_args": "Hello World", "shared": "Hello Everyone"})

    def test_validation_errors(self):
        r = self.testApp.get('/graphql',
                             params={'query': '{ test, unknownOne, unknownTwo }'})
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('errors'),
                         [
                             {
                                 u'message': u'Cannot query field "unknownOne" on type "QueryRoot".',
                                 u'locations': [{u'column': 9, u'line': 1}]},
                             {
                                 u'message': u'Cannot query field "unknownTwo" on type "QueryRoot".',
                                 u'locations': [{u'column': 21, u'line': 1}]}
                         ])

    def test_with_variable_values(self):
        r = self.testApp.get('/graphql',
                             params={'query': '''query Test($name: String)
                                     {test_args(name: $name)}''',
                                     'variables': json.dumps({"name": "John"})})
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('data'),
                         {"test_args": "Hello John"})

    def test_with_missing_variable_values(self):
        # No 'variables' param: $name resolves to None.
        r = self.testApp.get('/graphql',
                             params={'query': '''query Test($name: String)
                                     {test_args(name: $name)}'''})
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('data'),
                         {"test_args": "Hello None"})

    def test_with_default_variable_values(self):
        # test_def_args declares a resolver default of "World".
        r = self.testApp.get('/graphql',
                             params={'query': '''query Test($name: String)
                                     {test_def_args (name: $name)}'''
                                     })
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('data'),
                         {"test_def_args": "Hello World"})

    def test_with_default_without_variable_values(self):
        r = self.testApp.get('/graphql',
                             params={'query': 'query{test_def_args}'})
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('data'),
                         {"test_def_args": "Hello World"})

    def test_when_missing_operation_name(self):
        r = self.testApp.get('/graphql',
                             params={'query':
                                     '''
                                     query TestQuery { test }
                                     mutation TestMutation { writeTest { test } }
                                     '''
                                     })
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('errors')[0].get('message'),
                         'Must provide operation name if query contains multiple operations.')

    def test_errors_when_sending_a_mutation_via_get(self):
        r = self.testApp.get('/graphql',
                             params={'query':
                                     '''
                                     mutation TestMutation { writeTest { test } }
                                     '''
                                     })
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('errors')[0].get('message'),
                         'Can only perform a mutation operation from a POST request.')

    def test_errors_when_selecting_a_mutation_within_a_get(self):
        r = self.testApp.get('/graphql',
                             params={'query':
                                     '''
                                     query TestQuery { test }
                                     mutation TestMutation { writeTest { test } }
                                     ''',
                                     'operationName': 'TestMutation'
                                     })
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('errors')[0].get('message'),
                         'Can only perform a mutation operation from a POST request.')

    def test_allows_mutation_to_exist_within_a_get(self):
        # A mutation may appear in the document as long as a query is selected.
        r = self.testApp.get('/graphql',
                             params={'query':
                                     '''
                                     query TestQuery { test }
                                     mutation TestMutation { writeTest { test } }
                                     ''',
                                     'operationName': 'TestQuery'
                                     })
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('data'),
                         {'test': "Hello World"})

    def test_allows_post_with_json_encoding(self):
        r = self.testApp.post('/graphql',
                              params=j(query='{test}'),
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test":"Hello World"}}')

    def test_allows_sending_a_mutation_via_post(self):
        r = self.testApp.post('/graphql',
                              params=j(query='mutation TestMutation { writeTest { test } }'),
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"writeTest":{"test":"Hello World"}}}')

    def test_allows_post_with_url_encoding(self):
        r = self.testApp.post('/graphql',
                              params=urlencode(dict(query='{test}')),
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test":"Hello World"}}')

    def test_supports_post_with_string_variables(self):
        r = self.testApp.post('/graphql',
                              params=j(query='''query helloWorld($name: String)
                                      { test_args (name: $name) }''',
                                       variables={'name': 'John'}),
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test_args":"Hello John"}}')

    def test_supports_post_json_query_with_json_variables(self):
        r = self.testApp.post('/graphql',
                              params=j(query='''query helloWorld($name: String)
                                      { test_args (name: $name) }''',
                                       variables={'name': 'John'}),
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test_args":"Hello John"}}')

    def test_supports_post_url_encoded_query_with_string_variables(self):
        r = self.testApp.post('/graphql',
                              urlencode(dict(query='query helloWorld($name: String){ test_args(name: $name) }',
                                             variables=j(name="John"))),
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test_args":"Hello John"}}')

    def test_supports_post_json_query_with_get_variable_values(self):
        # Variables may come from the query string while the query is POSTed.
        r = self.testApp.post('/graphql?variables={"name": "John"}',
                              params=j(query='query helloWorld($name: String){ test_args(name: $name) }'),
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test_args":"Hello John"}}')

    def test_supports_post_url_encoded_with_get_variable_values(self):
        r = self.testApp.post('/graphql?variables={"name": "John"}',
                              urlencode(dict(query='query helloWorld($name: String){ test_args(name: $name) }')),
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test_args":"Hello John"}}')

    def test_supports_post_raw_with_get_variable_values(self):
        r = self.testApp.post('/graphql?variables={"name": "John"}',
                              params='query=query helloWorld($name: String){ test_args(name: $name) }',
                              headers={'Content-Type': 'application/graphql'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test_args":"Hello John"}}')

    def test_allows_post_with_operation_name(self):
        r = self.testApp.post('/graphql',
                              params=j(query='''
                                      query helloYou { test_args(name: "You"), ...shared }
                                      query helloWorld { test_args(name: "World"), ...shared }
                                      query helloDolly { test_args(name: "Dolly"), ...shared }
                                      fragment shared on QueryRoot {
                                        shared: test_args(name: "Everyone")
                                      }
                                      ''',
                                       operationName='helloWorld'),
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body,
                         '{"data":{"test_args":"Hello World","shared":"Hello Everyone"}}')

    def test_allows_post_with_get_operation_name(self):
        r = self.testApp.post('/graphql?operationName=helloWorld',
                              params=j(query='''
                                      query helloYou { test_args(name: "You"), ...shared }
                                      query helloWorld { test_args(name: "World"), ...shared }
                                      query helloDolly { test_args(name: "Dolly"), ...shared }
                                      fragment shared on QueryRoot {
                                        shared: test_args(name: "Everyone")
                                      }
                                      '''),
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body,
                         '{"data":{"test_args":"Hello World","shared":"Hello Everyone"}}')

    def test_not_pretty_by_default(self):
        app = create_app(pretty=False)
        self.middleware = []
        self.testApp = TestApp(app.wsgifunc(*self.middleware))
        r = self.testApp.get('/graphql', params={'query': 'query{test}'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"test":"Hello World"}}')

    def test_supports_pretty_printing_by_test(self):
        app = create_app(pretty=True)
        self.middleware = []
        self.testApp = TestApp(app.wsgifunc(*self.middleware))
        r = self.testApp.get('/graphql', params={'query': 'query{test}'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{\n  "data": {\n    "test": "Hello World"\n  }\n}')

    def test_handles_field_errors_caught_by_graphql(self):
        r = self.testApp.get('/graphql', params={'query': '{thrower}'})
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('errors')[0].get('message'),
                         "Throws!")

    def test_handles_syntax_errors_caught_by_graphql(self):
        r = self.testApp.get('/graphql', params={'query': 'syntaxerror'})
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body),
                         {'errors': [{'locations': [{'column': 1, 'line': 1}],
                                      'message': 'Syntax Error GraphQL request (1:1) '
                                      'Unexpected Name "syntaxerror"\n\n1: syntaxerror\n   ^\n'}]
                          })

    def test_handles_errors_caused_by_a_lack_of_query(self):
        r = self.testApp.get('/graphql')
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('errors')[0].get('message'),
                         'Must provide query string.')

    def test_handles_batch_correctly_if_is_disabled(self):
        # A non-JSON body must be rejected when batching is off (the default).
        r = self.testApp.post('/graphql',
                              params={'query': "{}"},
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('errors')[0].get('message'),
                         'POST body sent invalid JSON.')

    def test_handles_plain_post_text(self):
        # text/plain bodies are not parsed as a query.
        r = self.testApp.post('/graphql?variables={"name": "John"}',
                              params='query helloWorld($name: String){ test_args(name: $name) }',
                              headers={'Content-Type': 'text/plain'})
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('errors')[0].get('message'),
                         'Must provide query string.')

    def test_handles_poorly_formed_variables(self):
        r = self.testApp.get('/graphql',
                             params={'query': 'query helloWorld($name: String){ test_args(name: $name) }',
                                     'variables': "name: John"})
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body).get('errors')[0].get('message'),
                         "Variables are invalid JSON.")

    def test_handles_unsupported_http_methods(self):
        # need to improve
        r = self.testApp.put('/graphql',
                             params={'query': 'query{test}'}, expect_errors=True)
        self.assertEqual(r.status, 405)
        self.assertEqual(r.header_dict.get('allow'), 'GET, POST')

    @_set_params(context="CUSTOM CONTEXT")
    def test_supports_custom_context(self):
        r = self.testApp.get('/graphql', params={'query': 'query{context}'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.body, '{"data":{"context":"CUSTOM CONTEXT"}}')

    def test_post_multipart_data(self):
        query = 'mutation TestMutation { writeTest { test } }'
        r = self.testApp.post('/graphql',
                              params={'query': query},
                              upload_files=[("Test", "text1.txt", "Guido")])
        self.assertEqual(r.status, 200)
        self.assertEqual(json.loads(r.body),
                         {u'data': {u'writeTest': {u'test': u'Hello World'}}})

    @_set_params(batch=True)
    def test_batch_allows_post_with_json_encoding(self):
        r = self.testApp.post('/graphql',
                              params=jl(query='{test}'),
                              headers={'Content-Type': 'application/json'})
        body = json.loads(r.body)[0]
        self.assertEqual(r.status, 200)
        self.assertEqual(body.get('id'), None)
        self.assertEqual(body.get('payload'), {"data": {"test": "Hello World"}})

    @_set_params(batch=True)
    def test_batch_supports_post_json_query_with_json_variables(self):
        r = self.testApp.post('/graphql',
                              params=jl(
                                  # id=1,
                                  query='query helloWorld($name: String){ test_args(name: $name) }',
                                  variables=j(name="John")),
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        body = json.loads(r.body)[0]
        self.assertEqual(body.get('id'), None)  # id=1
        self.assertEqual(body.get('payload'), {"data": {"test_args": "Hello John"}})

    @_set_params(batch=True)
    def test_batch_allows_post_with_operation_name(self):
        r = self.testApp.post('/graphql',
                              params=jl(
                                  # id=1
                                  query='''
                                  query helloYou { test_args(name: "You"), ...shared }
                                  query helloWorld { test_args(name: "World"), ...shared }
                                  query helloDolly { test_args(name: "Dolly"), ...shared }
                                  fragment shared on QueryRoot {
                                    shared: test_args(name: "Everyone")
                                  }
                                  ''',
                                  operationName='helloWorld'),
                              headers={'Content-Type': 'application/json'})
        self.assertEqual(r.status, 200)
        body = json.loads(r.body)[0]
        self.assertEqual(body.get('id'), None)  # id=1
        self.assertEqual(body.get('payload'),
                         {"data": {"test_args": "Hello World", "shared": "Hello Everyone"}})

    @_set_params(graphiql=True, graphiql_temp_title="TestTitle")
    def test_template_title(self):
        # Accept: text/html triggers the GraphiQL page with the custom title.
        r = self.testApp.get('/graphql',
                             params={'query': 'query { test }'},
                             headers={'Accept': 'text/html'})
        self.assertEqual(r.status, 200)
        self.assertIn("<title>TestTitle</title>", r.body)
if __name__ == '__main__':
    # Run the suite directly: python test_graphqlview.py
    unittest.main()
| {"/tests/app.py": ["/webpy_graphql/__init__.py"]} |
class _OldClass:
    pass


class _NewClass(object):
    pass


# Every attribute name a bare (old- or new-style) class carries by default;
# anything outside this set was declared by the user.
_all_vars = set(dir(_OldClass) + dir(_NewClass))


def props(x):
    """Return the attributes explicitly declared on *x* as a dict,
    filtering out the default class machinery."""
    declared = {}
    for name, value in vars(x).items():
        if name not in _all_vars:
            declared[name] = value
    return declared
| {"/tests/app.py": ["/webpy_graphql/__init__.py"]} |
76,630 | ig-ksv/webpy-graphql | refs/heads/master | /webpy_graphql/__init__.py | from .graphqlview import GraphQLView
__all__ = ['GraphQLView']
| {"/tests/app.py": ["/webpy_graphql/__init__.py"]} |
76,631 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/model/customers.py | from flask import Flask
from flask_mongoengine import MongoEngine
from mongoengine import Document, StringField, IntField, EmbeddedDocument, EmbeddedDocumentField
from dataclasses import dataclass
from .. import db
from flask_mongoengine import BaseQuerySet
class Customer(db.Document):
    """Customer model storing contact, address and payment-type details."""

    meta = {'collection': 'customers', 'queryset_class': BaseQuerySet}

    id = db.IntField(primary_key=True)
    firstname = db.StringField(max_length=255, required=True)
    lastname = db.StringField(max_length=255, required=True)
    profile = db.StringField(max_length=255, required=True)
    email = db.StringField(max_length=255, required=True)
    phone = db.StringField(max_length=255, required=True)
    address_line1 = db.StringField(max_length=255, required=True)
    address_line2 = db.StringField(max_length=255, required=True)
    city = db.StringField(max_length=255, required=True)
    country = db.StringField(max_length=255, required=True)
    # References a payment-type record; optional.
    paymenttype_id = db.IntField()

    def __repr__(self):
        # The model has no ``username`` field, so the original
        # ``self.username`` raised AttributeError whenever repr() was taken.
        return "<Customer '{} {}'>".format(self.firstname, self.lastname)
| {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,632 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/routes.py | #All Routes are defined here
from flask_cors import CORS, cross_origin
from app.main.controller.products import ProductController
from app.main.controller.user import UserController
import app
from app.main.controller.auth import AuthenticationController
from flask import request
from flask_jwt_extended import jwt_required
#Test route without any connections
def test():
    """Health-check route with no external dependencies.

    NOTE(review): the returned marker string is not valid JSON (unquoted key,
    single quotes) — confirm clients treat it as plain text.
    """
    return "{testroutesuccess:'Test Route Success!'}"
# Shared CORS policy for the JSON API routes: only the local React dev
# server may call them, with the listed methods and headers.
api_v2_cors_config = {
    "origins": [
        'http://localhost:3000'  # React
        # React
    ],
    "methods": ["OPTIONS", "GET", "POST"],
    "allow_headers": ["Authorization", "Content-Type"]
}
#route returning Products list
@cross_origin(**api_v2_cors_config)
def getProductsList():
    """Return the full product catalogue via the product controller."""
    controller = ProductController()
    return controller.getAllProducts()
#route for products list filtered by product types
@cross_origin(**api_v2_cors_config)
def getProductsListByType(typeid):
    """Return the products belonging to the given product type."""
    controller = ProductController()
    return controller.getAllProductsByType(typeid)
@cross_origin(**api_v2_cors_config)
def getProductTypesList():
    """Return all product types via the product controller.

    The original carried unreachable copy-paste leftovers after the return
    (a UserController.addUser call and a second return referencing a method
    UserController does not expose); they have been removed.
    """
    productC = ProductController()
    return productC.getProductTypes()
@cross_origin()
def login():
    """Authenticate a user and issue a JWT access token.

    Expects a JSON body with ``username`` and ``password``; returns a dict
    with the verification result, the access token and the username.
    """
    auth = AuthenticationController()
    userinfo = request.get_json()
    # Do not print/log the request body here: it contains the plaintext
    # password (the original debug print leaked it to stdout).
    authResult = auth.verify_password(userinfo['username'], userinfo['password'])
    accesstoken = auth.create_token(userinfo['username'])
    response = {"AuthResult": authResult, "accessToken": accesstoken, "user": userinfo['username']}
    return response
@cross_origin(**api_v2_cors_config)
#@jwt_required()
def addUser():
    """Create a new user from a JSON body of username/password/email."""
    userController = UserController()
    userinfo = request.get_json()
    # Do not print/log userinfo: it contains the plaintext password
    # (the original debug prints leaked it to stdout).
    userController.addUser(username=userinfo['username'], password=userinfo['password'], email=userinfo['email'])
    return {"Success": "1"}
@cross_origin(**api_v2_cors_config)
def addProduct():
    """Create a product from the posted JSON payload and return the
    controller's response."""
    controller = ProductController()
    print(request.is_json)
    payload = request.get_json()
    print(payload)
    outcome = controller.addProduct(
        id=payload['id'],
        name=payload['name'],
        description=payload['description'],
        producttype_id=payload['producttype_id'],
        imagename=payload['imagename'],
    )
    return outcome
| {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,633 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/manage.py | import os
import unittest
from app.main.model import user
from app.main.model import producttypes
from app.main.model import products
from app.main.model import paymentmethods
from app.main.model import customers
from flask_jwt_extended import JWTManager
from flask_script import Manager
from app.main import create_app, db
from flask_cors import CORS, cross_origin
#importing additional routes
from app.main import routes
app = create_app(os.getenv('BOILERPLATE_ENV') or 'dev')
app.app_context().push()
jwt=JWTManager(app)
#CORS(app,resources={ r'/*': {'origins': '*'}}, supports_credentials=True)
# Set CORS options on app configuration
CORS(app, resources={ r'/*': {'origins': [
'http://localhost:3000' # React
# React
]}}, supports_credentials=True)
app.config['CORS_HEADERS'] = 'Content-Type'
#Adding additional routes
app.add_url_rule('/api/test', view_func=routes.test)
app.add_url_rule('/api/producttypes', view_func=routes.getProductTypesList)
app.add_url_rule('/api/product', view_func=routes.getProductsList)
app.add_url_rule('/api/product', view_func=routes.addProduct,methods=['POST'])
app.add_url_rule('/api/product/type/<typeid>', view_func=routes.getProductsListByType)
app.add_url_rule('/api/user', view_func=routes.addUser,methods=['POST'])
app.add_url_rule('/api/auth', view_func=routes.login,methods=['POST'])
#@app.after_request
#def after_request(response):
# response.headers.add('Access-Control-Allow-Origin', 'http://localhost:3000')
# response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
# response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
# response.headers.add('Access-Control-Allow-Credentials', 'true')
# return response
manager = Manager(app)
#migrate = Migrate(app, db)
@manager.command
def run():
    # Launch the Flask development server, listening on all interfaces.
    app.run(debug=True, host='0.0.0.0')
#from app.main import create_app
#A default route here
@app.route('/')
def home():
    """Default landing route for the API root."""
    return "RetailStore API!"
if __name__ == '__main__':
    # NOTE(review): this guard is duplicated at the bottom of the file, where
    # manager.run() is called. app.run() blocks, so that later guard can never
    # execute when this one fires first — confirm which entry point is intended.
    app.run(debug=True, host='0.0.0.0',port=5000)
@manager.command
def test():
    """Discover and run the unit test suite under app/test.

    Returns 0 when every test passed, 1 otherwise.
    """
    suite = unittest.TestLoader().discover('app/test', pattern='test*')
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    return 0 if outcome.wasSuccessful() else 1
if __name__ == '__main__':
manager.run() | {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,634 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/model/producttypes.py | from flask import Flask
from mongoengine import Document, StringField, IntField, EmbeddedDocument, EmbeddedDocumentField
from flask_mongoengine import BaseQuerySet
from dataclasses import dataclass
from .. import db
from dataclasses import dataclass
import json
@dataclass
class ProductType(db.Document):
    """Product Type Model for storing type related details.

    Persisted to the 'product_types' Mongo collection; the db.*Field
    descriptors below define the stored schema.
    """
    # Dataclass annotations (the actual storage fields are the descriptors below).
    id: int
    name: str
    code: str
    meta = {'collection': 'product_types', 'queryset_class': BaseQuerySet}
    id = db.IntField(primary_key=True)
    name = db.StringField(max_length=255, required=True)
    description = db.StringField(max_length=255, required=True)
    code = db.StringField(max_length=255, required=True)

    def __init__(self, id, name, description, code, *args, **kwargs):
        # NOTE(review): super(Document, self) skips Document.__init__ in the
        # MRO; this mirrors the other models in this package — confirm intended.
        super(Document, self).__init__(*args, **kwargs)
        self.id = id
        self.name = name
        self.description = description
        self.code = code

    def __repr__(self):
        # Fixed: previously formatted self.username, an attribute this model
        # does not define (AttributeError on repr).
        return "<ProductType '{}'>".format(self.name)

    def toJson(self):
        """Serialize this document's attribute dict to a JSON string."""
        return json.dumps(self, default=lambda o: o.__dict__)
| {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,635 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/model/user.py | from flask import Flask
from mongoengine import Document, StringField, IntField, EmbeddedDocument, EmbeddedDocumentField, ObjectIdField
from flask_mongoengine import BaseQuerySet
from .. import db, bcryptInst
from dataclasses import dataclass
from bson.objectid import ObjectId
@dataclass()
class User(db.Document):
    """ User Model for storing user related details """
    # Dataclass annotations; the stored schema is defined by the db.*Field
    # descriptors below. Persisted to the 'users' collection, keyed by email.
    _id: int
    email: str
    username: str
    meta = {'collection': 'users', 'queryset_class': BaseQuerySet}
    email = db.StringField(max_length=255, required=True, primary_key=True)
    username = db.StringField(max_length=255, required=True)
    description = db.StringField(max_length=255)
    code = db.StringField(max_length=255)
    public_id = db.StringField(max_length=255, required=True)
    password_hash = db.StringField(max_length=255, required=True)

    def __init__(self, email=None, username=None, password=None, *args, **kwargs):
        # Build the document; the clear-text password is bcrypt-hashed and only
        # the hash is stored.
        super(db.Document, self).__init__(*args, **kwargs)
        self.email = email
        self.username = username
        # public_id simply mirrors the username — TODO confirm this is intended.
        self.public_id = username
        pwhash = bcryptInst.generate_password_hash(password).decode('utf-8')
        self.password_hash = pwhash

    def check_password(self, password):
        # Compare a clear-text candidate against the stored bcrypt hash.
        return bcryptInst.check_password_hash(self.password_hash, password)

    def __repr__(self):
        return "<User '{}'>".format(self.username)
| {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,636 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/controller/user.py | from app.main.model.user import User
from app.main import db
from datetime import date
from flask import request
from flask import jsonify
import json
from bson.objectid import ObjectId
class UserController:
    """Thin controller wrapping user creation against the Mongo backend."""

    def __init__(self):
        pass

    def addUser(self, username, password, email):
        """Persist a new User document and return it as a 201 JSON response."""
        user = User(username=username, email=email, password=password)
        user.save()
        response = jsonify(user)
        response.status_code = 201
        return response
| {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,637 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/controller/auth.py | from app.main.model.user import User
from flask import jsonify
import json
import jwt
import datetime
import app
from flask_jwt_extended import create_access_token
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import JWTManager
class AuthenticationController:
    """Password verification and JWT issuance for API clients."""

    def __init__(self):
        pass

    def verify_password(self,username, password):
        # Look up the user by username; unknown users fail closed (False).
        user = User.objects(username=username).first()
        if user is None:
            return False
        return user.check_password(password)

    def create_token(self,username):
        # Delegate token creation to flask-jwt-extended.
        return create_access_token(username)

    def encode_auth_token(self, user_id):
        """
        Generates the Auth Token
        :return: string
        """
        try:
            payload = {
                # Token lifetime: 5 seconds from issue — TODO confirm such a
                # short expiry is intentional.
                'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5),
                'iat': datetime.datetime.utcnow(),
                'sub': user_id
            }
            # NOTE(review): .decode('UTF-8') assumes jwt.encode returns bytes
            # (PyJWT 1.x); PyJWT 2.x returns str — verify the pinned version.
            return jwt.encode(
                payload,
                app.main.config.get('SECRET_KEY'),
                algorithm='HS256'
            ).decode('UTF-8')
        except Exception as e:
            # NOTE(review): returns the exception object instead of raising, so
            # callers cannot distinguish a token from an error — confirm.
            return e
76,638 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/service/__init__.py | from flask import Blueprint
bp = Blueprint('api', __name__, url_prefix='/api/v2')
from app.main.service import products, auth, users, errors | {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,639 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/model/paymentmethods.py | from flask import Flask
from mongoengine import Document, StringField, IntField, EmbeddedDocument, EmbeddedDocumentField
from flask_mongoengine import BaseQuerySet
from .. import db
class PaymentType(db.Document):
    """Payment method model, persisted to the 'payment_methods' collection."""
    meta = {'collection': 'payment_methods', 'queryset_class': BaseQuerySet}
    id = db.IntField(primary_key=True)
    name = db.StringField(max_length=255, required=True)
    description = db.StringField(max_length=255, required=True)
    code = db.IntField()

    def __repr__(self):
        # Fixed copy-paste bug: repr previously labelled this class
        # 'ProductType' and read a non-existent 'username' attribute.
        return "<PaymentType '{}'>".format(self.name)
| {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,640 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/service/products.py | from flask import jsonify, request, url_for, g, abort
from app.main import db
from app.main.model.products import Product
from app.main.service import bp
from app.main.service.auth import token_auth
from app.main.service.errors import bad_request
@bp.route('/products/', methods=['GET'])
#@token_auth.login_required
def get_products():
    # Stub endpoint: returns a hard-coded pseudo-JSON product list
    # (keys unquoted — not valid JSON; kept as-is).
    return "{ testproducts:['book1','Food1']}"
| {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,641 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/__init__.py |
from app.main import routes | {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,642 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/model/products.py | from flask import Flask
from flask_mongoengine import BaseQuerySet
from mongoengine import Document, StringField, IntField, EmbeddedDocument, EmbeddedDocumentField
from dataclasses import dataclass
from .. import db
import json
@dataclass
class Product(db.Document):
    """ Product Model for storing Product related details """
    # Dataclass annotations; storage schema is defined by the db.*Field
    # descriptors below. Persisted to the 'products' collection.
    id:int
    name:str
    description:str
    imagename:str
    meta = {'collection': 'products', 'queryset_class': BaseQuerySet}
    id = db.IntField(primary_key=True)
    name = db.StringField(max_length=255, required=True)
    description = db.StringField(max_length=255, required=True)
    producttype_id = db.IntField()
    imagename =db.StringField(max_length=255, required=True)
    # Legacy SQLAlchemy mapping, kept commented out for reference:
    #__tablename__ = "products"
    #id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    #name = db.Column(db.String(255), unique=True, nullable=False)
    #description = db.Column(db.String(255), unique=True, nullable=False)
    #producttype_id = db.Column(db.Integer, db.ForeignKey('product_types.id'))
    #producttype = db.relationship('ProductType', backref='Product', lazy=True)
    #imagename = db.Column(db.String(255), unique=True, nullable=True)

    def __init__(self, id, name, description, producttypeid, imagename, *args, **kwargs):
        # NOTE(review): super(Document, self) skips Document.__init__ in the
        # MRO; mirrors the other models in this package — confirm intended.
        super(Document, self).__init__(*args, **kwargs)
        self.id = id
        self.name = name
        self.description = description
        self.producttype_id = producttypeid
        self.imagename = imagename

    def __repr__(self):
        return "<Product '{}'>".format(self.name)

    def to_json(self):
        # Hand-built pseudo-JSON (keys unquoted) — kept as-is.
        return "{ id:" + str(self.id) + ", name:" + self.name + ",description:" + self.description + "}"

    def toJson(self):
        # Proper JSON serialization of the attribute dict.
        return json.dumps(self, default=lambda o: o.__dict__)
| {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,643 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/__init__.py | from flask import Flask
from flask_mongoengine import MongoEngine
from flask_bcrypt import Bcrypt
from .config import config_by_name
#We will be using the application factory pattern for creating our
#Flask object.
from flask_bcrypt import Bcrypt
from flask_cors import CORS, cross_origin
db= MongoEngine()
bcryptInst = Bcrypt()
def create_app(config_name):
    """Application factory: build and configure the Flask app for *config_name*."""
    app = Flask(__name__)
    app.config.from_object(config_by_name[config_name])
    # Allow cross-origin requests from any origin.
    CORS(app)
    app.config['CORS_HEADERS'] = 'Content-Type'
    #app.config['MONGOALCHEMY_DATABASE'] = 'productinfo'
    #app.config['MONGOALCHEMY_SERVER'] = 'localhost'
    #app.config['MONGOALCHEMY_PORT'] = '27017'
    # NOTE(review): hard-coded local Mongo settings override whatever the
    # selected config object provides — confirm for non-dev environments.
    app.config['MONGODB_SETTINGS'] = {
        'db': 'retailstore',
        'host': 'localhost',
        'port': 27017
    }
    #db = MongoEngine(app)
    # Bind the module-level extensions to this app instance.
    bcryptInst.init_app(app)
    db.init_app(app)
    return app
76,644 | Abh4git/RetailStorePythonMongoService | refs/heads/main | /src/app/main/controller/products.py | from app.main.model.products import Product
from app.main.model.producttypes import ProductType
from app.main import db
from flask import jsonify
import json
class ProductController:
    """Controller for product queries and creation against MongoEngine."""

    def __init__(self):
        pass

    def obj_dict(self, obj):
        # Helper intended as a json 'default' serialization hook.
        return obj.to_json()

    def getAllProducts(self):
        """Return every product as a JSON response."""
        products = Product.objects
        return jsonify({"products": products})

    def getAllProductsByType(self, producttypeid):
        """Return the products whose producttype_id matches *producttypeid*."""
        products = Product.objects(producttype_id=producttypeid)
        return jsonify({"products": products})

    def addProduct(self, id, name, description, producttype_id, imagename):
        """Create and persist a Product; respond 201 with the new document.

        Fixed two defects: Product() was called with no arguments even though
        its __init__ requires five positional parameters (TypeError), and
        persistence used SQLAlchemy-style db.session.add/commit, which
        MongoEngine does not provide. Documents are now saved with .save(),
        consistent with UserController.addUser.
        """
        product = Product(id, name, description, producttype_id, imagename)
        product.save()
        response = jsonify(product)
        response.status_code = 201
        return response

    def getProductTypes(self):
        """Return all product types as a JSON response."""
        producttypes = ProductType.objects()
        return jsonify({"producttypes": producttypes})
| {"/src/app/main/model/customers.py": ["/src/app/main/__init__.py"], "/src/app/main/model/producttypes.py": ["/src/app/main/__init__.py"], "/src/app/main/model/user.py": ["/src/app/main/__init__.py"], "/src/app/main/model/paymentmethods.py": ["/src/app/main/__init__.py"], "/src/app/main/model/products.py": ["/src/app/main/__init__.py"]} |
76,645 | daisukekobayashi/pocpy | refs/heads/master | /pocpy/poc.py | import sys
import numpy as np
from numpy import pi, sin, cos
from scipy.optimize import leastsq
import scipy, scipy.fftpack
import six
import cv2
if cv2.__version__[0] == "2":
import cv2.cv as cv
from pocpy.logpolar_opencv2 import *
else:
from pocpy.logpolar_opencv3 import *
def zero_padding(src, dstshape, pos=(0, 0)):
    """Embed *src* in a zero array of shape *dstshape*, top-left corner at *pos*."""
    row, col = pos
    rows, cols = src.shape
    padded = np.zeros(dstshape)
    padded[row : row + rows, col : col + cols] = src
    return padded
def pocfunc_model(alpha, delta1, delta2, r, u):
    """Return the analytic POC peak model for least-squares fitting.

    alpha is the peak height, (delta1, delta2) the sub-pixel displacement,
    r the correlation surface (only its shape is used), and u the low-pass
    half-widths per axis. The returned callable evaluates the closed-form
    phase-only-correlation peak at integer offsets (n1, n2).
    Idiom cleanup: replaced the py2/3 six.moves.map shim with a comprehension
    and the anonymous lambda with a documented closure (same results).
    """
    N1, N2 = r.shape
    # Effective spectral support per axis: V = 2*u + 1.
    V1, V2 = [2 * v + 1 for v in u]

    def model(n1, n2):
        value = alpha / (N1 * N2)
        value = value * sin((n1 + delta1) * V1 / N1 * pi)
        value = value * sin((n2 + delta2) * V2 / N2 * pi)
        denom = sin((n1 + delta1) * pi / N1) * sin((n2 + delta2) * pi / N2)
        return value / denom

    return model
def pocfunc(f, g, withlpf=False, windowfunc=np.hanning):
    """Compute the phase-only correlation surface between images f and g.

    Both inputs must share the same 2-D shape. Returns the real, fft-shifted
    POC array; with withlpf=True a rectangular low-pass mask is applied to the
    normalized cross spectrum before inversion.
    """
    # Half-sizes (m) and quarter-sizes (u) of the input, used for the LPF mask.
    m = np.floor(list(six.moves.map(lambda x: x / 2.0, f.shape)))
    u = list(six.moves.map(lambda x: x / 2.0, m))
    # hanning window
    hy = windowfunc(f.shape[0])
    hx = windowfunc(f.shape[1])
    # Outer product builds the separable 2-D window.
    hw = hy.reshape(hy.shape[0], 1) * hx
    f = f * hw
    g = g * hw
    # compute 2d fft
    F = scipy.fftpack.fft2(f)
    G = scipy.fftpack.fft2(g)
    G_ = np.conj(G)
    # Normalized (phase-only) cross spectrum. NOTE(review): divides by
    # |F * conj(G)| with no epsilon — zero-magnitude bins yield NaN/inf.
    R = F * G_ / np.abs(F * G_)
    if withlpf == True:
        # Center the spectrum, multiply by a rectangular low-pass mask of
        # half-width m placed at offset u, then shift back.
        R = scipy.fftpack.fftshift(R)
        lpf = np.ones(list(six.moves.map(lambda x: int(x + 1), m)))
        lpf = zero_padding(lpf, f.shape, list(six.moves.map(int, u)))
        R = R * lpf
        R = scipy.fftpack.fftshift(R)
    # Invert and center so the correlation peak lies near the array middle.
    return scipy.fftpack.fftshift(np.real(scipy.fftpack.ifft2(R)))
def poc(f, g, withlpf=False, fitting_shape=(9, 9)):
    """Estimate sub-pixel translation between f and g via phase-only correlation.

    Returns (alpha, dy, dx): fitted peak height and displacement along each
    axis. fitting_shape sets the neighbourhood around the integer peak used
    for least-squares refinement.
    """
    # compute phase-only correlation
    center = list(six.moves.map(lambda x: x / 2.0, f.shape))
    m = np.floor(list(six.moves.map(lambda x: x / 2.0, f.shape)))
    u = list(six.moves.map(lambda x: x / 2.0, m))
    r = pocfunc(f, g, withlpf=withlpf)
    # least-square fitting
    # Integer-precision peak location from the flat argmax.
    max_pos = np.argmax(r)
    peak = (max_pos // f.shape[1], max_pos % f.shape[1])
    max_peak = r[peak[0], peak[1]]
    # Extract the fitting window centred on the integer peak.
    mf = list(six.moves.map(lambda x: int(x / 2), fitting_shape))
    fitting_area = r[
        peak[0] - mf[0] : peak[0] + mf[0] + 1, peak[1] - mf[1] : peak[1] + mf[1] + 1
    ]
    # Initial guess: modest height, displacement just off the integer peak.
    p0 = [0.5, -(peak[0] - m[0]) - 0.02, -(peak[1] - m[1]) - 0.02]
    y, x = np.mgrid[-mf[0] : mf[0] + 1, -mf[1] : mf[1] + 1]
    y = y + peak[0] - m[0]
    x = x + peak[1] - m[1]
    # Residual between the analytic peak model and the measured surface.
    errorfunction = lambda p: np.ravel(
        pocfunc_model(p[0], p[1], p[2], r, u)(y, x) - fitting_area
    )
    plsq = leastsq(errorfunction, p0)
    return (plsq[0][0], plsq[0][1], plsq[0][2])
def ripoc(f, g, withlpf=False, fitting_shape=(9, 9), M=50):
    """Rotation-invariant POC: estimate translation, rotation and scale.

    Registers g against f by (1) matching log-polar magnitude spectra to
    recover angle and scale, (2) unwarping g, and (3) running plain POC for
    the residual translation. Returns (alpha, dy, dx, angle_deg, scale).
    M is the log-polar magnitude scale passed to the cv2 remap.
    """
    # Window both images to suppress spectral leakage.
    hy = np.hanning(f.shape[0])
    hx = np.hanning(f.shape[1])
    hw = hy.reshape(hy.shape[0], 1) * hx
    ff = f * hw
    gg = g * hw
    # Log-magnitude spectra: rotation/scale become translations in log-polar space.
    F = scipy.fftpack.fft2(ff)
    G = scipy.fftpack.fft2(gg)
    F = scipy.fftpack.fftshift(np.log(np.abs(F)))
    G = scipy.fftpack.fftshift(np.log(np.abs(G)))
    FLP = logpolar(F, (F.shape[0] / 2, F.shape[1] / 2), M)
    GLP = logpolar(G, (G.shape[0] / 2, G.shape[1] / 2), M)
    # POC shift in log-polar coordinates maps to (angle, scale).
    R = poc(FLP, GLP, withlpf=withlpf)
    angle = -R[1] / F.shape[0] * 360
    # NOTE(review): scale recovery assumes magnitude scale M = 100 here
    # (divides by 100) even though M is a parameter — confirm.
    scale = 1.0 - R[2] / 100
    # Undo the recovered rotation/scale on g, then measure pure translation.
    center = tuple(np.array(g.shape) / 2)
    rot = cv2.getRotationMatrix2D(center, -angle, 1.0 + (1.0 - scale))
    g_dash = cv2.warpAffine(g, rot, (g.shape[1], g.shape[0]), flags=cv2.INTER_LANCZOS4)
    t = poc(f, g_dash, withlpf=withlpf)
    return (t[0], t[1], t[2], angle, scale)
| {"/pocpy/poc.py": ["/pocpy/logpolar_opencv2.py", "/pocpy/logpolar_opencv3.py"], "/pocpy/__init__.py": ["/pocpy/poc.py"]} |
76,646 | daisukekobayashi/pocpy | refs/heads/master | /pocpy/logpolar_opencv3.py | import cv2
def logpolar(src, center, magnitude_scale=40):
    """Log-polar remap of *src* about *center* (OpenCV 3+ API)."""
    return cv2.logPolar(
        src, center, magnitude_scale, cv2.INTER_CUBIC + cv2.WARP_FILL_OUTLIERS
    )
| {"/pocpy/poc.py": ["/pocpy/logpolar_opencv2.py", "/pocpy/logpolar_opencv3.py"], "/pocpy/__init__.py": ["/pocpy/poc.py"]} |
76,647 | daisukekobayashi/pocpy | refs/heads/master | /pocpy/__init__.py | from pocpy.poc import pocfunc
from pocpy.poc import poc
from pocpy.poc import ripoc
| {"/pocpy/poc.py": ["/pocpy/logpolar_opencv2.py", "/pocpy/logpolar_opencv3.py"], "/pocpy/__init__.py": ["/pocpy/poc.py"]} |
76,648 | daisukekobayashi/pocpy | refs/heads/master | /setup.py | import setuptools
# Read the long description for PyPI from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="pocpy",
    version="0.2.0",
    license="MIT",
    author="Daisuke Kobayashi",
    author_email="daisuke@daisukekobayashi.com",
    description="Phase Only Correlation in Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/daisukekobayashi/pocpy",
    packages=setuptools.find_packages(),
    # Fixed typo: was 'kewords', which setuptools silently ignores, so the
    # package published with no keywords.
    keywords="registration phase-only-correlation",
    classifiers=[
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Image Processing",
    ],
    python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*",
    install_requires=["six", "numpy", "scipy", "opencv-python"],
)
| {"/pocpy/poc.py": ["/pocpy/logpolar_opencv2.py", "/pocpy/logpolar_opencv3.py"], "/pocpy/__init__.py": ["/pocpy/poc.py"]} |
76,649 | daisukekobayashi/pocpy | refs/heads/master | /pocpy/logpolar_opencv2.py | import cv2
import cv2.cv as cv
import numpy as np
def logpolar(src, center, magnitude_scale=40):
    """Log-polar remap of *src* about *center* (legacy OpenCV 2 cv API)."""
    # Convert the numpy array to a cv matrix and allocate the output.
    mat1 = cv.fromarray(np.float64(src))
    mat2 = cv.CreateMat(src.shape[0], src.shape[1], mat1.type)
    cv.LogPolar(
        mat1,
        mat2,
        center,
        magnitude_scale,
        cv.CV_INTER_CUBIC + cv.CV_WARP_FILL_OUTLIERS,
    )
    # Back to a numpy array for the caller.
    return np.asarray(mat2)
| {"/pocpy/poc.py": ["/pocpy/logpolar_opencv2.py", "/pocpy/logpolar_opencv3.py"], "/pocpy/__init__.py": ["/pocpy/poc.py"]} |
76,655 | lucashalbert/OOA | refs/heads/master | /pipeline.py | from hw import register
from classes.decode import INSTRUCTIONDecode
from classes.alu import ALU
class pipeline:
    '''
    This class sets up pipeline variables and controls the pipeline stages
    '''
    def __init__(self, stack_ptr, inst_reg, data_reg, data_mem, inst_mem, ALU_in, ALU_out, MEM_out, WB_addr):
        '''
        This Constructor initializes all pipeline variables
        '''
        # Initialize all variables
        self.stack_ptr = stack_ptr
        self.inst_reg = inst_reg
        self.data_reg = data_reg
        self.data_mem = data_mem
        self.inst_mem = inst_mem
        # One stall cycle is forced at start-up (see writeBack).
        self.need_stall = True
        self.stall = False
        # Values that feed into ALU
        self.op = None
        self.dest = None
        self.source1 = None
        self.source2 = None
        # Values that come from ALU and MEM operations
        self.ALU_in=ALU_in # Output of ALU
        self.ALU_out=ALU_out # Output of ALU
        self.MEM_out=MEM_out # Result of reading from MEM
        self.WB_addr=WB_addr # Address to write back to
        '''
        print(self.stack_ptr.read())
        print("Inst_reg: ", self.inst_reg.read())
        print(self.data_reg[3].read())
        print(self.data_mem.load(3))
        print(self.inst_mem.load(3))
        '''
        # NOTE(review): execution starts as a side effect of construction.
        self.start_pipeline()
    def start_pipeline(self):
        '''
        This Constructor starts the pipeline, and increments the stack_ptr to keep track of instruction execution
        '''
        # Drain counter: loop until five consecutive all-zero instructions are seen.
        to_end = 5
        while(to_end > 0):
            print(self.inst_mem.load(int(self.stack_ptr.read(), 2)).rjust(32, '0') != "0".rjust(32, '0'))
            if not (self.inst_mem.load(int(self.stack_ptr.read(), 2)).rjust(32, '0') != "0".rjust(32, '0')):
                to_end = to_end -1
                print("to_end = ", to_end)
            else:
                to_end = 5
            #print("Compare: ",self.inst_mem.load(int(self.stack_ptr.read(), 2)).rjust(32, '0'), "to", "0".rjust(32, '0'))
            # Place inst_mem[stack_ptr] into inst_reg
            #print("\n\nSTART LOADING MEMORY WITH STUFF")
            # convert stack pointer binary to an a string(decimal)
            #print("Stack ptr: ", str(int(self.stack_ptr.read(), 2)))
            # Display inst_mem[stack_ptr]
            #print("inst_mem[",int(self.stack_ptr.read(), 2),"]: ", self.inst_mem.load(int(self.stack_ptr.read(), 2)))
            '''
            self.fetch() does this now
            # Write inst_mem[stack_ptr] to inst_reg
            self.inst_reg.write(int(self.inst_mem.load(int(self.stack_ptr.read(), 2))))
            # Read inst_reg
            print("inst_reg: ", self.inst_reg.read())
            '''
            ###
            # Initial values should be some sort of NULL. If null, do nothing and return
            ###
            # Stages run back-to-front so each stage consumes the previous
            # cycle's values before the earlier stages overwrite them.
            # Start instruction result write back
            self.writeBack()
            # Start instruction memory operations
            self.memory()
            # Start instruction execution
            self.execute()
            # Start instruction decode
            self.decode()
            # Start instruction fetch
            self.fetch()
            # Print New lines to seperate instructions
            print("\n\n\n")
            '''
            # convert stack_ptr to int, increment stack ptr, and convert back to padded str
            temp_stack_ptr = "{0:b}".format(int(str(int(self.stack_ptr.read(), 2) + 1))).rjust(32, '0')
            # Set new stack_ptr
            self.stack_ptr.write(str(temp_stack_ptr).rjust(32, '0'))
            '''
    def fetch(self):
        '''
        Fetch current instruction from the instruction memory and place in the instruction register
        '''
        if not (self.stall):
            print("\n|----------------------|")
            print("| Entering fetch stage |")
            print("|----------------------|")
            # Write inst_mem[stack_ptr] to inst_reg
            self.inst_reg.write(int(self.inst_mem.load(int(self.stack_ptr.read(), 2))))
            # print stack_ptr
            print("Stack_ptr: ",int(self.stack_ptr.read(), 2))
            # Read inst_reg
            print("inst_reg: ", self.inst_reg.read())
            # convert stack_ptr to int, increment stack ptr, and convert back to padded str
            temp_stack_ptr = "{0:b}".format(int(str(int(self.stack_ptr.read(), 2) + 1))).rjust(32, '0')
            # Set new stack_ptr
            self.stack_ptr.write(str(temp_stack_ptr).rjust(32, '0'))
        else:
            print("\n|-------------------------|")
            print("| Stalled fetch stage |")
            print("|-------------------------|")
    def decode(self):
        '''
        Decode instruction in the instruction register
        '''
        print(self.ALU_in)
        if not (self.stall):
            print("\n|-----------------------|")
            print("| Entering decode stage |")
            print("|-----------------------|")
            # Decoder populates WB_addr / ALU_in / ALU_out in place.
            INSTRUCTIONDecode(self.inst_reg.read(), self.data_reg, self.WB_addr, self.ALU_in, self.ALU_out)
            print("self.ALU_in",self.ALU_in)
            #a=INSTRUCTIONDecode(self.inst_reg.read(), self.data_reg, self.WB_addr)
            #self.op = a.decodeOpField()
            #self.dest = a.decodeDestField()
            #self.source1 = a.decodeSource1Field()
            #self.source2 = a.decodeSource2Field()
            #self.immediate = a.decodeImmediateValue()
            #a.constructInstruction()
            # self.op = a.decodeOpField()
            # self.dest = a.decodeDestField()
            # self.source1 = a.decodeSource1Field()
            # self.source2 = a.decodeSource2Field()
            # self.immediate = a.decodeImmediateValue()
            # a.constructInstruction()
        else:
            print("\n|-------------------------|")
            print("| Stalled decode stage |")
            print("|-------------------------|")
    def execute(self):
        '''
        Do ALU operation specified in the instruction
        '''
        print(self.ALU_in)
        if not (self.stall):
            print("\n|------------------------|")
            print("| Entering execute stage |")
            print("|------------------------|")
            #(self, operation, destination, source1, source2)
            a=ALU(self.data_reg, self.ALU_in)
            a.executeOperation()
        else:
            print("\n|-------------------------|")
            print("| Stalled execute stage |")
            print("|-------------------------|")
    def memory(self):
        '''
        Do memory operations
        '''
        if not (self.stall):
            print("\n|-----------------------|")
            print("| Entering memory stage |")
            print("|-----------------------|")
            # Stage is a placeholder: the planned read/write logic below is
            # sketched in the quoted block but not yet implemented.
            # Read from or write to memory
            # Needs to know
            # which operation to perform
            # what value to store if any
            # what location to store/read in memory
            # X is output of ALU. Instructs whether to read or write or neither from memory
            """
            if ( X ):
                #Read
                mem_out = self.data_mem[mem_location]
            else if ( X ):
                #Write
                self.data_mem[mem_location] = alu_out
            """
        else:
            print("\n|-------------------------|")
            print("| Stalled memory stage |")
            print("|-------------------------|")
    def writeBack(self):
        '''
        Do write back to registers
        '''
        # Placeholder stage: register write-back is sketched in the quoted
        # block below but not yet implemented. Only the stall bookkeeping runs.
        # Needs to know
        # what register to write in
        # what value to write
        #Arguments
        """
        reg_number
        value_to_write
        #Operation
        self.data_reg[reg_number] = value_to_write
        """
        #Stall cycle completed. Remove stall flag
        if (self.stall):
            self.stall = False
            self.need_stall = False
        #Stall needed.
        if (self.need_stall):
            self.stall = True
        print("\n|---------------------------|")
        print("| Entering write back stage |")
        print("|---------------------------|")
| {"/pipeline.py": ["/hw.py", "/classes/decode.py", "/classes/alu.py"], "/assemblyfile2bin.py": ["/classes/encode.py"], "/classes/alu.py": ["/hw.py"], "/__init__.py": ["/hw.py", "/assemblyfile2bin.py", "/pipeline.py"], "/classes/decode.py": ["/hw.py"]} |
76,656 | lucashalbert/OOA | refs/heads/master | /hw.py | # Define all the hardware classes
#32-bit strings for initializing and testing
ALL_ZEROS="00000000000000000000000000000000"
ALT_FROM_1="10101010101010101010101010101010"
ALT_FROM_0="01010101010101010101010101010101"
class register:
    """A single 32-bit register modelled as a zero-padded bit string."""

    def __init__(self):
        # Power-on state: all bits cleared.
        self.data = ALL_ZEROS

    def write(self, data):
        """Store *data*, left-padded with '0' to a 32-character string."""
        padded = str(data).rjust(32, '0')
        self.data = padded

    def read(self):
        """Return the current 32-character contents."""
        return self.data
class mem_collection:
    """A named, fixed-size, word-addressable memory of 32-bit strings."""

    def __init__(self, name, size):
        self.name = name
        # Every word starts out cleared.
        self.mem = [ALL_ZEROS] * size

    def save_all(self, arr):
        """Copy each entry of *arr* into memory starting at address 0."""
        for addr, word in enumerate(arr):
            self.save(addr, word)

    def load(self, addr):
        """Return the word stored at *addr*."""
        return self.mem[addr]

    def save(self, addr, value):
        """Overwrite the word at *addr* with *value*."""
        self.mem[addr] = value
| {"/pipeline.py": ["/hw.py", "/classes/decode.py", "/classes/alu.py"], "/assemblyfile2bin.py": ["/classes/encode.py"], "/classes/alu.py": ["/hw.py"], "/__init__.py": ["/hw.py", "/assemblyfile2bin.py", "/pipeline.py"], "/classes/decode.py": ["/hw.py"]} |
76,657 | lucashalbert/OOA | refs/heads/master | /assemblyfile2bin.py | #!/usr/local/env python3
# -*- coding: utf-8 -*-
'''
Filename: assembly2Bin.py
Author: Lucas Halbert
Date: 04/17/15
Modified: 04/21/15
Description: This utility will encode assembly instructions to binary
and store in the specified file.
'''
import sys
from classes.encode import INSTRUCTIONEncode
class FileToBin:
    """Reads an assembly source file, encodes each instruction to binary,
    and writes the resulting byte code to an output file."""

    def __init__(self, sourcefile, binfile):
        self.sourcefile = sourcefile   # path of the assembly input
        self.binfile= binfile          # path of the binary output
        self.bin_array = []            # accumulated encoded instructions
        self.instructions=[]           # raw source lines
        print ("\nFinished init of FileToBin")

    def read(self):
        # NOTE(review): the source file is opened twice; the first handle is
        # immediately superseded by the 'with' block — confirm/remove upstream.
        f = open(self.sourcefile, 'r')
        with open(self.sourcefile) as f:
            self.instructions = f.readlines()
        f.close()

    def write(self):
        """Encode every instruction and write one binary string per line."""
        # Open file for writing
        f = open(self.binfile, 'w')
        #while :
        for inst in self.instructions:
            if inst== "quit":
                # Close file to free up system resources
                # NOTE(review): 'f.close' is missing the call parentheses, so
                # the file is never actually closed here — confirm.
                f.close
                # Exit gracefully
                sys.exit(0)
            # Call INSTRUCTIONEncode class
            a=INSTRUCTIONEncode(inst)
            # Encode operation field
            #a.encodeOpField()
            # Encode destination field
            #a.encodeDestField()
            # Encode source1 field
            #a.encodeSource1Field()
            #if a.immediate == 0:
            # Encode source2 field
            #a.encodeSource2Field()
            #elif a.immediate == 1:
            # Encode immediate value field
            #a.encodeImmediateValue()
            # Construct binary representation of instruction
            #a.constructByteCode()
            # Print constructed byte code
            #print(a.inst_bin)
            # return data to call
            self.bin_array.append(a.inst_bin)
            # Write byte code to file with newline
            f.write(a.inst_bin + '\n')
        f.close()
        return self.bin_array
| {"/pipeline.py": ["/hw.py", "/classes/decode.py", "/classes/alu.py"], "/assemblyfile2bin.py": ["/classes/encode.py"], "/classes/alu.py": ["/hw.py"], "/__init__.py": ["/hw.py", "/assemblyfile2bin.py", "/pipeline.py"], "/classes/decode.py": ["/hw.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.