index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
71,414 | gestiweb/llampex-mini | refs/heads/master | /projects/erp/generic/master/scripts/masterbasic.py | # encoding: UTF-8
print "loading customers.py"
import os.path, traceback
from PyQt4 import QtGui, QtCore, uic
from masterform import LlampexMasterForm
import time
import re
import threading
class DataLoaderThread(threading.Thread):
maxrowscached = 100000
def run(self):
p = self.parent
self.abort = False
self.totalrowcount = 0
self.rowsperfecth = p.rowsperfecth
self.paralellqueries = 1
self.queried = 0
start = time.time()
t1 = start
while True:
self.rowlimit = p.execute_rowlimit
t2 = time.time()
#print "Expecting: %d rows (%.3fs delay)" % (self.rowlimit,t2-t1)
rowcount = 0
results = []
self.queried = 0
def newfetch():
qsize = 0
rowsremaining = self.rowlimit - self.queried
self.paralellqueries = int(rowsremaining / self.rowsperfecth/2)
if self.paralellqueries < 5:
self.paralellqueries = 5
while len(results) < self.paralellqueries:
results.append(p.cursor.method.fetch(self.rowsperfecth))
qsize += 1
self.queried += qsize * self.rowsperfecth
#print "Queried: %d rows +(%d rows * %d times) (%d threads running) (%d - %d = %d rows remaining)" % (self.queried,self.rowsperfecth,qsize, len(results), self.rowlimit, self.queried, rowsremaining)
#self.rowsperfecth += p.rowsperfecth
#if self.rowsperfecth > p.maxrowsperfecth:
# self.rowsperfecth = p.maxrowsperfecth
t3 = time.time()
newfetch()
t4 = time.time()
#print "(%.3fs delay A ,%.3fs delay B )" % (t3-t2,t4-t3)
while True:
if self.abort: return
if results:
th1 = threading.Thread(target=newfetch)
th1.start()
else:
#print "querying new results."
newfetch()
t5 = time.time()
rows = results.pop(0).value
t6 = time.time()
rowcount += len(rows)
if not rows:
break
p.cachedata += rows
#print self.totalrowcount, rowcount, "%.3fs" % (time.time()-start), "(%d t) (%.3fs delay)" % (len(results),t6-t5)
self.hasdata.set()
if rowcount >= self.rowlimit:
while results:
rows = results.pop(0).value
break
self.totalrowcount += rowcount
#print "Got %d rows (total: %d)" % (rowcount,self.totalrowcount)
if rowcount == 0 or rowcount < self.rowlimit or self.totalrowcount > self.maxrowscached:
if self.totalrowcount > self.maxrowscached:
print "WARN: Stopped caching data because loader has reached %d rows" % (self.totalrowcount)
p.execute(1)
break
if self.abort: return
self.rowsperfecth = p.maxrowsperfecth
t1 = time.time()
p.execute(p.maxtablerows)
print "END:", self.totalrowcount, rowcount, "%.3fs" % (time.time()-start), "(%d t)" % len(results)
class MasterScript(object):
    """Controller for a master table form.

    Loads rows from the server-side RPC cursor into the form's Qt table
    widget, incrementally via a QTimer, and supports per-column filters
    entered by clicking the header.
    """

    def __init__(self, form):
        self.form = form
        self.rpc = self.form.prjconn
        self.cursor = self.rpc.call.newCursor()
        self.table = self.form.actionobj.table
        self.timer = QtCore.QTimer(self.form)
        table = self.form.ui.table
        table.setRowCount(0)
        table.setColumnCount(1)
        table.setHorizontalHeaderLabels(["wait, loading data . . . "])
        self.form.connect(self.timer, QtCore.SIGNAL("timeout()"), self.timer_timeout)
        self.form.connect(table, QtCore.SIGNAL("cellDoubleClicked(int,int)"), self.table_cellDoubleClicked)
        tableheader = table.horizontalHeader()
        self.form.connect(tableheader, QtCore.SIGNAL("sectionClicked(int)"), self.table_sectionClicked)
        # Column index -> filter text (either a regex or "field OP 'value'").
        self.filterdata = {}
        self.filter_regex = r"(\w+)[ ]*(~|=|>|<|LIKE|ILIKE|>=|<=)[ ]*'(.+)'"
        self.sqlquery = None  # obsolete
        self.wherefilters = []
        self.orderbyfields = []
        self.datathread = None
        self.cachedata = []
        self.maxtablerows = 5000     # rows shown in the widget at most
        self.firstfetch = 500        # initial query size
        self.rowsperfecth = 20       # rows per RPC fetch call
        self.maxrowsperfecth = 250
        self.data_reload()
def update_sqlquery(self):
# Obsolete:
"""
where_str = []
for col, regex in self.filterdata.iteritems():
result1 = re.match(self.filter_regex,regex)
if result1:
fieldname, operator, regexvalue = result1.group(1), result1.group(2), result1.group(3)
print "adding:", fieldname, operator, regexvalue
where_str.append("%s %s '%s'" % (fieldname, operator ,regexvalue))
self.sqlquery = "SELECT * FROM \"%s\"" % self.table
if where_str:
self.sqlquery += " WHERE %s" % (" AND ".join(where_str))
"""
self.wherefilters = self.getwherefilter()
self.orderbyfields = []
def getwherefilter(self):
wherefilter = []
for col, regex in self.filterdata.iteritems():
result1 = re.match(self.filter_regex,regex)
if result1:
fieldname, operator, regexvalue = result1.group(1), result1.group(2), result1.group(3)
wherefilter.append( {'fieldname' : fieldname, 'op' : operator, 'value' : regexvalue} )
return wherefilter
def data_reload(self):
self.timer.stop()
if self.datathread:
if self.datathread.isAlive():
self.datathread.abort = True
self.datathread.join(0.5)
if self.datathread.isAlive():
print "WARN: DataThreadLoader still alive."
del self.datathread
self.datathread = None
self.maxcolumns = 32
self.starttime = time.time()
print "started full reload for", self.table
self.totalrows = 0
self.update_sqlquery()
self.datathread = DataLoaderThread()
self.datathread.parent = self
self.datathread.daemon = True
self.datathread.hasdata = threading.Event()
self.datathread.sql = self.sqlquery
self.table_initialized = False
self.timer_initload()
self.datathread.hasdata.wait(1)
self.timer_populatetable()
self.timer.start(1)
def data_softreload(self):
table = self.form.ui.table
self.starttime = time.time()
print "started soft reload for", self.table
self.nrows = 0
self.nrow = 0
self.omitted = 0
self.totalrows = len(self.cachedata)
table.setRowCount(min([self.totalrows,self.maxtablerows]))
self.timer.start(10)
def timer_timeout(self):
if self.table_initialized == False:
if not self.timer_initload():
return
self.timer_populatetable()
def execute(self,rows):
offset = len(self.cachedata)
limit = rows
self.execute_rowlimit = limit
try:
#self.cursor.call.execute(self.sqlquery + " LIMIT %d OFFSET %d" % (limit,offset))
self.sqlinfo = self.cursor.call.selecttable(self.table,
wherelist=self.wherefilters,
orderby=self.orderbyfields,
limit = limit,
offset = offset)
except Exception, e:
print "FATAL: Cursor Execute failed with:", repr(e)
self.cursor.call.rollback()
self.timer.stop()
return False
self.execute_rowlimit = self.sqlinfo["count"]
self.totalrows += self.sqlinfo["count"]
# print "%s: %d rows" % (self.table,self.totalrows)
return True
def timer_initload(self):
table = self.form.ui.table
self.cachedata[:] = []
if not self.execute(self.firstfetch):
table.setRowCount(0)
return False
field_list = self.sqlinfo["fields"][:self.maxcolumns]
table.setColumnCount(len(field_list))
table.setHorizontalHeaderLabels(field_list)
tableheader = self.form.ui.table.horizontalHeader()
# tableheader.setClickable(False) # default is True
tableheader.setSortIndicatorShown(True)
tableheader.setMovable(True)
tableheader.setStretchLastSection(True)
table.setRowCount(min([self.totalrows,self.maxtablerows]))
self.lastreporttime = 0
self.nrows = 0
self.nrow = 0
self.omitted = 0
self.datathread.start()
#self.fetchresult = self.cursor.method.fetch(self.rowsperfecth)
self.table_initialized = True
self.timer.start(10)
def timer_populatetable(self):
#self.fetchresult.conn.dispatch_until_empty()
#if self.fetchresult.response is None: return
#rowlist = self.fetchresult.value
#self.fetchresult = self.cursor.method.fetch(self.rowsperfecth)
if self.nrows < 100: maxsize = 10
else: maxsize = 50
rowlist = self.cachedata[self.nrows:self.nrows+maxsize]
table = self.form.ui.table
if not rowlist and not self.datathread.isAlive():
print "finished loading data for %s (%d/%d rows) in %.3f seconds" % (
self.table, self.nrow, self.totalrows, time.time() - self.starttime)
#x = self.fetchresult.value #get and drop.
#assert( not x ) # should be empty
self.timer.stop()
if not rowlist:
return
self.nrows += len(rowlist)
self.table_loaddata(rowlist)
def table_loaddata(self, rowlist):
table = self.form.ui.table
omittedrows = 0
table.setRowCount(self.nrow+len(rowlist))
for rowdata in rowlist:
includerow = True
if self.nrow > self.maxtablerows: includerow = False
for col, regex in self.filterdata.iteritems():
if col < 0 or col >= len(rowdata): continue
result1 = re.match(self.filter_regex,regex)
if result1: continue
val = unicode(rowdata[col])
if not re.search(regex,val, re.I):
#if not val.startswith(regex):
includerow = False
break
if not includerow:
omittedrows += 1
self.omitted += 1
continue
for ncol, value in enumerate(rowdata[:self.maxcolumns]):
item = QtGui.QTableWidgetItem(unicode(value))
table.setItem(self.nrow, ncol, item)
self.nrow += 1
if self.nrow == 0:
table.setRowCount(1)
else:
table.setRowCount(self.nrow)
#table.setRowCount(min([self.totalrows,self.maxtablerows]))
if time.time() - self.lastreporttime > 1:
self.lastreporttime = time.time()
print "loading table %s: %d rows (+%d hidden) (%.2f%%) (%.3f s)" % (self.table,
self.nrow, self.omitted, float(self.nrow+self.omitted)*100.0/float(self.totalrows), time.time() - self.starttime)
def table_cellDoubleClicked(self, row, col):
print "Clicked", row,col
def table_sectionClicked(self, col):
if col not in self.filterdata:
line1 = "No filter declared yet for column %d" % col
txt = ""
else:
line1 = "Replacing filter for column %d: %s" % (col, self.filterdata[col])
txt = unicode(self.filterdata[col])
rettext, ok = QtGui.QInputDialog.getText(self.form, "Filter By",
line1 + "\nWrite New filter (RegEx):", QtGui.QLineEdit.Normal, txt)
rettext = unicode(rettext)
if ok:
print "New filter:", repr(rettext)
if rettext == u"":
if col in self.filterdata:
del self.filterdata[col]
else:
self.filterdata[col] = rettext
fullreload = False
if self.wherefilters != self.getwherefilter(): fullreload = True
if self.datathread is None: fullreload = True
if fullreload:
self.data_reload()
else:
self.data_softreload()
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,415 | gestiweb/llampex-mini | refs/heads/master | /engine/engine.py | import model
from model import RowProject, RowUser,RowProjectUser
import project_manager
import manage_projects
import bjsonrpc
from bjsonrpc.handlers import BaseHandler
from bjsonrpc.exceptions import ServerError
import threading
import signal, os
thread_server = None
class ServerHandler(BaseHandler):
    """bjsonrpc handler exposing login, project listing and project access."""
    username = None  # set after a successful login()

    def getAvailableProjects(self):
        """Return [{'code', 'description'}] for the active projects assigned
        to the logged-in user, ordered by code.

        Raises ServerError("LoginInvalidError") when not logged in.
        """
        if self.username is None:
            raise ServerError("LoginInvalidError")
        projectlist = []
        projectrows = model.session.query(RowProject).filter(RowProject.active == True).filter(RowProject.id.in_(model.session.query(RowProjectUser.project_id).filter(RowProjectUser.user_id == self.user.id))).order_by(RowProject.code)
        for rowproject in projectrows:
            projectrow = {
                'code': rowproject.code,
                'description': rowproject.description,
            }
            projectlist.append(projectrow)
        return projectlist

    def login(self, username, password):
        """Validate credentials; remember the user row and return True.

        Raises ServerError on a second login or on invalid credentials.
        """
        if self.username is not None:
            raise ServerError("AlreadyLoggedError")
        userrow = model.session.query(RowUser).filter_by(active=True, username=username).first()
        if userrow is None:
            raise ServerError("LoginInvalidError")
        if not project_manager.validate_password(password, userrow.password):
            raise ServerError("LoginInvalidError")
        self.username = username
        self.user = userrow
        return True

    def connectProject(self, projectname):
        """Return a project-manager handler for ``projectname``.

        Requires a prior login; the project must exist and be active.
        """
        if self.username is None:
            raise ServerError("LoginInvalidError")
        projectrow = model.session.query(RowProject).filter_by(code=projectname).first()
        if projectrow is None:
            raise ServerError("No project exists with the name '%s'" % projectname)
        if projectrow.active != True:
            raise ServerError("Project '%s' is not active" % projectname)
        # TODO: Limit user access for this project
        return project_manager.connect_project(self, projectrow, self.username)

    def getManageProjects(self):
        return manage_projects.getManageProjects(self)
def handler(signum, frame):
print 'Received signal number', signum
raise KeyboardInterrupt
def start(verbose=False):
    """Create the bjsonrpc server on 0.0.0.0:10123 and serve it from a
    daemon thread, stored in the module-global ``thread_server``."""
    global thread_server
    rpcserver = bjsonrpc.createserver(host="0.0.0.0", port=10123, handler_factory=ServerHandler)
    rpcserver.debug_socket(verbose)
    thread_server = threading.Thread(target=rpcserver.serve)
    thread_server.daemon = True
    thread_server.start()
def wait():
if thread_server:
try:
while True:
thread_server.join(1)
except KeyboardInterrupt:
print "bye!"
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,416 | gestiweb/llampex-mini | refs/heads/master | /qt4client/widgets/llampextoolbar.py | #!/usr/bin/env python
# encoding: UTF-8
from PyQt4 import QtCore, QtGui
MIN_DRAG_DISTANCE = 16
class LlampexToolBarButton(QtGui.QToolButton):
    """Toolbar button bound to a project action; draggable so the user can
    reorder or remove direct links."""

    def __init__(self, key, actionobj, parent=None):
        # BUGFIX: was super(QtGui.QToolButton, self), which skips
        # QToolButton itself in the MRO; super() must name this subclass.
        super(LlampexToolBarButton, self).__init__(parent)
        self.parent = parent
        self.key = key
        self.actionobj = actionobj
        self.setup()

    def setup(self):
        self.dragStartPoint = None
        icon = None
        if self.actionobj.icon:
            iconfile = self.actionobj.filedir(self.actionobj.icon)
            icon = QtGui.QIcon(iconfile)
        self.setToolTip(self.actionobj.name)
        self.setIcon(icon)
        self.setCursor(QtCore.Qt.PointingHandCursor)
        self.setStyleSheet("QToolButton { border: none; padding: 0px; }")
        self.connect(self, QtCore.SIGNAL("clicked()"), self.clicked)

    def clicked(self, checked=False):
        # NOTE(review): this method shadows QToolButton's "clicked" signal
        # attribute name; kept as-is because the SIGNAL() binding above works.
        self.parent.parent.actionbutton_clicked(str(self.key))

    def mouseMoveEvent(self, e):
        QtGui.QToolButton.mouseMoveEvent(self, e)
        if e.buttons() == QtCore.Qt.LeftButton and self.dragStartPoint:
            x, y = e.x(), e.y()
            ox, oy = self.dragStartPoint
            dx2 = (x - ox) ** 2
            dy2 = (y - oy) ** 2
            d2 = dx2 + dy2
            # Only start a drag once the cursor moved far enough.
            if d2 > MIN_DRAG_DISTANCE ** 2:
                mimeData = QtCore.QMimeData()
                mimeData.setText(self.key)
                drag = QtGui.QDrag(self)
                drag.setPixmap(self.icon().pixmap(16, 16))
                drag.setMimeData(mimeData)
                dragstartQPoint = QtCore.QPoint(self.dragStartPoint[0], self.dragStartPoint[1])
                drag.setHotSpot(dragstartQPoint - self.rect().topLeft())
                # Detach the button from the toolbar before the drag starts.
                self.parent.layout().removeWidget(self)
                self.parent.keys.remove(self.key)
                self.parent.parent.prjconn.call.updateDirectLinks(self.parent.keys)
                self.hide()
                dropAction = drag.start(QtCore.Qt.MoveAction)
                if (self.parent.layout().count() <= 1):
                    self.parent.dragInfoLabel.show()

    def mousePressEvent(self, e):
        QtGui.QToolButton.mousePressEvent(self, e)
        if e.buttons() == QtCore.Qt.LeftButton:
            self.dragStartPoint = (e.x(), e.y())
class LlampexToolBar(QtGui.QFrame):
def __init__(self, parent):
QtGui.QFrame.__init__(self)
self.parent = parent
self.setup()
def setup(self):
self.setAcceptDrops(True)
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.setFrameShadow(QtGui.QFrame.Raised)
self.setLayout(QtGui.QHBoxLayout(self))
self.layout().setContentsMargins(2,2,2,2)
self.keys=[]
self.line = QtGui.QFrame()
self.line.setFrameShape(QtGui.QFrame.VLine);
self.line.setFrameShadow(QtGui.QFrame.Sunken);
self.line.hide()
self.dragInfoLabel = QtGui.QLabel()
self.dragInfoLabel.setText("<font color='gray'>Drag actions here to create direct links...</font>")
self.dragInfoLabel.setIndent(8)
self.layout().insertWidget(0,self.dragInfoLabel)
def dragEnterEvent(self, e):
#print "in!"
if self.dragInfoLabel.isVisible():
self.layout().removeWidget(self.dragInfoLabel)
self.dragInfoLabel.hide()
e.accept()
def dragLeaveEvent(self, e):
#print "out!"
self.layout().removeWidget(self.line)
self.line.hide()
if (self.layout().count() <= 1):
self.dragInfoLabel.show()
e.accept()
def dropEvent(self, e):
pos = e.pos()
self.layout().removeWidget(self.line)
self.line.hide()
if (isinstance(e.source(), LlampexToolBarButton)):
widget = e.source()
del widget
self.addToolButton(e.mimeData().text(),pos.x())
e.setDropAction(QtCore.Qt.CopyAction)
e.accept()
def dragMoveEvent(self, e):
pos = e.pos().x()
self.layout().removeWidget(self.line)
self.line.hide()
#find position
for i in range(self.layout().count()):
widget = self.layout().itemAt(i).widget()
if (widget.x() > pos):
break
self.layout().insertWidget(i,self.line)
self.line.show()
e.accept()
def addToolButton(self,key,pos):
print "Add "+key
if (str(key) not in self.keys):
index = str(key.split(".")[2])
actionobj = self.parent.project.action_index[index]
tb = LlampexToolBarButton(key,actionobj,self)
#find position
for i in range(self.layout().count()):
widget = self.layout().itemAt(i).widget()
if (widget.x() > pos):
break
self.layout().insertWidget(i,tb)
self.keys.insert(i,str(key))
self.parent.prjconn.call.updateDirectLinks(self.keys)
def loadSavedLinks(self):
links = self.parent.prjconn.call.getDirectLinks()
if (len(links)>0):
self.layout().removeWidget(self.dragInfoLabel)
self.dragInfoLabel.hide()
i = 0
for link in links:
self.addToolButton(link,i)
i=+1
class LlampexSearchBox(QtGui.QLineEdit):
def __init__(self, parent):
QtGui.QLineEdit.__init__(self)
self.clearButton = QtGui.QToolButton(self)
pixmap = QtGui.QPixmap("icons/searchclear.png")
self.clearButton.setIcon(QtGui.QIcon(pixmap))
self.clearButton.setIconSize(pixmap.size())
self.clearButton.setCursor(QtCore.Qt.ArrowCursor)
self.clearButton.setStyleSheet("QToolButton { border: none; padding: 0px; }")
self.clearButton.hide()
self.connect(self.clearButton, QtCore.SIGNAL("clicked()"), self.clearClicked)
self.connect(self,QtCore.SIGNAL("textChanged(const QString&)"), self.updateClearButton)
self.frameWidth = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
self.setStyleSheet(QtCore.QString("QLineEdit { padding-right: %1px; color: gray; } ").arg(self.clearButton.sizeHint().width()+self.frameWidth+1))
self.setText("Search...")
msz = self.minimumSizeHint()
self.setMinimumSize(self.qMax(msz.width(), self.clearButton.sizeHint().height() + self.frameWidth * 2 + 2),
self.qMax(msz.height(), self.clearButton.sizeHint().height() + self.frameWidth * 2 + 2))
def qMax(self, a1, a2):
if a1 <= a2:
return a2
else:
return a1
def clearClicked(self):
self.clear()
def resizeEvent(self, event):
sz = self.clearButton.sizeHint()
self.frameWidth = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
self.clearButton.move(self.rect().right() - self.frameWidth - sz.width(), (self.rect().bottom() + 1 - sz.height())/2)
def focusInEvent(self, event):
QtGui.QLineEdit.focusInEvent(self, event)
if (self.text() == "Search..."):
self.setStyleSheet(QtCore.QString("QLineEdit { padding-right: %1px; color: black } ").arg(self.clearButton.sizeHint().width()+self.frameWidth+1))
self.setText("")
def focusOutEvent(self,event):
QtGui.QLineEdit.focusOutEvent(self, event)
if (self.text().isEmpty()):
self.setStyleSheet(QtCore.QString("QLineEdit { padding-right: %1px; color: gray } ").arg(self.clearButton.sizeHint().width()+self.frameWidth+1))
self.setText("Search...")
def updateClearButton(self, text):
if (text.isEmpty() or text == "Search..."):
self.clearButton.setVisible(False)
else:
self.clearButton.setVisible(True) | {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,417 | gestiweb/llampex-mini | refs/heads/master | /engine/model/table_projects.py | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, Boolean
from sqlalchemy.orm import relation as relationship
from . import Base
class RowProject(Base):
__tablename__ = 'projects'
id = Column(Integer, primary_key=True)
code = Column(String(64), nullable=False, unique=True)
description = Column(String(255), nullable=False, default="")
db = Column(String(64), nullable=False)
path = Column(String(255), nullable=False)
host = Column(String(64))
port = Column(Integer)
user = Column(String(64))
password = Column(String(255))
passwdcipher = Column(String(128), nullable=True, default="")
active = Column(Boolean, nullable=False, default=True)
def __str__(self):
return "<RowProject(%s) code=%s active=%s>" % (
repr(self.id),
repr(self.code),
repr(self.active)
) | {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,418 | gestiweb/llampex-mini | refs/heads/master | /qt4client/login.py | #!/usr/bin/env python
# encoding: UTF-8
import sys
import os.path
import time, threading, traceback
import yaml, hashlib, bz2, zlib
from base64 import b64decode, b64encode
try:
from PyQt4 import QtGui, QtCore, QtSql, uic
except ImportError:
print "ERROR: Unable to import PyQt4 (Qt4 for Python)."
print " * * * Please install PyQt4 / python-qt4 package * * *"
sys.exit(1)
try:
import bjsonrpc
except ImportError:
print "ERROR: Unable to import bjsonrpc (bidirectional JSON-RPC protocol)."
print " * * * Please install bjsonrpc package * * *"
sys.exit(1)
bjsonrpc_required_release = '0.2.0'
try:
assert(bjsonrpc.__release__ >= bjsonrpc_required_release)
except AssertionError:
print "ERROR: bjsonrpc release is %s , and llampex mini qt4client requires at least %s" % (bjsonrpc.__release__, bjsonrpc_required_release)
print " * * * Please Upgrade BJSONRPC * * * "
sys.exit(1)
from bjsonrpc.exceptions import ServerError
import qsqlrpcdriver.qtdriver as qtdriver
from mainwindow import LlampexMainWindow
from widgets import llampexmainmenu
from manage_dialog import ManageDialog
__version__ = "0.0.1"
diskwrite_lock = threading.Lock()
lampex_icon = None
def apppath():
    """Absolute directory of the running script (sys.argv[0])."""
    return os.path.abspath(os.path.dirname(sys.argv[0]))


def filepath():
    """Absolute directory of this source file."""
    return os.path.abspath(os.path.dirname(__file__))


def appdir(x):
    """Resolve ``x`` relative to the application directory (absolute paths
    are returned unchanged)."""
    return x if os.path.isabs(x) else os.path.join(apppath(), x)


def filedir(x):
    """Resolve ``x`` relative to this file's directory (absolute paths are
    returned unchanged)."""
    return x if os.path.isabs(x) else os.path.join(filepath(), x)
def argvparam(key):
    """Return the value following "-<key>" in sys.argv.

    Returns None when the flag is absent, and "" when the flag is the last
    argument (no value follows it).
    """
    flag = "-" + key
    if flag not in sys.argv:
        return None
    idx = sys.argv.index(flag)
    if idx + 1 >= len(sys.argv):
        return ""
    return sys.argv[idx + 1]
def str2bool(x):
    """Loose string-to-bool: "" is False; first letter of 0/f/n means False,
    1/y/t means True; anything else yields None (unrecognized)."""
    if x == "":
        return False
    first = x.lower()[0]
    if first in ("0", "f", "n"):
        return False
    if first in ("1", "y", "t"):
        return True
    return None
class ConfigSettings(yaml.YAMLObject):
    """Persisted client settings (.settings.yaml); command-line flags such
    as -host/-port/-username override the saved values."""
    yaml_tag = u'!ConfigSettings'

    def setargv(self, key, default=None, cast=str):
        # Command line (-key value) wins; otherwise keep the loaded value,
        # falling back to ``default`` when nothing was loaded.
        val = argvparam(key)
        if val is not None:
            setattr(self, key, cast(val))
        else:
            if not hasattr(self, key):
                setattr(self, key, default)

    @classmethod
    def load(cls, filename=".settings.yaml"):
        """Load settings from ``filename`` (or start empty) and apply
        defaults / command-line overrides."""
        try:
            # BUGFIX: file handle was never closed; use a context manager.
            # NOTE(review): yaml.load on a local settings file; consider
            # yaml.safe_load to avoid instantiating arbitrary objects.
            with open(filedir(filename), "r") as f1:
                settings = yaml.load(f1.read())
        except IOError:
            settings = ConfigSettings()
        settings.setDefaults()
        return settings

    def setDefaults(self):
        self.setargv("username", "")
        self.setargv("password", "")
        self.setargv("host", "127.0.0.1")
        self.setargv("port", "10123")
        self.setargv("remember", False, cast=str2bool)
        self.setargv("debug", False, cast=str2bool)
        self.setargv("project", "")
class ConnectionDialog(QtGui.QDialog):
    """Login dialog: collects host/port/credentials and opens a project."""

    def __init__(self):
        QtGui.QDialog.__init__(self)
        ui_filepath = filedir("forms/login.ui")  # make the path absolute
        self.ui = uic.loadUi(ui_filepath, self)  # load the external UI file
        global llampex_icon
        llampex_icon = self.windowIcon()
        settings = ConfigSettings.load()
        self.project = settings.project
        self.debug = settings.debug
        self.ui.user.setText(settings.username)
        self.ui.password.setText(settings.password)
        try:
            self.ui.host.setText(settings.host)
            self.ui.port.setText(settings.port)
        except Exception:
            # Older settings files may lack host/port; keep the UI defaults.
            pass
        self.ui.rememberpasswd.setChecked(settings.remember)
        self.connect(self.ui.manage, QtCore.SIGNAL("clicked()"), self.manage_clicked)
        selected = 0
        if '-autoconnect' in sys.argv:
            QtCore.QTimer.singleShot(10, self.accept)
def manage_clicked(self):
host = unicode(self.ui.host.text())
port = unicode(self.ui.port.text())
port = int(port)
try:
self.conn = bjsonrpc.connect(host=host,port=port)
self.conn._debug_socket = self.debug
except Exception, e:
msgBox = QtGui.QMessageBox()
msgBox.setText("Error trying to connect to %s:%d: %s: %s\n" % (host,port,e.__class__.__name__ ,repr(e.args)))
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.exec_()
return
global managewindow
managewindow = ManageDialog(self.conn, filedir("forms/manage.ui"), filedir("forms/addProject.ui"))
managewindow.show()
self.close()
def accept(self):
username = unicode(self.ui.user.text())
password = unicode(self.ui.password.text())
host = unicode(self.ui.host.text())
port = unicode(self.ui.port.text())
try:
port = int(port)
except ValueError:
msgBox = QtGui.QMessageBox()
msgBox.setText("The port number must be integer")
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.exec_()
return
try:
self.conn = bjsonrpc.connect(host=host,port=port)
self.conn._debug_socket = self.debug
except Exception, e:
msgBox = QtGui.QMessageBox()
msgBox.setText("Error trying to connect to %s:%d: %s: %s\n" % (host,port,e.__class__.__name__ ,repr(e.args)))
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.exec_()
return
try:
logresult = self.conn.call.login(username,password)
if not logresult: raise ValueError
global selectionwindow
selectionwindow = ProjectSelectionDialog(self.conn)
availableprojects = self.conn.call.getAvailableProjects()
if len(availableprojects) == 1:
print "Only 1"
for row,rowdict in enumerate(availableprojects):
self.project = rowdict['code']
else:
print "Multiple or None"
selectionwindow.show()
if self.project:
selectionwindow.open_project(self.project)
self.close()
return
#msgBox = QtGui.QMessageBox()
#msgBox.setText("Login successful!")
#msgBox.setIcon(QtGui.QMessageBox.Information)
#msgBox.exec_()
# print project_manager.call.getUserList()
#filelist = project_manager.call.getFileList()
#print sorted( filelist.keys() )
splashwindow = SplashDialog()
splashwindow.prjconn = project_manager
splashwindow.show()
self.close()
except ServerError, e:
msgBox = QtGui.QMessageBox()
if e.args[0] == "DatabaseConnectionError":
msgBox.setText("The server could not connect to the underlying database")
msgBox.setIcon(QtGui.QMessageBox.Critical)
elif e.args[0] == "LoginInvalidError":
msgBox.setText("The username/password specified is invalid. Try again.")
msgBox.setIcon(QtGui.QMessageBox.Information)
else:
msgBox.setText("The server returned the following error:\n" + repr(e.args[0]))
msgBox.setIcon(QtGui.QMessageBox.Warning)
msgBox.exec_()
except Exception, e:
msgBox = QtGui.QMessageBox()
msgBox.setText("Unexpected error: %s\n" % e.__class__.__name__ + repr(e.args[0]))
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.exec_()
def reject(self):
self.close()
def closeEvent(self,event):
settings = ConfigSettings()
settings.host = str(self.ui.host.text())
settings.port = str(self.ui.port.text())
if self.ui.rememberpasswd.isChecked():
settings.username = str(self.ui.user.text())
settings.password = str(self.ui.password.text())
settings.remember = True
else:
settings.username = ""
settings.password = ""
settings.remember = False
f1 = open(filedir(".settings.yaml"),"w")
f1.write(yaml.dump(settings))
event.accept()
#event.ignore()
class ProjectSelectionDialog(QtGui.QDialog):
def __init__(self, conn):
QtGui.QDialog.__init__(self)
self.setWindowTitle("Project selection")
self.resize(500,300)
self.conn = conn
availableprojects = self.conn.call.getAvailableProjects()
self.layout = QtGui.QVBoxLayout()
self.setWindowIcon(llampex_icon)
n = 0
for row,rowdict in enumerate(availableprojects):
n += 1
button = llampexmainmenu.LlampexMainMenuButton("%(code)s" % rowdict, rowdict['code'],self.open_project)
button.setDescription("%(description)s" % rowdict)
button.setMaximumHeight(96)
self.layout.addWidget(button)
if n == 0:
label = QtGui.QLabel("No projects available for this username")
self.layout.addWidget(label)
self.setLayout(self.layout)
def open_project(self,projectname):
print "Open", projectname
project_manager = self.conn.call.connectProject(projectname)
splashwindow = SplashDialog()
splashwindow.prjname = projectname
splashwindow.prjconn = project_manager
splashwindow.show()
self.close()
class remoteProject(object):
    """Plain attribute bag used to collect remote-project state (file tree,
    signatures, downloaded file digests) during the splash load."""
    pass
def trb64_name(b64):
    """Map a base64 digest to a filesystem-safe cache file name.

    '+' becomes '_' and '/' becomes '-' so the digest cannot escape the
    cache directory or collide with path syntax.
    """
    safe = b64.replace("+", "_").replace("/", "-")
    return "cache_" + safe + ".data"
class SplashDialog(QtGui.QDialog):
def __init__(self):
QtGui.QDialog.__init__(self)
self.prjconn = None
self.prjname = None
ui_filepath = filedir("forms/splash.ui") # convertimos la ruta a absoluta
self.ui = uic.loadUi(ui_filepath,self) # Cargamos un fichero UI externo
self.ui.version.setText("v%s" % __version__)
self.ui.progress.setValue(0)
wf = self.windowFlags()
self.setWindowFlags(wf | QtCore.Qt.FramelessWindowHint)
self.rprj = remoteProject()
self.progress_value = 0
self.prev_load_mode = None
self.load_mode = "init"
self.progress_load = {
"init" : 10,
"waitload" : 100,
"filetree" : 110,
"projectsignature" : 120,
"projectparts1" : 130,
"projectdownload" : 300,
"end" : 2000,
"error" : 0,
}
self.status_load = {
"init" : "Initializing ...",
"waitload" : "Waiting until server is ready ...",
"filetree" : "Obtaining project tree from server ...",
"projectsignature" : "Querying project signature ...",
"projectparts1" : "Querying project contents ...",
"projectdownload" : "Downloading project files ...",
"end" : "Load finished.",
"error" : "Unexpected error ocurred!!",
}
self.speed_load = {
"init" : 1,
"waitload" : 0.2,
"filetree" : 1,
"projectsignature" : 1,
"projectparts1" : 1,
"projectdownload" : 0.5,
"end" : 25,
"error" : 1,
}
self.waiting = 0
self.timer = QtCore.QTimer(self)
self.connect(self.timer, QtCore.SIGNAL("timeout()"), self.timer_timeout)
self.timer.start(20)
self.status_extra = ""
self.progress_extra = 0
self.statusthread = threading.Thread(target=self.updateLoadStatus)
self.statusthread.daemon = True
self.statusthread.start()
def timer_timeout(self):
self.progress_value = (self.progress_value * 100.0 + (self.progress_extra + self.progress_load[self.load_mode]) * self.speed_load[self.load_mode]) / (self.speed_load[self.load_mode]+100.0)
self.ui.progress.setValue(int(self.progress_value))
status = self.status_load[self.load_mode]
if self.status_extra:
status += " (%s)" % self.status_extra
self.ui.status.setText( status )
if self.prev_load_mode != self.load_mode:
self.changedMode(self.prev_load_mode, self.load_mode)
self.prev_load_mode = self.load_mode
if self.progress_value > 999: self.close()
def changedMode(self,frommode,tomode):
#print frommode, tomode
try:
if tomode == "end": self.finishLoad()
except:
print traceback.format_exc()
def updateLoadStatus(self):
    """Background loader (runs on self.statusthread, daemonized).

    Drives the whole project-load sequence, publishing its state through
    self.load_mode / self.status_extra / self.progress_extra, which the
    GUI timer (timer_timeout) polls to animate the progress bar.

    Phases: init -> waitload -> filetree -> projectsignature ->
    projectparts1 -> projectdownload -> end.  Any exception flips the
    mode to "error" and re-raises.
    """
    try:
        time.sleep(0.05)
        self.load_mode = "init"
        # self.prjconn is set from another thread; poll until it appears.
        while self.prjconn is None:
            time.sleep(0.05)
        time.sleep(0.05)
        self.load_mode = "waitload"
        # Wait until the server side reports the project as loaded.
        while not self.prjconn.call.isLoaded():
            time.sleep(0.5)
        time.sleep(0.05)
        self.load_mode = "filetree"
        self.rprj.filetree = self.prjconn.call.getFileTree()
        time.sleep(0.05)
        self.load_mode = "projectsignature"
        key, size, signature = self.rprj.filetree.call.getNodeSignature()
        self.rprj.project_signature = signature
        self.rprj.project_size = size
        time.sleep(0.05)
        self.load_mode = "projectparts1"
        self.rprj.project_childs = self.rprj.filetree.call.getChildSignature()
        self.rprj.files = {}
        sz = len(self.rprj.project_childs)
        pparts = {}
        # Fire one async RPC per child first (.method, not .call) so the
        # requests overlap; results are consumed in the loop below.
        for i,k in enumerate(self.rprj.project_childs):
            pparts[k] = self.rprj.filetree.method.getNodeHashValue([k])
        for i,k in enumerate(pparts):
            p = i*100/sz
            self.status_extra = "%d%%" % (p)
            self.progress_extra = p
            # .value blocks until that async request has its response.
            nodevalues = pparts[k].value
            for nodehash, nodeval in nodevalues.iteritems():
                digest = nodeval['digest']
                name = nodeval['name']
                # Map of relative file name -> expected content digest.
                self.rprj.files[name] = digest
        p = 100
        self.status_extra = "%d%%" % (p)
        self.progress_extra = 100
        time.sleep(0.05)
        self.status_extra = ""
        self.progress_extra = 0
        self.load_mode = "projectdownload"
        # Local cache directory; filedir()/self.prjname/trb64_name/bz2/
        # b64decode presumably come from elsewhere in this module -- not
        # visible in this chunk, TODO confirm.
        cachedir = filedir(".cache/%s/files" % self.prjname)
        self.projectpath = cachedir
        try:
            os.makedirs(cachedir)
        except os.error:
            # Directory already exists.
            pass
        sz = len(self.rprj.files)
        th1_queue = []
        # Accumulated timings: [setup, rpc fetch, decompress, disk write].
        delta = [0,0,0,0]
        for i,name in enumerate(self.rprj.files):
            p = i*100/sz
            self.progress_extra = p
            def download(name,result):
                # Worker body: fetch one file over RPC, decompress it and
                # write it into the cache directory.
                t1 = time.time()
                fullfilename = os.path.join(cachedir,name)
                cachefilename = os.path.join(cachedir,trb64_name(self.rprj.files[name]))
                folder = os.path.dirname(fullfilename)
                try:
                    os.makedirs(folder)
                except os.error:
                    pass
                basename = os.path.basename(name)
                t2 = time.time()
                value = self.prjconn.call.getFileName(name)
                t3 = time.time()
                # Payload arrives base64-encoded and bz2-compressed.
                f_contents = bz2.decompress(b64decode(value))
                del value
                t4 = time.time()
                f1 = open(fullfilename,"w")
                f1.write(f_contents)
                f1.close()
                t5 = time.time()
                self.status_extra = "%d%% %s" % (p,basename)
                delta[0] += t2-t1
                delta[1] += t3-t2
                delta[2] += t4-t3
                delta[3] += t5-t4
                #newdigest = get_b64digest(f_contents)
                #try:
                #    assert(newdigest == self.rprj.files[name])
                #except AssertionError:
                #    print "PANIC: Digest assertion error for", name
            # Throttle: never keep more than 64 download threads queued.
            while len(th1_queue) > 64:
                if th1_queue[0].is_alive():
                    th1_queue[0].join(3)
                if th1_queue[0].is_alive():
                    print "Stuck:", th1_queue[0].filename
                del th1_queue[0]
            #download(name)
            # Compare the cached copy's digest against the server's; only
            # download when the file is missing or stale.
            fullfilename = os.path.join(cachedir,name)
            sha1_64 = None
            f1 = None
            try:
                f1 = open(fullfilename)
                sha1_64 = get_b64digest(f1.read())
                f1.close()
            except IOError:
                sha1_64 = ""
            if sha1_64 != self.rprj.files[name]:
                th1 = threading.Thread(target=download,kwargs={'name':name,'result':None})
                th1.filename = name
                th1.start()
                th1_queue.append(th1)
        # Drain whatever download threads are still running.
        self.status_extra = "syncing"
        while len(th1_queue) > 0:
            if th1_queue[0].is_alive():
                th1_queue[0].join(3)
                self.status_extra = "syncing"
            if th1_queue[0].is_alive():
                print "Stuck:", th1_queue[0].filename
            del th1_queue[0]
        if delta != [0,0,0,0]:
            print "Time Deltas:", delta
        self.progress_extra = 0
        self.status_extra = ""
        self.load_mode = "end"
    except:
        # Surface the failure on the UI, then let the exception propagate.
        self.load_mode = "error"
        raise
def finishLoad(self):
    # Called (from changedMode, on the GUI thread) once loading reaches
    # "end": attach the RPC-backed QtSql driver to the connection -- first
    # time only -- then create and show the main window and close this one.
    global mainwin
    if not hasattr(self.prjconn,"qtdriver"):
        qtdriver.DEBUG_MODE = True
        self.prjconn.qtdriver = qtdriver.QSqlLlampexDriver(self.prjconn)
        self.prjconn.qtdb = QtSql.QSqlDatabase.addDatabase(self.prjconn.qtdriver, "llampex-qsqlrpcdriver")
        # NOTE(review): assert is stripped under `python -O`, so a failed
        # open would pass silently there -- confirm an explicit check is
        # not wanted instead.
        assert(self.prjconn.qtdb.open("",""))
        qtdriver.DEBUG_MODE = False
    mainwin = LlampexMainWindow(self.projectpath, self.rprj.files,self.prjconn)
    mainwin.setWindowIcon(llampex_icon)
    mainwin.show()
    self.close()
def get_b64digest(text):
    """Return the first 20 characters of the base64-encoded SHA-1 digest
    of *text* (the project's short content-fingerprint format)."""
    return b64encode(hashlib.sha1(text).digest())[:20]
"""
try:
import formimages
except ImportError:
print "formimages.py not found. Probably you forgot to do 'pyrcc forms/..qrc -i formimages.py'"
"""
def main():
    """Entry point: build the QApplication, show the connection dialog and
    run the Qt event loop until the application quits."""
    # Launch with a stylesheet like:
    #   python login.py -stylesheet styles/llampex1/style.css
    # app.setStyleSheet(open(filedir("styles/llampex1/style.css")).read())
    app = QtGui.QApplication(sys.argv)
    dialog = ConnectionDialog()
    dialog.show()
    # exec_() blocks until the last window closes; exit with its code.
    sys.exit(app.exec_())
if __name__ == "__main__": main() | {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,419 | gestiweb/llampex-mini | refs/heads/master | /qt4client/recordform.py | # encoding: UTF-8
import os, os.path, traceback
import logging, imp
from PyQt4 import QtGui, QtCore, uic
from PyQt4 import QtSql
import time
import re
import qsqlrpcdriver.qtdriver as qtdriver
from qsqlmetadatamodel import QSqlMetadataModel, QMetadataModel
import threading
from projectloader import LlampexAction, LlampexTable
from llitemview import LlItemView1
try:
from llampexwidgets import LlItemView, FLTableDB, LlTableDB
except ImportError:
LlItemView = None
FLTableDB = None
print "WARN: *** LlampexWidgets module not installed ***. Record Forms may be not renderable."
def h(*args):
    """Resolve *args* as a path relative to this module's directory,
    returning the fully normalized absolute path."""
    base = os.path.dirname(os.path.abspath(__file__))
    return os.path.realpath(os.path.join(base, *args))
def _getAllWidgets(form):
    """Depth-first collection of every QWidget descendant of *form*.
    Non-widget children (plain QObjects) are skipped and not recursed into."""
    found = []
    for child in form.children():
        if isinstance(child, QtGui.QWidget):
            found.append(child)
            found.extend(_getAllWidgets(child))
    return found
def getAllWidgets(form):
    """Return every named widget under *form*; widgets whose objectName()
    is empty are filtered out."""
    named = []
    for widget in _getAllWidgets(form):
        if widget.objectName():
            named.append(widget)
    return named
def load_module(name, path):
    """Import module *name* from directory *path* via the `imp` machinery.

    Returns the loaded module object, or None when lookup or execution
    fails (the error is logged, never raised)."""
    fp = None
    module = None
    try:
        fp, pathname, description = imp.find_module(name, [path])
        module = imp.load_module(name, fp, pathname, description)
    except Exception:
        logging.exception("FATAL: Error trying to load module %s" % (repr(name)))
    finally:
        # find_module opens the source file; always release it.
        if fp:
            fp.close()
    return module
class LlampexRecordForm(QtGui.QWidget):
def __init__(self, project, actionType, actionobj, prjconn, tmd, model, row):
QtGui.QWidget.__init__(self)
self.project = project
self.actionType = actionType
self.actionobj = actionobj
self.prjconn = prjconn
self.model = model
self.row = row
self.tmd = tmd
try:
ui_filepath = self.actionobj.filedir(self.actionobj.record["form"])
self.ui = uic.loadUi(ui_filepath,self)
except Exception:
self.layout = QtGui.QVBoxLayout()
self.layout.addStretch()
label = QtGui.QLabel("FATAL: An error ocurred trying to load the record form:")
self.layout.addWidget(label)
text = QtGui.QTextBrowser()
text.setText(traceback.format_exc())
self.layout.addWidget(text)
self.layout.addStretch()
self.setLayout(self.layout)
return
self.setChildValuesFormRecord(self.ui)
try:
if "script" in self.actionobj.record:
source_filepath = self.actionobj.filedir(self.actionobj.record["script"])
pathname , sourcename = os.path.split(source_filepath)
self.sourcemodule = load_module(sourcename, pathname)
self.recordscript = self.sourcemodule.RecordScript(self.project, self)
except Exception:
msgBox = QtGui.QMessageBox()
msgBox.setText("FATAL: An error ocurred trying to load the record script:\n" + traceback.format_exc())
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.exec_()
def setChildValuesFormRecord(self, form):
for obj in getAllWidgets(form):
if isinstance(obj, LlItemView):
try:
column = self.tmd.fieldlist.index(obj.fieldName)
except ValueError:
print "ERROR: FieldName %s does not exist" % (obj.fieldName)
else:
widget = LlItemView1(obj)
widget.setObjectName(obj.objectName()+"_editor")
widget.setup()
widget.setModel(self.model)
widget.setPosition(self.row, column)
widget.setTabWidget(obj)
widget.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding,QtGui.QSizePolicy.MinimumExpanding)
obj.replaceEditorWidget(widget)
elif isinstance(obj, LlTableDB):
llampexTableDB = loadLlTableRelation(self.project, self, obj, self.prjconn, self.tmd, self.model, self.row)
def delete(self):
#self.ui.hide()
ret = self.ui.close()
self.ui.deleteLater()
self.actionobj = None
self.prjconn = None
self.model = None
self.row = None
self.tmd = None
self.ui = None
if hasattr(self.recordscript, "delete"):
self.recordscript.delete()
del self.recordscript
self.close()
self.deleteLater()
return ret
def lock(fn):
def myfn(self,*args):
if not self.lock.acquire(False):
print "Blocking"
self.lock.acquire()
try:
return fn(self,*args)
finally:
self.lock.release()
return myfn
class LlampexQDialog( QtGui.QDialog ):
    """Modal dialog hosting one record form with record navigation.

    *widgetFactory* is a callable (see loadActionFormRecord.recordFormFactory)
    that returns a record widget for a given row; the dialog swaps widgets
    whenever the user navigates with first/prev/next/last.
    """
    def __init__( self, project, parent = None, widgetFactory = None, title = "Dialog Title"):
        QtGui.QDialog.__init__(self)
        self.project = project
        # Serializes createNewWidget() calls (see the @lock decorator).
        self.lock = threading.Lock()
        self.widgetFactory = widgetFactory
        self.rowcount = 0
        self.widget = None
        self.setWindowTitle(title)
        #self.resize(300,105)
        self.setParent(parent)
        self.setWindowFlags(QtCore.Qt.Sheet)
        self.setupUi()
    @lock
    def createNewWidget(self, preserveRow = True):
        """Destroy the current record widget (if any) and build a fresh one
        via the factory, optionally preserving the current row position."""
        row = None
        if self.widget:
            if preserveRow: row = self.widget.row
            self.widgetlayout.removeWidget(self.widget)
            self.widget.delete()
            self.widget = None
        self.widget = self.widgetFactory(row = row)
        self.widgetlayout.addWidget(self.widget)
        self.rowcount = self.widget.model.rowCount()
    def createBottomButton(self, text = None, icon = None, action = None, key = None):
        """Create one 38x38 tool button on the bottom bar and return it.

        *action* may be a plain callable or a (receiver, SLOT) tuple;
        *key* is an optional shortcut string (e.g. "F5")."""
        wbutton = QtGui.QToolButton()
        if text:
            wbutton.setText(text)
        if icon:
            wbutton.setIcon(QtGui.QIcon(QtGui.QPixmap(h("./icons/%s.png" % icon))))
        wbutton.setMinimumSize(38, 38)
        wbutton.setMaximumSize(38, 38)
        wbutton.setIconSize(QtCore.QSize(22, 22))
        wbutton.setFocusPolicy(QtCore.Qt.NoFocus)
        if key:
            seq = QtGui.QKeySequence(key)
            wbutton.setShortcut(seq)
        self.buttonlayout.addWidget(wbutton)
        if action:
            if type(action) is tuple:
                self.connect(wbutton, QtCore.SIGNAL("clicked()"), *action)
            else:
                self.connect(wbutton, QtCore.SIGNAL("clicked()"), action)
        return wbutton
    def setupUi( self ):
        """Build the dialog layout: the record widget on top, a status label
        plus navigation/accept/cancel buttons on the bottom row."""
        self.vboxlayout = QtGui.QVBoxLayout(self)
        self.vboxlayout.setMargin(9)
        self.vboxlayout.setSpacing(6)
        self.vboxlayout.setObjectName("vboxlayout")
        self.hboxlayout = QtGui.QHBoxLayout()
        self.hboxlayout.setMargin(0)
        self.hboxlayout.setSpacing(6)
        self.hboxlayout.setObjectName("hboxlayout")
        self.buttonlayout = QtGui.QHBoxLayout()
        self.buttonlayout.setMargin(3)
        self.buttonlayout.setSpacing(0)
        self.statuslabel = QtGui.QLabel()
        self.buttonlayout.addWidget(self.statuslabel)
        self.buttonlayout.addStretch()
        """
        self.buttonbox = QtGui.QDialogButtonBox( QtGui.QDialogButtonBox.Yes | QtGui.QDialogButtonBox.No )
        self.buttonlayout.addWidget(self.buttonbox)
        """
        self.buttonfirst = self.createBottomButton(icon="first", action=self.first, key="F5")
        self.buttonprev = self.createBottomButton(icon="previous", action=self.prev, key="F6")
        self.buttonnext = self.createBottomButton(icon="next", action=self.next, key="F7")
        self.buttonlast = self.createBottomButton(icon="last", action=self.last, key="F8")
        self.buttonaccept = self.createBottomButton(icon="accept", action=(self,QtCore.SLOT("accept()")), key="F9")
        self.buttonacceptcontinue = self.createBottomButton(icon="accepttocontinue", action=self.acceptToContinue, key="F10")
        self.buttoncancel = self.createBottomButton(icon="cancel", action=(self,QtCore.SLOT("reject()")), key="ESC")
        self.widgetlayout = QtGui.QVBoxLayout()
        self.createNewWidget()
        self.vboxlayout.addLayout(self.widgetlayout)
        self.vboxlayout.addLayout(self.buttonlayout)
        self.setLayout(self.vboxlayout)
        self.updateEnableStatus()
        self.updateStatusLabel()
    def getRowCount(self):
        # Row count as cached by the last createNewWidget() call.
        return self.rowcount
    def updateEnableStatus(self):
        # Disable navigation buttons at either end of the record set.
        nonextrows = bool(self.widget.row >= (self.rowcount-1))
        noprevrows = bool(self.widget.row == 0)
        self.buttonfirst.setDisabled(noprevrows)
        self.buttonprev.setDisabled(noprevrows)
        self.buttonnext.setDisabled(nonextrows)
        self.buttonlast.setDisabled(nonextrows)
    def updateStatusLabel(self):
        row = self.widget.row
        self.statuslabel.setText("row number: %d/%d" % (row + 1, self.rowcount))
    def enforceRowLimits(self):
        # Clamp the widget's row into [0, rowcount-1].
        if self.widget.row < 0:
            self.widget.row = 0
        if self.widget.row > self.rowcount-1:
            self.widget.row = self.rowcount-1
    def moveCursor(self, fn):
        """Apply *fn* to the current row number, clamp it, and rebuild the
        record widget at the new position."""
        self.widget.row = fn(self.widget.row)
        self.enforceRowLimits()
        self.createNewWidget()
        self.updateEnableStatus()
        self.updateStatusLabel()
    def next(self): self.moveCursor(lambda row: row+1)
    def prev(self): self.moveCursor(lambda row: row-1)
    def first(self): self.moveCursor(lambda row: 0)
    # last() passes rowcount (one past the end) on purpose: enforceRowLimits
    # clamps it back to the final row.
    def last(self): self.moveCursor(lambda row: self.getRowCount() )
    def acceptToContinue( self ):
        """Commit the current row without closing the dialog."""
        print "AcceptToContinue Button Clicked"
        if self.widget.row is None: return False
        self.widget.model.commitDirtyRow(self.widget.row)
class loadActionFormRecord():
def __init__(self, project = None, parent = 0, windowAction = 'INSERT', actionobj = None, prjconn = None, tmd = None, model = None, rowItemIdx = None):
self.project = project
self.parent = parent
self.windowAction = windowAction
self.actionobj = actionobj
self.rpc = prjconn
self.tmd = tmd
self.model = model
self.row = rowItemIdx
if self.model is None:
msgBox = QtGui.QMessageBox()
msgBox.setText("FATAL: An error ocurred trying to load the table model:\n" + traceback.format_exc())
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.exec_()
return
if self.row is None and self.windowAction != 'INSERT':
msgBox = QtGui.QMessageBox()
msgBox.setText("FATAL: No record data selected:\n" + traceback.format_exc())
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.exec_()
return
if self.windowAction == 'INSERT':
self.model = QMetadataModel(None, self.rpc.qtdb, self.tmd)
self.row = self.model.rowCount()-1
if self.tmd is None:
self.tmd = LlampexTable.tableindex[self.form.actionobj.table]
print "Ui record file : ", self.actionobj.record["form"]
self.showFormRecord()
def recordFormFactory(self, row = None):
if row is None: row = self.row
return LlampexRecordForm(self.project, self.windowAction, self.actionobj, self.rpc, self.tmd, self.model, row)
def showFormRecord(self):
dialog = LlampexQDialog(self.project, self.parent, self.recordFormFactory, "Articulos Form Record")
ret = dialog.exec_();
print "RecordForm: ", ret
class loadLlTableRelation(QtGui.QWidget):
    """Bind a LlTableDB widget embedded in a record form to a related
    (child) table, filtered by the parent record's relation field.

    Builds the QSqlMetadataModel, wires the new/edit/browse/search
    controls and the header context menu, and applies the foreign-key
    filter derived from the parent row.
    """
    def __init__(self, project = None, form = None, obj = None, prjconn = None, parentTmd = None, parentModel = None, parentRow = None):
        QtGui.QWidget.__init__(self, form)
        self.project = project
        self.form = form
        self.parentModel = parentModel
        self.parentRow = parentRow
        self.parentTmd = parentTmd
        self.rpc = prjconn
        self.db = self.rpc.qtdb
        self.obj = obj
        self.actionName = None
        self.tableName = None
        self.fieldRelation = None
        self.foreignField = None
        self.parentCol = None
        self.parentModelIndex = None
        self.fieldRelationValue = None
        self.table = None
        self.model = None
        # (row, col) of the last activated cell in the related table.
        self.row = None
        self.col = None
        try:
            # Relation metadata comes from the Designer widget properties.
            self.actionName = self.obj.getActionName()
            self.tableName = self.obj.getTableName()
            self.fieldRelation = self.obj.getFieldRelation()
            self.foreignField = self.obj.getForeignField()
            self.parentCol = self.parentTmd.fieldlist.index(self.fieldRelation)
            self.parentModelIndex = self.parentModel.index(self.parentRow, self.parentCol)
            # Value of the relation field in the parent row; used below to
            # filter the child table.
            self.fieldRelationValue = self.parentModel.data(self.parentModelIndex, QtCore.Qt.DisplayRole)
        except Exception, e:
            print "Error loading the data table"
            print traceback.format_exc()
        if self.actionName is None:
            self.actionName = self.tableName
        print "Action Name: ", type(self.actionName)
        print "Table Name: ", self.tableName
        self.actionobj = self.project.action_index[unicode(self.actionName)]
        print "Action Object: ", self.actionobj
        try:
            tmd = LlampexTable.tableindex[unicode(self.tableName)]
            self.tmd = tmd
            print "Llampex Table: ", tmd
            print "Code:", tmd.code
            print "Nombre:", tmd.name
            print "PKey:", tmd.primarykey
            print tmd.fieldlist
            print tmd.fields
            print "f0:", tmd.field[0]
            print "f1:", tmd.field[1]
        except Exception, e:
            print "Error loading table metadata:"
            print traceback.format_exc()
            print
        table = self.obj
        table.setSortingEnabled( True )
        try:
            # Header setup: sort indicator + right-click menu with
            # filter / show-columns / hide-column actions.
            tableheader = table.horizontalHeader()
            tableheader.setSortIndicator(0,0)
            tableheader.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
            self.headerMenu = QtGui.QMenu(tableheader)
            action_addfilter = QtGui.QAction(
                QtGui.QIcon(h("icons/page-zoom.png")),
                "Add &Filter...", tableheader)
            action_showcolumns = QtGui.QAction(
                QtGui.QIcon(h("icons/preferences-actions.png")),
                "Show/Hide &Columns...", tableheader)
            action_hidecolumn = QtGui.QAction("&Hide this Column", tableheader)
            action_addfilter.setIconVisibleInMenu(True)
            action_showcolumns.setIconVisibleInMenu(True)
            self.headerMenu.addAction(action_addfilter)
            self.headerMenu.addAction(action_showcolumns)
            self.headerMenu.addAction(action_hidecolumn)
            tableheader.setStretchLastSection(True)
            self.form.connect(tableheader, QtCore.SIGNAL("sortIndicatorChanged(int,Qt::SortOrder)"), self.table_sortIndicatorChanged)
            self.form.connect(tableheader, QtCore.SIGNAL("customContextMenuRequested(const QPoint&)"),self.table_headerCustomContextMenuRequested)
            self.form.connect(action_addfilter, QtCore.SIGNAL("triggered(bool)"), self.action_addfilter_triggered)
            self.form.connect(action_hidecolumn, QtCore.SIGNAL("triggered(bool)"), self.action_hidecolumn_triggered)
        except Exception, e:
            print e
        # set search invisible
        self.form.ui.searchFrame.setVisible(False)
        self.form.connect(table, QtCore.SIGNAL("activated(QModelIndex)"),self.table_cellActivated)
        self.form.connect(table, QtCore.SIGNAL("clicked(QModelIndex)"),self.table_cellActivated)
        self.connect(self.form.ui.btnNew, QtCore.SIGNAL("clicked()"), self.btnNew_clicked)
        self.connect(self.form.ui.btnEdit, QtCore.SIGNAL("clicked(bool)"), self.btnEdit_clicked)
        self.connect(self.form.ui.btnBrowse, QtCore.SIGNAL("clicked(bool)"), self.btnBrowse_clicked)
        self.connect(self.form.ui.searchBox, QtCore.SIGNAL("textChanged(const QString&)"), self.searchBox_changed)
        self.connect(self.form.ui.searchCombo, QtCore.SIGNAL("currentIndexChanged(const QString&)"), self.searchCombo_changed)
        self.model = QSqlMetadataModel(None,self.db, self.tmd)
        self.model.decorations[None] = QtGui.QIcon(h("icons/null.png"))
        self.model.decorations[True] = QtGui.QIcon(h("icons/true.png"))
        self.model.decorations[False] = QtGui.QIcon(h("icons/false.png"))
        # Add fields to combobox
        self.form.ui.searchCombo.addItems(self.model.getHeaderAlias())
        self.modelReady = threading.Event()
        self.modelSet = threading.Event()
        self.setTableFilter()
        self.reload_data()
        self.select_data()
        self.settablemodel()
    def table_cellActivated(self, itemindex):
        # Remember the last clicked/activated cell; btnEdit uses this row.
        self.row, self.col = itemindex.row(), itemindex.column()
        print "Related Table Cell:", self.row, self.col
    def btnNew_clicked(self, checked=False):
        print "Button New clicked"
        load = loadActionFormRecord(self.project, self.form, 'INSERT', self.actionobj, self.rpc, self.tmd, self.model, self.row)
    def btnEdit_clicked(self, checked=False):
        print "Button Edit clicked --> Row: ", self.row
        load = loadActionFormRecord(self.project, self.form, 'EDIT', self.actionobj, self.rpc, self.tmd, self.model, self.row)
    def btnBrowse_clicked(self, checked=False):
        print "Button Browse clicked"
        #change visibility of searchFrame
        self.form.ui.searchFrame.setVisible(not self.form.ui.searchFrame.isVisible())
    def searchBox_changed(self, text):
        print "Search Box changed to ", unicode(text)
        self.setTableFilter(text, self.form.ui.searchCombo.currentText())
        self.model.refresh()
    def searchCombo_changed(self, alias):
        print "Search Combo changed to ", unicode(alias)
        self.setTableFilter(None, alias)
        self.model.refresh()
    def action_addfilter_triggered(self, checked):
        """Prompt for a raw WHERE expression and apply it as the filter."""
        print "Add Filter triggered:", checked
        rettext, ok = QtGui.QInputDialog.getText(self.form, "Add New Filter",
            "Write New WHERE expression:", QtGui.QLineEdit.Normal, self.model.getFilter())
        if ok:
            self.form.ui.searchBox.setText("")
            self.setTableFilter(rettext)
            self.model.refresh()
    def action_hidecolumn_triggered(self, checked):
        print "Hide Column triggered:", checked
        # NOTE(review): this mutates the shared table metadata fieldlist, so
        # the column disappears for every user of this tmd -- confirm this
        # is intended rather than a per-widget hide.
        self.model.tmd.fieldlist.pop(self.lastColumnClicked)
        self.model.refresh()
        self.form.ui.searchCombo.clear()
        self.form.ui.searchCombo.addItems(self.model.getHeaderAlias())
    def table_headerCustomContextMenuRequested(self, pos):
        print pos
        self.lastColumnClicked = self.form.ui.table.horizontalHeader().logicalIndexAt(pos)
        print "We are in column: " + str(self.lastColumnClicked)
        self.headerMenu.exec_( self.form.ui.table.horizontalHeader().mapToGlobal(pos) )
    def table_sortIndicatorChanged(self, column, order):
        print column, order
        self.model.setSort(column,order)
        self.model.refresh()
    def setTableFilter(self, text = None, alias = None):
        """Rebuild the model's WHERE clause: always restrict rows to those
        whose foreign field equals the parent's relation value; optionally
        AND a user search (*text*) against the column matching *alias*.

        NOTE(review): the filter is built by string concatenation from
        model data and the search box -- confirm inputs are sanitized
        upstream (SQL injection risk otherwise).
        """
        if self.fieldRelationValue is None: self.fieldRelationValue = -1
        basicFilter = unicode(self.foreignField)+"="+str(self.fieldRelationValue)
        addFilter = None
        if text:
            if alias is not None:
                # Resolve the combo's alias (or raw name) to a field name.
                fieldname=""
                for i, fname in enumerate(self.tmd.fieldlist):
                    field = self.tmd.field[i]
                    if unicode(field['alias']) == unicode(alias) or unicode(fname) == unicode(alias):
                        fieldname = fname
                        break
                addFilter = " "+fieldname+"::VARCHAR ILIKE '%"+text+"%' "
            else:
                addFilter = text
            basicFilter += " AND " + addFilter
        self.model.setFilter(basicFilter)
    def reload_data(self):
        self.model.setSort(0,0)
    def select_data(self):
        self.model.select()
    def settablemodel(self):
        # Attach the model to the widget and install per-column delegates.
        self.obj.setModel(self.model)
        self.model.autoDelegate(self.obj)
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,420 | gestiweb/llampex-mini | refs/heads/master | /qt4client/widgets/llampexgroupbutton.py | #!/usr/bin/env python
from PyQt4 import QtCore, QtGui
import math, sys, random
import os, os.path
def filepath():
    """Absolute path of the directory containing this module."""
    return os.path.dirname(os.path.abspath(__file__))
def filedir(x):
    """Return *x* unchanged when it is already absolute; otherwise resolve
    it relative to this module's directory."""
    return x if os.path.isabs(x) else os.path.join(filepath(), x)
MIN_DRAG_DISTANCE = 16
class LlampexActionButton(QtGui.QToolButton):
    """Tool button representing one project action; draggable, with the
    action key travelling as the drag's MIME text."""
    def __init__(self, text, key, icon, fn = None, parent=None):
        super(LlampexActionButton, self).__init__(parent)
        # _key: action identifier; _callback: optional click handler fn(key).
        self._key = key
        self._callback = fn
        self.connect(self,QtCore.SIGNAL("clicked()"),self.button_clicked)
        self.setAutoRaise(True)
        self.setText(text)
        if icon:
            self.setIcon(icon)
            self.setIconSize(QtCore.QSize(32,32))
        self.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
        #self.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Fixed)
        self.setSizePolicy(QtGui.QSizePolicy.Preferred,QtGui.QSizePolicy.Preferred)
        self.setMinimumWidth(64)
        self.setMaximumWidth(256)
        self.setMinimumHeight(40)
        self.setMaximumHeight(40)
        self.dragStartPoint = None
    def button_clicked(self):
        # Dispatch to the registered callback, or just log the click.
        if self._callback:
            self._callback(self._key)
        else:
            print "Clicked", self._key
    def mouseMoveEvent(self, e):
        QtGui.QToolButton.mouseMoveEvent(self, e)
        if e.buttons() == QtCore.Qt.LeftButton and self.dragStartPoint:
            x,y = e.x() , e.y()
            ox, oy = self.dragStartPoint
            dx2 = (x - ox) ** 2
            dy2 = (y - oy) ** 2
            d2 = dx2+dy2
            # Only start a drag once the pointer moved far enough from the
            # press point, so ordinary clicks are not hijacked.
            if d2 > MIN_DRAG_DISTANCE ** 2:
                mimeData = QtCore.QMimeData()
                mimeData.setText(self._key)
                drag = QtGui.QDrag(self)
                drag.setPixmap(self.icon().pixmap(32,32))
                drag.setMimeData(mimeData)
                #drag.setHotSpot(e.pos() - self.rect().topLeft())
                dropAction = drag.start(QtCore.Qt.MoveAction)
                # Clear the pressed look: the matching release event never
                # arrives once the drag takes over.
                self.setDown(False)
    def mousePressEvent(self, e):
        QtGui.QToolButton.mousePressEvent(self, e)
        if e.buttons() == QtCore.Qt.LeftButton:
            # Remember where the press happened for the drag threshold above.
            self.dragStartPoint = (e.x(), e.y())
class LlampexGroupButton(QtGui.QGroupBox):
    """Group box laying out LlampexActionButton widgets on a fixed-width
    grid (three buttons per row)."""

    def __init__(self, text = "ActionGroup", parent=None):
        super(LlampexGroupButton, self).__init__(text, parent)
        # NOTE: attribute name "layout" shadows QWidget.layout(); kept for
        # compatibility with existing callers.
        self.layout = QtGui.QGridLayout()
        self.setLayout(self.layout)
        self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        self.ncontrol = 0       # grid cells consumed so far
        self.controlwidth = 3   # cells (buttons) per row

    def _cell(self):
        """Grid (row, column) of the next free cell."""
        col = self.ncontrol % self.controlwidth
        row = (self.ncontrol - col) // self.controlwidth
        return row, col

    def addAction(self, text, key, icon, fn = None):
        """Append an action button at the next free grid cell."""
        row, col = self._cell()
        button = LlampexActionButton(text, key, icon, fn)
        self.ncontrol += 1
        self.layout.addWidget(button, row, col)

    def addSeparator(self, sz = 16):
        """Pad out the current row, then insert a full-width spacer row of
        height *sz*."""
        while self.ncontrol % self.controlwidth > 0:
            self.ncontrol += 1
        row, col = self._cell()  # col is 0 after the padding loop
        self.layout.addItem(QtGui.QSpacerItem(sz, sz), row, col, 1, self.controlwidth)
        self.ncontrol += self.controlwidth
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,421 | gestiweb/llampex-mini | refs/heads/master | /engine/llampexengine.py | #!/usr/bin/env python
# encoding: UTF-8
from optparse import OptionParser
from database import connect, create_all
from config import Config
import engine
import sys
try:
import bjsonrpc
except ImportError:
print "ERROR: Unable to import bjsonrpc (bidirectional JSON-RPC protocol)."
print " * * * Please install bjsonrpc package * * *"
sys.exit(1)
bjsonrpc_required_release = '0.2.0'
try:
assert(bjsonrpc.__release__ >= bjsonrpc_required_release)
except AssertionError:
print "ERROR: bjsonrpc release is %s , and llampex engine requires at least %s" % (bjsonrpc.__release__, bjsonrpc_required_release)
print " * * * Please Upgrade BJSONRPC * * * "
sys.exit(1)
global options
options = None
def main():
    """Parse command-line options, connect to PostgreSQL and run the
    Llampex engine until it terminates."""
    global options
    parser = OptionParser()
    # Defaults come from the static Config module; command-line flags win.
    parser.set_defaults(
        dbname=Config.Database.dbname,
        dbuser=Config.Database.dbuser,
        dbpasswd=Config.Database.dbpasswd,
        dbhost=Config.Database.dbhost,
        dbport=Config.Database.dbport,
        createtables=Config.Database.createtables,
    )
    parser.add_option("--dbname", dest="dbname", metavar="DBNAME",
        help="PostgreSQL database to connect the Llampex Engine")
    parser.add_option("--host", dest="dbhost", metavar="DBHOST",
        help="PostgreSQL host to connect")
    parser.add_option("--port", dest="dbport", type="int", metavar="DBPORT",
        help="PostgreSQL port to connect")
    parser.add_option("--user", dest="dbuser", metavar="DBUSER",
        help="PostgreSQL User")
    parser.add_option("--pass", dest="dbpasswd", metavar="DBUSER",
        help="PostgreSQL Password for User")
    parser.add_option("--createtables", dest="createtables", action="store_true",
        help="Creates the needed tables if aren't in the database yet")
    parser.add_option("-q", "--quiet",
        action="store_false", dest="verbose", default=True,
        help="don't print status messages to stdout")
    (options, args) = parser.parse_args()
    # Collect every db* option into a plain dict for connect().
    dboptions = dict(
        (key, getattr(options, key))
        for key in dir(options) if key.startswith("db")
    )
    connect(dboptions, options.verbose)
    if options.createtables:
        create_all()
    engine.start(options.verbose)
    engine.wait()
if __name__ == "__main__":
main()
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,422 | gestiweb/llampex-mini | refs/heads/master | /projects/erp/generic/master/scripts/masterbasic3.py | # encoding: UTF-8
import os.path, traceback
from PyQt4 import QtGui, QtCore, uic
from PyQt4 import QtSql
from masterform import LlampexMasterForm
from recordform import loadActionFormRecord #LlampexRecordForm, LlampexQDialog
import time
import re
import qsqlrpcdriver.qtdriver as qtdriver
import threading
import traceback
from projectloader import LlampexTable
from qsqlmetadatamodel import QSqlMetadataModel, ItemComboDelegate
def h(*args):
    """Resolve *args* as a path relative to this module's directory,
    returning the fully normalized absolute path."""
    base = os.path.dirname(os.path.abspath(__file__))
    return os.path.realpath(os.path.join(base, *args))
class MyItemView(QtGui.QAbstractItemView):
def setup(self):
print "setup"
self.colwidth = {}
self.row = 0
self.col = 0
self.margin = (3,3,3,3)
self.item = None
self.persistentEditor = None
"""
self.delegate = QtGui.QStyledItemDelegate(self)
"""
self.setSizePolicy(QtGui.QSizePolicy.Preferred,QtGui.QSizePolicy.Minimum)
self.setEditTriggers(QtGui.QAbstractItemView.DoubleClicked | QtGui.QAbstractItemView.EditKeyPressed)
self.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.viewport().setSizePolicy(QtGui.QSizePolicy.Preferred,QtGui.QSizePolicy.Minimum)
self.setTabKeyNavigation(False)
def minimumSizeHint(self):
w = self.colwidth.get(self.col, 50)
sz = QtCore.QSize(w,16)
return sz
"""
def setDelegate(self, delegate):
self.delegate = delegate
"""
def setPosition(self,row, col):
self.row = row
self.col = col
self.updatePosition()
def setRow(self, row):
self.row = row
self.updatePosition()
def setCol(self, col):
self.col = col
self.updatePosition()
def focusInEvent(self, event):
QtGui.QAbstractItemView.focusInEvent(self,event)
if self.item:
#print "focus IN:", self.row, self.col
# TODO: Devuelve error si no se puede editar o si ya estaba editandose
self.edit(self.item)
def focusOutEvent(self, event):
QtGui.QAbstractItemView.focusOutEvent(self,event)
#if self.item:
# #print "focus OUT:", self.row, self.col
def updatePosition(self):
model = self.model()
if self.persistentEditor:
self.closePersistentEditor(self.item)
self.item = model.index(self.row, self.col)
fnAutoDelegate = getattr(model, "autoDelegate", None)
if fnAutoDelegate: fnAutoDelegate(self)
smodel = self.selectionModel()
smodel.setCurrentIndex(self.item, QtGui.QItemSelectionModel.NoUpdate);
#self.openPersistentEditor(self.item)
#self.persistentEditor = True
#szh = self.sizeHint()
#szh += QtCore.QSize(15,15)
#self.resize(szh)
def sizeHint(self):
#sz = QtGui.QAbstractItemView.sizeHint(self)
#sz.setHeight(32)
w = self.colwidth.get(self.col, 50)
sz = QtCore.QSize(w+32,32)
return sz
if self.item:
sz = self.sizeHintForIndex(self.item)
return sz
def setColumnWidth(self, col, width):
self.colwidth[col] = width
"""
def setDelegateForColumn(self, col, delegate):
if col != self.col: return
self.delegate = delegate
"""
def paintEvent(self, pEvent):
if not self.item: return
S = QtGui.QStyle
focus = self.hasFocus()
viewstate = self.state()
option = self.viewOptions()
state = option.state
enabled = bool(state & S.State_Enabled)
item = self.item # Element to be drawn
if focus:
option.state |= S.State_HasFocus
if viewstate & S.State_Editing:
option.state |= S.State_Editing
if viewstate & S.State_MouseOver:
option.state |= S.State_MouseOver
else:
option.state &= ~S.State_MouseOver
painter = QtGui.QStylePainter(self.viewport())
option.rect = self.visualRect(item)
#painter.save()
delegate = self.itemDelegate(item)
#painter.setClipRegion(QtGui.QRegion(option.rect))
delegate.paint(painter, option, item)
#painter.restore()
# virtual QModelIndex indexAt ( const QPoint & point ) const = 0
def indexAt(self, point):
return self.item
# virtual void scrollTo ( const QModelIndex & index, ScrollHint hint = EnsureVisible ) = 0
def scrollTo(self, index, hint):
#print "scrollTo", index,hint
return
# virtual QRect visualRect ( const QModelIndex & index ) const = 0
def visualRect(self, index):
if index != self.item: return QtCore.QRect()
rect = self.rect()
margin = self.margin
rect.adjust(margin[0],margin[1],-margin[2],-margin[3])
#szh = self.sizeHint()
#print rect, szh
return rect
# *** PROTECTED *** / INTERNAL FUNCTIONS::
# virtual int horizontalOffset () const = 0
def horizontalOffset(self):
"Returns the horizontal offset of the view"
return int(self.col)
# virtual int verticalOffset () const = 0
def verticalOffset(self):
"Returns the vertical offset of the view"
return int(self.row)
# virtual bool isIndexHidden ( const QModelIndex & index ) const = 0
def isIndexHidden(self, index):
"""
Returns true if the item referred to by the given index is hidden
in the view, otherwise returns false.
Hiding is a view specific feature. For example in TableView a column
can be marked as hidden or a row in the TreeView.
"""
row = index.row()
col = index.col()
if (row,col) == (self.row, self.col): return True
else: return False
# virtual QModelIndex moveCursor ( CursorAction cursorAction, Qt::KeyboardModifiers modifiers ) = 0
def moveCursor(self, cursorAction, kbmodifiers):
"""
Returns a QModelIndex object pointing to the next object in the
view, based on the given cursorAction and keyboard modifiers
specified by modifiers.
"""
w = None
parent = None
thisparent = self.parentWidget()
if cursorAction == QtGui.QAbstractItemView.MoveNext:
w = self
for i in range(10):
w = w.nextInFocusChain()
parent = w.parentWidget()
if parent == thisparent: break
elif cursorAction == QtGui.QAbstractItemView.MovePrevious:
w = self
for i in range(10):
w = w.previousInFocusChain()
parent = w.parentWidget()
if parent == thisparent: break
else:
#print "moveCursor:", cursorAction, kbmodifiers
pass
if w:
parent = w.parentWidget()
#print "moveCursor, giving focus:", w.__class__.__name__
#try: print w.row, w.col
#except Exception, e: print e
#print parent
#print thisparent
QtCore.QTimer.singleShot(50,w,QtCore.SLOT("setFocus()"))
return self.item
# virtual void setSelection ( const QRect & rect, QItemSelectionModel::SelectionFlags flags ) = 0
def setSelection(self, rect, flags):
"""
Applies the selection flags to the items in or touched by
the rectangle, rect.
When implementing your own itemview setSelection should
call selectionModel()->select(selection, flags) where selection
is either an empty QModelIndex or a QItemSelection that contains
all items that are contained in rect.
"""
# Does nothing.
return
# virtual QRegion visualRegionForSelection ( const QItemSelection & selection ) const = 0
def visualRegionForSelection(self, selection):
"""
Returns the region from the viewport of the items in the given selection.
"""
# TODO: Implementar esta funcion ?
return QtGui.QRegion(self.visualRect(self.item))
class MyWidget(QtGui.QWidget):
    """Plain widget that paints a single model index through a styled delegate."""
    def setup(self):
        """Prepare the style option and delegate; call once after construction."""
        style = QtGui.QStyle
        self.itemindex = None
        self.option = QtGui.QStyleOptionViewItemV4()
        self.option.rect = QtCore.QRect(0, 0, 300, 24)
        self.option.state = style.State_Active | style.State_Enabled
        self.delegate = QtGui.QStyledItemDelegate(self)
    def mouseDoubleClickEvent(self, event):
        """Mark the option as being edited and repaint."""
        self.option.state |= QtGui.QStyle.State_Editing
        self.update()
    def sizeHint(self):
        """Delegate-provided hint when an index is set, else a fixed 120x24."""
        if not self.itemindex:
            return QtCore.QSize(120, 24)
        return self.delegate.sizeHint(self.option, self.itemindex)
    def setItemIndex(self, itemindex):
        """Attach *itemindex* as the displayed index and repaint."""
        self.itemindex = itemindex
        self.update()
    def paintEvent(self, pEvent):
        """Render the current index (if any) via the delegate."""
        painter = QtGui.QPainter(self)
        if self.itemindex:
            self.delegate.paint(painter, self.option, self.itemindex)
class MasterScript(object):
    """Controller glue for a master (table-list) form.

    Wires the form's table view, header context actions and per-field
    item-views to a QSqlMetadataModel built from the table's metadata.
    """
    def __init__(self, form):
        self.form = form
        self.rpc = self.form.prjconn    # project RPC connection
        self.db = self.rpc.qtdb         # Qt database handle from the RPC layer
        self.table = self.form.actionobj.table
        self.model = None
        self.row = None                 # row of the last activated cell
        self.col = None                 # column of the last activated cell
        print
        # NOTE(review): LlampexTable, h() and MyItemView are not imported in
        # the header shown here -- presumably injected/defined elsewhere in
        # this module; confirm they resolve at runtime.
        try:
            tmd=LlampexTable.tableindex[self.table]
            self.tmd = tmd
            # Debug dump of the table metadata.
            print tmd
            print "Code:", tmd.code
            print "Nombre:", tmd.name
            print "PKey:", tmd.primarykey
            print tmd.fieldlist
            print tmd.fields
            print "f0:", tmd.field[0]
            print "f1:", tmd.field[1]
        except Exception, e:
            # Best-effort: on failure self.tmd stays unset and the local
            # `tmd` is undefined, which would break the model creation below.
            print "Error loading table metadata:"
            print traceback.format_exc()
        print
        table = self.form.ui.table
        table.setSortingEnabled( True )
        try:
            # Header setup: sort indicator plus a context menu offering
            # filter and column-visibility actions.
            tableheader = table.horizontalHeader()
            tableheader.setSortIndicator(0,0)
            tableheader.setContextMenuPolicy( QtCore.Qt.ActionsContextMenu )
            action_addfilter = QtGui.QAction(
                QtGui.QIcon(h("../../icons/page-zoom.png")),
                "Add &Filter...", tableheader)
            action_showcolumns = QtGui.QAction(
                QtGui.QIcon(h("../../icons/preferences-actions.png")),
                "Show/Hide &Columns...", tableheader)
            action_hidecolumn = QtGui.QAction("&Hide this Column", tableheader)
            action_addfilter.setIconVisibleInMenu(True)
            action_showcolumns.setIconVisibleInMenu(True)
            tableheader.addAction(action_addfilter)
            tableheader.addAction(action_showcolumns)
            tableheader.addAction(action_hidecolumn)
            tableheader.setStretchLastSection(True)
            self.form.connect(tableheader, QtCore.SIGNAL("sortIndicatorChanged(int,Qt::SortOrder)"), self.table_sortIndicatorChanged)
            self.form.connect(tableheader, QtCore.SIGNAL("customContextMenuRequested(QPoint &)"),self.table_headerCustomContextMenuRequested)
            self.form.connect(action_addfilter, QtCore.SIGNAL("triggered(bool)"), self.action_addfilter_triggered)
        except Exception, e:
            print e
        self.form.connect(table, QtCore.SIGNAL("activated(QModelIndex)"),self.table_cellActivated)
        self.form.connect(table, QtCore.SIGNAL("clicked(QModelIndex)"),self.table_cellActivated)
        self.form.connect(self.form.ui.btnNew, QtCore.SIGNAL("clicked()"), self.btnNew_clicked)
        self.model = QSqlMetadataModel(None,self.db, tmd)
        # Decoration icons for tri-state boolean cells.
        self.model.decorations[None] = QtGui.QIcon(h("../../icons/null.png"))
        self.model.decorations[True] = QtGui.QIcon(h("../../icons/true.png"))
        self.model.decorations[False] = QtGui.QIcon(h("../../icons/false.png"))
        self.modelReady = threading.Event()
        self.modelSet = threading.Event()
        self.reload_data()
        self.select_data()
        self.settablemodel()
        # One MyItemView per field, laid out horizontally under the table.
        layout = self.form.ui.layout()
        self.fieldlayout = QtGui.QHBoxLayout()
        self.fieldlayout.setSpacing(1)
        self.fieldviews = []
        for i, name in enumerate(self.tmd.fieldlist):
            myitemview = MyItemView(self.form.ui)
            myitemview.setup()
            myitemview.setModel(self.model)
            myitemview.setCol(i)
            self.fieldlayout.addWidget(myitemview)
            self.fieldviews.append(myitemview)
        layout.addLayout( self.fieldlayout )
    def table_cellActivated(self, itemindex):
        """Track the activated cell and point every field view at its row."""
        self.row, self.col = itemindex.row(), itemindex.column()
        print "Cell:", self.row, self.col
        for fieldview in self.fieldviews:
            fieldview.setRow(self.row)
    def btnNew_clicked(self):
        """Open the record form in INSERT mode for the current row context."""
        print "Button New clicked --> Row: ", self.row
        # NOTE(review): loadActionFormRecord is not imported in the visible
        # header -- confirm it is defined elsewhere in this file.
        load = loadActionFormRecord(self.form, 'INSERT', self.form.actionobj, self.rpc, self.tmd, self.model, self.row)
    def action_addfilter_triggered(self, checked):
        """Prompt for a raw WHERE expression and apply it to the model."""
        print "Add Filter triggered:", checked
        # NOTE(review): QSqlMetadataModel stores `filter` as a plain
        # attribute; calling self.model.filter() here looks like a latent
        # TypeError -- confirm against qsqlmetadatamodel.py.
        rettext, ok = QtGui.QInputDialog.getText(self.form, "Add New Filter",
            "Write New WHERE expression:", QtGui.QLineEdit.Normal, self.model.filter())
        self.model.setFilter(rettext)
        self.select_data()
    def table_headerCustomContextMenuRequested(self, point):
        """Debug hook for header context-menu requests (no menu built yet)."""
        print "table_headerCustomContextMenuRequested" , point
    def table_sortIndicatorChanged(self, column, order):
        """Debug hook for header sort changes (model re-sort not wired yet)."""
        print "table_sortIndicatorChanged", column, order
    def reload_data(self):
        """Reset the model sort to the first column, ascending."""
        self.model.setSort(0,0)
    def select_data(self):
        """Re-run the model's SELECT with the current filter/sort."""
        self.model.select()
    def settablemodel(self):
        """Attach the model to the table view and install its delegates."""
        self.form.ui.table.setModel(self.model)
        self.model.autoDelegate(self.form.ui.table)
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,423 | gestiweb/llampex-mini | refs/heads/master | /qt4client/mainwindow.py | #!/usr/bin/env python
# encoding: UTF-8
import os.path, traceback
import yaml
from PyQt4 import QtGui, QtCore, uic
from widgets import llampexmainmenu, llampexgroupbutton, llampextoolbar
from masterform import LlampexMasterForm
import projectloader
__version__ = "0.0.1"
def filepath():
    """Absolute path of the directory containing this module."""
    return os.path.abspath(os.path.dirname(__file__))

def filedir(x):
    """Resolve *x* relative to this module's directory.

    Absolute paths are returned unchanged.
    """
    return x if os.path.isabs(x) else os.path.join(filepath(), x)
class LlampexMdiSubWindow(QtGui.QMdiSubWindow):
windowdict = {}
def __init__(self, windowkey, widget):
QtGui.QMdiSubWindow.__init__(self)
self.windowkey = windowkey
if self.windowkey in self.windowdict:
print "!Window %s already opened, closing prior to creating it again." % self.windowkey
self.windowdict[self.windowkey].close()
self.windowdict[self.windowkey] = self
self.setWidget(widget)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.widget = widget
@classmethod
def close_window(cls,key):
if key in cls.windowdict:
self = cls.windowdict[key]
self.close()
return True
return False
def closeEvent(self,event):
#print "Closing", self.windowkey
try:
if self.windowkey in self.windowdict:
del self.windowdict[self.windowkey]
finally:
event.accept()
class LlampexMainWindow(QtGui.QMainWindow):
    """Main client window: dock menu of areas/modules, toolbar with search,
    and a tabbed MDI area hosting module pages, search results and forms.
    Falls back to a hard-coded demo layout when the project fails to load.
    """
    def prjdir(self, x):
        """Resolve *x* relative to the loaded project's path."""
        return os.path.join(self.projectpath,x)
    def __init__(self, projectpath, projectfiles, prjconn):
        QtGui.QMainWindow.__init__(self)
        self.prjconn = prjconn # rpc connection for project.
        self.mainmenu = llampexmainmenu.LlampexDockMainMenu()
        self.setWindowTitle("Llampex Qt4 Client")
        self.projectpath = projectpath
        self.projectfiles = projectfiles
        self.searchIcon = QtGui.QIcon("icons/search.png")
        try:
            self.prjloader = projectloader.ProjectLoader(projectpath,projectfiles)
            self.project = self.prjloader.load()
            self.load()
        except Exception:
            # Any failure while loading the real project degrades to the
            # static demo menu instead of aborting.
            print traceback.format_exc()
            print "Some error ocurred when loading your project. Loading default demo."
            self.load_demo()
    def load(self):
        """Build the dock menu from the project's areas and modules."""
        self.modules = {}
        self.actions = {}
        for area_code in self.project.area_list:
            areaobj = self.project.area[area_code]
            icon = None
            item = self.mainmenu.addItem(unicode(areaobj.name))
            item.setDefaultCallback(self.menubutton_clicked)
            if areaobj.icon:
                iconfile = areaobj.filedir(areaobj.icon)
                icon = QtGui.QIcon(iconfile)
                item.button.setIcon(icon)
            if areaobj.description:
                item.button.setDescription(areaobj.description)
                item.button.setMaximumHeight(64)
            else:
                item.button.setMaximumHeight(32)
            for module_code in areaobj.module_list:
                moduleobj = areaobj.module[module_code]
                # Module keys are "<area>.<module>".
                module_key = "%s.%s" % (areaobj.code,moduleobj.code)
                subitem = item.addItem(unicode(moduleobj.name),module_key)
                icon = None
                if moduleobj.icon:
                    iconfile = moduleobj.filedir(moduleobj.icon)
                    icon = QtGui.QIcon(iconfile)
                    subitem.setIcon(icon)
                self.modules[module_key] = (icon, moduleobj)
        self.modulesubwindow = {}
        self.loadActions()
        self.finish_load()
    def finish_load(self):
        """Assemble dock, toolbar, search box and the tabbed MDI area."""
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea,self.mainmenu)
        mainframe = QtGui.QFrame(self)
        mainframe.setLayout(QtGui.QVBoxLayout(mainframe))
        self.toolframe = llampextoolbar.LlampexToolBar(self)
        self.searchBox = llampextoolbar.LlampexSearchBox(self.toolframe)
        self.connect(self.searchBox, QtCore.SIGNAL("returnPressed()"), self.searchPerformed)
        self.toolframe.layout().addWidget(self.searchBox,0,QtCore.Qt.AlignRight)
        self.toolframe.loadSavedLinks()
        mainframe.layout().addWidget(self.toolframe)
        self.mdiarea = QtGui.QMdiArea()
        self.mdiarea.setBackground(QtGui.QBrush())
        self.mdiarea.setViewMode(QtGui.QMdiArea.TabbedView)
        self.mdiarea.setDocumentMode(True)
        #self.setCentralWidget(self.mdiarea)
        mainframe.layout().addWidget(self.mdiarea)
        self.setCentralWidget(mainframe)
        #Set the tabbar of the mdiarea movable
        for tab in self.mdiarea.findChildren(QtGui.QTabBar): tab.setMovable(True);
    def loadActions(self):
        """Index every project action under its "<area>.<module>.<action>" key."""
        for code, action in self.project.action_index.iteritems():
            icon = None
            if action.icon:
                iconfile = action.filedir(action.icon)
                icon = QtGui.QIcon(iconfile)
            action_key = "%s.%s.%s" % (action.parent.parent.parent.code, action.parent.parent.code, action.code)
            self.actions[action_key] = (action.parent.parent.parent.code+action.parent.parent.code, icon, action)
    def searchPerformed(self):
        """Search actions (and their tables' field names/aliases) for the
        search-box text and present results in a new MDI subwindow."""
        if (not self.searchBox.text().isEmpty()):
            search = unicode(self.searchBox.text()).lower()
            widget = QtGui.QWidget()
            widget.layout = QtGui.QVBoxLayout()
            found = []
            foundInTable = []
            for code, action in self.project.action_index.iteritems():
                aname = unicode(action).lower()
                if aname.find(search)>=0:
                    found+=[action]
                else:
                    # No action-name match: scan the action's table fields.
                    for key, value in self.project.table_index[action.table].fields.iteritems():
                        fname = unicode(key).lower()
                        falias = unicode(value['alias']).lower()
                        if fname.find(search)>=0 or falias.find(search)>=0:
                            foundInTable+=[action]
            if (found):
                groupbox = llampexgroupbutton.LlampexGroupButton("In Actions")
                for action in sorted(found):
                    icon = None
                    if action.icon:
                        iconfile = action.filedir(action.icon)
                        icon = QtGui.QIcon(iconfile)
                    action_key = "%s.%s.%s" % (action.parent.parent.parent.code, action.parent.parent.code, action.code)
                    groupbox.addAction(action.name, action_key, icon, self.actionbutton_clicked)
                widget.layout.addWidget(groupbox)
            if (foundInTable):
                groupboxTables = llampexgroupbutton.LlampexGroupButton("In tables:")
                for action in sorted(foundInTable):
                    icon = None
                    if action.icon:
                        iconfile = action.filedir(action.icon)
                        icon = QtGui.QIcon(iconfile)
                    action_key = "%s.%s.%s" % (action.parent.parent.parent.code, action.parent.parent.code, action.code)
                    groupboxTables.addAction(action.name, action_key, icon, self.actionbutton_clicked)
                widget.layout.addWidget(groupboxTables)
            if (not found and not foundInTable):
                widget.layout.addWidget(QtGui.QLabel("No results found for "+search))
            widget.setLayout(widget.layout)
            scrollarea = QtGui.QScrollArea()
            scrollarea.setWidget(widget)
            scrollarea.setWidgetResizable(True)
            scrollarea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
            scrollarea.setMinimumWidth(250)
            subwindow = LlampexMdiSubWindow("Search:"+search,scrollarea)
            self.mdiarea.addSubWindow(subwindow)
            subwindow.setWindowTitle("Search: "+search)
            subwindow.setWindowIcon(self.searchIcon)
            subwindow.show()
            self.mdiarea.setActiveSubWindow(subwindow)
            subwindow.setWindowState(QtCore.Qt.WindowMaximized)
    def menubutton_clicked(self,key):
        """Open (or re-open) the module page for *key* in the MDI area."""
        #print "menubutton clicked:", key
        LlampexMdiSubWindow.close_window(key)
        """
        if key in self.modulesubwindow:
            subwindow = self.modulesubwindow[key]
            subwindow.close()
            del self.modulesubwindow[key]
        """
        widget = QtGui.QWidget()
        widget.layout = QtGui.QVBoxLayout()
        scrollarea = QtGui.QScrollArea()
        scrollarea.setWidget(widget)
        scrollarea.setWidgetResizable(True)
        scrollarea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        scrollarea.setMinimumWidth(250)
        subwindow = LlampexMdiSubWindow(key,scrollarea)
        self.mdiarea.addSubWindow(subwindow)
        moduleicon, moduleobj = self.modules[key]
        self.modulesubwindow[key] = subwindow
        for group_code in moduleobj.group_list:
            groupboxobj = moduleobj.group[group_code]
            groupbox = llampexgroupbutton.LlampexGroupButton(groupboxobj.name)
            oldweight = None
            for action_code in groupboxobj.action_list:
                actionobj = groupboxobj.action[action_code]
                icon = None
                if actionobj.icon:
                    iconfile = actionobj.filedir(actionobj.icon)
                    icon = QtGui.QIcon(iconfile)
                try:
                    # A change in the leading weight digit starts a new
                    # visual group: insert a separator.
                    if oldweight and actionobj.weight[0] != oldweight[0]:
                        groupbox.addSeparator()
                except:
                    print repr(actionobj.weight)
                    print repr(oldweight)
                    raise
                action_key = "%s.%s" % (key,action_code)
                groupbox.addAction(actionobj.name, action_key, icon, self.actionbutton_clicked)
                oldweight = actionobj.weight
            groupbox.addSeparator(0)
            widget.layout.addWidget(groupbox)
        widget.setLayout(widget.layout)
        subwindow.show()
        subwindow.setWindowTitle(moduleobj.name)
        subwindow.setWindowIcon(moduleicon)
        self.mdiarea.setActiveSubWindow(subwindow)
        subwindow.setWindowState(QtCore.Qt.WindowMaximized)
    def actionbutton_clicked(self, key):
        """Open the master form for action *key* in a new MDI subwindow."""
        print "action clicked", key
        subwindowkey, icon, actionobj = self.actions[key]
        if subwindowkey in self.modulesubwindow:
            subwindow = self.modulesubwindow[subwindowkey]
            if LlampexMdiSubWindow.close_window(subwindowkey):
                del self.modulesubwindow[subwindowkey]
        widget = LlampexMasterForm(self.project, key, actionobj, self.prjconn)
        scrollarea = QtGui.QScrollArea()
        scrollarea.setWidget(widget)
        scrollarea.setWidgetResizable(True)
        scrollarea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        scrollarea.setMinimumWidth(250)
        subwindow = LlampexMdiSubWindow(key,scrollarea)
        self.mdiarea.addSubWindow(subwindow)
        subwindow.setWindowTitle(actionobj.name)
        subwindow.setWindowIcon(icon)
        subwindow.show()
        self.mdiarea.setActiveSubWindow(subwindow)
        subwindow.setWindowState(QtCore.Qt.WindowMaximized)
    def load_demo(self):
        """Build the static demo menu used when project loading fails."""
        self.menubutton_clicked = self.menubutton_clicked_demo
        icon_fact = QtGui.QIcon(self.prjdir("facturacion/facturacion/flfacturac.xpm"))
        icon_cont = QtGui.QIcon(self.prjdir("contabilidad/principal/flcontppal.xpm"))
        icon_fppl = QtGui.QIcon(self.prjdir("facturacion/principal/flfactppal.xpm"))
        item_favr = self.mainmenu.addItem(u"Favoritos")
        item_favr.setDefaultCallback(self.menubutton_clicked)
        item_favr.button.setIcon(icon_fppl)
        item_favr.button.setDescription(u"Acciones guardadas")
        item_favr.button.setMaximumHeight(50)
        item_favr.addItem(u"Artículos").setIcon(icon_fppl)
        item_favr.addItem(u"Clientes")
        item_favr.addItem(u"Proveedores")
        item_favr.addItem(u"Fact. clientes").setIcon(icon_fact)
        item_favr.addItem(u"Fact. proveedores").setIcon(icon_fact)
        item_favr.addItem(u"Ventas artículo")
        item_fact = self.mainmenu.addItem(u"Facturación")
        item_fact.setDefaultCallback(self.menubutton_clicked)
        item_fact.button.setDescription(u"Artículos, Clientes, Fra...")
        item_fact.button.setIcon(icon_fact)
        item_fact.button.setMaximumHeight(50)
        item_fact.addItem(u"Almacén")
        item_fact.addItem(u"Informes")
        item_fact.addItem(u"Principal").setIcon(icon_fppl)
        item_fact.addItem(u"Tesorería")
        item_fact.addItem(u"Facturación").setIcon(icon_fact)
        item_cont = self.mainmenu.addItem(u"Contabilidad")
        item_cont.setDefaultCallback(self.menubutton_clicked)
        item_cont.button.setDescription(u"Asientos, Amortizaciones..")
        item_cont.button.setMaximumHeight(50)
        item_cont.button.setIcon(icon_cont)
        item_cont.addItem(u"Informes")
        item_cont.addItem(u"Principal").setIcon(icon_cont)
        item_cont.addItem(u"Modelos")
        item_sist = self.mainmenu.addItem(u"Sistema")
        item_sist.setDefaultCallback(self.menubutton_clicked)
        item_sist.button.setDescription(u"Configuración, otros..")
        item_sist.button.setMaximumHeight(50)
        item_sist.addItem(u"Configuración")
        item_sist.addItem(u"Datos")
        item_sist.addItem(u"Exportación")
        self.finish_load()
    def menubutton_clicked_demo(self,key):
        """Demo-mode page builder: static group boxes with sample actions."""
        # print "menubutton clicked:", key
        iconlist = [
            'forms/accessories-dictionary.png',
            'forms/accessories-text-editor.png',
            'forms/acroread.png',
            'forms/akonadi.png',
            'forms/akregator.png',
            'forms/alevt.png',
            'forms/application-sxw.png',
            'forms/settings.png'
            ]
        icon = []
        for i in iconlist:
            icon.append(QtGui.QIcon(filedir(i)))
        widget = QtGui.QWidget()
        widget.layout = QtGui.QVBoxLayout()
        groupbox = llampexgroupbutton.LlampexGroupButton("Principal")
        groupbox.addAction("Empresa", "empresa", icon[0])
        groupbox.addSeparator()
        groupbox.addAction("Clientes", "clientes", icon[1])
        groupbox.addAction("Proveedores", "proveedores", icon[2])
        groupbox.addSeparator(0)
        widget.layout.addWidget(groupbox)
        groupbox = llampexgroupbutton.LlampexGroupButton("Fiscalidad")
        groupbox.addAction("Ejercicios\nFiscales", "ejercicios", icon[3])
        groupbox.addAction("Series de\nFacturacion", "series", icon[4])
        groupbox.addAction("Impuestos", "impuestos", icon[5])
        groupbox.addSeparator(0)
        widget.layout.addWidget(groupbox)
        groupbox = llampexgroupbutton.LlampexGroupButton("Tablas Generales")
        groupbox.addAction("Cuentas Bancarias", "cuentasbco", icon[6])
        groupbox.addAction("Bancos", "bancos", icon[3])
        groupbox.addSeparator()
        groupbox.addAction("Descuentos", "dtoclientes", icon[7])
        groupbox.addAction("Tipos\nde Pago", "tipospago", icon[0])
        groupbox.addAction("Formas\nde Pago", "formaspago", icon[1])
        groupbox.addAction("Tipos\nde Rappel", "tiposrappel", icon[2])
        groupbox.addSeparator()
        groupbox.addAction("Agentes", "", icon[3])
        groupbox.addAction("Departamentos", "", icon[3])
        groupbox.addAction("Usuarios", "", icon[3])
        groupbox.addSeparator()
        groupbox.addAction("Agencias\nTransporte", "", icon[3])
        groupbox.addSeparator(0)
        widget.layout.addWidget(groupbox)
        widget.setLayout(widget.layout)
        scrollarea = QtGui.QScrollArea()
        scrollarea.setWidget(widget)
        scrollarea.setWidgetResizable(True)
        scrollarea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        scrollarea.setMinimumWidth(250)
        subwindow = self.mdiarea.addSubWindow(scrollarea)
        subwindow.show()
        subwindow.setWindowTitle(key)
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,424 | gestiweb/llampex-mini | refs/heads/master | /qt4client/qsqlmetadatamodel.py | #!/usr/bin/env python
# encoding: UTF-8
import random
import os.path
from datetime import datetime, date
import sys
from PyQt4 import QtGui, QtCore, uic, QtSql
# Custom item-data role: the model answers this role with a field's
# display-format string (consumed by ItemBasicDelegate.createEditor and
# QMetadataModel.data).
QtFormatRole = QtCore.Qt.UserRole + 1
class utilities():
    """Memoising factories for QColor / QBrush objects, keyed by colour spec."""
    def __init__(self):
        self.colors = {}   # colour spec -> QColor
        self.brushes = {}  # colour spec -> QBrush
    def getColor(self, color):
        """Return the cached QColor for *color*, creating it on first use."""
        try:
            return self.colors[color]
        except KeyError:
            self.colors[color] = QtGui.QColor(color)
            return self.colors[color]
    def getBrush(self, color):
        """Return the cached QBrush for *color*, creating it on first use."""
        try:
            return self.brushes[color]
        except KeyError:
            self.brushes[color] = QtGui.QBrush(self.getColor(color))
            return self.brushes[color]
def autoDelegateMetadata(self, itemview):
    """Install per-column delegates on *itemview* from the table metadata.

    Boolean columns get a shared Si/No/-- combo delegate, columns with an
    optionlist get their own combo delegate, everything else the basic
    styled delegate.  If the view supports setColumnWidth, a width is
    estimated (once, then cached in self.columnWidth) from size hints of
    up to the first 20 rows.  Bound as QMetadataModel.autoDelegate.
    """
    delegate_bool = ItemComboDelegate(itemview)
    delegate_bool.items = [u"Sí",u"No",u"--"]
    delegate_bool.values = [True,False,None]
    basic_delegate = ItemBasicDelegate(itemview)
    # Not every item view has setColumnWidth (e.g. list-like views).
    fnSetColumnWidth = getattr(itemview,"setColumnWidth",None)
    for i, name in enumerate(self.tmd.fieldlist):
        field = self.tmd.field[i]
        ctype = self.colType(i)
        delegate = basic_delegate
        optionlist = field.get("optionlist",None)
        valuelist = field.get("valuelist",optionlist)
        if valuelist:
            # This is to avoid the same data in optionlist being referenced
            # in valuelist instead of being copied.
            valuelist = valuelist[:]
        if ctype == "b": delegate = delegate_bool
        if optionlist:
            # Column-specific combo mapping display strings to values.
            delegate_adhoc = ItemComboDelegate(itemview)
            delegate_adhoc.items = valuelist
            delegate_adhoc.values = optionlist
            delegate = delegate_adhoc
        if delegate:
            itemview.setItemDelegateForColumn(i, delegate)
        if fnSetColumnWidth:
            if i not in self.columnWidth:
                widths = [50]
                for row in range(min(20, self.rowCount())):
                    midx = self.index(row,i)
                    sz = itemview.sizeHintForIndex(midx)
                    widths.append(sz.width())
                widths.sort()
                # Drop roughly the smallest quarter of the samples, then
                # average the rest plus a 10px margin.
                # NOTE(review): relies on Python 2 integer division.
                x = len(widths) / 4 + 1
                m = widths[x:]
                lm = len(m)
                if lm:
                    w = sum(m) / lm + 10
                    #w = itemview.sizeHintForColumn(i)
                    self.columnWidth[i] = w
                else:
                    self.columnWidth[i] = None
            w = self.columnWidth[i]
            if w:
                fnSetColumnWidth(i, w)
class ItemComboDelegate(QtGui.QStyledItemDelegate):
    """Combo-box editor delegate.

    *items* holds the display strings and *values* the corresponding model
    values; the two lists are parallel and matched positionally.
    """
    def __init__(self,*args):
        # BUG FIX: the original initialised QtGui.QItemDelegate even though
        # the base class is QtGui.QStyledItemDelegate.
        QtGui.QStyledItemDelegate.__init__(self,*args)
        self.items = []   # display strings shown in the combo
        self.values = []  # model values, parallel to self.items
    def createEditor(self, parent, option, index):
        """Build a combo box populated with the (truthy) display items."""
        combo = QtGui.QComboBox(parent)
        #combo.setWindowFlags(QtCore.Qt.Popup | QtCore.Qt.FramelessWindowHint)
        for item in self.items:
            # NOTE(review): falsy items are skipped, which would desynchronise
            # combo rows from self.values -- assumed items never contain
            # empty entries; confirm.
            if item: combo.addItem(item)
        return combo
    def setEditorData(self, editor, index):
        """Point the combo at the entry matching the model's edit value."""
        model = index.model()
        val = model.data(index, QtCore.Qt.EditRole)
        idx = None
        try:
            # BUG FIX: was `if val:` -- legitimate falsy values such as False
            # (bool columns) were never looked up, so the combo silently
            # stayed on its first entry.
            if val is not None: idx = self.values.index(val)
        except ValueError:
            # Unknown (truthy) value: append it so it can be selected.
            if val:
                self.items.append(val)
                self.values.append(val)
                editor.addItem(val)
                idx = self.values.index(val)
        # BUG FIX: was `if idx:` -- index 0 is falsy; None is the real
        # "not found" marker.
        if idx is not None: editor.setCurrentIndex(idx)
    def setModelData(self, editor, model, index):
        """Write the value matching the combo's current row back to the model."""
        idx = editor.currentIndex()
        val = self.values[idx]
        model.setData(index,val, QtCore.Qt.EditRole)
class ItemBasicDelegate(QtGui.QStyledItemDelegate):
    """Default styled delegate.

    Date/datetime editors get a calendar popup, the model-supplied display
    format (QtFormatRole) when available, and a frame.
    """
    def createEditor(self, parent, option, index):
        """Create the stock editor, then tune date/datetime widgets."""
        editor = QtGui.QStyledItemDelegate.createEditor(self, parent, option, index)
        if isinstance(editor, (QtGui.QDateEdit, QtGui.QDateTimeEdit)):
            editor.setCalendarPopup(True)
            fmt = index.model().data(index, QtFormatRole)
            if fmt:
                editor.setDisplayFormat(fmt)
            try:
                editor.setFrame(True)
            except AttributeError:
                pass  # not every editor widget exposes setFrame()
        return editor
    def updateEditorGeometry(self, widget, option, index):
        """Pin the editor exactly over the cell rectangle."""
        widget.setGeometry(option.rect)
class QMetadataModel(QtSql.QSqlQueryModel):
    """Query model driven by table metadata (self.tmd).

    Adds per-field flags, decorations, colours, check states, display
    formatting and an in-memory dirty-row buffer on top of QSqlQueryModel.
    Subclasses override the no-op select/filter/sort stubs.
    """
    def __init__(self, parent, db, tmd = None):
        QtSql.QSqlQueryModel.__init__(self, parent)
        self.db = db
        self.tmd = tmd                   # table metadata object
        self.table = self.tmd.code
        self.fieldlist = self.tmd.fieldlist
        self.pk = self.tmd.primarykey
        self.pkidx = self.tmd.fieldlist.index(self.pk)
        self.columnWidth = {}            # column -> cached width (see autoDelegate)
        self.checkstate = {}             # (row, col) -> check state int
        self.decorations = {}            # value -> QIcon for DecorationRole
        self.dirtyrows = {}              # row -> {col: QVariant} pending edits
        self.rows = 1
        self._header_data = {}           # (section, orientation, role) -> QVariant
        self.utilities = utilities()
        # Header: field aliases, with alternating blue/green foreground.
        for i, fname in enumerate(self.tmd.fieldlist):
            field = self.tmd.field[i]
            self.setHeaderData(i, QtCore.Qt.Horizontal, field['alias'])
            if i % 2 == 0:
                color = self.utilities.getBrush("#00A")
            else:
                color = self.utilities.getBrush("#090")
            self.setHeaderData(i, QtCore.Qt.Horizontal, color, role = QtCore.Qt.ForegroundRole)
    def getHeaderAlias(self):
        """Return the list of field aliases, in field order."""
        header = []
        for i, fname in enumerate(self.tmd.fieldlist):
            field = self.tmd.field[i]
            header.append(field['alias'])
        return header
    def setSort(self, column, order):
        # Stub: overridden by QSqlMetadataModel.
        pass
    def setBasicFilter(self, fieldname, filtertext):
        # Stub: overridden by QSqlMetadataModel.
        pass
    def setFilter(self, filtertext):
        # Stub: overridden by QSqlMetadataModel.
        pass
    def select(self):
        # Stub: overridden by QSqlMetadataModel.
        pass
    def refresh(self):
        # Stub: overridden by QSqlMetadataModel.
        pass
    def flags(self, index):
        """Build Qt item flags for the cell from the field's table* options."""
        assert(self.tmd)
        flags = 0
        field = self.tmd.field[index.column()]
        if field.get("tableSelectable", True):
            flags |= QtCore.Qt.ItemIsSelectable
        if field.get("tableEditable", True):
            flags |= QtCore.Qt.ItemIsEditable
        if field.get("tableCheckable", False):
            flags |= QtCore.Qt.ItemIsUserCheckable
        if field.get("tableTristate", False):
            flags |= QtCore.Qt.ItemIsTristate
        if field.get("tableEnabled", True):
            flags |= QtCore.Qt.ItemIsEnabled
        return flags
    def colType(self, column):
        """Collapse the field's metadata type into a one/two-letter class:
        b=bool, d=date, dt=datetime, n=numeric, s=string, t=time, x=unknown.
        Accepts either a column number or a QModelIndex."""
        try: column = column.column()
        except Exception: pass
        field = self.tmd.field[column]
        ftype = field.get("type", "vchar")
        if ftype == "bool": return "b"
        if ftype == "date": return "d"
        if ftype == "datetime": return "dt"
        if ftype == "double": return "n"
        if ftype == "float": return "n"
        if ftype == "int": return "n"
        if ftype.startswith("number"): return "n"
        if ftype == "string" or ftype.startswith("vchar"): return "s"
        if ftype == "time": return "t"
        return "x"
    # Delegate installation is implemented by the module-level helper.
    autoDelegate = autoDelegateMetadata
    def columnCount(self, parent = None):
        """Number of metadata fields; only valid for the root parent."""
        if parent is None: parent = QtCore.QModelIndex()
        if parent.isValid(): raise ValueError, "Valid parent passed to columnCount"
        return len(self.tmd.fieldlist)
    def rowCount(self, parent = None):
        """Fixed row count (self.rows); only valid for the root parent."""
        if parent is None: parent = QtCore.QModelIndex()
        if parent.isValid(): raise ValueError, "Valid parent passed to rowCount"
        return self.rows
    def setHeaderData(self, section, orientation, value, role = None):
        """Store header data in the local dict keyed by (section, orientation, role)."""
        if role == None: role = QtCore.Qt.DisplayRole
        k = section, orientation, role
        self._header_data[k] = QtCore.QVariant(value)
    def headerData(self, section, orientation, role):
        """Serve locally stored header data, falling back to the Qt default."""
        k = section, orientation, role
        return self._header_data.get(k, QtCore.QAbstractTableModel.headerData(self, section, orientation, role))
        #if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
        #    i = section
        #    field = self.tmd.field[i]
        #    return field['alias']
    def data(self, index, role = None):
        """Answer all supported roles for a cell.

        Edit/Display values prefer the dirty-row buffer over the SQL result,
        are converted from QVariant per column type, defaulted when NULL,
        and (for DisplayRole) mapped through optionlist/valuelist or the
        field's format string.
        """
        field = self.tmd.field[index.column()]
        ctype = self.colType(index)
        if role is None: role = QtCore.Qt.DisplayRole
        if role == QtFormatRole:
            # TODO: Si tiene formato devolverlo como string
            # ... en caso contrario devolver None.
            format = field.get("format", None)
            return format
        elif role == QtCore.Qt.DecorationRole:
            # Icon chosen by matching the edit value against optionlist
            # (booleans implicitly use [True, False, None]).
            ret = self.data(index,QtCore.Qt.EditRole)
            optionlist = field.get("optionlist",None)
            iconlist = field.get("iconlist",None)
            icon = None
            decoration = None
            if not optionlist:
                if ctype == "b":
                    optionlist = [True,False,None]
            if not iconlist and optionlist:
                iconlist = optionlist
            if optionlist and iconlist:
                try:
                    idx = optionlist.index(ret)
                except ValueError:
                    idx = -1
                if idx >= 0:
                    icon = iconlist[idx]
                    decoration = self.decorations.get(icon)
            if decoration: return decoration
        elif role == QtCore.Qt.TextAlignmentRole:
            # Numbers are right-aligned.
            if ctype == "n":
                return QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter
        elif role == QtCore.Qt.BackgroundRole:
            # Grey for rows with pending edits; orange/yellow by check state.
            row = index.row()
            if row in self.dirtyrows:
                return self.utilities.getBrush("#EEE")
            c = self.checkstate.get( (row,0), 0)
            if c == 2:
                return self.utilities.getBrush("#FB9")
            elif c == 1:
                return self.utilities.getBrush("#FEA")
        elif role == QtCore.Qt.ForegroundRole:
            # Colour from colorlist (matched via optionlist); fallback:
            # blue for NULL numbers, red for negatives.
            ret = self.data(index,QtCore.Qt.EditRole)
            optionlist = field.get("optionlist",None)
            colorlist = field.get("colorlist",None)
            color = None
            if not optionlist:
                if ctype == "b":
                    optionlist = [True,False,None]
                    colorlist = ["#0B0","#B00","#644"]
            elif not colorlist:
                colorlist = [ None for x in optionlist ]
            brush = None
            if optionlist and colorlist:
                try: idx = optionlist.index(ret)
                except ValueError: idx = -1
                try:
                    if idx >= 0: color = colorlist[idx]
                    if color: brush = self.utilities.getBrush(color)
                except IndexError:
                    pass
            if brush is None:
                if ctype == "n":
                    if ret is None: brush = self.utilities.getBrush("#00B")
                    elif float(ret) < 0: brush = self.utilities.getBrush("#B00")
            if brush is not None:
                return brush
        if role == QtCore.Qt.CheckStateRole:
            if field.get("tableCheckable", False):
                k = (index.row(), index.column())
                return self.checkstate.get(k,QtCore.Qt.Unchecked)
        if role in (QtCore.Qt.EditRole, QtCore.Qt.DisplayRole):
            row = index.row()
            col = index.column()
            ret = None
            # Pending (uncommitted) edit wins over the SQL result.
            if row in self.dirtyrows:
                if col in self.dirtyrows[row]:
                    ret = self.dirtyrows[row][col]
            if ret is None:
                ret = QtSql.QSqlQueryModel.data(self,index,role)
            ftype = field.get("type", "vchar")
            optionlist = field.get("optionlist",None)
            valuelist = field.get("valuelist",optionlist)
            format = field.get("format",None)
            if not optionlist:
                if ctype == "b":
                    optionlist = [True,False,None]
                    valuelist = [u"Sí",u"No","--"]
            if ret.isNull(): ret = None
            if ret is None:
                # Type-specific defaults for NULL cells.
                if ctype == "d": ret = QtCore.QVariant(date.today().isoformat())
                if ctype == "dt": ret = QtCore.QVariant(datetime.now().isoformat())
                if ctype == "b": ret = QtCore.QVariant(False)
                if ctype == "n": ret = QtCore.QVariant(0)
                #if ctype == "s":
                #    if field.get("tableEditable", None)==False:
                #        ret = QtCore.QVariant(random.randrange(1,9999))
            if ret:
                # QVariant -> native Python/Qt value per column type.
                try:
                    if ctype == "b": ret = ret.toBool()
                    if ctype == "d": ret = ret.toDate()
                    if ctype == "dt": ret = ret.toDateTime()
                    if ctype == "s": ret = ret.toString()
                    if ctype == "t": ret = ret.toTime()
                    if ctype == "n":
                        if ftype == "int": ret, ok = ret.toInt()
                        else: ret, ok = ret.toDouble()
                except ValueError:
                    ret = None
            if role == QtCore.Qt.DisplayRole:
                # Map stored value to its display string / formatted text.
                try:
                    if optionlist:
                        idx = optionlist.index(ret)
                        if idx >= 0: ret = valuelist[idx]
                    elif format:
                        if ctype in ('n','s'):
                            ret = format % ret
                        elif ctype in ('d','dt'):
                            ret = ret.toString(format)
                except Exception, e:
                    pass
            return ret
        return QtSql.QSqlQueryModel.data(self,index,role)
    def setData(self, index, value, role):
        """Buffer edits in dirtyrows (EditRole) or toggle check state
        (CheckStateRole); emits dataChanged. Returns True on change."""
        if value == self.data(index, role): return False
        if role == QtCore.Qt.EditRole:
            row = index.row()
            col = index.column()
            if row not in self.dirtyrows:
                self.dirtyrows[row] = {}
            if col not in self.dirtyrows[row]:
                self.dirtyrows[row][col] = None
            self.dirtyrows[row][col] = QtCore.QVariant(value)
            model = index.model()
            #columns = model.columnCount()
            left = model.index(row,0)
            #right = model.index(row,columns-1)
            self.emit(QtCore.SIGNAL("dataChanged(QModelIndex,QModelIndex)"), left,left)
            """
            primaryKeyIndex = self.index(index.row(), self.pkidx)
            pkeyval = self.data(primaryKeyIndex)
            self.clear()
            try:
                return self.setValue(pkeyval, self.tmd.field[index.column()]['name'], value)
            finally:
                self.refresh()
            """
        elif role == QtCore.Qt.CheckStateRole:
            k = (index.row(), index.column())
            val, ok = value.toInt()
            print "Check %s -> %s" % (repr(k), repr(val))
            c = self.checkstate.get(k,0)
            if c == 0: val = 1
            self.checkstate[k]=val
            row = index.row()
            model = index.model()
            columns = model.columnCount()
            left = model.index(row,0)
            right = model.index(row,columns-1)
            self.emit(QtCore.SIGNAL("dataChanged(QModelIndex,QModelIndex)"), left,right)
            return True
        return False
    def commitDirtyRow(self, row):
        """Flush one buffered row via setValues; drop the buffer on success."""
        if row not in self.dirtyrows:
            return False
        result = self.setValues(self.dirtyrows[row])
        if result==True: del self.dirtyrows[row]
    def setValues(self, dirtyrow):
        """INSERT the buffered cells of a row into the table.

        NOTE(review): values are quoted by string concatenation rather than
        bound parameters -- SQL-injection/quoting hazard; prefer placeholders
        with addBindValue as done elsewhere.
        """
        values = []
        fields = []
        for col, value in dirtyrow.iteritems():
            field = self.tmd.field[col]['name']
            values.append("'"+unicode(value.toString())+"'")
            fields.append(field)
        query = QtSql.QSqlQuery(self.db)
        query.prepare("INSERT INTO %(table)s (%(fields)s) VALUES(%(values)s)" %
            {
                'table' : self.table,
                'fields' : ", ".join(fields),
                'values' : ", ".join(values),
            })
        return query.exec_()
class QSqlMetadataModel(QMetadataModel):
    """QMetadataModel backed by a live SELECT, with WHERE-filtering,
    sorting and UPDATE-based commits."""
    def __init__(self, parent, db, tmd = None):
        QMetadataModel.__init__(self, parent, db, tmd)
        # NOTE(review): `filter` is a plain attribute (the WHERE text) here,
        # yet at least one caller invokes self.model.filter() -- confirm.
        self.filter = None
        self.sort = None   # "ORDER BY ..." clause text, or None
    def rowCount(self, parent = None):
        """Row count straight from the driver via query().size().

        NOTE(review): query().size() requires a driver that reports result
        sizes (returns -1 otherwise) -- confirm for the deployed backend.
        """
        if parent is None: parent = QtCore.QModelIndex()
        if parent.isValid(): raise ValueError, "Valid parent passed to rowCount"
        return self.query().size ()
    def getFilter(self):
        """Return the current raw WHERE text (may be None)."""
        return self.filter
    def setFilter(self, filter):
        """Store *filter* as the raw WHERE text (trailing space appended)."""
        self.filter = filter + " "
    def setBasicFilter(self, alias=None, text=None):
        """Build an ILIKE filter on the field whose alias or name is *alias*.

        Passing None for either argument clears the filter.
        NOTE(review): *text* is interpolated straight into SQL -- injection
        hazard for non-trusted input.
        """
        if text is None or alias is None: self.filter = None
        else:
            fieldname=""
            for i, fname in enumerate(self.tmd.fieldlist):
                field = self.tmd.field[i]
                if unicode(field['alias']) == unicode(alias) or unicode(fname) == unicode(alias):
                    fieldname = fname
                    break
            self.filter = " "+fieldname+"::VARCHAR ILIKE '%"+text+"%' "
    def setSort(self, col, desc):
        # sorts column col ascending, or descending if desc == True
        field = self.tmd.fieldlist[col]
        self.sort = "ORDER BY "+field+" "
        if desc==1: self.sort += "DESC"
    def commitDirtyRow(self, row):
        """UPDATE the buffered cells of *row* (keyed by primary key), drop
        the buffer and re-run the query."""
        if row not in self.dirtyrows:
            return False
        primaryKeyIndex = self.index(row, self.pkidx)
        pkeyval = self.data(primaryKeyIndex)
        self.setValues(pkeyval, self.dirtyrows[row])
        del self.dirtyrows[row]
        self.refresh()
    def setValues(self, pkvalue, dirtyrow):
        """UPDATE one row's dirty cells using bound placeholders.

        NOTE(review): signature differs from QMetadataModel.setValues(dirtyrow)
        -- the override adds the primary-key argument; confirm callers.
        """
        values = []
        fields = []
        for col, value in dirtyrow.iteritems():
            field = self.tmd.field[col]['name']
            values.append(value)
            fields.append(" %s = ? " % field)
        query = QtSql.QSqlQuery(self.db)
        query.prepare("UPDATE %(table)s SET %(setfields)s WHERE %(pk)s = ?" %
            {
                'table' : self.table,
                'setfields' : ", ".join(fields),
                'pk' : self.pk,
            })
        for value in values:
            query.addBindValue(value)
        query.addBindValue(pkvalue)
        return query.exec_()
    def setValue(self, pkvalue, field, value):
        """UPDATE a single field of the row identified by *pkvalue*."""
        query = QtSql.QSqlQuery(self.db)
        query.prepare("UPDATE %(table)s SET %(field)s = ? WHERE %(pk)s = ?" %
            {
                'table' : self.table,
                'field' : str(field),
                'pk' : self.pk,
            })
        query.addBindValue(value)
        query.addBindValue(pkvalue)
        return query.exec_()
    def refresh(self):
        """Rebuild and run the SELECT with the current filter and sort
        (default order: primary key)."""
        query = "SELECT %s FROM %s " % (", ".join(self.tmd.fieldlist), self.table)
        if self.filter: query+="WHERE "+self.filter
        if self.sort: query+=self.sort
        else: query+="ORDER BY %s" % (self.pk)
        print query
        self.setQuery(query, self.db)
        if self.lastError().isValid():
            print "Error Query: ", self.lastError();
    # select() is an alias: re-running the query *is* selecting.
    select = refresh
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,425 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/vireo_model.py | import itertools
import numpy as np
from scipy.stats import entropy
from scipy.sparse import csc_matrix
from scipy.special import logsumexp, digamma, betaln
from .vireo_base import normalize, loglik_amplify, beta_entropy
from .vireo_base import get_binom_coeff, logbincoeff
__docformat__ = "restructuredtext en"
class Vireo():
    """Vireo model: Variational Inference for reconstruction of ensemble origin
    The prior can be set via set_prior() before fitting the model.
    Key properties
    --------------
    beta_mu: numpy array (1, n_GT) or (n_var, n_GT)
        Beta mean parameter of theta's posterior
    beta_sum: numpy array (1, n_GT) or (n_var, n_GT), same as beta_mu
        Beta concentration parameter of theta's posterior
    ID_prob: numpy array (n_cell, n_donor)
        Posterior cell assignment probability to each donor
    GT_prob: numpy array (n_var, n_donor, n_GT)
        Posterior genotype probability per variant per donor
    """
    def __init__(self, n_cell, n_var, n_donor, n_GT=3, learn_GT=True,
        learn_theta=True, ASE_mode=False, fix_beta_sum=False,
        beta_mu_init=None, beta_sum_init=None, ID_prob_init=None,
        GT_prob_init=None):
        """Initialise Vireo model
        Note, multiple initializations are highly recommended to avoid local
        optima.
        Parameters
        ----------
        n_cell : int.
            Number of cells
        n_var : int.
            Number of variants
        n_donor : int.
            Number of donors
        n_GT : int.
            Number of genotype categories
        learn_GT: bool.
            Whether updating `GT_prob`; otherwise using the initial
        ASE_mode: bool.
            Whether setting allelic ratio `theta` to be variant specific
        fix_beta_sum: bool.
            Whether fixing the concentration parameter of theta's posterior
        beta_mu_init: numpy array (1, n_GT) or (n_var, n_GT)
            Initial value of beta_mu, the mean parameter of theta
        beta_sum_init: numpy array (1, n_GT) or (n_var, n_GT), same as beta_mu
            Initial value of beta_sum, the concentration parameter of theta
        ID_prob_init: numpy array (n_cell, n_donor)
            Initial value of ID_prob, cell assignment probability to each donor
        GT_prob_init: numpy array (n_var, n_donor, n_GT)
            Initial value of GT_prob, genotype probability per variant and donor
        """
        self.n_GT = n_GT
        self.n_var = n_var
        self.n_cell = n_cell
        self.n_donor = n_donor
        self.learn_GT = learn_GT
        self.ASE_mode = ASE_mode
        self.learn_theta = learn_theta
        self.fix_beta_sum = fix_beta_sum
        # running ELBO trace; fit() appends the per-iteration values here
        self.ELBO_ = np.zeros((0))
        # initial key parameters
        self.set_initial(beta_mu_init, beta_sum_init, ID_prob_init, GT_prob_init)
        # set hyper parameters for prior
        self.set_prior()
    def set_initial(self, beta_mu_init=None, beta_sum_init=None,
        ID_prob_init=None, GT_prob_init=None):
        """Set initial values (random where no explicit initial is given)
        """
        # theta is shared across variants unless ASE_mode is on
        theta_len = self.n_var if self.ASE_mode else 1
        if beta_mu_init is not None:
            self.beta_mu = beta_mu_init
        else:
            # means spread evenly over (0.01, 0.99), one per genotype
            self.beta_mu = (np.ones((theta_len, self.n_GT)) *
                np.linspace(0.01, 0.99, self.n_GT).reshape(1, -1))
        if beta_sum_init is not None:
            self.beta_sum = beta_sum_init
        else:
            self.beta_sum = np.ones((theta_len, self.n_GT)) * 50
        if ID_prob_init is not None:
            self.ID_prob = normalize(ID_prob_init, axis=1)
        else:
            self.ID_prob = normalize(np.random.rand(self.n_cell, self.n_donor))
        if GT_prob_init is not None:
            self.GT_prob = normalize(GT_prob_init)
        else:
            _GT_val = np.random.rand(self.n_var, self.n_donor, self.n_GT)
            self.GT_prob = normalize(_GT_val)
    def set_prior(self, GT_prior=None, ID_prior=None, beta_mu_prior=None,
        beta_sum_prior=None, min_GP=0.00001):
        """Set prior for key variables: theta, GT_prob and ID_prob.
        The priors are in the same shape as its according variables.
        min_GP: float. Minimum genotype probability in GT_prior.
        """
        if beta_mu_prior is None:
            beta_mu_prior = np.expand_dims(
                np.linspace(0.01, 0.99, self.beta_mu.shape[1]), axis=0)
        if beta_sum_prior is None:
            beta_sum_prior = np.ones(beta_mu_prior.shape) * 50.0
        # Beta prior stored via its two concentration parameters
        self.theta_s1_prior = beta_mu_prior * beta_sum_prior
        self.theta_s2_prior = (1 - beta_mu_prior) * beta_sum_prior
        if ID_prior is not None:
            if len(ID_prior.shape) == 1:
                ID_prior = np.expand_dims(ID_prior, axis=0)
            self.ID_prior = ID_prior
        else:
            # uniform assignment prior
            self.ID_prior = normalize(np.ones(self.ID_prob.shape))
        if GT_prior is not None:
            if len(GT_prior.shape) == 2:
                GT_prior = np.expand_dims(GT_prior, axis=0)
            # clip away 0/1 probabilities so log(GT_prior) stays finite
            GT_prior[GT_prior < min_GP] = min_GP
            GT_prior[GT_prior > 1 - min_GP] = 1 - min_GP
            GT_prior = normalize(GT_prior)
            self.GT_prior = GT_prior
        else:
            # uniform genotype prior
            self.GT_prior = normalize(np.ones(self.GT_prob.shape))
    @property
    def theta_s1(self):
        """Beta concentration1 parameter for theta posterior"""
        return self.beta_mu * self.beta_sum
    @property
    def theta_s2(self):
        """Beta concentration2 parameter for theta posterior"""
        return (1 - self.beta_mu) * self.beta_sum
    @property
    def digamma1_(self):
        """Digamma of Beta concentration1 parameter, with a donor axis
        inserted for broadcasting against GT_prob"""
        return np.expand_dims(digamma(self.theta_s1), 1)
    @property
    def digamma2_(self):
        """Digamma of Beta concentration2 parameter, with a donor axis
        inserted for broadcasting against GT_prob"""
        return np.expand_dims(digamma(self.theta_s2), 1)
    @property
    def digammas_(self):
        """Digamma of Beta concentration summary parameter, with a donor
        axis inserted for broadcasting against GT_prob"""
        return np.expand_dims(digamma(self.theta_s1 + self.theta_s2), 1)
    def update_theta_size(self, AD, DP):
        """Coordinate ascent for updating theta posterior parameters
        """
        BD = DP - AD
        # expected allele counts per variant and donor (sufficient stats)
        S1_gt = AD @ self.ID_prob #(n_var, n_donor)
        S2_gt = BD @ self.ID_prob #(n_var, n_donor)
        _theta_s1 = np.zeros(self.beta_mu.shape)
        _theta_s2 = np.zeros(self.beta_mu.shape)
        _theta_s1 += self.theta_s1_prior.copy()
        _theta_s2 += self.theta_s2_prior.copy()
        for ig in range(self.n_GT):
            # ASE mode keeps a per-variant theta; otherwise sum over variants
            _axis = 1 if self.ASE_mode else None
            _theta_s1[:, ig:(ig+1)] += np.sum(
                S1_gt * self.GT_prob[:, :, ig], axis=_axis, keepdims=True)
            _theta_s2[:, ig:(ig+1)] += np.sum(
                S2_gt * self.GT_prob[:, :, ig], axis=_axis, keepdims=True)
        self.beta_mu = _theta_s1 / (_theta_s1 + _theta_s2)
        if self.fix_beta_sum == False:
            self.beta_sum = _theta_s1 + _theta_s2
    def update_ID_prob(self, AD, DP):
        """Coordinate ascent for updating assignment probability
        Returns the expected log likelihood matrix (n_cell, n_donor).
        """
        BD = DP - AD
        logLik_ID = np.zeros((AD.shape[1], self.n_donor))
        for ig in range(self.n_GT):
            S1 = AD.T @ (self.GT_prob[:, :, ig] * self.digamma1_[:, :, ig])
            S2 = BD.T @ (self.GT_prob[:, :, ig] * self.digamma2_[:, :, ig])
            SS = DP.T @ (self.GT_prob[:, :, ig] * self.digammas_[:, :, ig])
            logLik_ID += (S1 + S2 - SS)
        # loglik_amplify shifts log values before exp() to avoid underflow
        self.ID_prob = normalize(np.exp(loglik_amplify(
            logLik_ID + np.log(self.ID_prior))))
        return logLik_ID
    def update_GT_prob(self, AD, DP):
        """Coordinate ascent for updating genotype probability
        """
        S1_gt = AD @ self.ID_prob
        SS_gt = DP @ self.ID_prob
        S2_gt = SS_gt - S1_gt
        logLik_GT = np.zeros(self.GT_prior.shape)
        for ig in range(self.n_GT):
            logLik_GT[:, :, ig] = (
                S1_gt * self.digamma1_[:, :, ig] +
                S2_gt * self.digamma2_[:, :, ig] -
                SS_gt * self.digammas_[:, :, ig])
        self.GT_prob = normalize(np.exp(loglik_amplify(
            logLik_GT + np.log(self.GT_prior))))
    def get_ELBO(self, logLik_ID, AD=None, DP=None):
        """Calculating variational evidence lower bound with current parameters
        logLik_ID: numpy array (n_cell, n_donor), the output from update_ID_prob
        If logLik_ID is None, it is recomputed from AD and DP.
        """
        if logLik_ID is None:
            BD = DP - AD
            logLik_ID = np.zeros((AD.shape[1], self.n_donor))
            for ig in range(self.n_GT):
                S1 = AD.T @ (self.GT_prob[:, :, ig] * self.digamma1_[:, :, ig])
                S2 = BD.T @ (self.GT_prob[:, :, ig] * self.digamma2_[:, :, ig])
                SS = DP.T @ (self.GT_prob[:, :, ig] * self.digammas_[:, :, ig])
                logLik_ID += (S1 + S2 - SS)
        # ELBO = E[log p] minus KL terms for each variational factor
        LB_p = np.sum(logLik_ID * self.ID_prob)
        KL_ID = np.sum(entropy(self.ID_prob, self.ID_prior, axis=-1))
        KL_GT = np.sum(entropy(self.GT_prob, self.GT_prior, axis=-1))
        KL_theta = beta_entropy(
            np.append(
                np.expand_dims(self.theta_s1, 1),
                np.expand_dims(self.theta_s2, 1), axis = 1),
            np.append(
                np.expand_dims(self.theta_s1_prior, 1),
                np.expand_dims(self.theta_s2_prior, 1), axis = 1))
        # print(LB_p, KL_ID, KL_GT, KL_theta)
        return LB_p - KL_ID - KL_GT - KL_theta
    def _fit_VB(self, AD, DP, max_iter=200, min_iter=5, epsilon_conv=1e-2,
        delay_fit_theta=0, verbose=True):
        """Fit Vireo model with coordinate ascent
        """
        ELBO = np.zeros(max_iter)
        numerical_minimal = 1e-6
        for it in range(max_iter):
            # delay_fit_theta keeps theta at its prior for the first steps
            if self.learn_theta and it >= delay_fit_theta:
                self.update_theta_size(AD, DP)
            if self.learn_GT:
                self.update_GT_prob(AD, DP)
            _logLik_ID = self.update_ID_prob(AD, DP)
            ELBO[it] = self.get_ELBO(_logLik_ID) #+ _binom_coeff_log
            if it > min_iter:
                if ELBO[it] < ELBO[it - 1] - numerical_minimal:
                    if verbose:
                        print("Warning: Lower bound decreases!\n")
                elif it == max_iter - 1:
                    if verbose:
                        print("Warning: VB did not converge!\n")
                elif ELBO[it] - ELBO[it - 1] < epsilon_conv:
                    break
        # NOTE(review): ELBO[:it] drops the last computed value (at index
        # it) when the loop breaks -- confirm this truncation is intended.
        return ELBO[:it]
    def fit(self, AD, DP, max_iter=200, min_iter=5, epsilon_conv=1e-2,
        delay_fit_theta=0, verbose=True, n_inits=50, nproc=1):
        """Fit Vireo model with coordinate ascent
        Parameters
        ----------
        AD : scipy.sparse.csc_matrix (n_var, n_cell)
            Sparse count matrix for alternative allele
        DP : scipy.sparse.csc_matrix (n_var, n_cell)
            Sparse count matrix for depths, alternative + reference alleles
        max_iter : int
            Maximum number of iterations
        min_iter :
            Minimum number of iterations
        epsilon_conv : float
            Threshold for detecting convergence
        delay_fit_theta : int
            Number of steps to delay updating theta. This can be very useful
            for common genetics when there is good prior on allelic ratio.
        verbose : bool
            Whether print out log info
        """
        # NOTE(review): n_inits and nproc are accepted but not used in this
        # method -- confirm whether multiple initialisation was intended here.
        if type(DP) is np.ndarray and np.mean(DP > 0) < 0.3:
            # dense but mostly-zero input: convert to sparse for efficiency
            print("Warning: input matrices is %.1f%% sparse, "
                %(100 - np.mean(DP > 0) * 100) +
                "change to scipy.sparse.csc_matrix" )
            AD = csc_matrix(AD)
            DP = csc_matrix(DP)
        ELBO = self._fit_VB(AD, DP, max_iter, min_iter, epsilon_conv,
            delay_fit_theta, verbose)
        # _binom_coeff_log = np.sum(logbincoeff(DP, AD, is_sparse=True))
        # _binom_coeff_log = np.sum(get_binom_coeff(AD, DP))
        # binomial coefficients are constant w.r.t. parameters, added once
        ELBO += np.sum(get_binom_coeff(AD, DP))
        self.ELBO_ = np.append(self.ELBO_, ELBO)
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,426 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/__init__.py | from .version import __version__
from .utils import vcf_utils as vcf
from .utils import vireo_base as base
from .utils import vireo_model as model
from .utils.vcf_utils import load_VCF, match_SNPs
from .utils.io_utils import read_cellSNP, read_vartrix
from .utils.vireo_base import normalize, loglik_amplify, get_binom_coeff
from .utils.vireo_base import match, optimal_match
from .utils.vireo_wrap import vireo_wrap
from .utils.vireo_model import Vireo
from .utils.vireo_bulk import VireoBulk, LikRatio_test
from .utils.bmm_model import BinomMixtureVB
from . import plot
__all__ = [
"__version__",
"utils",
"plot",
] | {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,427 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/bmm_model.py | import itertools
import numpy as np
from scipy.stats import entropy
from scipy.sparse import csc_matrix
from scipy.special import logsumexp, digamma, betaln
from .vireo_base import normalize, loglik_amplify, beta_entropy, get_binom_coeff
class BinomMixtureVB():
    """Binomial mixture model with variational inference
    The prior can be set via set_prior() before fitting the model.
    Key properties
    --------------
    beta_mu: numpy array (n_var, n_donor)
        Beta mean parameter of theta's posterior
    beta_sum: numpy array (n_var, n_donor)
        Beta concentration parameter of theta's posterior
    ID_prob: numpy array (n_cell, n_donor)
        Posterior cell assignment probability to each donor
    """
    def __init__(self, n_cell, n_var, n_donor, fix_beta_sum=False,
        beta_mu_init=None, beta_sum_init=None, ID_prob_init=None):
        """Initialise Vireo model
        Note, multiple initializations are highly recommended to avoid local
        optima.
        Parameters
        ----------
        n_cell : int.
            Number of cells
        n_var : int.
            Number of variants
        n_donor : int.
            Number of donors
        fix_beta_sum: bool.
            Whether fixing the concentration parameter of theta's posterior
        beta_mu_init: numpy array (n_var, n_donor)
            Initial value of beta_mu, the mean parameter of theta
        beta_sum_init: numpy array (n_var, n_donor)
            Initial value of beta_sum, the concentration parameter of theta
        ID_prob_init: numpy array (n_cell, n_donor)
            Initial value of ID_prob, cell assignment probability to each donor
        """
        self.n_var = n_var
        self.n_cell = n_cell
        self.n_donor = n_donor
        self.fix_beta_sum = fix_beta_sum
        # keep the user-supplied initials so fit() can re-initialise
        self.ID_prob_init = ID_prob_init
        self.beta_mu_init = beta_mu_init
        self.beta_sum_init = beta_sum_init
        # set priors; you can re-set by run this function
        self.set_prior()
        # initial key parameters
        self.set_initial(
            self.beta_mu_init, self.beta_sum_init, self.ID_prob_init
        )
    def set_initial(self, beta_mu_init=None, beta_sum_init=None,
        ID_prob_init=None):
        """Random initialization (random where no explicit initial is given)
        """
        # initial key parameters
        if beta_mu_init is not None:
            self.beta_mu = beta_mu_init
        else:
            self.beta_mu = np.ones((self.n_var, self.n_donor)) * 0.5
        if beta_sum_init is not None:
            self.beta_sum = beta_sum_init
        else:
            self.beta_sum = np.ones(self.beta_mu.shape) * 30
        if ID_prob_init is not None:
            self.ID_prob = normalize(ID_prob_init, axis=1)
        else:
            self.ID_prob = normalize(np.random.rand(self.n_cell, self.n_donor))
        # reset the ELBO trace for a fresh run
        self.ELBO_iters = np.array([])
    def set_prior(self, ID_prior=None, beta_mu_prior=None,
        beta_sum_prior=None):
        """Set prior for key variables: theta and ID_prob.
        The priors are in the same shape as its according variables.
        """
        if beta_mu_prior is None:
            beta_mu_prior = np.ones((self.n_var, self.n_donor)) * 0.5
        if beta_sum_prior is None:
            beta_sum_prior = np.ones(beta_mu_prior.shape) * 2.0
        # Beta prior stored via its two concentration parameters
        self.theta_s1_prior = beta_mu_prior * beta_sum_prior
        self.theta_s2_prior = (1 - beta_mu_prior) * beta_sum_prior
        if ID_prior is not None:
            if len(ID_prior.shape) == 1:
                ID_prior = np.expand_dims(ID_prior, axis=0)
            self.ID_prior = ID_prior
        else:
            # uniform assignment prior
            self.ID_prior = normalize(np.ones((self.n_cell, self.n_donor)))
    @property
    def theta_s1(self):
        """Beta concentration1 parameter for theta posterior"""
        return self.beta_mu * self.beta_sum
    @property
    def theta_s2(self):
        """Beta concentration2 parameter for theta posterior"""
        return (1 - self.beta_mu) * self.beta_sum
    def get_E_logLik(self, AD, DP):
        """Get the expectation of logLikelihood
        E_theta [P(AD|DP, theta, Z)]
        """
        BD = DP - AD
        # shape: (n_cell, n_donor)
        _E_logLik_mat = (
            AD.T @ digamma(self.theta_s1) +
            BD.T @ digamma(self.theta_s2) -
            DP.T @ digamma(self.theta_s1 + self.theta_s2)
        )
        return _E_logLik_mat
    def update_theta_size(self, AD, DP):
        """Coordinate ascent for updating theta posterior parameters
        """
        BD = DP - AD
        # expected allele counts per variant and donor (sufficient stats)
        _theta_s1 = AD @ self.ID_prob #(n_var, n_donor)
        _theta_s2 = BD @ self.ID_prob #(n_var, n_donor)
        _theta_s1 += self.theta_s1_prior
        _theta_s2 += self.theta_s2_prior
        self.beta_mu = _theta_s1 / (_theta_s1 + _theta_s2)
        if self.fix_beta_sum == False:
            self.beta_sum = _theta_s1 + _theta_s2
    def update_ID_prob(self, AD=None, DP=None, logLik_ID=None):
        """Coordinate ascent for updating assignment probability
        """
        if logLik_ID is None:
            logLik_ID = self.get_E_logLik(AD, DP)
        # loglik_amplify shifts log values before exp() to avoid underflow
        self.ID_prob = normalize(np.exp(loglik_amplify(
            logLik_ID + np.log(self.ID_prior))))
    def get_ELBO(self, AD=None, DP=None, logLik_ID=None):
        """Calculating variational evidence lower bound with current parameters
        logLik_ID: numpy array (n_cell, n_donor), the output from update_ID_prob
        If logLik_ID is None, it is recomputed from AD and DP.
        """
        if logLik_ID is None:
            # Bug fix: the computed likelihood was previously discarded
            # (bare `self.get_E_logLik(AD, DP)`), leaving logLik_ID as None
            # and crashing in the product below.
            logLik_ID = self.get_E_logLik(AD, DP)
        LB_p = np.sum(logLik_ID * self.ID_prob)
        KL_ID = np.sum(entropy(self.ID_prob, self.ID_prior, axis=-1))
        KL_theta = beta_entropy(
            np.append(
                np.expand_dims(self.theta_s1, 1),
                np.expand_dims(self.theta_s2, 1), axis = 1),
            np.append(
                np.expand_dims(self.theta_s1_prior, 1),
                np.expand_dims(self.theta_s2_prior, 1), axis = 1))
        return LB_p - KL_ID - KL_theta
    def _fit_BV(self, AD, DP, max_iter=200, min_iter=20, epsilon_conv=1e-2,
        verbose=True):
        """Fit Vireo model with coordinate ascent
        """
        ELBO = np.zeros(max_iter)
        for it in range(max_iter):
            self.update_theta_size(AD, DP)
            _logLik_ID = self.get_E_logLik(AD, DP)
            self.update_ID_prob(logLik_ID = _logLik_ID)
            ELBO[it] = self.get_ELBO(logLik_ID = _logLik_ID)
            if it > min_iter:
                if ELBO[it] - ELBO[it - 1] < -1e-6:
                    if verbose:
                        print("Warning: ELBO decreases %.8f to %.8f!\n"
                              %(ELBO[it - 1], ELBO[it]))
                elif it == max_iter - 1:
                    if verbose:
                        print("Warning: VB did not converge!\n")
                elif ELBO[it] - ELBO[it - 1] < epsilon_conv:
                    break
        # NOTE(review): ELBO[:it] drops the last computed value at index it
        # when the loop breaks -- confirm this truncation is intended.
        self.ELBO_iters = np.append(self.ELBO_iters, ELBO[:it])
    def fit(self, AD, DP, n_init=10, max_iter=200, max_iter_pre=100,
        random_seed=None, **kwargs):
        """Fit VB with multiple initializations
        Parameters
        ----------
        AD : scipy.sparse.csc_matrix (n_var, n_cell)
            Sparse count matrix for alternative allele
        DP : scipy.sparse.csc_matrix (n_var, n_cell)
            Sparse count matrix for depths, alternative + reference alleles
        n_init : int
            Number of random initialisations to use
        max_iter : int
            Maximum number of iterations for _fit_BV() in best initial
        max_iter_pre : int
            Maximum number of iterations for _fit_BV() in multiple initials
        min_iter :
            Minimum number of iterations for _fit_BV()
        epsilon_conv : float
            Threshold for detecting convergence for _fit_BV()
        verbose : bool
            Whether print out log info for _fit_BV()
        random_seed : None or int
            Random seed in numpy.random for multiple initializations
        """
        if random_seed is not None:
            np.random.seed(random_seed)
        if type(DP) is np.ndarray and np.mean(DP > 0) < 0.3:
            # dense but mostly-zero input: convert to sparse for efficiency
            print("Warning: input matrices is %.1f%% sparse, "
                %(100 - np.mean(DP > 0) * 100) +
                "change to scipy.sparse.csc_matrix" )
            AD = csc_matrix(AD)
            DP = csc_matrix(DP)
        _binom_coeff = np.sum(get_binom_coeff(AD, DP, is_log = True))
        self.ELBO_inits = []
        for i in range(n_init):
            self.set_initial(
                self.beta_mu_init, self.beta_sum_init, self.ID_prob_init
            )
            self._fit_BV(AD, DP, max_iter=max_iter_pre, **kwargs)
            self.ELBO_inits.append(self.ELBO_iters[-1])
            ## first or better initialization ("+ 0" forces an array copy)
            if i == 0 or (self.ELBO_iters[-1] > np.max(self.ELBO_inits[:-1])):
                _ID_prob_best = self.ID_prob + 0
                _beta_mu_best = self.beta_mu + 0
                _beta_sum_best = self.beta_sum + 0
                _ELBO_iters_best = self.ELBO_iters + 0
        ## Re-fit with best parameters
        self.set_initial(_beta_mu_best, _beta_sum_best, _ID_prob_best)
        self.ELBO_iters = _ELBO_iters_best
        self._fit_BV(AD, DP, max_iter=max_iter, **kwargs)
        ## add binomial coefficient constants
        self.ELBO_iters = self.ELBO_iters + _binom_coeff
        self.ELBO_inits = np.array(self.ELBO_inits) + _binom_coeff
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,428 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/io_utils.py |
import subprocess
import numpy as np
from scipy.io import mmread
from itertools import permutations
from .vcf_utils import load_VCF, write_VCF, parse_donor_GPb
from .vcf_utils import read_sparse_GeneINFO, GenoINFO_maker, match_SNPs
def match_donor_VCF(cell_dat, donor_vcf):
    """Match variants between cell VCF and donor VCF information.

    Both dictionaries are subset in place to the shared variants and the
    (possibly reduced) pair is returned.
    """
    match_idx = match_SNPs(cell_dat['variants'], donor_vcf['variants'])
    hit_rows = np.where(match_idx != None)[0]  # drop unmatched (None) entries
    # TODO: check when chr is not compatible! given warning.
    if len(hit_rows) == 0:
        print("[vireo] warning: no variants matched to donor VCF, " +
              "please check chr format!")
    else:
        print("[vireo] %d out %d variants matched to donor VCF"
              %(len(hit_rows), len(cell_dat['variants'])))
    donor_rows = match_idx[hit_rows].astype(int)
    # subset the cell-side data to the matched variants
    cell_dat['AD'] = cell_dat['AD'][hit_rows, :]
    cell_dat['DP'] = cell_dat['DP'][hit_rows, :]
    cell_dat["variants"] = [cell_dat["variants"][x] for x in hit_rows]
    for _key in cell_dat["FixedINFO"].keys():
        cell_dat["FixedINFO"][_key] = [
            cell_dat["FixedINFO"][_key][x] for x in hit_rows]
    # subset the donor-side data to the same variants, in matching order
    donor_vcf["variants"] = [donor_vcf["variants"][x] for x in donor_rows]
    for _key in donor_vcf["FixedINFO"].keys():
        donor_vcf["FixedINFO"][_key] = [
            donor_vcf["FixedINFO"][_key][x] for x in donor_rows]
    for _key in donor_vcf["GenoINFO"].keys():
        donor_vcf["GenoINFO"][_key] = [
            donor_vcf["GenoINFO"][_key][x] for x in donor_rows]
    return cell_dat, donor_vcf
def read_cellSNP(dir_name, layers=None):
    """Read data from the cellSNP output directory
    Parameters
    ----------
    dir_name:
        directory full path name for cellSNP output
    layers:
        list of sparse-matrix tags to load [default: ['AD', 'DP']]
    Return
    ------
    A dictionary containing AD, DP, cells and variants
    """
    # Idiom fix: avoid a mutable default argument (previously ['AD', 'DP'])
    if layers is None:
        layers = ['AD', 'DP']
    cell_dat = load_VCF(dir_name + "/cellSNP.base.vcf.gz", load_sample=False,
                        biallelic_only=False)
    for _layer in layers:
        cell_dat[_layer] = mmread(dir_name + "/cellSNP.tag.%s.mtx" %(_layer)).tocsc()
    cell_dat['samples'] = np.genfromtxt(dir_name + "/cellSNP.samples.tsv", dtype=str)
    return cell_dat
def read_vartrix(alt_mtx, ref_mtx, cell_file, vcf_file=None):
    """Read data from VarTrix
    Parameters
    ----------
    alt_mtx:
        sparse matrix file for alternative alleles
    ref_mtx:
        sparse matrix file for reference alleles
    cell_file:
        file for cell barcodes, each per line
    vcf_file:
        the vcf file used for fetch variants in VarTrix
    Return
    ------
    A dictionary containing AD, DP, cells and optionally variants
    """
    # variants are only available when the matching VCF is supplied
    if vcf_file is None:
        out_dat = {}
    else:
        out_dat = load_VCF(vcf_file, load_sample=False, biallelic_only=False)
        out_dat['variants'] = np.array(out_dat['variants'])
    alt_mat = mmread(alt_mtx).tocsc()
    out_dat['AD'] = alt_mat
    # depth = reference + alternative counts
    out_dat['DP'] = alt_mat + mmread(ref_mtx).tocsc()
    out_dat['samples'] = np.genfromtxt(cell_file, dtype=str)
    return out_dat
def write_donor_id(out_dir, donor_names, cell_names, n_vars, res_vireo):
    """
    Write the results of donor id into files.

    Outputs under out_dir: _log.txt, summary.tsv, donor_ids.tsv,
    prob_singlet.tsv(.gz), prob_doublet.tsv(.gz) and, when ambient
    estimates are present, prop_ambient.tsv.
    """
    ID_prob, doublet_prob = res_vireo['ID_prob'], res_vireo['doublet_prob']
    prob_max = np.max(ID_prob, axis=1)
    prob_doublet_out = np.max(doublet_prob, axis=1)
    # best singlet / doublet label per cell (U100: fixed-width unicode)
    donor_singlet = np.array(donor_names, "U100")[np.argmax(ID_prob, axis=1)]
    doublet_names = [",".join(x) for x in permutations(donor_names, 2)]
    donor_doublet = np.array(doublet_names, "U100")[np.argmax(doublet_prob,
        axis=1)]
    donor_ids = donor_singlet.copy()
    # hard-coded assignment thresholds: 0.9 posterior, >=10 usable variants
    donor_ids[prob_max < 0.9] = "unassigned"
    donor_ids[prob_doublet_out >= 0.9] = "doublet"
    donor_ids[n_vars < 10] = "unassigned"
    ## save log file
    fid = open(out_dir + "/_log.txt", "w")
    fid.writelines("logLik: %.3e\n" %(res_vireo['LB_doublet']))
    fid.writelines("thetas: \n%s\n" %(res_vireo['theta_shapes']))
    fid.close()
    ## save summary file
    fid = open(out_dir + "/summary.tsv", "w")
    fid.writelines("Var1\tFreq\n")
    donor_ids_uniq, donor_ids_count = np.unique(donor_ids, return_counts=True)
    for i in range(len(donor_ids_uniq)):
        fid.writelines("%s\t%d\n" %(donor_ids_uniq[i], donor_ids_count[i]))
    fid.close()
    print("[vireo] final donor size:")
    print("\t".join([str(x) for x in donor_ids_uniq]))
    print("\t".join([str(x) for x in donor_ids_count]))
    ## save donor_ids file
    fid = open(out_dir + "/donor_ids.tsv", "w")
    header = ["cell", "donor_id", "prob_max", "prob_doublet", "n_vars",
              "best_singlet", "best_doublet", "doublet_logLikRatio"]
    fid.writelines("\t".join(header) + "\n")
    for i in range(len(cell_names)):
        line = [cell_names[i], donor_ids[i], "%.2e" %prob_max[i],
                "%.2e" %prob_doublet_out[i], "%d" %n_vars[i],
                donor_singlet[i], donor_doublet[i],
                "%.3f" %res_vireo['doublet_LLR'][i]]
        fid.writelines("\t".join(line) + "\n")
    fid.close()
    ## save singlet probability file
    fid = open(out_dir + "/prob_singlet.tsv", "w")
    fid.writelines("\t".join(["cell"] + donor_names) + "\n")
    for i in range(len(cell_names)):
        line = ["%.2e" %x for x in ID_prob[i, :]]
        fid.writelines("\t".join([cell_names[i]] + line) + "\n")
    fid.close()
    ## save doublet probability file
    fid = open(out_dir + "/prob_doublet.tsv", "w")
    fid.writelines("\t".join(["cell"] + doublet_names) + "\n")
    for i in range(len(cell_names)):
        line = ["%.2e" %x for x in doublet_prob[i, :]]
        fid.writelines("\t".join([cell_names[i]] + line) + "\n")
    fid.close()
    ## save ambient RNA file (only when ambient estimation was run)
    if res_vireo['ambient_Psi'] is not None:
        fid = open(out_dir + "/prop_ambient.tsv", "w")
        fid.writelines("\t".join(["cell"] + donor_names + ['logLik_ratio']) + "\n")
        for i in range(len(cell_names)):
            line = ["%.4e" %x for x in res_vireo['ambient_Psi'][i, :]]
            line += ['%.2f' %res_vireo['Psi_LLRatio'][i]]
            fid.writelines("\t".join([cell_names[i]] + line) + "\n")
        fid.close()
    # compress the two probability tables in place
    bashCommand = "gzip -f %s %s" %(out_dir + "/prob_singlet.tsv",
        out_dir + "/prob_doublet.tsv")
    pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    pro.communicate()[0]
def make_whitelists(donor_id_file, out_prefix):
    """Generate whitelist for each donor as input for umi_tools extract

    Parameters
    ----------
    donor_id_file: str
        Tab-separated table with a header row; column 0 holds the cell
        barcode and column 1 the assigned donor id.
    out_prefix: str
        Output prefix; one "<prefix>_<donor>.txt" file is written per donor.

    Notes
    -----
    Cells labelled "unassigned" or "doublet" are skipped.  Only the part of
    each barcode before the first "-" is written, one per line.
    """
    table = np.genfromtxt(donor_id_file, dtype='str', delimiter='\t')[1:, :]
    table = table[table[:, 1] != 'unassigned', :]
    table = table[table[:, 1] != 'doublet', :]
    for _donor in np.unique(table[:, 1]):
        idx = table[:, 1] == _donor
        barcodes = table[idx, 0]
        # Robustness fix: a context manager guarantees the handle is
        # closed even if a write fails (previously left open on error).
        with open(out_prefix + "_%s.txt" %_donor, "w") as fid:
            for _line in barcodes:
                fid.write(_line.split('-')[0] + '\n')
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,429 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/GTbarcode.py | # GTbarcode - generator of genotype barcode for discriminatory variants
# Author: Yuanhua Huang
# Date: 28-09-2019
import os
import sys
import numpy as np
from optparse import OptionParser, OptionGroup
from .version import __version__
from .plot.base_plot import minicode_plot
from .utils.variant_select import variant_select
from .utils.vcf_utils import load_VCF, write_VCF, parse_donor_GPb
def main():
    """Command-line entry point: select discriminatory variants from a donor
    genotype VCF and write their genotype barcode across donors as a TSV,
    optionally with a minicode plot.
    """
    # parse command line options
    parser = OptionParser()
    parser.add_option("--vcfFile", "-i", dest="vcf_file", default=None,
        help="The VCF file for genotype of samples")
    parser.add_option("--outFile", "-o", dest="out_file", default=None,
        help="Output file [default: $vcfFile/GTbarcode.tsv]")

    group0 = OptionGroup(parser, "Optional arguments")
    group0.add_option("--genoTag", "-t", dest="geno_tag", default='GT',
        help=("The tag for donor genotype: GT, GP, PL [default: %default]"))
    group0.add_option("--noHomoAlt", dest="no_homo_alt", default=False,
        action="store_true", help="Filter out variants with homozygous ALT.")
    group0.add_option("--noPlot", dest="no_plot", default=False,
        action="store_true", help="Turn off the plot for the barcode.")
    group0.add_option("--figSize", dest="fig_size", default="4,2",
        help="Size for the output figure, comma separated [default: %default].")
    group0.add_option("--figFormat", dest="fig_format", default="png",
        help="Format of output figure: png or pdf [default: %default].")
    group0.add_option("--randSeed", type="int", dest="rand_seed", default=None,
        help=("Seed for random pick variants with same information gain "
              "[default: %default]"))
    parser.add_option_group(group0)

    (options, args) = parser.parse_args()
    if len(sys.argv[1:]) == 0:
        print("Welcome to GT barcode generator; Vireo v%s!\n" %(__version__))
        print("use -h or --help for help on argument.")
        sys.exit(1)

    ## input data vcf.gz
    if options.vcf_file is None:
        print("Error: need genotype data in vcf file.")
        sys.exit(1)
    else:
        vcf_file = options.vcf_file

    ## output file; create the parent directory if necessary
    if options.out_file is None:
        print("Warning: no outFile provided, we use $vcfFile/GTbarcode.tsv")
        out_file = os.path.dirname(os.path.abspath(vcf_file)) + "/GTbarcode.tsv"
    else:
        out_file = options.out_file
    out_dir = os.path.dirname(out_file)
    # os.mkdir failed for a bare file name (dirname == "") and for nested
    # missing directories; makedirs with exist_ok handles both safely
    if out_dir != "" and not os.path.exists(out_dir):
        os.makedirs(out_dir, exist_ok=True)

    ## Load VCF data and turn genotype probabilities into hard calls
    geno_tag = options.geno_tag
    donor_vcf = load_VCF(vcf_file, sparse=False, biallelic_only=True)
    donor_GPb = parse_donor_GPb(donor_vcf['GenoINFO'][geno_tag], geno_tag)
    var_ids = np.array(donor_vcf["variants"])
    GT_vals = np.argmax(donor_GPb, axis = 2)  # most likely genotype per donor
    sample_ids = donor_vcf['samples']

    ## Extract AD, DP and OTH counts from the INFO column (0 when absent)
    INFO = donor_vcf["FixedINFO"]["INFO"]
    AD, DP, OTH = [], [], []
    for _info in INFO:
        for _tag, _list in zip(["AD=", "DP=", "OTH="], [AD, DP, OTH]):
            if _info.count(_tag) == 0:
                _list.append(0)
            else:
                _list.append(float(_info.split(_tag)[1].split(";")[0]))
    AD, DP, OTH = np.array(AD), np.array(DP), np.array(OTH)

    ## filtering: well covered variants with low other-allele fraction.
    # OTH < 0.05 * DP is equivalent to OTH / DP < 0.05 given DP > 20, but
    # avoids divide-by-zero warnings for variants with DP == 0
    idx = (DP > 20) * (OTH < 0.05 * DP)
    if options.no_homo_alt:
        idx *= np.max(GT_vals, axis=1) < 2
    AD, DP, OTH = AD[idx], DP[idx], OTH[idx]
    var_ids, GT_vals = var_ids[idx], GT_vals[idx, :]

    ## select discriminatory variants and write the barcode table
    res_barcodes = variant_select(GT_vals, DP, rand_seed=options.rand_seed)
    with open(out_file, "w") as fid:
        fid.write("\t".join(["variants"] + sample_ids) + "\n")
        for i in res_barcodes[2]:
            line_list = [var_ids[i]] + ["%d" %x for x in GT_vals[i, :]]
            fid.write("\t".join(line_list) + "\n")

    ## plot
    if not options.no_plot:
        fig_size = np.array(options.fig_size.split(","), float)
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(fig_size[0], fig_size[1]), dpi=300)
        minicode_plot(res_barcodes[1], var_ids[res_barcodes[2]],
                      donor_vcf['samples'])
        plt.tight_layout()
        fig.savefig(".".join(out_file.split(".")[:-1]) + "." +
                    options.fig_format)


if __name__ == "__main__":
    main()
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,430 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/vireo_bulk.py | # Identification of donor abundance in bulk sample
import numpy as np
__docformat__ = "restructuredtext en"
__all__ = ['VireoBulk']
class VireoBulk():
    """Estimate of donor abundance in a multiplexed bulk sample

    Variables to infer
    ------------------
    psi: numpy.array (n_donor, )
        The fractional abundance of each donor in the mixture
    theta: numpy.array (n_GT, )
        The alternative allele rate in each genotype category

    Parameters
    ----------
    n_donor: int, number of donors in the mixture
    n_GT: int, number of genotype categories
    psi_init: numpy.array (n_donor, ) or None
        Initial value of psi; a random Dirichlet draw is used when None
    theta_init: sequence of float (n_GT, ) or None
        Initial value of theta; a random draw is used when None
    """
    def __init__(self, n_donor, n_GT=3, psi_init=None,
        theta_init=(0.01, 0.5, 0.99)):
        self.n_GT = n_GT
        self.n_donor = n_donor

        # Random defaults, possibly overridden by valid init values below
        self.psi = np.random.dirichlet([1] * n_donor)
        self.theta = np.random.rand(n_GT)

        if psi_init is not None:
            if n_donor != len(psi_init):
                print("Warning: n_donor != len(psi_init)")
            else:
                # bug fix: previously this branch re-drew a random psi
                # instead of using the provided initial value
                self.psi = np.array(psi_init, dtype=float)
        if theta_init is not None:
            if n_GT != len(theta_init):
                print("Warning: n_GT != len(theta_init)")
            else:
                self.theta = np.array(theta_init, dtype=float)

    def fit(self, AD, DP, GT_prob, max_iter=200, min_iter=5, epsilon_conv=1e-3,
        learn_theta=True, delay_fit_theta=0, model="EM", verbose=False):
        """Fit the unknown variables psi and theta with an EM algorithm

        Parameters
        ----------
        AD: numpy.array, (n_variant, ), int
            The count vector for alternative allele in all variants
        DP: numpy.array (n_variant, ), int
            The count vector for depths in all variants (i.e., two alleles)
        GT_prob: numpy.array, (n_variants, n_donor, n_GT)
            The probability tensor for each genotype in each donor
        learn_theta: bool
            Whether learn theta, otherwise use theta_init
        delay_fit_theta: int
            The number of steps to delay in updating theta
        max_iter : int
            Maximum number of iterations
        min_iter : int
            Minimum number of iterations
        epsilon_conv : float
            Threshold for detecting convergence
        model: string
            The algorithm used to fit the model. Only "EM" is supported for
            Expectation-Maximization algorithm
        verbose : bool
            Whether print out log info
        """
        BD = DP - AD
        logLik = np.zeros(max_iter)
        for it in range(max_iter):
            # E step: expectation of count assignment probability
            theta_mat = np.tensordot(GT_prob, self.theta, axes=(2, 0))
            Z1 = theta_mat * np.expand_dims(self.psi, 0)
            Z1 = Z1 / np.sum(Z1, axis=1, keepdims=True)
            Z0 = (1 - theta_mat) * np.expand_dims(self.psi, 0)
            Z0 = Z0 / np.sum(Z0, axis=1, keepdims=True)

            # M step: maximize logLikelihood over psi and theta
            psi_raw = np.dot(AD, Z1) + np.dot(BD, Z0)
            self.psi = psi_raw / np.sum(psi_raw)

            if learn_theta and it >= delay_fit_theta:
                theta_s1 = np.dot(AD, np.sum(GT_prob * np.expand_dims(Z1, 2), axis = 1))
                theta_s2 = np.dot(BD, np.sum(GT_prob * np.expand_dims(Z0, 2), axis = 1))
                self.theta = theta_s1 / (theta_s1 + theta_s2)

            # Likelihood and convergence check
            theta_vct = np.dot(np.dot(GT_prob, self.theta), self.psi)
            logLik[it] = np.sum(
                AD * np.log(theta_vct) + BD * np.log(1 - theta_vct))
            if it > min_iter:
                if logLik[it] < logLik[it - 1]:
                    if verbose:
                        print("Warning: logLikelihood decreases!\n")
                elif it == max_iter - 1:
                    if verbose:
                        print("Warning: VB did not converge!\n")
                elif logLik[it] - logLik[it - 1] < epsilon_conv:
                    break

        self.logLik = logLik[it]
        # include the final iteration in the trace (was logLik[:it], which
        # dropped the value reported in self.logLik)
        self.logLik_all = logLik[:it + 1]

    def LR_test(self, **kwargs):
        """Likelihood ratio test for psi vector in a null hypothesis.

        Use **kwargs for psi_null, AD, DP, GT_prob, log in
        vireoSNP.LikRatio_test() function. Note, AD, DP, GT_prob the same as
        the self.fit() function.
        """
        return LikRatio_test(psi=self.psi, theta=self.theta, **kwargs)
def LikRatio_test(psi, psi_null, AD, DP, GT_prob, theta, log=False):
    """Likelihood ratio test for psi vector in a null hypothesis.
    Please use the same AD, DP, and GT_prob as the fit() function.

    Parameters
    ----------
    psi: numpy.array (n_donor, )
        The fractional abundance of each donor in the mixture for alternative
        hypothesis
    psi_null: numpy.array (n_donor, )
        The psi vector in a null hypothesis
    AD: numpy.array, (n_variant, ), int
        The count vector for alternative allele in all variants
    DP: numpy.array (n_variant, ), int
        The count vector for depths in all variants (i.e., two alleles)
    GT_prob: numpy.array, (n_variants, n_donor, n_GT)
        The probability tensor for each genotype in each donor
    theta: numpy.array (n_GT, )
        The alternative allele rate in each genotype category
    log: bool
        If return p value in logarithm scale

    Return
    ------
    statistic: float
        The calculated chi2-statistic.
    pvalue: float
        The single-tailed p-value.
    """
    from scipy.stats import chi2

    BD = DP - AD

    def _binom_loglik(_psi):
        # Expected ALT-allele rate per variant under the mixture weights _psi
        _theta_vct = np.dot(np.dot(GT_prob, theta), _psi)
        return np.sum(
            AD * np.log(_theta_vct) + BD * np.log(1 - _theta_vct))

    # Twice the log-likelihood gain of the alternative over the null
    LR = 2 * (_binom_loglik(psi) - _binom_loglik(psi_null))
    df = len(psi_null) - 1
    pval = chi2.logsf(LR, df) if log else chi2.sf(LR, df)
    return LR, pval
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,431 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/base_utils.py | import numpy as np
def get_confusion(ids1, ids2):
    """Get confusion matrix

    Parameters
    ----------
    ids1: numpy.array or list
        id list in the first annotation
    ids2: numpy.array or list
        id list in the second annotation

    Return
    ------
    (confuse_mat, ids1_uniq, ids2_uniq)
    confuse_mat[i, j]:
        number of samples have ids1 == ids1_uniq[i]
        and ids2 == id2_uniq[j]
    """
    if isinstance(ids1, list): ids1 = np.array(ids1)
    if isinstance(ids2, list): ids2 = np.array(ids2)

    uniq1 = np.unique(ids1)
    uniq2 = np.unique(ids2)

    conf_mat = np.zeros((uniq1.size, uniq2.size), dtype=int)
    for ii, _u1 in enumerate(uniq1):
        mask1 = ids1 == _u1  # hoisted: reused for every column
        for jj, _u2 in enumerate(uniq2):
            conf_mat[ii, jj] = np.sum(mask1 * (ids2 == _u2))
    return conf_mat, uniq1, uniq2
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,432 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/vireo_doublet.py | ## Prediction doublets
import itertools
import numpy as np
import multiprocessing
from scipy.stats import entropy
from scipy.sparse import csc_matrix
from scipy.special import logsumexp, digamma, betaln
from .vireo_base import normalize, loglik_amplify
def predict_doublet(vobj, AD, DP, update_GT=True, update_ID=True,
    doublet_rate_prior=None):
    """Predict doublet with fitted Vireo model

    Parameters
    ----------
    vobj : Vireo object
        Fitted Vireo object before predicting doublets
    AD : scipy.sparse.csc_matrix (n_var, n_cell)
        Sparse count matrix for alternative allele
    DP : scipy.sparse.csc_matrix (n_var, n_cell)
        Sparse count matrix for depths, alternative + reference alleles
    update_GT : bool
        Whether updating GT_prob after removing doublet_prob
    update_ID : bool
        Whether updating ID_prob by removing doublet_prob
    doublet_rate_prior : float
        Prior value of doublet rate

    Returns
    -------
    A tuple of three numpy arrays (prob_doublet, prob_signlet, logLik_ratio)

    prob_doublet : numpy array (n_cell, n_donor * (n_donor - 1) / 2)
        Assignment probability of a cell to any doublet (donor pair)
    prob_signlet : numpy array (n_cell, n_donor)
        Assignment probability of a cell to any singlet donor
    logLik_ratio : numpy array (n_cell, )
        Log-likelihood of the best doublet minus the best singlet per cell
    """
    # Augment genotype tensor and beta parameters with every donor pair
    GT_both = add_doublet_GT(vobj.GT_prob)
    beta_mu_both, beta_sum_both = add_doublet_theta(vobj.beta_mu,
        vobj.beta_sum)
    n_doublet_pair = GT_both.shape[1] - vobj.GT_prob.shape[1]
    if doublet_rate_prior is None:
        # heuristic: doublet rate grows with cell number, capped at 0.5
        doublet_rate_prior = min(0.5, AD.shape[1] / 100000)

    # Prior: singlet donors keep their prior scaled by (1 - doublet rate);
    # doublet pairs evenly share the remaining doublet_rate_prior mass
    ID_prior_both = np.append(
        vobj.ID_prior * (1 - doublet_rate_prior),
        np.ones((vobj.n_cell, n_doublet_pair)) / n_doublet_pair *
        doublet_rate_prior, axis=1)

    # Calculate assignment probability (same as update_ID_prob())
    BD = DP - AD
    logLik_ID = np.zeros((AD.shape[1], GT_both.shape[1]))
    # digamma terms of the Beta parameters; combined as S1 + S2 - SS below
    # they give the expected log-likelihood under the Beta posterior of the
    # allelic rates
    _digamma1 = np.expand_dims(digamma(beta_sum_both * beta_mu_both), 1)
    _digamma2 = np.expand_dims(digamma(beta_sum_both * (1 - beta_mu_both)), 1)
    _digammas = np.expand_dims(digamma(beta_sum_both), 1)
    for ig in range(GT_both.shape[2]):
        S1 = AD.T @ (GT_both[:, :, ig] * _digamma1[:, :, ig])
        S2 = BD.T @ (GT_both[:, :, ig] * _digamma2[:, :, ig])
        SS = DP.T @ (GT_both[:, :, ig] * _digammas[:, :, ig])
        logLik_ID += (S1 + S2 - SS)

    # best-doublet vs best-singlet log-likelihood ratio for each cell
    logLik_ratio = (logLik_ID[:, vobj.n_donor:].max(1) -
                    logLik_ID[:, :vobj.n_donor].max(1))

    ID_prob_both = normalize(np.exp(loglik_amplify(
        logLik_ID + np.log(ID_prior_both))))

    if update_ID:
        # keep only the singlet columns on the fitted object
        vobj.ID_prob = ID_prob_both[:, :vobj.n_donor]
    if update_GT:
        if update_ID:
            vobj.update_GT_prob(AD, DP)
        else:
            print("For update_GT, please turn on update_ID.")

    prob_doublet = ID_prob_both[:, vobj.n_donor:]
    prob_signlet = ID_prob_both[:, :vobj.n_donor]
    return prob_doublet, prob_signlet, logLik_ratio
def add_doublet_theta(beta_mu, beta_sum):
    """
    calculate theta for doublet genotype: GT=0&1, GT=0&2, and GT=1&2 by
    averaging their beta parameters

    Example
    -------
    add_doublet_theta(np.array([[0.01, 0.5, 0.99]]), np.array([[30, 6, 30]]))
    """
    # TODO: support reduced GT for relatives
    pair_idx = np.array(
        list(itertools.combinations(range(beta_mu.shape[1]), 2)))
    idx_a, idx_b = pair_idx[:, 0], pair_idx[:, 1]

    # doublet mean is the arithmetic mean of the two singlet means;
    # doublet concentration is their geometric mean
    mu_doublet = (beta_mu[:, idx_a] + beta_mu[:, idx_b]) / 2.0
    sum_doublet = np.sqrt(beta_sum[:, idx_a] * beta_sum[:, idx_b])

    return (np.append(beta_mu, mu_doublet, axis=-1),
            np.append(beta_sum, sum_doublet, axis=-1))
def add_doublet_GT(GT_prob):
    """
    Add doublet genotype by summarizing their probability:
    New GT has five categories: 0, 1, 2, 1.5, 2.5
    TODO: New GT has six categories: 0, 1, 2, 0_1, 0_2, 1_2
    """
    n_var, n_donor, n_gt = GT_prob.shape

    # all unordered pairs of genotype categories and of donors
    gt_pairs = np.array(list(itertools.combinations(range(n_gt), 2)))
    sp_pairs = np.array(list(itertools.combinations(range(n_donor), 2)))
    ga, gb = gt_pairs[:, 0], gt_pairs[:, 1]
    sa, sb = sp_pairs[:, 0], sp_pairs[:, 1]
    n_gt_pair = gt_pairs.shape[0]

    ## GT_prob has three genotypes: 0, 1, 2;
    GT_prob_db = np.zeros((n_var, sp_pairs.shape[0], n_gt + n_gt_pair))
    # same genotype in both donors of the pair
    GT_prob_db[:, :, :n_gt] = (GT_prob[:, sa, :] *
                               GT_prob[:, sb, :])
    # mixed genotypes: either donor can carry either genotype of the pair
    GT_prob_db[:, :, n_gt:] = (GT_prob[:, sa, :][:, :, ga] *
                               GT_prob[:, sb, :][:, :, gb] +
                               GT_prob[:, sa, :][:, :, gb] *
                               GT_prob[:, sb, :][:, :, ga])
    GT_prob_db = normalize(GT_prob_db, axis=2)

    # pad the singlet tensor with zero probability for doublet categories
    GT_prob_sg = np.append(GT_prob,
        np.zeros((n_var, n_donor, n_gt_pair)), axis=2)
    return np.append(GT_prob_sg, GT_prob_db, axis=1)
def _fit_EM_ambient(AD, DP, theta_mat, n_donor=None, max_iter=200, min_iter=20,
epsilon_conv=1e-3, Hessian=True, verbose=False):
"""Estimate ambient RNA abundance by EM algorithm
"""
BD = DP - AD
if n_donor is None:
n_donor = theta_mat.shape[1]
psi = np.random.dirichlet([1] * theta_mat.shape[1])
logLik = np.zeros(max_iter)
for it in range(max_iter):
# masking donors to restrict the number of donors in the mixture
if it < min_iter - 3:
mask_idx = []
else:
mask_idx = np.argsort(psi)[ : (theta_mat.shape[1] - n_donor)]
# E step: expectation of count assignment probability
Z1 = theta_mat * np.expand_dims(psi, 0)
Z1[:, mask_idx] = 0
Z1 = Z1 / np.sum(Z1, axis=1, keepdims=True)
Z0 = (1 - theta_mat) * np.expand_dims(psi, 0)
Z0[:, mask_idx] = 0
Z0 = Z0 / np.sum(Z0, axis=1, keepdims=True)
# M step: maximize logLikehood over psi and theta
psi_raw = np.dot(AD, Z1) + np.dot(BD, Z0)
psi = psi_raw / np.sum(psi_raw)
# Likelihood and check convergence
theta_vct = np.dot(theta_mat, psi)
logLik[it] = np.sum(
AD * np.log(theta_vct) + BD * np.log(1 - theta_vct))
if it > min_iter:
if logLik[it] < logLik[it - 1]:
if verbose:
print("Warning: logLikelihood decreases!\n")
elif it == max_iter - 1:
if verbose:
print("Warning: VB did not converge!\n")
elif logLik[it] - logLik[it - 1] < epsilon_conv:
break
logLik_RV = logLik[:it]
# caluclate the Cramér–Rao bound of variance
var_CRbound = np.array([ None ] * len(psi))
if Hessian:
theta_vct = np.dot(theta_mat, psi).reshape(-1, 1)
Fisher_info = np.sum(
(theta_mat / theta_vct)**2 * AD.reshape(-1, 1) +
(theta_mat / (1 - theta_vct))**2 * BD.reshape(-1, 1),
axis = 0
)
var_CRbound = 1.0 / Fisher_info
# calculate likelihood ratio
logLik_null = np.zeros(theta_mat.shape[1])
for i in range(theta_mat.shape[1]):
min_p = 0 # minimal proportion for other donors, e.g., 0.001
psi_null = np.ones(theta_mat.shape[1]) * min_p / (theta_mat.shape[1] - 1)
psi_null[np.argmax(psi)] = 1 - min_p
theta_null = np.dot(theta_mat, psi_null)
logLik_null[i] = np.sum(
AD * np.log(theta_null) + BD * np.log(1 - theta_null))
logLik_ratio = logLik_RV[-1] - np.max(logLik_null)
# logLik_ratio = (
# logLik_RV[-1] - logsumexp(logLik_null - np.log(theta_mat.shape[1])))
return psi, var_CRbound, logLik_ratio
def predit_ambient(vobj, AD, DP, nproc=10, min_ELBO_gain=None):
    """Predict fraction of ambient RNA contamination per cell.

    Still under development.

    Parameters
    ----------
    vobj : Vireo object
        A fitted Vireo model (provides GT_prob, beta_mu, ID_prob, n_donor)
    AD : scipy.sparse.csc_matrix (n_var, n_cell)
        Sparse count matrix for alternative allele
    DP : scipy.sparse.csc_matrix (n_var, n_cell)
        Sparse count matrix for depths
    nproc : int
        Number of worker processes; values <= 1 run sequentially
    min_ELBO_gain : float or None
        Minimum per-variant ELBO gain to keep a variant; defaults to
        sqrt(n_cell) / 3

    Returns
    -------
    (Psi_mat, Psi_var, Psi_logLik) : per-cell donor-fraction estimates,
    their Cramer-Rao variance bounds, and the likelihood ratios, all from
    _fit_EM_ambient
    """
    ### detect ambient RNA for each cell
    import timeit
    start = timeit.default_timer()

    ## option 1: binomial
    # expected ALT-allele rate per variant and donor, from the fitted
    # genotype probabilities and the mean allelic rates
    theta_mat = np.tensordot(vobj.GT_prob, vobj.beta_mu[0, :], axes=(2, 0))

    ## option 2: binomial per variants
    # theta_mat = (AD @ vobj.ID_prob + 0.1) / (DP @ vobj.ID_prob + 0.2)
    # theta_mat = betabinom.pmf(1, 1, _ss1, _ss2, loc=0)

    ## Select donor informed variants
    from .variant_select import variant_ELBO_gain
    if min_ELBO_gain is None:
        min_ELBO_gain = np.sqrt(AD.shape[1]) / 3.0

    _ELBO_gain = variant_ELBO_gain(vobj.ID_prob, AD, DP)
    _snp_idx = _ELBO_gain >= min_ELBO_gain
    print("[vireo] %d out %d SNPs selected for ambient RNA detection: "
          "ELBO_gain > %.1f" %(sum(_snp_idx), len(_snp_idx), min_ELBO_gain))

    theta_mat = theta_mat[_snp_idx, :]
    AD = AD[_snp_idx, :]
    DP = DP[_snp_idx, :]

    ## Fit models: one independent EM fit per cell
    if nproc > 1:
        # fan the per-cell fits out over a process pool
        result = []
        pool = multiprocessing.Pool(processes = nproc)
        for i in range(AD.shape[1]):
            _ad = AD[:, i].toarray().reshape(-1)
            _dp = DP[:, i].toarray().reshape(-1)
            result.append(pool.apply_async(
                _fit_EM_ambient, (_ad, _dp, theta_mat, None), callback = None))
        pool.close()
        pool.join()
        res_list = [res.get() for res in result]
        Psi_mat = np.array([res[0] for res in res_list])
        Psi_var = np.array([res[1] for res in res_list])
        Psi_logLik = np.array([res[2] for res in res_list])
    else:
        Psi_mat = np.zeros((AD.shape[1], vobj.n_donor))
        Psi_var = np.zeros((AD.shape[1], vobj.n_donor))
        Psi_logLik = np.zeros(AD.shape[1])
        for i in range(AD.shape[1]):
            _ad = AD[:, i].toarray().reshape(-1)
            _dp = DP[:, i].toarray().reshape(-1)
            Psi_mat[i, :], Psi_var[i, :], Psi_logLik[i] = _fit_EM_ambient(
                _ad, _dp, theta_mat, None)

    stop = timeit.default_timer()
    print('[vireo] Ambient RNA time: %.1f sec' %(stop - start))
    return Psi_mat, Psi_var, Psi_logLik
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,433 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/vireo_wrap.py | # Wrap function for Vireo model
# Author: Yuanhua Huang
# Date: 22/03/2020
import sys
import numpy as np
import multiprocessing
from scipy.sparse import csc_matrix
from .vireo_base import optimal_match, donor_select
from .vireo_model import Vireo
from .vireo_doublet import predict_doublet, predit_ambient
def _model_fit(_model, AD, DP, max_iter, delay_fit_theta):
    """Fit a single Vireo model and return it.

    Module-level (picklable) helper so that multiple initializations can be
    fitted in parallel worker processes.
    """
    _model.fit(AD, DP, delay_fit_theta=delay_fit_theta,
               max_iter=max_iter, min_iter=5, verbose=False)
    return _model
def vireo_wrap(AD, DP, GT_prior=None, n_donor=None, learn_GT=True, n_init=20,
    random_seed=None, check_doublet=True, max_iter_init=20, delay_fit_theta=3,
    n_extra_donor=0, extra_donor_mode="distance",
    check_ambient=False, nproc=4, **kwargs):
    """
    A wrap function to run vireo with multiple initializations

    Parameters
    ----------
    AD, DP : numpy.array or scipy.sparse.csc_matrix (n_var, n_cell)
        Alternative-allele and depth count matrices
    GT_prior : numpy.array (n_var, n_donor, n_GT) or None
        Prior genotype probability; genotypes are learned when None
    n_donor : int or None
        Number of donors; taken from GT_prior when None
    learn_GT : bool
        Whether to update genotype probabilities during fitting
    n_init : int
        Number of random initializations; the one with best ELBO is kept
    random_seed : int or None
        Seed for numpy's global RNG used for initialization
    check_doublet : bool
        Whether to predict doublets after fitting
    max_iter_init : int
        Max iterations for each short initial fit
    delay_fit_theta : int
        Number of iterations to delay updating theta
    n_extra_donor : int
        Extra donors to search over before reducing back to n_donor
    extra_donor_mode : str
        Strategy for reducing the extra donors (passed to donor_select)
    check_ambient : bool
        Whether to estimate ambient RNA contamination (experimental)
    nproc : int
        Number of processes for parallel fitting
    **kwargs
        Forwarded to the Vireo constructor

    Returns
    -------
    dict with assignment probabilities, genotype probabilities, doublet
    statistics, beta parameters, ambient estimates and ELBO values
    """
    # Convert dense inputs to sparse when they are mostly zeros
    if type(DP) is np.ndarray and np.mean(DP > 0) < 0.3:
        print("Warning: input matrices is %.1f%% sparse, "
            %(100 - np.mean(DP > 0) * 100) +
            "change to scipy.sparse.csc_matrix" )
        AD = csc_matrix(AD)
        DP = csc_matrix(DP)

    if learn_GT == False and n_extra_donor > 0:
        print("Searching from extra donors only works with learn_GT")
        n_extra_donor = 0

    # note learn_GT is false for mode 2 and 5 only (set before)
    if n_donor is None:
        if GT_prior is None:
            print("[vireo] Error: requiring n_donor or GT_prior.")
            sys.exit()
        else:
            n_donor = GT_prior.shape[1]

    if learn_GT is False and n_init > 1:
        print("GT is fixed, so use a single initialization")
        n_init = 1

    ## Setting random seed for initialization
    if random_seed is not None:
        np.random.seed(random_seed)

    # Decide the donor count and genotype prior used during the (possibly
    # enlarged) initial search
    GT_prior_use = None
    n_donor_use = int(n_donor + n_extra_donor)
    if GT_prior is not None and n_donor_use == GT_prior.shape[1]:
        GT_prior_use = GT_prior.copy()
    elif GT_prior is not None and n_donor_use < GT_prior.shape[1]:
        # prior covers more donors than requested: search over all of them
        GT_prior_use = GT_prior.copy()
        n_donor_use = GT_prior.shape[1]

    ## Initialise models, one per random restart
    _models_all = []
    for im in range(n_init):
        _modelCA = Vireo(n_var=AD.shape[0], n_cell=AD.shape[1],
                         n_donor=n_donor_use, learn_GT=learn_GT,
                         GT_prob_init=GT_prior_use, **kwargs)
        _modelCA.set_prior(GT_prior=GT_prior_use)
        _models_all.append(_modelCA)

    ## Fitting the models with single or multiple processes
    if nproc > 1:
        result = []
        pool = multiprocessing.Pool(processes = nproc)
        for im in range(n_init):
            result.append(pool.apply_async(_model_fit,
                (_models_all[im], AD, DP, max_iter_init, delay_fit_theta),
                callback = None))
        pool.close()
        pool.join()
        _models_all = [res.get() for res in result]
    else:
        for im in range(n_init):
            _models_all[im].fit(AD, DP, min_iter=5, max_iter=max_iter_init,
                delay_fit_theta=delay_fit_theta, verbose=False)

    ## select the model with best initialization (highest ELBO)
    elbo_all = np.array([x.ELBO_[-1] for x in _models_all])
    _idx = np.argmax(elbo_all)
    modelCA = _models_all[_idx]

    if n_extra_donor == 0:
        # continue fitting the winner to full convergence
        modelCA.fit(AD, DP, min_iter=5, verbose=False)
    else:
        # reduce the enlarged donor set back to n_donor, then refit
        _ID_prob = donor_select(modelCA.GT_prob, modelCA.ID_prob, n_donor,
                                mode=extra_donor_mode)
        modelCA = Vireo(n_var=AD.shape[0], n_cell=AD.shape[1],
                        n_donor=n_donor, learn_GT=learn_GT,
                        GT_prob_init=GT_prior_use, ID_prob_init=_ID_prob,
                        beta_mu_init=modelCA.beta_mu,
                        beta_sum_init=modelCA.beta_sum, **kwargs)
        modelCA.set_prior(GT_prior=GT_prior_use)
        modelCA.fit(AD, DP, min_iter=5, delay_fit_theta=delay_fit_theta,
                    verbose=False)

    print("[vireo] lower bound ranges [%.1f, %.1f, %.1f]"
          %(np.min(elbo_all), np.median(elbo_all), np.max(elbo_all)))

    ## Run Vireo again with updating genotype
    if GT_prior is not None and n_donor < GT_prior.shape[1]:
        # keep the n_donor most populated donors and refit with fixed GT
        _donor_cnt = np.sum(modelCA.ID_prob, axis=0)
        _donor_idx = np.argsort(_donor_cnt)[::-1]
        GT_prior_use = GT_prior[:, _donor_idx[:n_donor], :]
        modelCA = Vireo(n_var=AD.shape[0], n_cell=AD.shape[1],
                        n_donor=n_donor, learn_GT=False,
                        GT_prob_init=GT_prior_use, **kwargs)
        modelCA.fit(AD, DP, min_iter=20, verbose=False)
    elif GT_prior is not None and n_donor > GT_prior.shape[1]:
        # prior covers only some donors: align them to the learned donors
        # and keep learning genotypes for the remaining ones
        GT_prior_use = modelCA.GT_prob.copy()
        idx = optimal_match(GT_prior, GT_prior_use)[1]
        GT_prior_use[:, idx, :] = GT_prior
        _idx_order = np.append(idx, np.delete(np.arange(n_donor), idx))
        GT_prior_use = GT_prior_use[:, _idx_order, :]
        ID_prob_use = modelCA.ID_prob[:, _idx_order]
        modelCA = Vireo(n_var=AD.shape[0], n_cell=AD.shape[1],
                        n_donor=n_donor, learn_GT=learn_GT,
                        ID_prob_init=ID_prob_use,
                        beta_mu_init=modelCA.beta_mu,
                        beta_sum_init=modelCA.beta_sum,
                        GT_prob_init=GT_prior_use, **kwargs)
        modelCA.set_prior(GT_prior = GT_prior_use)
        modelCA.fit(AD, DP, min_iter=20, verbose=False)

    ## print the beta parameters
    print("[vireo] allelic rate mean and concentrations:")
    print(np.round(modelCA.beta_mu, 3))
    print(np.round(modelCA.beta_sum, 1))

    ## Summarise donor size
    print("[vireo] donor size before removing doublets:")
    _donor_cnt = np.sum(modelCA.ID_prob, axis=0)
    print("\t".join(["donor%d" %x for x in range(len(_donor_cnt))]))
    print("\t".join(["%.0f" %x for x in _donor_cnt]))

    ## Predict doublets
    if check_doublet:
        doublet_prob, ID_prob, doublet_LLR = predict_doublet(modelCA, AD, DP)
    else:
        ID_prob = modelCA.ID_prob
        doublet_prob = np.zeros((AD.shape[1], int(n_donor * (n_donor - 1) / 2)))
        doublet_LLR = np.zeros(AD.shape[1])

    # Beta shape parameters (alpha; beta) stacked row-wise
    theta_shapes = np.append(modelCA.beta_mu * modelCA.beta_sum,
        (1 - modelCA.beta_mu) * modelCA.beta_sum, axis=0)

    ## Predict ambient RNAs (experimental)
    if check_ambient:
        # limit BLAS threads so the process pool does not oversubscribe CPUs
        from threadpoolctl import threadpool_limits
        with threadpool_limits(limits=1, user_api='blas'):
            ambient_Psi, Psi_var, Psi_logLik_ratio = predit_ambient(
                modelCA, AD, DP, nproc=nproc)
    else:
        ambient_Psi, Psi_var, Psi_logLik_ratio = None, None, None

    RV = {}
    RV['ID_prob'] = ID_prob
    RV['GT_prob'] = modelCA.GT_prob
    RV['doublet_LLR'] = doublet_LLR
    RV['doublet_prob'] = doublet_prob
    RV['theta_shapes'] = theta_shapes
    RV['theta_mean'] = modelCA.beta_mu
    RV['theta_sum'] = modelCA.beta_sum
    RV['ambient_Psi'] = ambient_Psi
    RV['Psi_var'] = Psi_var
    RV['Psi_LLRatio'] = Psi_logLik_ratio
    RV['LB_list'] = elbo_all
    RV['LB_doublet'] = modelCA.ELBO_[-1]
    return RV
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,434 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/vcf_utils.py | # Utilility functions for processing vcf files
# Author: Yuanhua Huang
# Date: 24/06/2019
import os
import sys
import gzip
import subprocess
import numpy as np
from .vireo_base import match, optimal_match
def parse_sample_info(sample_dat, sparse=True, format_list=None):
    """
    Parse genotype information for each sample from VCF sample columns.

    Each element of sample_dat is one variant: the FORMAT string (e.g.
    "GT:AD:DP") followed by one entry per sample. Returns (RV, n_SNP_tagged)
    where RV maps each tag to its parsed values (plus CSC-style 'indices',
    'indptr' and 'shape' in sparse mode) and n_SNP_tagged counts how often
    each tag was filled. Returns None for empty input.

    Note, the sparse mode requires the same format for all variants.
    """
    if sample_dat == [] or sample_dat is None:
        return None

    # FORMAT field of every variant, split into tags
    format_all = [x[0].split(":") for x in sample_dat]
    if format_list is None:
        format_list = format_all[0]

    RV = {tag: [] for tag in format_list}
    n_SNP_tagged = np.zeros(len(format_list), np.int64)

    if sparse:
        ## sparse matrix requires all keys
        format_set_all = [set(x) for x in format_all]
        if format_set_all.count(set(format_list)) != len(format_all):
            print("Error: require the same format for all variants.")
            exit()

        RV['indices'] = []
        RV['indptr'] = [0]
        RV['shape'] = (len(sample_dat[0][1:]), len(sample_dat))
        missing_val = ":".join(["."] * len(format_list))

        cnt = 0
        for j, _line in enumerate(sample_dat):   # variant j
            key_idx = [format_all[j].index(tag) for tag in format_list]
            for i, _cell in enumerate(_line[1:]):   # cell i
                # skip fully-missing entries
                if _cell == missing_val or _cell == ".":
                    continue
                _fields = _cell.split(":")
                for k, tag in enumerate(format_list):
                    RV[tag].append(_fields[key_idx[k]])
                cnt += 1
                RV['indices'].append(i)
                n_SNP_tagged += 1
            RV['indptr'].append(cnt)
    else:
        for j, _line in enumerate(sample_dat):   # variant j
            _cells = [x.split(":") for x in _line[1:]]
            for il, tag in enumerate(format_list):
                if tag in format_all[j]:
                    k = format_all[j].index(tag)
                    RV[tag].append([x[k] for x in _cells])
                    n_SNP_tagged[il] += 1
                else:
                    # tag absent for this variant: fill with missing marks
                    RV[tag].append(["."] * len(_cells))

    # Warn when a format tag covers fewer than 10% of the variants
    idx_low_tag = np.where(n_SNP_tagged < (0.1 * len(sample_dat)))[0]
    if len(idx_low_tag) > 0:
        print('[vireo] Warning: too few variants with tags!',
              '\t'.join([format_list[k] + ": " + str(n_SNP_tagged[k])
                         for k in range(len(format_list))]))

    return RV, n_SNP_tagged
def load_VCF(vcf_file, biallelic_only=False, load_sample=True, sparse=True,
             format_list=None):
    """
    Load whole VCF file
    -------------------
    Initially designed to load VCF from cellSNP output, requiring
    1) all variants have the same format list;
    2) a line starting with "#CHROM", with sample ids.
    If these two requirements are satisfied, this function also supports general
    VCF files, e.g., genotype for multiple samples.

    Note, it may take a large memory, please filter the VCF with bcftools first.

    Examples
    --------
    * Load VCF file, e.g., from cellsnp-lite output:

    >>> import vireoSNP
    >>> import numpy as np
    >>> vcf_dat = vireoSNP.vcf.load_VCF("cellSNP.cells.vcf.gz", sparse=False,
    >>>     biallelic_only=False, format_list=['GT', 'AD', 'DP', 'ALL'])
    >>> var_ids = np.array(vcf_dat['variants'])
    >>> samples = np.array(vcf_dat['samples'])
    >>> GT_mat = np.array(vcf_dat['GenoINFO']['GT'])
    >>> AD_mat = np.array(vcf_dat['GenoINFO']['AD']).astype(float)
    >>> DP_mat = np.array(vcf_dat['GenoINFO']['DP']).astype(float)
    >>> ALL_bases_mat = np.array(vcf_dat['GenoINFO']['ALL'])
    """
    # gzip'd input yields bytes, so lines are decoded below
    if vcf_file[-3:] == ".gz" or vcf_file[-4:] == ".bgz":
        infile = gzip.open(vcf_file, "rb")
        is_gzip = True
    else:
        infile = open(vcf_file, "r")
        is_gzip = False

    FixedINFO = {}
    contig_lines = []
    comment_lines = []
    var_ids, obs_ids, obs_dat = [], [], []

    for line in infile:
        if is_gzip:
            line = line.decode('utf-8')
        if line.startswith("#"):
            if line.startswith("##contig="):
                contig_lines.append(line.rstrip())
            if line.startswith("#CHROM"):
                # header line: sample ids (columns 10+) and the 8 fixed columns
                # NOTE(review): key_ids is only bound here, so a VCF without a
                # "#CHROM" line would raise NameError below — confirm inputs
                if load_sample:
                    obs_ids = line.rstrip().split("\t")[9:]
                key_ids = line[1:].rstrip().split("\t")[:8]
                for _key in key_ids:
                    FixedINFO[_key] = []
            else:
                comment_lines.append(line.rstrip())
        else:
            list_val = line.rstrip().split("\t") #[:5] #:8
            if biallelic_only:
                # skip records whose REF or ALT is longer than one base
                if len(list_val[3]) > 1 or len(list_val[4]) > 1:
                    continue
            if load_sample:
                obs_dat.append(list_val[8:])
            for i in range(len(key_ids)):
                FixedINFO[key_ids[i]].append(list_val[i])
            # variant id: CHROM_POS_REF_ALT
            var_ids.append("_".join([list_val[x] for x in [0, 1, 3, 4]]))
    infile.close()

    RV = {}
    RV["variants"] = var_ids
    RV["FixedINFO"] = FixedINFO
    RV["contigs"] = contig_lines
    RV["comments"] = comment_lines
    if load_sample:
        RV["samples"] = obs_ids
        RV["GenoINFO"], RV["n_SNP_tagged"] = parse_sample_info(
            obs_dat, sparse, format_list)
    return RV
def write_VCF_to_hdf5(VCF_dat, out_file):
    """
    Write vcf data into hdf5 file

    Parameters
    ----------
    VCF_dat: dict, as returned by load_VCF()
        Needs keys 'contigs', 'samples', 'variants', 'comments', 'FixedINFO'
        and 'GenoINFO'.
    out_file: str
        Path of the HDF5 file to create (overwritten if it exists).
    """
    import h5py
    f = h5py.File(out_file, 'w')
    # np.array(..., dtype='S') encodes python strings as fixed-width byte
    # strings, which h5py stores natively; the previous np.string_ alias was
    # removed in NumPy 2.0.
    f.create_dataset("contigs", data=np.array(VCF_dat['contigs'], dtype='S'),
                     compression="gzip", compression_opts=9)
    f.create_dataset("samples", data=np.array(VCF_dat['samples'], dtype='S'),
                     compression="gzip", compression_opts=9)
    f.create_dataset("variants", data=np.array(VCF_dat['variants'], dtype='S'),
                     compression="gzip", compression_opts=9)
    f.create_dataset("comments", data=np.array(VCF_dat['comments'], dtype='S'),
                     compression="gzip", compression_opts=9)

    ## variant fixed information
    fixed = f.create_group("FixedINFO")
    for _key in VCF_dat['FixedINFO']:
        fixed.create_dataset(_key,
                             data=np.array(VCF_dat['FixedINFO'][_key], dtype='S'),
                             compression="gzip", compression_opts=9)

    ## genotype information for each sample
    geno = f.create_group("GenoINFO")
    for _key in VCF_dat['GenoINFO']:
        geno.create_dataset(_key,
                            data=np.array(VCF_dat['GenoINFO'][_key], dtype='S'),
                            compression="gzip", compression_opts=9)

    f.close()
def read_sparse_GeneINFO(GenoINFO, keys=['AD', 'DP'], axes=[-1, -1]):
    """Build scipy CSR matrices from the sparse components of a GenoINFO dict.

    GenoINFO must contain 'shape' (n_cell, n_variant), 'indptr', 'indices'
    and one list of string values per requested FORMAT tag. For each tag the
    comma-separated field at position `axes[i]` is taken, with '.' mapped
    to 0. Returns a dict of (n_variant, n_cell) csr_matrix objects.
    """
    from scipy.sparse import csr_matrix

    n_cell, n_var = np.array(GenoINFO['shape']).astype('int')
    row_ptr = np.array(GenoINFO['indptr']).astype('int')
    col_idx = np.array(GenoINFO['indices']).astype('int')

    RV = {}
    for _key, _axis in zip(keys, axes):
        _values = []
        for _entry in GenoINFO[_key]:
            _field = _entry.split(",")[_axis]
            _values.append(_field if _field != '.' else '0')
        RV[_key] = csr_matrix(
            (np.array(_values).astype('float'), col_idx, row_ptr),
            shape=(n_var, n_cell))
    return RV
def GenoINFO_maker(GT_prob, AD_reads, DP_reads):
    """
    Generate the Genotype information for estimated genotype probability at
    sample level.

    Parameters
    ----------
    GT_prob: numpy.array, (n_variant, n_donor, 3)
        Probability of the genotypes 0/0, 1/0 and 1/1. Not modified.
    AD_reads: numpy.array, (n_variant, n_donor)
        Expected ALT read counts; rounded to integer strings.
    DP_reads: numpy.array, (n_variant, n_donor)
        Expected total read counts; rounded to integer strings.

    Returns
    -------
    dict with keys 'GT', 'AD', 'DP', 'PL', each a per-variant list of
    per-donor strings.
    """
    GT_val = np.argmax(GT_prob, axis=2)
    # BUG FIX: clip on a copy — the previous in-place assignment silently
    # modified the caller's GT_prob array
    GT_prob = np.maximum(GT_prob, 10**(-10))
    # Phred-scaled likelihoods, rounded to integers
    PL_prob = np.round(-10 * np.log10(GT_prob)).astype(int).astype(str)
    AD_reads = np.round(AD_reads).astype(int).astype(str)
    DP_reads = np.round(DP_reads).astype(int).astype(str)
    GT, PL, AD, DP = [], [], [], []
    for i in range(GT_prob.shape[0]):
        GT.append([['0/0', '1/0', '1/1'][x] for x in GT_val[i, :]])
        PL.append([",".join(list(x)) for x in PL_prob[i, :, :]])
        AD.append(list(AD_reads[i, :]))
        DP.append(list(DP_reads[i, :]))

    RV = {}
    RV['GT'] = GT
    RV['AD'] = AD
    RV['DP'] = DP
    RV['PL'] = PL
    return RV
def write_VCF(out_file, VCF_dat, GenoTags=['GT', 'AD', 'DP', 'PL']):
    """Write a VCF dict (see load_VCF) to disk and compress it.

    Parameters
    ----------
    out_file: str
        Output path; a trailing ".gz" is stripped while writing and restored
        by the bgzip/gzip step at the end.
    VCF_dat: dict
        Needs 'comments', 'FixedINFO' and 'variants'; 'samples' plus
        'GenoINFO' are used for the per-sample columns when present.
    GenoTags: list of str
        FORMAT tags to write for each sample, in this order.
    """
    if out_file.endswith(".gz"):
        out_file_use = out_file.split(".gz")[0]
    else:
        out_file_use = out_file

    if "samples" not in VCF_dat:
        VCF_dat["samples"] = []
        if GenoTags != []:
            # NOTE(review): despite this message the FORMAT column is still
            # written below; with zero samples no per-sample fields follow
            print("No sample available: GenoTags will be ignored.")

    fid_out = open(out_file_use, "w")
    for line in VCF_dat['comments']:
        fid_out.writelines(line + "\n")

    VCF_COLUMN = ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER",
                  "INFO", "FORMAT"]

    fid_out.writelines("#" + "\t".join(VCF_COLUMN + VCF_dat['samples']) + "\n")
    for i in range(len(VCF_dat['variants'])):
        line = [VCF_dat['FixedINFO'][x][i] for x in VCF_COLUMN[:8]]
        line.append(":".join(GenoTags))
        # for d in range(len(VCF_dat['GenoINFO'][GenoTags[0]][0])):
        #     _line_tag = [VCF_dat['GenoINFO'][x][i][d] for x in GenoTags]
        for s in range(len(VCF_dat['samples'])):
            _line_tag = [VCF_dat['GenoINFO'][_tag][i][s] for _tag in GenoTags]
            line.append(":".join(_line_tag))
        fid_out.writelines("\t".join(line) + "\n")
    fid_out.close()

    import shutil
    # prefer bgzip (tabix-indexable output); fall back to plain gzip
    if shutil.which("bgzip") is not None:
        bashCommand = "bgzip -f %s" %(out_file_use)
    else:
        bashCommand = "gzip -f %s" %(out_file_use)
    pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    pro.communicate()[0]
def parse_donor_GPb(GT_dat, tag='GT', min_prob=0.0):
    """
    Parse the donor genotype probability
    tag: GT, GP, or PL

    Returns an (n_variant, n_donor, 3) tensor of normalized probabilities
    for the genotypes 0/0, 1/0 and 1/1, or None for an unsupported tag.

    Examples
    --------
    >>> GProb_tensor = vireoSNP.vcf.parse_donor_GPb(vcf_dat['GenoINFO']['GT'], 'GT')
    """
    if tag not in ['GT', 'GP', 'PL']:
        print("[parse_donor_GPb] Error: no support tag: %s" %tag)
        return None

    def _code_to_prob(code):
        # missing genotype: flat distribution over the three genotypes
        if code in (".", "./.", ".|."):
            return np.array([1/3, 1/3, 1/3])
        if tag == 'GT':
            out = np.zeros(3, float)
            # sum of the two alleles indexes the genotype (0, 1 or 2)
            out[int(float(code[0]) + float(code[-1]))] = 1
            return out
        if tag == 'GP':
            return np.array(code.split(','), float)
        # tag == 'PL': convert Phred-scaled likelihoods back to probabilities
        _Phred = np.array(code.split(','), float)
        return 10**(-0.1 * (_Phred - min(_Phred)) - 0.025)  # 0?

    n_var, n_donor = len(GT_dat), len(GT_dat[0])
    GT_prob = np.zeros((n_var, n_donor, 3))
    for i in range(n_var):
        for j in range(n_donor):
            GT_prob[i, j, :] = _code_to_prob(GT_dat[i][j])

    GT_prob += min_prob
    GT_prob /= GT_prob.sum(axis=2, keepdims=True)
    return GT_prob
def match_SNPs(SNP_ids1, SNPs_ids2):
    """Match variant ids, tolerating a "chr" prefix mismatch between the two
    id sets. See vireoSNP.match() for how unmatched entries become None.
    """
    mm_idx = match(SNP_ids1, SNPs_ids2)
    # nothing matched at all: the two sets may disagree on the "chr" prefix,
    # so retry with the prefix added to one side, then to the other
    if np.mean(mm_idx == None) == 1:
        mm_idx = match(["chr" + _id for _id in SNP_ids1], SNPs_ids2)
    if np.mean(mm_idx == None) == 1:
        mm_idx = match(SNP_ids1, ["chr" + _id for _id in SNPs_ids2])
    return mm_idx
def match_VCF_samples(VCF_file1, VCF_file2, GT_tag1, GT_tag2):
    """Match donors in two VCF files. Please subset the VCF with bcftools first,
    as it is more computationally efficient:
    `bcftools view large_file.vcf.gz -R small_file.vcf.gz -Oz -o sub.vcf.gz`

    Parameters
    ----------
    VCF_file1: str
        the full path of first VCF file, in plain text or gzip / bgzip
    VCF_file2: str
        the full path of second VCF file, in plain text or gzip / bgzip
    GT_tag1: str
        the tag for extracting the genotype probability in VCF1: GT, GP, PL
    GT_tag2: str
        the tag for extracting the genotype probability in VCF2: GT, GP, PL

    Returns
    -------
    dict with matched donor ids for both files, the full and matched
    genotype-probability difference matrices, and the matched variant count.
    """
    # VCF file 1
    vcf_dat0 = load_VCF(
        VCF_file1, biallelic_only=True, sparse=False, format_list=[GT_tag1])
    GPb0_var_ids = np.array(vcf_dat0['variants'])
    GPb0_donor_ids = np.array(vcf_dat0['samples'])
    GPb0_tensor = parse_donor_GPb(vcf_dat0['GenoINFO'][GT_tag1], GT_tag1)
    print('Shape for Geno Prob in VCF1:', GPb0_tensor.shape)

    # VCF file 2
    vcf_dat1 = load_VCF(
        VCF_file2, biallelic_only=True, sparse=False, format_list=[GT_tag2])
    GPb1_var_ids = np.array(vcf_dat1['variants'])
    GPb1_donor_ids = np.array(vcf_dat1['samples'])
    GPb1_tensor = parse_donor_GPb(vcf_dat1['GenoINFO'][GT_tag2], GT_tag2)
    # (removed a stray no-op statement `GPb1_tensor.shape` here)
    print('Shape for Geno Prob in VCF2:', GPb1_tensor.shape)

    # Match variants
    mm_idx = match_SNPs(GPb1_var_ids, GPb0_var_ids)
    idx1 = np.where(mm_idx != None)[0] #remove None for unmatched
    idx0 = mm_idx[idx1].astype(int)

    GPb1_var_ids_use = GPb1_var_ids[idx1]
    GPb0_var_ids_use = GPb0_var_ids[idx0]
    # print(np.mean(GPb0_var_ids_use == GPb1_var_ids_use))

    GPb1_tensor_use = GPb1_tensor[idx1]
    GPb0_tensor_use = GPb0_tensor[idx0]

    print("n_variants in VCF1, VCF2 and matched: %d, %d, %d"
          %(GPb0_var_ids.shape[0], GPb1_var_ids.shape[0], len(idx1))
    )

    # Match donors (Hungarian algorithm on mean genotype-prob differences)
    idx0, idx1, GPb_diff = optimal_match(
        GPb0_tensor_use, GPb1_tensor_use, axis=1, return_delta=True)
    print("aligned donors:")
    print(GPb0_donor_ids[idx0])
    print(GPb1_donor_ids[idx1])

    RV = {}
    RV['matched_GPb_diff'] = GPb_diff[idx0, :][:, idx1]
    RV['matched_donors1'] = GPb0_donor_ids[idx0]
    RV['matched_donors2'] = GPb1_donor_ids[idx1]
    RV['full_GPb_diff'] = GPb_diff
    RV['full_donors1'] = GPb0_donor_ids
    RV['full_donors2'] = GPb1_donor_ids
    RV['matched_n_var'] = len(GPb0_var_ids_use)
    return RV
def snp_gene_match(varFixedINFO, gene_df, gene_key='gene', multi_gene=True,
                   gaps=[0, 1000, 10000, 100000], verbose=False):
    """Match genes for given list of SNPs.
    This function benefits from grouped chromosomes in the variants list.

    parameters
    ----------
    varFixedINFO: dictionary, from vireoSNP.load_VCF()
        has keys of 'CHROM', 'POS'
    gene_df: pandas.DataFrame
        has columns in order: chrom, start, stop, gene [, others]
    gene_key: string
        the column key in gene_df for gene name
    multi_gene: bool
        If True, support all overlapped genes, otherwise, only one in the most
        central gene
    gaps: list of int
        the distance between a gene and the query SNP
    verbose: bool
        If True, print log info

    returns
    -------
    (gene_list, flag_list)
    gene_list is a list of gene list, for each variants it may have
    one or multiple overlapped genes or None.
    flag_list is a list of distance flag. 0: overlapped,
    1: within 1KB, 2: within 10KB, 3: within 100KB, 4: no cis gene
    """
    chrom_cur = 'None'
    gene_list = []
    flag_list = []
    for i in range(len(varFixedINFO['CHROM'])):
        _chrom = varFixedINFO['CHROM'][i]
        _pos = int(varFixedINFO['POS'][i])
        # cache the per-chromosome gene subset; cheap when the variants are
        # grouped by chromosome
        if chrom_cur != _chrom:
            gene_use = gene_df[gene_df['chrom'] == _chrom]
            chrom_cur = _chrom
            if verbose:
                print('processing:', _chrom)

        for k, _gap in enumerate(gaps):
            flag = k
            # signed distance to each gene: negative when _pos lies strictly
            # inside [start, stop] (signs of the two end-distances differ),
            # otherwise the distance to the nearer end
            _dist1 = gene_use['start'].values - _pos
            _dist2 = gene_use['stop'].values - _pos
            _distP = np.stack((_dist1, _dist2), axis=-1)
            _sign = np.sign(_dist1) * np.sign(_dist2)
            _dist = _sign * np.min(np.abs(_distP), axis=1)

            idx_chrom = np.where(_dist < _gap)[0]
            if len(idx_chrom) > 0:
                if _gap > 0 or multi_gene is False:
                    # for cis gene, only return the nearest one
                    # for overlapped genes, return the most central one (when
                    # not in multi_gene mode)
                    idx_chrom = [idx_chrom[np.argmin(_dist[idx_chrom])]]
                break
        # no gene within the largest gap: flag as "no cis gene"
        if len(idx_chrom) == 0:
            flag = len(gaps)

        #print(i, idx_chrom, gene_use.index.values[idx_chrom])
        gene_list.append(gene_use[gene_key].values[idx_chrom])
        flag_list.append(flag)

    return gene_list, flag_list
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,435 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/vireo.py | # vireoSNP - donor deconvolution for multiplexed scRNA-seq data
# Author: Yuanhua Huang
# Date: 24-06-2019
import os
import sys
import time
import subprocess
import numpy as np
import multiprocessing
from scipy.io import mmread
from optparse import OptionParser, OptionGroup
from .version import __version__
from .utils.vireo_base import match, optimal_match
from .utils.vireo_wrap import vireo_wrap
from .plot.base_plot import plot_GT
from .utils.io_utils import match_donor_VCF
from .utils.io_utils import write_donor_id, read_cellSNP, read_vartrix
from .utils.vcf_utils import load_VCF, write_VCF, parse_donor_GPb
from .utils.vcf_utils import read_sparse_GeneINFO, GenoINFO_maker
START_TIME = time.time()
def show_progress(RV=None):
    """Callback for multiprocessing runs: pass the worker result through."""
    return RV
def main():
    """Command-line entry point for donor demultiplexing.

    Parses options, loads the cellular allele counts (VCF file, cellSNP
    folder, or vartrix matrices) and optionally donor genotypes, runs the
    vireo model, then writes donor assignments, plots, and (when learned)
    the inferred donor genotypes into the output directory.
    """
    # import warnings
    # warnings.filterwarnings('error')

    # parse command line options
    parser = OptionParser()
    parser.add_option("--cellData", "-c", dest="cell_data", default=None,
        help=("The cell genotype file in VCF format or cellSNP folder with "
        "sparse matrices."))
    parser.add_option("--nDonor", "-N", type="int", dest="n_donor",
        default=None, help=("Number of donors to demultiplex; can be larger "
        "than provided in donor_file"))
    parser.add_option("--outDir", "-o", dest="out_dir", default=None,
        help=("Directory for output files [default: $cellFilePath/vireo]"))

    group0 = OptionGroup(parser, "Optional input files")
    group0.add_option("--vartrixData", dest="vartrix_data", default=None,
        help=("The cell genotype files in vartrix outputs (three/four files, "
        "comma separated): alt.mtx,ref.mtx,barcodes.tsv,SNPs.vcf.gz. "
        "This will suppress cellData argument."))
    group0.add_option("--donorFile", "-d", dest="donor_file", default=None,
        help=("The donor genotype file in VCF format. Please filter the sample "
        "and region with bcftools -s and -R first!"))
    group0.add_option("--genoTag", "-t", dest="geno_tag", default='PL',
        help=("The tag for donor genotype: GT, GP, PL [default: %default]"))

    group1 = OptionGroup(parser, "Optional arguments")
    group1.add_option("--noDoublet", dest="no_doublet", action="store_true",
        default=False, help="If use, not checking doublets.")
    group1.add_option("--nInit", "-M", type="int", dest="n_init", default=50,
        help=("Number of random initializations, when GT needs to learn "
        "[default: %default]"))
    group1.add_option("--extraDonor", type=int, dest="n_extra_donor",
        default=0, help=("Number of extra donor in pre-cluster, when GT "
        "needs to learn [default: %default]"))
    group1.add_option("--extraDonorMode", dest="extra_donor_mode",
        default="distance", help=("Method for searching from extra donors. "
        "size: n_cell per donor; distance: GT distance between donors "
        "[default: %default]"))
    group1.add_option("--forceLearnGT", dest="force_learnGT", default=False,
        action="store_true", help="If use, treat donor GT as prior only.")
    group1.add_option("--ASEmode", dest="ASE_mode", default=False,
        action="store_true", help="If use, turn on SNP specific allelic ratio.")
    group1.add_option("--noPlot", dest="no_plot", default=False,
        action="store_true", help="If use, turn off plotting GT distance.")
    group1.add_option("--randSeed", type="int", dest="rand_seed", default=None,
        help="Seed for random initialization [default: %default]")
    group1.add_option("--cellRange", type="str", dest="cell_range", default=None,
        help="Range of cells to process, eg. 0-10000 [default: all]")
    group1.add_option("--callAmbientRNAs", dest="check_ambient", default=False,
        action="store_true", help=("If use, detect ambient RNAs in each cell "
        "(under development)"))
    group1.add_option("--nproc", "-p", type="int", dest="nproc", default=1,
        help=("Number of subprocesses for computing - this sacrifices memory "
        "for speedups [default: %default]"))

    parser.add_option_group(group0)
    parser.add_option_group(group1)
    (options, args) = parser.parse_args()

    if len(sys.argv[1:]) == 0:
        print("Welcome to vireoSNP v%s!\n" %(__version__))
        print("use -h or --help for help on argument.")
        sys.exit(1)

    ## out directory
    if options.out_dir is None:
        print("Warning: no outDir provided, we use $cellFilePath/vireo.")
        # BUG FIX: fall back to the vartrix path when cellData is not given;
        # os.path.abspath(None) would raise a TypeError otherwise (the old
        # code re-read cell_data in the fallback branch).
        if options.cell_data is not None:
            input_dir = os.path.abspath(options.cell_data)
        else:
            input_dir = os.path.abspath(options.vartrix_data)
        out_dir = os.path.dirname(input_dir) + "/vireo"
    elif os.path.dirname(options.out_dir) == "":
        out_dir = "./" + options.out_dir
    else:
        out_dir = options.out_dir
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    ## input data (VCF.gz or a folder with sparse matrices)
    if options.cell_data is None and options.vartrix_data is None:
        print("Error: need cell data in vcf file, or cellSNP output folder, or "
              "vartrix's alt.mtx,ref.mtx,barcodes.tsv.")
        sys.exit(1)
    elif options.vartrix_data is not None:
        print("[vireo] Loading vartrix files ...")
        vartrix_files = options.vartrix_data.split(",")
        if len(vartrix_files) < 3 or len(vartrix_files) > 4:
            print("Error: vartrixData requires 3 or 4 comma separated files")
            sys.exit(1)
        elif len(vartrix_files) == 3:
            # SNPs.vcf.gz is optional
            vartrix_files.append(None)
        cell_dat = read_vartrix(vartrix_files[0], vartrix_files[1],
                                vartrix_files[2], vartrix_files[3])
    elif os.path.isdir(os.path.abspath(options.cell_data)):
        print("[vireo] Loading cell folder ...")
        cell_dat = read_cellSNP(options.cell_data)
    else:
        print("[vireo] Loading cell VCF file ...")
        cell_vcf = load_VCF(options.cell_data, biallelic_only=True)
        cell_dat = read_sparse_GeneINFO(cell_vcf['GenoINFO'], keys=['AD', 'DP'])
        for _key in ['samples', 'variants', 'FixedINFO', 'contigs', 'comments']:
            cell_dat[_key] = cell_vcf[_key]

    ## subset input cell data if necessary
    if options.cell_range is not None:
        cellRange = options.cell_range.split("-")
        cellRange[0] = int(cellRange[0])
        cellRange[1] = int(cellRange[1])
        cell_dat['AD'] = cell_dat['AD'][:, cellRange[0]:cellRange[1]]
        cell_dat['DP'] = cell_dat['DP'][:, cellRange[0]:cellRange[1]]
        cell_dat['samples'] = cell_dat['samples'][cellRange[0]:cellRange[1]]

    ## input donor genotype
    n_donor = options.n_donor
    if options.donor_file is not None:
        if "variants" not in cell_dat.keys():
            print("No variants information is loaded, please provide base.vcf.gz")
            sys.exit(1)

        print("[vireo] Loading donor VCF file ...")
        donor_vcf = load_VCF(options.donor_file, biallelic_only=True,
                             sparse=False, format_list=[options.geno_tag])
        # require the genotype tag to be present in at least 10% of variants
        if (donor_vcf['n_SNP_tagged'][0] <
            (0.1 * len(donor_vcf['GenoINFO'][options.geno_tag]))):
            print("[vireo] No " + options.geno_tag + " tag in donor genotype; "
                  "please try another tag for genotype, e.g., GT")
            print(" %s" %options.donor_file)
            sys.exit(1)

        cell_dat, donor_vcf = match_donor_VCF(cell_dat, donor_vcf)
        donor_GPb = parse_donor_GPb(donor_vcf['GenoINFO'][options.geno_tag],
                                    options.geno_tag)
        if n_donor is None or n_donor == donor_GPb.shape[1]:
            # use the donor genotypes as-is
            n_donor = donor_GPb.shape[1]
            donor_names = donor_vcf['samples']
            learn_GT = False
        elif n_donor < donor_GPb.shape[1]:
            # a subset of the provided donors; names resolved after fitting
            learn_GT = False
            donor_names = ['donor%d' %x for x in range(n_donor)]
        else:
            # more donors requested than provided: learn the extra genotypes
            learn_GT = True
            donor_names = (donor_vcf['samples'] +
                ['donor%d' %x for x in range(donor_GPb.shape[1], n_donor)])
    else:
        learn_GT = True
        donor_GPb = None
        donor_names = ['donor%d' %x for x in range(n_donor)]

    # number of variants with coverage, per cell
    n_vars = np.array(np.sum(cell_dat['DP'] > 0, axis=0)).reshape(-1)

    if options.force_learnGT:
        learn_GT = True

    # extra donor for initial search, only for learn_GT
    n_extra_donor = 0
    if learn_GT:
        if options.n_extra_donor is None or options.n_extra_donor == "None":
            n_extra_donor = int(round(np.sqrt(n_donor)))
        else:
            n_extra_donor = options.n_extra_donor

    # number of initials, only for learn_GT
    n_init = options.n_init if learn_GT else 1
    check_doublet = options.no_doublet == False

    ## run vireo model (try multiple initializations)
    print("[vireo] Demultiplex %d cells to %d donors with %d variants." %(
        cell_dat['AD'].shape[1], n_donor, cell_dat['AD'].shape[0]))
    res_vireo = vireo_wrap(cell_dat['AD'], cell_dat['DP'], n_donor=n_donor,
        GT_prior=donor_GPb, learn_GT=learn_GT, n_init=n_init,
        n_extra_donor=n_extra_donor, extra_donor_mode=options.extra_donor_mode,
        check_doublet=check_doublet, random_seed=options.rand_seed,
        ASE_mode=options.ASE_mode, check_ambient=options.check_ambient,
        nproc=options.nproc)

    # donor subset mode: recover which provided donors were used
    if (n_donor is not None and
        donor_GPb is not None and n_donor < donor_GPb.shape[1]):
        idx = optimal_match(res_vireo['GT_prob'], donor_GPb)[1]
        donor_names = [donor_vcf['samples'][x] for x in idx]

    ## save donor id for each cell
    write_donor_id(out_dir, donor_names, cell_dat['samples'], n_vars, res_vireo)

    if options.no_plot == False and options.vartrix_data is None:
        # only plot cells with reasonable coverage
        idx = np.array(np.sum(cell_dat['DP'], axis=1) > (3*n_donor)).reshape(-1)
        if learn_GT and donor_GPb is not None:
            plot_GT(out_dir, res_vireo['GT_prob'][idx, :, :], donor_names,
                    donor_GPb[idx, :, :], donor_vcf['samples'])
        else:
            plot_GT(out_dir, res_vireo['GT_prob'][idx, :, :], donor_names)

    # ## save inferred donor genotype
    if learn_GT and 'variants' in cell_dat.keys():
        donor_vcf_out = cell_dat
        donor_vcf_out['samples'] = donor_names
        donor_vcf_out['GenoINFO'] = GenoINFO_maker(res_vireo['GT_prob'],
            cell_dat['AD'] * res_vireo['ID_prob'],
            cell_dat['DP'] * res_vireo['ID_prob'])
        write_VCF(out_dir + "/GT_donors.vireo.vcf.gz", donor_vcf_out)

    run_time = time.time() - START_TIME
    print("[vireo] All done: %d min %.1f sec" %(int(run_time / 60),
                                                run_time % 60))
    print()
if __name__ == "__main__":
main()
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,436 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/version.py | __version__ = "0.5.8"
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,437 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/vireo_base.py | import numpy as np
from scipy.stats import entropy
from scipy.optimize import linear_sum_assignment
from scipy.special import logsumexp, digamma, betaln, binom, gammaln
def get_binom_coeff(AD, DP, max_val=700, is_log=True):
    """Log binomial coefficients log(C(DP, AD)) for entries with DP > 0.

    Values are clipped at max_val, since float64 overflows around e**700
    (close to binom(1000, 500)); the result is returned as float32.
    """
    # print("Warning: this function is deprecated, please use logbincoeff.")
    keep = DP > 0
    dp_int = DP[keep].astype(np.int64)
    ad_int = AD[keep].astype(np.int64)
    coeff = np.log(binom(dp_int, ad_int))
    np.minimum(coeff, max_val, out=coeff)
    return coeff.astype(np.float32)
def logbincoeff(n, k, is_sparse=False):
    """Log binomial coefficient log[n! / (k! (n-k)!)], computed via gammaln.

    With is_sparse=True, n and k are scipy sparse matrices; only entries
    satisfying 0 < k < n are evaluated and all other entries remain zero.
    """
    if not is_sparse:
        return gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1)

    out = n.copy() * 0
    mask = (k > 0).multiply(k < n)
    n_sel = np.array(n[mask]).reshape(-1)
    k_sel = np.array(k[mask]).reshape(-1)
    out[mask] += (gammaln(n_sel + 1) - gammaln(k_sel + 1) -
                  gammaln(n_sel - k_sel + 1))
    return out
def normalize(X, axis=-1):
    """
    Normalization of tensor with sum to 1.

    Parameters
    ----------
    X: numpy.array
        The tensor to normalize.
    axis: int
        The axis along which entries will sum to 1.

    Example
    -------
    X = np.random.rand(3, 5, 8)
    tensor_normalize(X, axis=1)
    """
    # (removed an unused local `shape2`; keepdims=True already keeps the
    # summed axis so the division broadcasts)
    X_sum = np.sum(X, axis=axis, keepdims=True)
    return X / X_sum
def tensor_normalize(X, axis=1):
    """Backward-compatible alias of normalize()."""
    return normalize(X, axis)
def loglik_amplify(X, axis=-1):
    """
    Amplify the log likelihood matrix by subtract the maximum.

    Subtracting the per-slice maximum makes later exponentiation numerically
    safe: the largest entry becomes exp(0) = 1.

    Example
    -------
    X = np.random.rand(3, 5, 8)
    loglik_amplify(X, axis=1)
    """
    # (removed an unused local `shape2`; keepdims=True already keeps the
    # reduced axis so the subtraction broadcasts)
    X_max = np.max(X, axis=axis, keepdims=True)
    return X - X_max
def beta_entropy(X, X_prior=None, axis=None):
    """
    Get the entropy for beta distributions. If X_prior is not None, return the
    Kullback-Leibler divergence

    See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html
    https://en.wikipedia.org/wiki/Beta_distribution#Quantities_of_information_(entropy)

    Parameters
    ----------
    X, X_prior:
        numpy.array with shape: (N, 2)

    Example
    -------
    theta_shapes1 = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]])
    theta_shapes2 = np.array([[364, 24197], [5886, 7475], [6075, 397]])
    beta_entropy(theta_shapes2)
    beta_entropy(theta_shapes2, theta_shapes1)
    """
    def _beta_cross_entropy(Xp, Xq):
        """return cross entropy -E_p[log q] for beta distribution
        For entropy, use as _beta_cross_entropy(X, X)
        """
        return (
            betaln(Xq[:, 0], Xq[:, 1]) -
            (Xq[:, 0] - 1) * digamma(Xp[:, 0]) -
            (Xq[:, 1] - 1) * digamma(Xp[:, 1]) +
            (Xq.sum(axis=1) - 2) * digamma(Xp.sum(axis=1))
        )

    # check shape
    if len(X.shape) == 1:
        if X.shape[0] == 2:
            X = X.reshape(-1, 2)
        else:
            print("Error: unsupported shape. Make sure it's (N, 2)")
    # BUG FIX: previously this tested len(X.shape) == 1, which is always
    # False after the reshape above, so a 1-D X_prior was never reshaped and
    # the indexing in _beta_cross_entropy failed.
    if X_prior is not None and len(X_prior.shape) == 1:
        if X_prior.shape[0] == 2:
            X_prior = X_prior.reshape(-1, 2)
        else:
            print("Error: unsupported shape. Make sure it's (N, 2)")

    if X_prior is None:
        # entropy
        RV_mat = _beta_cross_entropy(X, X)
    else:
        # KL divergence
        RV_mat = _beta_cross_entropy(X, X_prior) - _beta_cross_entropy(X, X)
    return np.sum(RV_mat, axis=axis)
def match(ref_ids, new_ids, uniq_ref_only=True):
    """
    Mapping new_ids to ref_ids. ref_ids can have repeated values, but new_ids
    can only have unique ids or values. Therefore, new_ids[RT_idx] will be
    the same as ref_ids.

    Parameters
    ----------
    ref_ids : array_like or list
        ids for reference with type of int, float, or string
    new_ids : array_like or list
        ids waiting to map.

    Returns
    -------
    RV_idx : array_like, the same length of ref_ids
        The index for new_ids mapped to ref_ids. If an id in ref_ids does not
        exist in new_ids, then return a None for that id.

    Examples
    --------
    >>> x1 = [5, 9, 1]
    >>> x2 = [1, 2, 5, 7, 9]
    >>> match(x1, x2)
    array([2, 4, 0])
    >>> match(x2, x1)
    array([2, None, 0, None, 1], dtype=object)
    >>> RT_idx = match(x2, x1)
    >>> idx1 = numpy.where(RT_idx != None)[0]
    >>> idx1
    array([0, 2, 4])
    >>> idx2 = RT_idx[idx1].astype(int)
    >>> idx2
    array([2, 0, 1])
    """
    # merge-style scan: walk both id lists in sorted order
    idx1 = np.argsort(ref_ids)
    idx2 = np.argsort(new_ids)
    RT_idx1, RT_idx2 = [], []
    i, j = 0, 0
    while i < len(idx1):
        if j == len(idx2) or ref_ids[idx1[i]] < new_ids[idx2[j]]:
            # this ref id has no counterpart in new_ids
            RT_idx1.append(idx1[i])
            RT_idx2.append(None)
            i += 1
        elif ref_ids[idx1[i]] == new_ids[idx2[j]]:
            RT_idx1.append(idx1[i])
            RT_idx2.append(idx2[j])
            i += 1
            # when uniq_ref_only, advance past the matched new id so each
            # new id is consumed at most once
            if uniq_ref_only: j += 1
        elif ref_ids[idx1[i]] > new_ids[idx2[j]]:
            j += 1

    # restore the original (unsorted) order of ref_ids
    origin_idx = np.argsort(RT_idx1)
    RT_idx = np.array(RT_idx2)[origin_idx]
    return RT_idx
def optimal_match(X, Z, axis=1, return_delta=False):
    """
    Match Z to X by minimizing the mean absolute difference between slices
    taken along `axis`, so that np.take(Z, idx1, axis) is best aligned to
    np.take(X, idx0, axis).

    Uses the Hungarian algorithm:
    https://docs.scipy.org/doc/scipy-1.4.0/reference/generated/scipy.optimize.linear_sum_assignment.html
    """
    X_use = X.copy()
    Z_use = Z.copy()
    n_x, n_z = X.shape[axis], Z.shape[axis]

    # pairwise cost: mean absolute difference between slice i of X and
    # slice j of Z
    cost = np.zeros((n_x, n_z))
    for i in range(n_x):
        x_slice = np.take(X_use, i, axis=axis)
        for j in range(n_z):
            cost[i, j] = np.mean(np.abs(x_slice -
                                        np.take(Z_use, j, axis=axis)))

    idx0, idx1 = linear_sum_assignment(cost)
    return (idx0, idx1, cost) if return_delta else (idx0, idx1)
def greed_match(X, Z, axis=1):
    """
    Deprecated wrapper kept for backward compatibility; returns only the
    Z-side indices from optimal_match.
    """
    print("This method has been dispatched, please use optimal_match!")
    return optimal_match(X, Z, axis=axis)[1]
def donor_select(GT_prob, ID_prob, n_donor, mode="distance"):
    """
    Select the donors from a set with extra donors.

    The GT_prior can have different number of donors from n_donor.

    mode="size": only keep the n_donor with largest number of cells
    mode="distance": only keep the n_donor with most different GT from each
    other
    """
    # expected number of cells assigned to each donor
    _donor_cnt = np.sum(ID_prob, axis=0)
    if mode == "size":
        _donor_idx = np.argsort(_donor_cnt)[::-1]
    else:
        # pairwise mean absolute genotype difference between donors
        _GT_diff = np.zeros((GT_prob.shape[1], GT_prob.shape[1]))
        for i in range(GT_prob.shape[1]):
            for j in range(GT_prob.shape[1]):
                _GT_diff[i, j] = np.mean(np.abs(GT_prob[:, i, :] -
                                                GT_prob[:, j, :]))

        # greedy max-min selection: start from the largest donor, then
        # repeatedly add the donor whose minimum distance to the already
        # selected donors is largest
        _donor_idx = [np.argmax(_donor_cnt)]
        _donor_left = np.delete(np.arange(GT_prob.shape[1]), _donor_idx)
        _GT_diff = np.delete(_GT_diff, _donor_idx, axis=1)
        while len(_donor_idx) < _GT_diff.shape[0]:
            # _idx = np.argmax(np.sum(_GT_diff[_donor_idx, :], axis=0))
            _idx = np.argmax(np.min(_GT_diff[_donor_idx, :], axis=0))
            _donor_idx.append(_donor_left[_idx])
            _donor_left = np.delete(_donor_left, _idx)
            _GT_diff = np.delete(_GT_diff, _idx, axis=1)

    print("[vireo] donor size with searching extra %d donors:"
          %(GT_prob.shape[1] - n_donor))
    print("\t".join(["donor%d" %x for x in _donor_idx]))
    print("\t".join(["%.0f" %_donor_cnt[x] for x in _donor_idx]))

    # fancy indexing copies, so the clipping below does not modify ID_prob
    ID_prob_out = ID_prob[:, _donor_idx[:n_donor]]
    ID_prob_out[ID_prob_out < 10**-10] = 10**-10

    return ID_prob_out
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,438 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/plot/base_plot.py | # base functions for plotting
import numpy as np
# Categorical color palette (hex RGB strings) shared by the plotting helpers
# below; indexed by group number in anno_heat().
vireo_colors = np.array(['#4796d7', '#f79e54', '#79a702', '#df5858', '#556cab',
                         '#de7a1f', '#ffda5c', '#4b595c', '#6ab186', '#bddbcf',
                         '#daad58', '#488a99', '#f79b78', '#ffba00'])
def heat_matrix(X, yticks=None, xticks=None, rotation=45, cmap='BuGn',
                alpha=0.6, display_value=True, row_sort=False,
                aspect='auto', interpolation='none', **kwargs):
    """
    Plot heatmap of distance matrix

    Parameters
    ----------
    X: numpy.array or matrix
        The matrix to plot in heatmap
    yticks: list
        The ticks ids for y axis
    xticks: list
        The ticks ids for x axis
    rotation: scalar
        The rotation angle for xticks
    cmap: str
        The colormap for the heatmap, more options:
        https://matplotlib.org/stable/tutorials/colors/colormaps.html
    alpha: scalar
        The transparency, value between 0 and 1
    display_value: bool
        If True, display the values in the heatmap
    row_sort: bool
        If True, sort the rows with row index as
        row_idx = np.argsort(np.dot(X, 2**np.arange(X.shape[1])))
    aspect: str
        `aspect` in `plt.imshow`
    interpolation: str
        `interpolation` in `plt.imshow`
    **kwargs: keywords & values
        `**kwargs` for `plt.imshow`

    Returns
    -------
    The return from `plt.imshow`

    Examples
    --------
    .. plot::

        >>> from vireoSNP.plot import heat_matrix
        >>> import numpy as np
        >>> np.random.seed(1)
        >>> X = np.random.rand(5, 7)
        >>> heat_matrix(X)
    """
    # Deferred import keeps matplotlib optional at module-import time.
    import matplotlib.pyplot as plt
    if row_sort:
        # Order rows by their binary-weighted row pattern so similar rows
        # end up adjacent.
        row_idx = np.argsort(np.dot(X, 2**np.arange(X.shape[1])))
        X = X[row_idx, :]
    im = plt.imshow(X, cmap=cmap, alpha=alpha, aspect=aspect,
                    interpolation=interpolation, **kwargs)
    if xticks is not None:
        plt.xticks(range(len(xticks)), xticks, rotation=rotation)
        plt.xlim(-0.5, len(xticks) - 0.5)
    if yticks is not None:
        plt.yticks(range(len(yticks)), yticks)
        plt.ylim(-0.5, len(yticks) - 0.5)
    # Loop over data dimensions and create text annotations.
    if display_value:
        for i in range(X.shape[0]):
            for j in range(X.shape[1]):
                plt.text(j, i, "%.2f" %X[i, j],
                         ha="center", va="center", color="k")
    return im
def plot_GT(out_dir, cell_GPb, donor_names,
            donor_GPb=None, donor_names_in=None):
    """
    Plot the genotype distance between samples.

    Writes heatmap PDFs into `out_dir`: fig_GT_distance_estimated.pdf
    (pairwise distance among estimated donors) and, when `donor_GPb` is
    given, fig_GT_distance_input.pdf (estimated donors vs input donors).
    The distance is the mean absolute difference of genotype probabilities;
    both probability arrays are indexed as [SNP, donor, :], i.e. 3-D with
    donors on axis 1 (axis-2 presumably enumerates genotype categories —
    TODO confirm against the caller).
    """
    import matplotlib.pyplot as plt
    ## compare the GT probability of estimated samples
    diff_mat = np.zeros((cell_GPb.shape[1], cell_GPb.shape[1]))
    for i in range(cell_GPb.shape[1]):
        for j in range(cell_GPb.shape[1]):
            diff_mat[i,j] = np.mean(np.abs(cell_GPb[:, i, :] -
                                           cell_GPb[:, j, :]))
    fig = plt.figure()
    heat_matrix(diff_mat, donor_names, donor_names)
    plt.title("Geno Prob Delta: %d SNPs" %(cell_GPb.shape[0]))
    plt.tight_layout()
    fig.savefig(out_dir + "/fig_GT_distance_estimated.pdf", dpi=300)
    ## compare in the estimated sample with input samples
    if donor_GPb is not None:
        diff_mat = np.zeros((cell_GPb.shape[1], donor_GPb.shape[1]))
        for i in range(cell_GPb.shape[1]):
            for j in range(donor_GPb.shape[1]):
                diff_mat[i,j] = np.mean(np.abs( cell_GPb[:, i, :] -
                                                donor_GPb[:, j, :]))
        fig = plt.figure()
        heat_matrix(diff_mat, donor_names, donor_names_in)
        plt.title("Geno Prob Delta: %d SNPs" %(cell_GPb.shape[0]))
        plt.tight_layout()
        fig.savefig(out_dir + "/fig_GT_distance_input.pdf", dpi=300)
def minicode_plot(barcode_set, var_ids=None, sample_ids=None,
                  cmap="Set3", interpolation='none', **kwargs):
    """
    Plot a matrix of per-variant barcode digits, one column per sample.

    Each element of `barcode_set` is a string whose first character is
    skipped (presumably a '#' prefix — TODO confirm) and whose remaining
    characters are parsed as one numeric value per variant. Returns the
    `plt.imshow` image object.
    """
    import matplotlib.pyplot as plt
    # Rows = characters after the leading one; columns = samples.
    mat = np.zeros((len(barcode_set[0][1:]), len(barcode_set)))
    for i in range(mat.shape[0]):
        for j in range(mat.shape[1]):
            mat[i, j] = float(barcode_set[j][i + 1])
    im = plt.imshow(mat, cmap=cmap, interpolation=interpolation, **kwargs)
    # Overlay the integer value of every cell.
    for i in range(mat.shape[0]):
        for j in range(mat.shape[1]):
            plt.text(j, i, int(mat[i, j]),
                     ha="center", va="center", color="k")
    if var_ids is None:
        var_ids = range(mat.shape[0])
    plt.yticks(range(len(var_ids)), var_ids)
    plt.ylim(-0.5, len(var_ids) - 0.5)
    if sample_ids is None:
        sample_ids = ["%s\nS%d" %(barcode_set[x], x)
                      for x in range(mat.shape[1])]
    else:
        sample_ids = ["%s\n%s" %(barcode_set[x], sample_ids[x])
                      for x in range(mat.shape[1])]
    plt.xticks(range(len(sample_ids)), sample_ids)
    plt.xlim(-0.5, len(sample_ids) - 0.5)
    return im
def anno_heat(X, row_anno=None, col_anno=None,
              row_order_ids=None, col_order_ids=None,
              xticklabels=False, yticklabels=False,
              row_cluster=False, col_cluster=False,
              **kwargs):
    """
    Heatmap with column or row annotations. Based on seaborn.clustermap()
    Row or column will be ordered by the annotation group.
    Note, haven't tested if input both row_anno and col_anno.

    Parameters
    ----------
    X: numpy.array (n_row, n_col)
        Matrix to plot.
    row_anno, col_anno: list-like or None
        One group label per row / column; rows (columns) are reordered so
        members of the same group are adjacent and share a color.
    row_order_ids, col_order_ids: list or None
        Explicit ordering of the group labels; defaults to np.unique(anno).
    xticklabels, yticklabels, row_cluster, col_cluster, **kwargs:
        Passed through to seaborn.clustermap.

    Returns
    -------
    The seaborn ClusterGrid from sns.clustermap.
    """
    import seaborn as sns

    # prepare row annotation
    if row_anno is not None:
        if row_order_ids is None:
            row_order_ids = list(np.unique(row_anno))
        else:
            row_order_ids = [x for x in row_order_ids]
        row_num = np.array([row_order_ids.index(x) for x in row_anno])
        dot_row = np.array(np.nansum(X, axis=1)).reshape(-1)
        idx_row = np.argsort(row_num * 2**X.shape[1])# + dot_row / dot_row.max())
        row_colors = vireo_colors[row_num][idx_row]
    else:
        row_colors = None
        row_order_ids = []
        idx_row = range(X.shape[0])

    # prepare col annotation
    if col_anno is not None:
        if col_order_ids is None:
            # BUG FIX: the default ordering was derived from col_order_ids
            # (always None on this branch), so the group lookup below failed;
            # derive it from col_anno, mirroring the row branch above.
            col_order_ids = list(np.unique(col_anno))
        else:
            col_order_ids = [x for x in col_order_ids]
        col_num = np.array([col_order_ids.index(x) for x in col_anno])
        dot_col = np.array(np.nansum(X, axis=0)).reshape(-1)
        idx_col = np.argsort(col_num * 2**X.shape[0])# + dot_row / dot_row.max())
        col_colors = vireo_colors[col_num][idx_col]
    else:
        col_colors = None
        col_order_ids = []
        idx_col = range(X.shape[1])

    ## plot with seaborn clustermap
    g = sns.clustermap(X[idx_row, :][:, idx_col],
                       row_colors=row_colors, col_colors=col_colors,
                       col_cluster=col_cluster, row_cluster=row_cluster,
                       xticklabels=xticklabels, yticklabels=yticklabels,
                       **kwargs)
    # Invisible bars create the group-color legend entries.
    if row_anno is not None:
        for i in range(len(row_order_ids)):
            g.ax_row_dendrogram.bar(0, 0, color=vireo_colors[i],
                                    label=row_order_ids[i], linewidth=0)
        g.ax_row_dendrogram.legend(loc="center", ncol=1, title="")
    if col_anno is not None:
        for i in range(len(col_order_ids)):
            g.ax_col_dendrogram.bar(0, 0, color=vireo_colors[i],
                                    label=col_order_ids[i], linewidth=0)
        g.ax_col_dendrogram.legend(loc="center", ncol=6, title="")
    g.cax.set_position([1.01, .2, .03, .45])
    return g
# def ppca_plot(AD, DP):
# """
# PPCA plot for each cell genotypes. This function is still underdevelopment
# """
# Z = DP.copy().astype(float)
# idx = DP > 0
# Z[idx] = AD[idx] / Z[idx]
# Z[idx] = Z[idx] - 0.5
# from sklearn.decomposition import TruncatedSVD
# svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
# svd.fit(Z)
# print("variance explained:", svd.explained_variance_ratio_)
# import matplotlib.pyplot as plt
# plt.scatter(svd.components_[0, :], svd.components_[1, :])
# return svd.components_ | {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,439 | single-cell-genetics/vireo | refs/heads/master | /simulate/synth_pool.py | # Sythetic mixture of bam files from multiple samples
# Author: Yuanhua Huang
# Date: 15-06-2019
import os
import sys
import pysam
import itertools
import numpy as np
import subprocess
import multiprocessing
from optparse import OptionParser, OptionGroup
from cellSNP.utils.vcf_utils import load_VCF
from cellSNP.utils.pileup_utils import check_pysam_chrom
def show_progress(RV=None):
    """Identity callback handed to multiprocessing's apply_async; simply
    passes the worker's return value through."""
    return RV
def sample_barcodes(barcodes, n_cell_each=1000, minor_sample=1.0, seed=None):
    """
    Down-sample each sample's cell barcodes to `n_cell_each` (in place).

    Every sample is randomly permuted and truncated to `n_cell_each`
    barcodes; the first sample is additionally truncated to
    round(minor_sample * n_cell_each) to simulate a minor sample.
    Exits with status 1 if any sample has fewer barcodes than requested.
    Returns the (mutated) input list.
    """
    if seed is not None:
        np.random.seed(seed)
    for pos, pool in enumerate(barcodes):
        if len(pool) < n_cell_each:
            print("Error in sample_barcodes: input sample has fewer cell "
                  "barcodes than n_cell_each.")
            sys.exit(1)
        shuffled = np.random.permutation(pool)
        barcodes[pos] = list(shuffled[:n_cell_each])
    # Shrink the first sample to the requested minor-sample fraction.
    barcodes[0] = barcodes[0][:round(minor_sample * n_cell_each)]
    return barcodes
def pool_barcodes(barcodes, out_dir, doublet_rate=None, sample_suffix=True,
                  seed=None):
    """
    Update cell barcodes with sample id and add doublets.
    Note, barcodes is a list of multiple samples, each
    sample has a list of barcodes.

    Side effects: writes barcodes_pool.tsv (unique pooled barcodes) and
    cell_info.tsv (pooled barcode, original barcode, 1-based sample id)
    into `out_dir`. Returns the per-sample lists of renamed barcodes.
    """
    if seed is not None:
        np.random.seed(seed)
    if sample_suffix:
        # Replace the final character of each barcode with the 1-based
        # sample id — assumes barcodes end with a "-1"-style suffix digit;
        # TODO confirm input format.
        barcodes_out = []
        for ss in range(len(barcodes)):
            barcodes_out.append([x[:-1]+str(ss+1) for x in barcodes[ss]])
    else:
        barcodes_out = barcodes.copy()
    barcodes_flat = list(itertools.chain(*barcodes_out))
    n_cells = len(barcodes_flat)
    if doublet_rate is None:
        doublet_rate = n_cells / 100000.0
    elif doublet_rate < 0 or doublet_rate > 1:
        print("Error: doublet rate needs to be between 0 and 1.")
        sys.exit(1)
    if doublet_rate == 0:
        n_doublets = 0
    else:
        n_doublets = round(n_cells / (1 + 1 / doublet_rate))
    print(n_cells, n_doublets)
    # Pair cell perm_idx[ii] with perm_idx[ii + n_doublets] and give both
    # the same barcode: suffix "S" when the two cells share a sample id
    # (the token after the first "-"), "D" otherwise. Assumes every
    # barcode contains a "-" — TODO confirm.
    perm_idx = np.random.permutation(n_cells)
    for ii in range(n_doublets):
        if (barcodes_flat[perm_idx[ii]].split("-")[1] ==
            barcodes_flat[perm_idx[ii + n_doublets]].split("-")[1]):
            _barcode = barcodes_flat[perm_idx[ii]] + "S"
        else:
            _barcode = barcodes_flat[perm_idx[ii]] + "D"
        barcodes_flat[perm_idx[ii]] = _barcode
        barcodes_flat[perm_idx[ii + n_doublets]] = _barcode
    # Scatter the flat (doublet-injected) list back into per-sample lists.
    start_idx = 0
    for ss in range(len(barcodes_out)):
        _n_cell = len(barcodes_out[ss])
        barcodes_out[ss] = barcodes_flat[start_idx: start_idx + _n_cell]
        start_idx += _n_cell
    ## save new cell barcodes
    fid = open(out_dir + "/barcodes_pool.tsv", "w")
    for _barcode in np.unique(barcodes_flat):
        fid.writelines(_barcode + "\n")
    fid.close()
    fid = open(out_dir + "/cell_info.tsv", "w")
    fid.writelines("CB_pool\tCB_origin\tSample_id\n")
    for ss in range(len(barcodes_out)):
        for ii in range(len(barcodes_out[ss])):
            _out = [barcodes_out[ss][ii], barcodes[ss][ii], str(ss + 1)]
            fid.writelines("\t".join(_out) + "\n")
    fid.close()
    return barcodes_out
def fetch_reads(samFile_list, chroms, positions, outbam,
                barcodes_in, barcodes_out=None, cell_tag='CB'):
    """
    Copy reads overlapping the given SNP positions from each input BAM into
    `outbam`, rewriting the cell-barcode tag from barcodes_in to the
    corresponding entry of barcodes_out. Reads lacking the tag, or whose
    barcode is not listed for that sample, are skipped. Returns None.
    """
    samFile_list = [check_pysam_chrom(x, chroms[0])[0] for x in samFile_list]
    outbam = pysam.AlignmentFile(outbam, "wb", template=samFile_list[0])
    if barcodes_out is None:
        barcodes_out = barcodes_in.copy()
    for ss in range(len(samFile_list)):
        samFile = samFile_list[ss]
        _barcodes_in = barcodes_in[ss]
        _barcodes_out = barcodes_out[ss]
        READ_CNT = 0
        reads_all = []
        for i in range(len(positions)):
            chrom = chroms[i]
            POS = positions[i]
            # Fetch the single base covering the (1-based) SNP position.
            for _read in samFile.fetch(chrom, POS-1, POS):
                if _read.has_tag(cell_tag) == False:
                    continue
                try:
                    # list.index raises ValueError for unknown barcodes,
                    # which is how unmatched reads get skipped.
                    idx = _barcodes_in.index(_read.get_tag(cell_tag))
                    _read.set_tag(cell_tag, _barcodes_out[idx])
                except ValueError:
                    continue
                reads_all.append(_read)
                READ_CNT += 1
                if READ_CNT % 100000 == 0:
                    print("BAM%d: %.2fM reads." %(ss+1, READ_CNT/1000000))
        # remove redundant reads (one read may be called multiple times)
        reads_all = set(reads_all)
        print(len(reads_all), READ_CNT)
        for _read in reads_all:
            outbam.write(_read)
        samFile.close()
    outbam.close()
    return None
def main():
    """
    Command-line entry point: pool reads from several BAM files into one
    synthetic multiplexed BAM, renaming cell barcodes per sample and
    injecting doublets, then sort and index the result with samtools.
    """
    import warnings
    warnings.filterwarnings('error')

    # parse command line options
    parser = OptionParser()
    parser.add_option("--samFiles", "-s", dest="sam_files", default=None,
        help=("Input bam or sam files, comma separated."))
    parser.add_option("--barcodeFiles", "-b", dest="barcodes_files",
        default=None, help=("Input barcode files, comma separated."))
    parser.add_option("--regionFile", "-r", dest="region_file",
        default=None, help=("Input SNP list."))
    parser.add_option("--doubletRate", "-d", dest="doublet_rate",
        type="float", default=None, help=("Doublet rate [default: n/100000]"))
    parser.add_option("--outDir", "-o", dest="out_dir", default=None,
        help=("Directory for output files: pooled.bam and barcodes_pool.tsv."))
    parser.add_option("--nproc", "-p", type="int", dest="nproc", default=1,
        help="Number of subprocesses [default: %default]")

    group = OptionGroup(parser, "Cell barcodes sampling")
    group.add_option("--nCELL", type="int", dest="n_cell", default=None,
        help="The number of cells in each sample [default: %default]")
    group.add_option("--minorSAMPLE", type="float", dest="minor_sample",
        default=1.0, help="Ratio size of minor sample [default: %default]")
    group.add_option("--randomSEED", type="int", dest="random_seed",
        default=None, help="The random seed in numpy [default: %default]")
    parser.add_option_group(group)

    (options, args) = parser.parse_args()
    if len(sys.argv[1:]) == 0:
        print("Welcome to VCF_convert!\n")
        print("use -h or --help for help on argument.")
        sys.exit(1)

    ## out directory
    if options.out_dir is None:
        print("Error: need outDir for output files.")
        sys.exit(1)
    elif os.path.dirname(options.out_dir) == "":
        out_dir= "./" + options.out_dir
    else:
        out_dir = options.out_dir
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    ## sam files
    if options.sam_files is None:
        print("Error: need samFile for sam file.")
        sys.exit(1)
    else:
        samFile_list = options.sam_files.split(",")

    ## cell barcodes
    if options.barcodes_files is None:
        print("Error: need files for cell barcodes.")
        sys.exit(1)
    else:
        barcodes_files = options.barcodes_files.split(",")
        if len(barcodes_files) != len(samFile_list):
            print("Error: barcodes files are not equal to sam files.")
            sys.exit(1)
        barcodes_in = []
        for _bar in barcodes_files:
            fid = open(_bar, 'r')
            all_lines = [x.rstrip() for x in fid.readlines()]
            fid.close()
            barcodes_in.append(all_lines)

    # Optional down-sampling, then barcode renaming + doublet injection.
    if options.n_cell is not None:
        barcodes_in = sample_barcodes(barcodes_in, options.n_cell,
            options.minor_sample, options.random_seed)
    barcodes_out = pool_barcodes(barcodes_in, out_dir, options.doublet_rate,
                                 seed=options.random_seed)

    ## VCF file
    vcf_dat = load_VCF(options.region_file, biallelic_only=False,
                       load_sample=False)
    chroms = vcf_dat['FixedINFO']['CHROM']
    positions = [int(x) for x in vcf_dat['FixedINFO']['POS']]

    # fetch each position
    if (options.nproc == 1):
        BAM_FILE = out_dir + "/pooled.bam"
        fetch_reads(samFile_list, chroms, positions,
                    BAM_FILE, barcodes_in, barcodes_out)
    else:
        # One worker per input BAM, each writing its own temp BAM, then
        # merged with samtools below.
        result = []
        pool = multiprocessing.Pool(processes=options.nproc)
        for ii in range(len(samFile_list)):
            BAM_FILE = out_dir + "/pooled_temp%d.bam" %(ii)
            print(ii, BAM_FILE)
            result.append(pool.apply_async(fetch_reads, ([samFile_list[ii]],
                chroms, positions, BAM_FILE, [barcodes_in[ii]],
                [barcodes_out[ii]], "CB"), callback=show_progress))
        pool.close()
        pool.join()

        ## merge bam files
        file_list = [out_dir + "/pooled.bam"]
        file_list += [out_dir + "/pooled_temp%d.bam" %(x)
                      for x in range(len(samFile_list))]
        bashCommand = "samtools merge %s" %(" ".join(file_list))
        pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
        pro.communicate()[0]
        for dd in range(len(samFile_list)):
            os.remove(out_dir + "/pooled_temp%d.bam" %(dd))
        print("")

    ## sort and index bam file
    bashCommand = "samtools sort %s -o %s" %(out_dir + "/pooled.bam",
        out_dir + "/pooled.sorted.bam")
    pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    pro.communicate()[0]
    bashCommand = "samtools index %s" %(out_dir + "/pooled.sorted.bam")
    pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    pro.communicate()[0]
    os.remove(out_dir + "/pooled.bam")
# Script entry point: run the pooling pipeline when executed directly.
if __name__ == "__main__":
    main()
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,440 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/utils/variant_select.py | import numpy as np
from scipy.stats import entropy
from scipy.special import logsumexp, digamma
def barcode_entropy(X, y=None):
    """
    Entropy (base 2) of categorical barcodes.

    With only `X`, each element becomes one barcode string. With `y` of the
    same length, the elements are concatenated position-wise into joint
    barcodes. On a length mismatch an error is printed and (None, None)
    is returned.

    Returns
    -------
    (entropy, barcode_list)
    """
    if y is None:
        joint = [str(item) for item in X]
    elif len(X) == len(y):
        joint = [str(a) + str(b) for a, b in zip(X, y)]
    else:
        print("Error: X and y have different length in barcode_entropy.")
        return None, None
    _vals, counts = np.unique(joint, return_counts=True)
    freqs = counts / np.sum(counts)
    return entropy(freqs, base=2), joint
def variant_select(GT, var_count=None, rand_seed=0):
    """
    Selection of a set of discriminatory variants by prioritise variants on
    information gain.

    GT: (n_var * n_donor)
        a matrix with categorical values
    var_count: (n_var, )
        the counts for each variant
    rand_seed: int
        numpy RNG seed used for random tie-breaking

    Returns
    -------
    (entropy_now, barcode_set, variant_set): the entropy reached, the
    per-donor barcode strings, and the indices of the selected variants.
    """
    np.random.seed(rand_seed)
    K = GT.shape[1]
    entropy_now = 0
    variant_set = []
    # Every donor starts with the same one-character barcode "#".
    barcode_set = ["#"] * K
    entropy_all = np.zeros(GT.shape[0])
    barcode_all = [barcode_set] * GT.shape[0]
    while True:
        # Score each variant by the donor-barcode entropy reached if it
        # were appended to the current barcodes.
        for i in range(GT.shape[0]):
            _entropy, _barcode = barcode_entropy(barcode_set, GT[i, :])
            entropy_all[i], barcode_all[i] = _entropy, _barcode
        # Stop once no variant improves the entropy any further.
        if np.max(entropy_all) == entropy_now:
            break
        idx = np.where(np.max(entropy_all) == entropy_all)[0]
        if var_count is not None:
            # Among equally informative variants, keep the better-covered
            # half before the random pick.
            # idx = idx[np.argsort(var_count[idx])[::-1]]
            idx = idx[var_count[idx] >= np.median(var_count[idx])]
        print("Randomly select 1 more variants out %d" %len(idx))
        idx_use = idx[np.random.randint(len(idx))]
        variant_set.append(idx_use)
        barcode_set = barcode_all[idx_use]
        entropy_now = entropy_all[idx_use]
    # log2(K) is the entropy of K fully separated donors.
    if entropy_now < np.log2(K):
        print("Warning: variant_select can't distinguish all samples.")
    return entropy_now, barcode_set, variant_set
def variant_ELBO_gain(ID_prob, AD, DP, pseudocount=0.5):
    """Variant selection by comparing evidence lower bounds between
    M1: assigned to multiple donors and M0: with only a single donor.

    Parameters
    ----------
    ID_prob: (n_cell * n_donor)
        a matrix for cell assignment probability
    AD, DP: (n_var, n_cell)
        sparse matrices for counts on alternative allele or total depth
    pseudocount: float
        pseudo count as binomial prior

    Returns
    -------
    Per-variant ELBO gain of the multi-donor model over the single-donor
    model, shape (n_var,).
    """
    BD = DP - AD

    # Multi-donor model: aggregate alt/ref/total counts per donor.
    alt_d = AD @ ID_prob + pseudocount
    ref_d = BD @ ID_prob + pseudocount
    tot_d = DP @ ID_prob + 2 * pseudocount
    elbo_multi = logsumexp(
        alt_d * digamma(alt_d)
        + ref_d * digamma(ref_d)
        - tot_d * digamma(tot_d), axis=1
    )

    # Single-donor model: pool all cells into one column.
    alt_1 = AD.sum(1).A + pseudocount
    ref_1 = BD.sum(1).A + pseudocount
    tot_1 = DP.sum(1).A + 2 * pseudocount
    elbo_single = logsumexp(
        alt_1 * digamma(alt_1)
        + ref_1 * digamma(ref_1)
        - tot_1 * digamma(tot_1), axis=1
    )

    return elbo_multi - elbo_single
| {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,441 | single-cell-genetics/vireo | refs/heads/master | /vireoSNP/plot/__init__.py | from .base_plot import * | {"/vireoSNP/utils/vireo_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/__init__.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vcf_utils.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_bulk.py", "/vireoSNP/utils/bmm_model.py"], "/vireoSNP/utils/bmm_model.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/utils/io_utils.py": ["/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/GTbarcode.py": ["/vireoSNP/version.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/variant_select.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/utils/vireo_doublet.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/variant_select.py"], "/vireoSNP/utils/vireo_wrap.py": ["/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_model.py", "/vireoSNP/utils/vireo_doublet.py"], "/vireoSNP/utils/vcf_utils.py": ["/vireoSNP/utils/vireo_base.py"], "/vireoSNP/vireo.py": ["/vireoSNP/version.py", "/vireoSNP/utils/vireo_base.py", "/vireoSNP/utils/vireo_wrap.py", "/vireoSNP/plot/base_plot.py", "/vireoSNP/utils/io_utils.py", "/vireoSNP/utils/vcf_utils.py"], "/vireoSNP/plot/__init__.py": ["/vireoSNP/plot/base_plot.py"]} |
71,445 | BenteCodes/learnedwalking | refs/heads/master | /FitnessFunctionAbstract.py | '''
Created on 11.09.2019
@author: TKler
'''
from abc import abstractmethod, ABC
class FitnessFunctionAbstract(ABC):
    """Interface for fitness functions used by the genetic algorithms."""

    @abstractmethod
    def getFitness(self):
        """Return the fitness value of the evaluated individual."""
        pass
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,446 | BenteCodes/learnedwalking | refs/heads/master | /MA/Tests/GeneticAlgorithm_Test.py | import pytest
from MA.GeneticAlgorithm import GeneticAlgorithm
def test_initNetwork():
    # Smoke test: constructing the GA with these hyper-parameters must not
    # raise; the instance itself is not inspected further.
    cord = GeneticAlgorithm(20, 50, 50, 20, 0)
    pass
# THIS IS NOT A UNIT TEST!!! ONLY MANUAL EXECUTION
def fullRun():
    # Run a full (long) evolutionary optimization; intended for manual
    # experimentation, not for the pytest suite.
    cord = GeneticAlgorithm(6, 50, 50, 3, 0)
    cord.evolve()
# NOTE(review): executed at import time, so merely collecting this module
# triggers the whole evolution run — presumably intentional per the comment
# above; confirm before adding this file to CI.
fullRun()
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,447 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/ForewardmovementTest.py | '''
Created on Sep 19, 2019
@author: alica
'''
import numpy as np
from NIPSChallenge.RobotControl_local import RobotControlNipsLocal
# Hand-tuned action vectors for the walking robot; each list holds 22
# activation values, the first 11 annotated as the left leg.
bend_knee_action = [1, 1, 0, 0, 0.5, 0, 0, 0, 0, 0, 0.5, # left leg
                    0, 0, 1, 0, 0, 0.5, 0, 0, 0, 0, 0.5]
make_step_action = [1, 1, 1, 0, 0, 1, 1, 0.5, 0.5, 0.5, 1, # left leg
                    0, 0, 0.5, 0, 0, 0, 0, 1, 0.5, 0, 0]
make_step_action2 = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, # left leg
                     0, 0, 0.5, 0, 0, 0.5, 0, 0.5, 0, 0, 0]
foot_down_action = [1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, # left leg
                    0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0.5]
# NOTE(review): only bend_knee_action is exercised below; the other three
# action vectors are currently unused.
bentes_robotcontroll = RobotControlNipsLocal(True)
counter = 0
# NOTE(review): deliberately endless observation loop — the robot is stepped
# 20 times, then the loop keeps printing; stop the script manually.
while True:
    if counter < 20:
        bentes_robotcontroll.walkRobot(np.array([bend_knee_action]))
        counter += 1
    print(counter)
    print(bentes_robotcontroll.reward)
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,448 | BenteCodes/learnedwalking | refs/heads/master | /MA/RobotControl.py | from MA import vrep
# from nicomotion import Motion
import time
from RobotControlAbstract import RobotControlAbstract
class RobotControl(RobotControlAbstract):
robot_string = "../json/nico_humanoid_full_with_grippers_unchecked.json"
oneshot = "" # vrep.simx_opmode_oneshot
oneshot_wait = "" # vrep.simx_opmode_oneshot_wait
def __init__(self):
self.more_motors = 0
# self.robot = Motion.Motion(self.robot_string, vrep=True, vrepHost='127.0.0.1', vrepPort=19997)
vrep.simxFinish(-1) # TODO explain why this is here
self.clientID = vrep.simxStart('127.0.0.1', 19996, True, True, 5000, 5)
# TODO this is dirty!
def setMotorFlag(self, more_motors):
self.more_motors = more_motors
def startSimulation(self):
time.sleep(0.5)
vrep.simxStartSimulation(self.clientID, self.oneshot)
time.sleep(0.5)
def stopSimulation(self):
return vrep.simxStopSimulation(self.clientID, self.oneshot)
def robotFell(self):
torso_handle = vrep.simxGetObjectHandle(self.clientID, "torso_11_visual", self.oneshot_wait)
[_m, position_robot] = vrep.simxGetObjectPosition(self.clientID, torso_handle[1], -1, self.oneshot_wait)
return position_robot[2] < 0.2
def walkRobot(self, motor_values):
self.controlMotors(motor_values)
def controlMotors(self, motor_values):
if self.more_motors > 0:
full_legs = True
self.setLeftLeg(motor_values, full_legs)
self.setRightLeg(motor_values, full_legs)
if self.more_motors > 1:
self.setRightArm(motor_values)
self.setLeftArm(motor_values)
time.sleep(0.01)
def setRightArm(self, motorValues):
self.robot.changeAngle("r_shoulder_y", motorValues[(0, 0)], 1)
self.robot.changeAngle("r_shoulder_z", motorValues[(0, 1)], 1)
self.robot.changeAngle("r_arm_x", motorValues[(0, 2)], 1)
self.robot.changeAngle("r_elbow_y", motorValues[(0, 3)], 1)
def setLeftArm(self, motorValues):
self.robot.changeAngle("l_shoulder_y", motorValues[(0, 4)], 1)
self.robot.changeAngle("l_shoulder_z", motorValues[(0, 5)], 1)
self.robot.changeAngle("l_arm_x", motorValues[(0, 6)], 1)
self.robot.changeAngle("l_elbow_y", motorValues[(0, 7)], 1)
def setLeftLegVertical(self, motorValues):
self.robot.changeAngle("l_hip_x", motorValues[(0, 14)], 1)
self.robot.changeAngle("l_hip_z", motorValues[(0, 15)], 1)
self.robot.changeAngle("l_ankle_x", motorValues[(0, 19)], 1)
def setLeftLegHorizontal(self, motorValues):
self.robot.changeAngle("l_hip_y", motorValues[(0, 16)], 1)
self.robot.changeAngle("l_knee_y", motorValues[(0, 17)], 1)
self.robot.changeAngle("l_ankle_y", motorValues[(0, 18)], 1)
def setLeftLeg(self, motorValues, all_motors):
self.setLeftLegHorizontal(motorValues)
if all_motors:
self.setLeftLegVertical(motorValues)
def setRightLegVertical(self, motorValues):
self.robot.changeAngle("r_hip_x", motorValues[(0, 8)], 1)
self.robot.changeAngle("r_hip_z", motorValues[(0, 9)], 1)
self.robot.changeAngle("r_ankle_x", motorValues[(0, 13)], 1)
def setRightLegHorizontal(self, motorValues):
    """Right-leg y-axis joints (hip, knee, ankle pitch): columns 10, 11, 12."""
    for joint, column in (("r_hip_y", 10), ("r_knee_y", 11), ("r_ankle_y", 12)):
        self.robot.changeAngle(joint, motorValues[(0, column)], 1)
def setRightLeg(self, motorValues, all_motors):
    """Drive the right leg: pitch joints always, roll/yaw joints only when all_motors."""
    self.setRightLegHorizontal(motorValues)
    if not all_motors:
        return
    self.setRightLegVertical(motorValues)
def getEvalData(self):
    """Collect evaluation data for the fitness function.

    Returns a tuple: (fell?, reference-cube position, right-foot position,
    left-foot position), positions in world coordinates.
    """
    def world_position(object_name):
        # Resolve the object handle, then query its absolute (-1) position.
        handle = vrep.simxGetObjectHandle(self.clientID, object_name, self.oneshot_wait)
        _ret, position = vrep.simxGetObjectPosition(self.clientID, handle[1], -1, self.oneshot_wait)
        return position

    position_ref = world_position("reference_cube")
    position_foot_r = world_position("right_foot_11_respondable")
    position_foot_l = world_position("left_foot_11_respondable")
    return self.robotFell(), position_ref, position_foot_r, position_foot_l
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,449 | BenteCodes/learnedwalking | refs/heads/master | /MA/Tests/Network_Test.py | '''
Created on Sep 2, 2019
@author: alica
'''
import pytest
from MA.WalkingNetwork import WalkingNetwork
import numpy as np
def test_generateRandomWeights():
    """The generator must emit exactly number_of_weights entries."""
    generated = WalkingNetwork.generateRandomWeights()
    assert len(generated) == WalkingNetwork.number_of_weights, "Incorrect Number of weights"
def test_generateRandomNW():
    """A network built from freshly generated random weights must construct."""
    network = WalkingNetwork(WalkingNetwork.generateRandomWeights())
    assert network is not None
def test_initNetwork():
    """The constructor must wire up the expected 10/4/20 layer sizes."""
    network = WalkingNetwork(WalkingNetwork.generateRandomWeights())
    assert network is not None
    expectations = (
        ('number_of_input_units', 10, 'wrong number of input units'),
        ('number_of_hidden_units', 4, 'wrong number of hidden units'),
        ('number_of_output_units', 20, 'wrong number of output units'),
    )
    for attribute, expected, message in expectations:
        assert getattr(network, attribute) == expected, message
def test_inputToHidden():
    """Hidden-layer maths: feed-forward matmul, then the per-unit recurrent
    contribution (one weight per hidden neuron — not fully connected)."""
    hidden_to_hidden = [[1, 2, 3, 4]]
    last_output_hidden = [[1, 2, 3, 4]]
    nw_input = np.array([[1, 2, 3, 4]])
    weights = [[1, 2, 3, 4]] * 4
    value_hidden_neurons = np.matmul(nw_input, weights)
    for expected, actual in zip([10, 20, 30, 40], value_hidden_neurons[0]):
        assert actual == expected, 'wrong results'
    print('happy')
    # Recurrent part is elementwise: one self-connection weight per hidden unit.
    for idx in range(4):
        value_hidden_neurons[0][idx] += hidden_to_hidden[0][idx] * last_output_hidden[0][idx]
    for expected, actual in zip([11, 24, 39, 56], value_hidden_neurons[0]):
        assert actual == expected, 'wrong results'
    print('happy')
def test_hiddenToOutput():
    """Hidden-to-output matmul: output column j must equal (5+6+7+8) * (j + 1)."""
    last_output_hidden = [[5, 6, 7, 8]]
    column_values = list(range(1, 21))
    hidden_to_output_all = [column_values for _ in range(4)]
    value_output_neurons = np.matmul(last_output_hidden, hidden_to_output_all)
    base = 26  # sum of the hidden outputs 5 + 6 + 7 + 8
    for index, value in enumerate(value_output_neurons[0]):
        assert value == base * (index + 1), 'wrong results'
def test_getInput():
    # TODO: getInput is not yet covered by a real test.
    pass
def test_computeOneStep():
    # TODO: computeOneStep is not yet covered by a real test.
    pass
def test_resetHiddenLayer():
    # TODO: resetHiddenLayer is not yet covered by a real test.
    pass
def test_getNumberOfWeights():
    # TODO: getNumberOfWeights is not yet covered by a real test.
    pass
def test_getWeightAt():
    # TODO: getWeightAt is not yet covered by a real test.
    pass
def test_getMovement():
    # TODO: getMovement is not yet covered by a real test.
    pass
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,450 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/NIPSNetwork.py | '''
Created on 11.09.2019
@author: TKler
'''
from scipy.stats import logistic
from NetworkTemplate import NetworkTemplate
from SimplePatternGenerator import SimplePatternGenerator
class NIPSNetwork(NetworkTemplate):
    """Three-layer network for the NIPS challenge.

    The input layer is fed exclusively by the simple pattern generator —
    no sensory feedback from the simulator is used.
    """

    number_of_sensory_inputs = 0
    number_of_pattern_inputs = SimplePatternGenerator.number_of_patterns
    number_of_input_units = number_of_pattern_inputs + number_of_sensory_inputs
    number_of_hidden_units = 11
    number_of_output_units = 11
    # input->hidden + (presumably one recurrent weight per hidden unit — TODO
    # confirm against Network3LayerAbstract) + hidden->output + direct
    # input->output shortcut connections.
    number_of_weights = (number_of_input_units * number_of_hidden_units) + number_of_hidden_units + (number_of_hidden_units * number_of_output_units) + (number_of_input_units * number_of_output_units)
    # Fresh random weights are drawn from this interval.
    start_weights_range = [-4, 4]

    def getInput(self):
        # Inputs come straight from the pattern generator.
        return self.getInputFromSimplePattern()

    def applyActivationFunction(self, matrix):
        # Sigmoid activation (logistic CDF), applied elementwise.
        return logistic.cdf(matrix)

    def takeInputFromSim(self, data):
        # No sensory inputs are configured, so simulator data is ignored.
        pass
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,451 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/TestRun.py | '''
Created on 14.09.2019
Standard gravity: -9.80665
@author: TKler
'''
from NIPSChallenge.GeneticAlgorithm import GeneticAlgorithm
import SafeData
def fullRun():
    """Resume evolution from the saved population with fixed GA settings."""
    mutate = 80       # mutation rate (percent)
    crossover = 20    # crossover rate (percent)
    pop_size = 50
    coordinator = GeneticAlgorithm(pop_size, mutate, crossover, 50, True)
    coordinator.population = loadpop()
    coordinator.evolve()
def loadpop():
    """Load the previously saved population snapshot from disk."""
    return SafeData.loadPopulation('office_run4.json')
# Run forever: each fullRun() reloads the saved population and evolves it again.
while True:
    fullRun()
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,452 | BenteCodes/learnedwalking | refs/heads/master | /MA/Tests/SyncedNetwork.py | '''
Created on 19.09.2019
@author: TKler
'''
from NIPSChallenge.SyncedNetwork import SyncedNetwork
from NIPSChallenge.NIPSNetwork import NIPSNetwork
def sync_test():
    """Both halves of a SyncedNetwork's output should (nearly) agree."""
    synced = SyncedNetwork(SyncedNetwork.generateRandomWeights())
    step_output = synced.computeOneStep()
    midpoint = int(len(step_output) / 2)
    first_half = step_output[0:midpoint]
    second_half = step_output[midpoint:]
    print(first_half)
    print(second_half)
    # NOTE(review): the final pair is never compared (range stops at len - 1)
    # — confirm whether that is intentional.
    for i in range(0, len(first_half) - 1):
        assert abs(first_half[i] - second_half[i]) < 0.1, 'non equal values'
# Executed at import time: the module doubles as a runnable check.
sync_test()
def testing():
    """Two NIPSNetworks built from identical weights must step identically."""
    shared_weights = NIPSNetwork.generateRandomWeights()
    first = NIPSNetwork(shared_weights)
    second = NIPSNetwork(shared_weights)
    out_first = first.computeOneStep()
    out_second = second.computeOneStep()
    print(out_first)
    print(out_second)
    # NOTE(review): the last element is never compared (range stops at len - 1).
    for i in range(0, len(out_first) - 1):
        assert (out_first[i] == out_second[i]), 'non equal values'
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,453 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/RobotControl_local.py | '''
Created on Sep 16, 2019
@author: alica
'''
import opensim as osim # This error is here because of the virtual env
from osim.env import L2M2019Env # This error is here because of the virtual env
import numpy as np
from RobotControlAbstract import RobotControlAbstract
class RobotControlNipsLocal(RobotControlAbstract):
    '''
    Local NIPS "Learn to Move" robot control: wraps an in-process osim
    L2M2019Env and accumulates the per-step reward for fitness evaluation.
    '''

    def __init__(self, visualization):
        """Create and reset the simulation environment.

        visualization -- bool, forwarded to L2M2019Env(visualize=...).
        """
        self.env = L2M2019Env(visualize=visualization)
        self.observation = self.env.reset()
        self.reward = 0
        # Fix: robotFell() used to raise AttributeError when called before the
        # first walkRobot(); start each episode in an explicit "not done" state.
        self.done = False

    def startSimulation(self):
        """Reset the environment and the episode bookkeeping for a new run."""
        self.reward = 0
        self.done = False
        self.observation = self.env.reset()

    def robotFell(self):
        """The environment's `done` flag doubles as the fall/termination signal."""
        return self.done

    def walkRobot(self, motor_values):
        """Advance the simulation one step with the given motor commands."""
        [observation, reward, done, _info] = self.env.step(motor_values)
        self.observation = observation
        self.done = done
        self.reward += reward  # TODO check if reward is cumulative on its own

    def getEvalData(self):
        """Accumulated reward of the current episode."""
        return self.reward
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,454 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/PopGenNips.py | '''
Created on Sep 16, 2019
@author: alica
'''
from PopulationGeneratorAbstract import PopulationGeneratorAbstract
import random
from NIPSChallenge.SyncedNetwork import SyncedNetwork
# genetic algorithm to learn basic pattern
class PopulationGeneratorNips(PopulationGeneratorAbstract):
    """Breeds SyncedNetwork populations via elitism, crossover and mutation."""

    # Largest absolute perturbation applied to a single weight when mutating.
    max_weight_change = 2
    # Elitism: this many best networks are carried over unchanged.
    number_of_kept_best_networks = 2
    network_type = SyncedNetwork

    def __init__(self, size_of_population, mutation_rate, crossover_rate):
        # mutation_rate and crossover_rate are percentages (0-100).
        self.size_of_population = size_of_population
        self.mutation_rate = mutation_rate
        self.crossover_rate = crossover_rate

    def mutate5050(self, network):
        """With 50% probability return a mutated copy, otherwise the input."""
        mutate = random.randint(0, 1)
        if mutate == 1:
            network = self.createMutantNetwork(network)
        return network

    def createNextGeneration(self, old_networks_sorted_by_fitness):
        """Create the next generation from a best-first sorted population:
        elites, crossover/mutation children, plus one random newcomer."""
        print('Next Generation')
        new_population = []
        # Keep the elites; reset their recurrent hidden state before reuse.
        for i in range(0, self.number_of_kept_best_networks):
            old_networks_sorted_by_fitness[i].resetHiddenLayer()
            new_population.append(old_networks_sorted_by_fitness[i])
        # Fill all but the last slot with children.
        while len(new_population) < self.size_of_population-1:
            probability = random.randint(0, 100)
            # crossover with 5050 mutation ...
            if probability <= self.crossover_rate:
                child_network = self.crossoverNetwork(self.getRandomIndexBetterPreferred(old_networks_sorted_by_fitness), self.getRandomIndexBetterPreferred(old_networks_sorted_by_fitness))
                child_network = self.mutate5050(child_network)
            else:
                # ... or only mutation
                child_network = self.createMutantNetwork(self.getRandomIndexBetterPreferred(old_networks_sorted_by_fitness))
            new_population.append(child_network)
        # Last slot: a completely random newcomer to keep diversity up.
        new_population.append(self.network_type(self.network_type.generateRandomWeights()))
        return new_population

    def createMutantNetwork(self, network):
        """Copy a network, perturbing each weight with mutation_rate% chance."""
        new_weights = []
        for index in range(0, network.getNumberOfWeights()):
            # probability to mutate into weightmutation
            weight = network.getWeightAt(index)
            if random.randint(0, 100) <= self.mutation_rate:
                mutation = random.uniform(-self.max_weight_change, self.max_weight_change)
                weight += mutation
                # Clamp mutated weights to [-20, 20].
                if weight < -20:
                    weight = -20
                if weight > 20:
                    weight = 20
            new_weights.append(weight)
        return self.network_type(new_weights)

    def crossoverNetwork(self, network1, network2):
        """Single-point crossover: weights up to a random cut point come from
        network1, the remainder from network2."""
        network_size = network1.getNumberOfWeights()
        crossover_point = random.randint(0, network_size - 1)
        new_weights = []
        for index in range(0, network_size):
            if index <= crossover_point:
                new_weights.append(network1.getWeightAt(index))
            else:
                new_weights.append(network2.getWeightAt(index))
        return self.network_type(new_weights)

    def getRandomIndexBetterPreferred(self, ranked):
        """Rank-weighted roulette pick from a best-first list: earlier (better)
        entries get linearly larger selection slices."""
        rankSum = sum(range(self.size_of_population + 1))
        pickedIndex = random.uniform(0, rankSum)
        curIndex = 0
        diminishingWorth = self.size_of_population
        for i in range(0, self.size_of_population):
            curIndex += diminishingWorth
            if curIndex >= pickedIndex:
                return ranked[i]
            diminishingWorth -= 1

    def initPopulation(self):
        """Fresh population of size_of_population randomly weighted networks."""
        population = []
        for _1 in range(0, self.size_of_population):
            population.append(self.network_type(self.network_type.generateRandomWeights()))
        return population
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,455 | BenteCodes/learnedwalking | refs/heads/master | /MA/Tests/FitnessFunction_Test.py | import pytest
from MA.FitnessFunction import FitnessFunction
from math import sqrt
def test_penalizeFalling():
    """Falling costs -100; staying upright costs nothing."""
    fitness_func = FitnessFunction()
    assert fitness_func.penalizeFalling(True) == -100, 'falling wrongly penalized'
    assert fitness_func.penalizeFalling(False) == 0, 'not falling penalized'
def test_calcDistanceMoved():
    """Distance moved is the mean of both feet's distances from the start."""
    fitness_func = FitnessFunction()
    cases = (
        (([0, 0], [0, 1], [0, 1]), 1),
        (([0, 0], [0, 2], [0, 1]), 1.5),
        (([0, 0], [0, 1], [0, -1]), 1),
    )
    for args, expected in cases:
        assert fitness_func.calcDistanceMoved(*args) == expected, 'distance wrongly calculated'
def test_calcEuclideanDistance():
    """Spot-check the 2D Euclidean distance on a few point pairs."""
    fitness_func = FitnessFunction()
    samples = (
        ([5, 3], [4, 1], sqrt(1 * 1 + 2 * 2)),
        ([38, -17], [8, 3], sqrt(30 * 30 + 20 * 20)),
        ([-15, -13], [-1, 8], sqrt(196 + 441)),
    )
    for point1, point2, expected in samples:
        assert fitness_func.calcEuclideanDistance(point1, point2) == expected
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,456 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/GeneticAlgorithm.py | '''
Created on 11.09.2019
@author: TKler
'''
from NIPSChallenge.PopGenNips import PopulationGeneratorNips
from NIPSChallenge.RobotControl_local import RobotControlNipsLocal
from NIPSChallenge.FitnessFunction import FitnessFunction
from GeneticAlgorithmTemplate import GeneticAlgorithmTemplate
class GeneticAlgorithm(GeneticAlgorithmTemplate):
    """NIPS-challenge GA: wires the population generator, the local osim
    robot control and the reward-based fitness function into the template."""

    number_of_steps_in_simulator = 200
    simulator_repetitions = 1

    def __init__(self, popsize, mutation_rate, crossover_rate, iterations, visualization):
        """Validate the GA parameters, then build every collaborator."""
        super().checkParameters(popsize, mutation_rate, crossover_rate, iterations)
        self.max_iterations = iterations
        self.initRobotControl(visualization)
        self.initPopGen(popsize, mutation_rate, crossover_rate)
        self.init_population()
        self.initFitnessFunc()

    def init_population(self):
        """Ask the generator for an initial random population."""
        self.population = self.pop_generator.initPopulation()

    def initPopGen(self, popsize, mutation_rate, crossover_rate):
        """Population generator producing SyncedNetwork individuals."""
        self.pop_generator = PopulationGeneratorNips(popsize, mutation_rate, crossover_rate)

    def initRobotControl(self, visualization):
        """Local (in-process) NIPS simulator wrapper."""
        self.robot_control = RobotControlNipsLocal(visualization)

    def initFitnessFunc(self):
        """Reward-based fitness function."""
        self.fitness_function = FitnessFunction()

    def calcFitness(self, data):
        """Delegate fitness computation to the fitness function."""
        return self.fitness_function.getFitness(data)

    def getEvalFromSim(self):
        """Fetch the accumulated episode reward from the simulator wrapper."""
        return self.robot_control.getEvalData()
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,457 | BenteCodes/learnedwalking | refs/heads/master | /MA/FitnessFunction.py | import math
from FitnessFunctionAbstract import FitnessFunctionAbstract
class FitnessFunction(FitnessFunctionAbstract):
    """Fitness for the walking task: reward distance walked, punish falling."""

    def __init__(self):
        pass

    def getFitness(self, did_fall, position_ref, position_robot_foot_r, position_robot_foot_l):
        """Combined fitness score.

        did_fall              -- bool from the simulator's fall check
        position_ref          -- reference/start point; only [0] and [1] are used
        position_robot_foot_r -- right-foot position (same convention)
        position_robot_foot_l -- left-foot position (same convention)
        """
        fitness = self.penalizeFalling(did_fall)
        fitness += self.calcDistanceMoved(position_ref, position_robot_foot_r, position_robot_foot_l)
        return fitness

    def penalizeFalling(self, did_fall):
        """-100 when the robot fell (really don't fall), 0 otherwise."""
        return -100 if did_fall else 0

    def calcDistanceMoved(self, pos_start, pos_foot_r, pos_foot_l):
        """Mean Euclidean distance of both feet from the start point."""
        distance_right_foot = self.calcEuclideanDistance(pos_start, pos_foot_r)
        distance_left_foot = self.calcEuclideanDistance(pos_start, pos_foot_l)
        return (distance_right_foot + distance_left_foot) / 2

    def calcEuclideanDistance(self, point1, point2):
        """Planar Euclidean distance using the first two components of each point."""
        dx = point1[0] - point2[0]
        dy = point1[1] - point2[1]
        return math.sqrt(dx * dx + dy * dy)
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,458 | BenteCodes/learnedwalking | refs/heads/master | /SafeData.py | import csv
from pathlib import Path
import json
from NIPSChallenge.SyncedNetwork import SyncedNetwork
def safeMeanAndTopXFitnesses(mean_fitness, best_x_fitnesses):
    """Append one row to output/fitness.csv: the mean fitness and a
    space-separated string of the best fitness values.

    mean_fitness -- scalar mean fitness of the current generation
    best_x_fitnesses -- iterable of the top fitness values to log
    """
    path = (getBasePath() / 'fitness.csv').resolve()
    # join instead of the original quadratic `+=` loop; keeps the
    # trailing space the loop produced
    fitness_string = ''.join(str(fitness) + ' ' for fitness in best_x_fitnesses)
    with open(str(path), 'a') as csvfile1:
        errorwriter = csv.writer(csvfile1, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        errorwriter.writerow([str(mean_fitness), fitness_string])
def getBasePath():
    """Return the resolved absolute path of the 'output' directory that
    lives next to this module."""
    return (Path(__file__).parent / 'output/').resolve()
def safePopulation(pop, filename='office_run4.json'):
    """Serialize the weight vectors of every network in *pop* to JSON.

    pop -- iterable of network objects exposing a `.weights` attribute
    filename -- output path; defaults to the historical hard-coded target
                so existing callers keep writing to the same file
    """
    data = {'networks': [{'weights': nw.weights} for nw in pop]}
    with open(filename, 'w') as outfile:
        json.dump(data, outfile)
    print("safed population to json")
def loadPopulation(file):
    """Rebuild a population of SyncedNetwork objects from a JSON weight dump
    previously written by safePopulation."""
    with open(file) as json_file:
        data = json.load(json_file)
    population = [SyncedNetwork(entry['weights']) for entry in data['networks']]
    print("loaded population from json")
    return population
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,459 | BenteCodes/learnedwalking | refs/heads/master | /MA/WalkingNetwork.py | from scipy.stats import logistic
from NetworkTemplate import NetworkTemplate
class WalkingNetwork(NetworkTemplate):
    """Recurrent 3-layer walking network driven purely by the internal
    simple-pattern generator (no sensory input)."""

    number_of_sensory_inputs = 0
    number_of_pattern_inputs = 4
    number_of_input_units = number_of_pattern_inputs + number_of_sensory_inputs
    number_of_hidden_units = 4
    number_of_output_units = 20
    # BUG FIX: NetworkTemplate.initNetwork slices FOUR weight groups
    # (input->hidden, recurrent hidden, hidden->output, input->output).
    # The original total omitted the input->output term, so weight lists of
    # the declared length were too short for initNetwork's final reshape.
    number_of_weights = ((number_of_input_units * number_of_hidden_units)
                         + number_of_hidden_units
                         + (number_of_hidden_units * number_of_output_units)
                         + (number_of_input_units * number_of_output_units))
    start_weights_range = [-1, 1]

    # @input weights weights of the network
    def getInput(self):
        """Return the next input vector from the simple pattern generator."""
        input_matrix = self.getInputFromSimplePattern()
        return input_matrix

    def applyActivationFunction(self, matrix):
        """Logistic sigmoid rescaled from (0, 1) to (-1, 1)."""
        return (logistic.cdf(matrix) * 2) - 1

    def takeInputFromSim(self, data):
        pass  # not applicable as this does not take any input
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,460 | BenteCodes/learnedwalking | refs/heads/master | /NetworkTemplate.py | import numpy as np
from SimplePatternGenerator import SimplePatternGenerator
from Network3LayerAbstract import Network3LayerAbstract
from abc import abstractmethod
import random
class NetworkTemplate(Network3LayerAbstract):
    """Recurrent 3-layer network base class.

    Topology: input -> hidden (plus one self-recurrent weight per hidden
    unit), hidden -> output, and direct input -> output connections.
    Weights arrive as one flat list and are sliced into four groups in
    initNetwork(). Subclasses must provide the unit-count class attributes
    and the abstract methods at the bottom.
    """

    def __init__(self, weights):
        # weights: flat list whose length must equal cls.number_of_weights
        self.checkParameters(weights)
        self.initNetwork(weights)
        self.initInputPattern()

    def checkParameters(self, weights):
        # NOTE(review): only warns on a length mismatch; a wrong count will
        # still make the reshapes in initNetwork fail later.
        if not(len(weights) == self.number_of_weights):
            print("Paramcheck: Weights of incorrect number_of_weights")

    def initNetwork(self, weights):
        # Slice the flat weight list into the four weight groups, in order.
        self.weights = weights
        self.last_output_hidden = []
        self.resetHiddenLayer()
        # group 1: input -> hidden, reshaped to (inputs, hiddens)
        position_start = 0
        position_end = self.number_of_input_units * self.number_of_hidden_units
        self.input_to_hidden_all = np.array([weights[position_start:position_end]])
        self.input_to_hidden_all = np.reshape(self.input_to_hidden_all, (self.number_of_input_units, self.number_of_hidden_units))
        # group 2: one self-recurrent weight per hidden unit, forced non-negative
        position_start = position_end
        position_end += self.number_of_hidden_units
        self.hidden_to_hidden = np.abs(np.array([weights[position_start:position_end]]))
        # group 3: hidden -> output
        position_start = position_end
        position_end += (self.number_of_hidden_units * self.number_of_output_units)
        self.hidden_to_output_all = np.array([weights[position_start:position_end]])
        self.hidden_to_output_all = np.reshape(self.hidden_to_output_all, (self.number_of_hidden_units, self.number_of_output_units))  # sort after connection not just a long list
        # group 4: direct input -> output
        position_start = position_end
        position_end += (self.number_of_input_units * self.number_of_output_units)
        self.input_to_output_all = np.array([weights[position_start:position_end]])
        self.input_to_output_all = np.reshape(self.input_to_output_all, (self.number_of_input_units, self.number_of_output_units))  # sort after connection not just a long list

    def initInputPattern(self):
        # pattern generator that feeds the input layer each step
        self.simple_pattern = SimplePatternGenerator()
    '''
    Combines the weights from the input layer to the hidden layer and the recurrent weights of the hidden layer
    into one matrix.
    Only needed once per network
    '''
    # NOTE(review): the floating string above appears to be a leftover
    # docstring; it does not describe getInputFromSimplePattern below.
    def getInputFromSimplePattern(self):
        # one step of the generated input pattern
        return self.simple_pattern.nextStep()

    def computeHiddenOutputs(self, nw_input, input_to_hidden_all, last_output_hidden, hidden_to_hidden):
        # feed-forward part: input layer times input->hidden weights
        value_hidden_neurons = np.matmul(nw_input, input_to_hidden_all)
        for index in range(0, self.number_of_hidden_units):  # append the hidden layer inputs. this has to be done one by one, as they are not fully connected, but just one weight per line
            value_hidden_neurons[0][index] += hidden_to_hidden[0][index] * last_output_hidden[0][index]
        # +1 accounts for the single recurrent input added above
        value_hidden_neurons = self.normaliseNeuronInputSomewhat(value_hidden_neurons, self.number_of_input_units + 1)
        value_hidden_neurons = (self.applyActivationFunction(value_hidden_neurons))  # TODO monitor this
        return value_hidden_neurons

    def computeOutputsFromHiddenOnwards(self, last_output_hidden, hidden_to_output_all, nw_input, input_to_output_all):
        # hidden -> output contribution
        value_output_neurons1 = np.matmul(last_output_hidden, hidden_to_output_all)
        value_output_neurons1 = self.normaliseNeuronInputSomewhat(value_output_neurons1, self.number_of_hidden_units)
        # direct input -> output contribution
        value_output_neurons2 = np.matmul(nw_input, input_to_output_all)
        value_output_neurons2 = self.normaliseNeuronInputSomewhat(value_output_neurons2, self.number_of_input_units)
        network_output = self.applyActivationFunction(value_output_neurons1 + value_output_neurons2)
        return network_output
    '''
    One run through the network. From input to hidden, hidden to output
    With recursive neurons and sigmoid function
    '''
    def computeOneStep(self):
        nw_input = np.array([self.getInputFromSimplePattern()])
        # hidden state is kept between steps (recurrent network)
        self.last_output_hidden = self.computeHiddenOutputs(nw_input, self.input_to_hidden_all, self.last_output_hidden, self.hidden_to_hidden)
        return self.computeOutputsFromHiddenOnwards(self.last_output_hidden, self.hidden_to_output_all, nw_input, self.input_to_output_all)

    def normaliseNeuronInputSomewhat(self, values, no_inputs):
        # rough normalisation: divide the summed inputs by half the fan-in
        return np.divide(values, no_inputs / 2)

    def resetHiddenLayer(self):
        self.last_output_hidden = np.ones((1, self.number_of_hidden_units))  # set to neutral element

    def getWeightAt(self, index):
        # read-only access to one raw weight
        return self.weights[index]

    @classmethod
    def getNumberOfWeights(cls):
        return cls.number_of_weights

    @classmethod
    def generateRandomWeights(cls):
        # uniform random weights within the subclass-defined start range
        weights = []
        for _i in range(0, cls.number_of_weights):
            weights.append(random.uniform(cls.start_weights_range[0], cls.start_weights_range[1]))
        return weights

    @abstractmethod
    def applyActivationFunction(self, matrix):
        return NotImplementedError

    @abstractmethod
    def getInput(self):
        return NotImplementedError

    @abstractmethod
    def takeInputFromSim(self, data):
        return NotImplementedError
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,461 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/FitnessFunction.py | '''
Created on 14.09.2019
@author: TKler
'''
from FitnessFunctionAbstract import FitnessFunctionAbstract
class FitnessFunction(FitnessFunctionAbstract):
    """Identity fitness function for the NIPS challenge: the simulator's
    reward is used directly as the fitness value."""

    def __init__(self):
        pass

    def getFitness(self, fitness):
        # pass the simulator reward through unchanged
        return fitness
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,462 | BenteCodes/learnedwalking | refs/heads/master | /MA/Tests/PopulationGenerator_test.py | '''
Created on Sep 5, 2019
@author: alica
'''
import pytest
from MA.PopulationGenerator import PopulationGenerator
from MA.WalkingNetwork import WalkingNetwork
def test_mutate5050():
    """mutate5050 should sometimes keep the network and sometimes mutate it."""
    pop_gen = PopulationGenerator(10, 53, 45)
    nw = WalkingNetwork(WalkingNetwork.generateRandomWeights())
    mutants = [pop_gen.mutate5050(nw) for _ in range(10)]
    assert nw in mutants, 'Always mutation (fails with 0.1%)'
    assert any(candidate is not nw for candidate in mutants), 'Never mutated (fails with 0.1%)'
def test_createNextGeneration():
    """createNextGeneration keeps the two elites and replaces the rest."""
    pop_gen = PopulationGenerator(10, 53, 45)
    init_population = pop_gen.initPopulation()
    new_population = pop_gen.createNextGeneration(init_population)
    # elitism: the two best networks survive untouched
    assert init_population[0] is new_population[0], 'Good networks not kept alive'
    assert init_population[1] is new_population[1], 'Good networks not kept alive'
    for idx in range(2, 10):
        assert init_population[idx] is not new_population[idx], 'Unchanged WalkingNetwork'
    assert any(
        init_population[0].getWeightAt(i) != new_population[3].getWeightAt(i)
        for i in range(0, new_population[3].getNumberOfWeights())
    ), 'No changed Networkweights on nw in position 3'
    assert len(new_population) == 10, 'new population not the same size as old population'
def test_createMutantNetwork():
    """Placeholder test for createMutantNetwork -- assertions still missing."""
    pop_gen = PopulationGenerator(10, 53, 45)
    weights = WalkingNetwork.generateRandomWeights()
    nw = WalkingNetwork(weights)
    ten_networks = []
    for _i in range(0, 10):
        ten_networks.append(pop_gen.createMutantNetwork(nw))
    # BUG FIX: was `assert True == False` (a literal comparison linters flag);
    # `assert False, msg` is the idiomatic way to mark an unfinished test.
    assert False, 'Test not implemented'
def test_crossoverNetwork():
    """A crossover child starts with parent-1 weights and, once it switches,
    takes every remaining weight from parent 2."""
    pop_gen = PopulationGenerator(10, 43, 45)
    parent_a = WalkingNetwork(WalkingNetwork.generateRandomWeights())
    parent_b = WalkingNetwork(WalkingNetwork.generateRandomWeights())
    child = pop_gen.crossoverNetwork(parent_a, parent_b)
    switched = False
    for i in range(0, child.getNumberOfWeights()):
        if child.getWeightAt(i) == parent_a.getWeightAt(i):
            assert not switched, 'Switched back to nw1 (or weirder) (or really bad luck)'
        else:
            switched = True
        if switched:
            assert child.getWeightAt(i) == parent_b.getWeightAt(i), 'Weights from neither nw1 nor nw2 taken'
def test_getRandomIndexBetterPreferred():
    """Sampling should repeat indexes and be biased toward index 0 (the best)."""
    pop_gen = PopulationGenerator(10, 53, 45)
    init_population = pop_gen.initPopulation()
    chosen_indexes = []
    for _i in range(0, 10):
        chosen_indexes.append(pop_gen.getRandomIndexBetterPreferred(init_population))
    individual_indexes = []
    for element in chosen_indexes:
        if element not in individual_indexes:
            individual_indexes.append(element)
    # BUG FIX: `>=` was vacuously true (the full list can never be shorter
    # than its dedup); `>` actually checks that some index was drawn twice.
    assert len(chosen_indexes) > len(individual_indexes), 'no index chosen double, can happen but unlikely'  # TODO how unlikely
    # BUG FIX: the list holds int indexes, not networks, so membership must
    # be tested against index 0 rather than the first network object.
    assert 0 in chosen_indexes, 'first index not chosen, can happen but unlikely'  # TODO how unlikely
def test_initPopulation():
    """initPopulation returns a full population of constructed networks."""
    pop_gen = PopulationGenerator(10, 53, 45)
    population = pop_gen.initPopulation()
    assert population[0] is not None, 'Generated WalkingNetwork is None'
    assert len(population) == 10, 'wrong amount of individuals in the init-population'
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,463 | BenteCodes/learnedwalking | refs/heads/master | /GeneticAlgorithmTemplate.py | '''
Created on 12 Sep 2019
@author: felix
'''
from abc import abstractmethod, ABC
import SafeData
import numpy as np
class GeneticAlgorithmTemplate(ABC):
    """Template-method skeleton of the genetic algorithm.

    Each generation: simulate every network, rank by fitness, log the
    results via SafeData, then breed the next generation. Concrete
    subclasses wire in the population generator, robot control and fitness
    function through the abstract methods below.
    """
    number_of_steps_in_simulator = 100
    simulator_repetitions = 1
    number_of_documented_fitnesses_per_iteration = 5
    # fixed motor command replayed for the first steps to tip the robot forward
    fall_foreward_action = np.array([1, 1, 0, 0, 0.5, 0, 0, 0, 0, 0, 0.5,
                                     0, 0, 1, 0, 0, 0.5, 0, 0, 0, 0, 0.5])

    @abstractmethod
    def init_population(self):
        return NotImplementedError

    @abstractmethod
    def initPopGen(self, popsize, mutation_rate, crossover_rate):
        return NotImplementedError

    @abstractmethod
    def initRobotControl(self):
        return NotImplementedError

    @abstractmethod
    def initFitnessFunc(self):
        return NotImplementedError

    @abstractmethod
    def calcFitness(self):
        return NotImplementedError

    @abstractmethod
    def getEvalFromSim(self):
        return NotImplementedError

    def checkParameters(self, popsize, mutation_rate, crossover_rate, iterations):
        # NOTE: warnings only -- invalid values are reported but not rejected
        if popsize < 5:
            print("Paramcheck: Population size needs to be at least 5")
        if not 0 <= mutation_rate <= 100:
            print("Paramcheck: Mutation rate needs to be between 0 and 100")
        if not 0 <= crossover_rate <= 100:
            print("Paramcheck: Crossover rate needs to be between 0 and 100")
        if iterations < 1:
            print("Paramcheck: iterations needs to be positive")

    def __init__(self, popsize, mutation_rate, crossover_rate, iterations):
        self.checkParameters(popsize, mutation_rate, crossover_rate, iterations)
        # BUG FIX: `iterations` was validated but never stored, and evolve()
        # looped a hard-coded 50 times; store it so evolve() can honour it.
        self.iterations = iterations

    def simulateFitnessOfNetwork(self, network):
        """Run one simulator episode for *network* and return its fitness."""
        self.robot_control.startSimulation()
        self.walkInSimulator(network)
        dataDump = self.getEvalFromSim()
        fitness = self.calcFitness(dataDump)
        return fitness

    def walkInSimulator(self, network):
        """Drive the robot: scripted fall-forward first, then network output."""
        for i in range(0, self.number_of_steps_in_simulator):
            if i <= 15:
                self.robot_control.walkRobot(self.fall_foreward_action)
            else:
                self.robot_control.walkRobot(network.computeOneStep())
                # sensor_data = self.robot_control.walkRobot(network.computeOneStep())
                # network.takeInputFromSim(sensor_data)
            if(self.robot_control.robotFell()):
                break

    def getFitnessAveragedOverXTimes(self, network, times):
        """Average the fitness of *network* over *times* independent episodes."""
        fitness = 0
        for _1 in range(0, times):
            fitness += self.simulateFitnessOfNetwork(network)
            network.resetHiddenLayer()
        return (fitness / times)

    def getRankedNetworks(self):  # get top5NWWithFitness
        """Simulate and rank the population; log mean and top fitnesses."""
        fitnessList = []
        # create a list of networks their fitness
        for index in range(0, len(self.population)):
            fitness = self.getFitnessAveragedOverXTimes(self.population[index], self.simulator_repetitions)
            fitnessList.append([self.population[index], fitness])
        # sort it after fitness, biggest firsts
        fitnessList.sort(key=lambda x: x[1], reverse=True)
        # compute mean fitness
        meanFitness = np.mean([row[1] for row in fitnessList], axis=0)
        # safe mean and top 5 fitnesses (no networks)
        self.safeMeanAndTopXFitnesses(meanFitness, fitnessList)
        # return only the ordered networks (best first)
        print([row[1] for row in fitnessList][:5])
        return [row[0] for row in fitnessList]

    def safeMeanAndTopXFitnesses(self, mean_fitness, fitnessList):
        # delegate logging of the mean and the documented top fitnesses
        SafeData.safeMeanAndTopXFitnesses(mean_fitness, [row[1] for row in fitnessList][:self.number_of_documented_fitnesses_per_iteration])

    def evolve(self):
        """Run the evolutionary loop for `self.iterations` generations."""
        # BUG FIX: was `while curr_it < 51` (always 50 generations),
        # silently ignoring the `iterations` constructor argument.
        curr_it = 1
        while curr_it <= self.iterations:
            print("Current iteration:" + str(curr_it))
            rankedNetworks = self.getRankedNetworks()
            SafeData.safePopulation(self.population)
            self.population = self.pop_generator.createNextGeneration(rankedNetworks)
            curr_it += 1
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,464 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/Analyser.py | '''
Created on 23.09.2019
@author: TKler
'''
from NIPSChallenge.NIPSNetwork import NIPSNetwork
import numpy as np
import matplotlib.pyplot as plt
import SafeData
def weightsPerInput(nw):
    """Print, per input unit, the mean absolute outgoing weight and the
    absolute weight row itself."""
    for unit in range(0, nw.number_of_input_units):
        row = np.absolute(np.array(nw.input_to_hidden_all[unit]))
        print("Input", unit, "mean:", np.mean(row))
        print(row)
# weightsPerInput(nw)
def hiddenWeights(nw):
    """Print the recurrent hidden-layer weights of the given network."""
    print("Hidden weights", nw.hidden_to_hidden)
# hiddenWeights(nw)
def getOutputData(nw):
    """Run the network for 200 steps and plot the first 11 output-unit
    signals over time."""
    no_outputs = 11
    no_iters = 200
    xs = list(range(no_iters))
    ys = [[0 for _ in range(no_iters)] for _ in range(no_outputs)]
    for step in xs:
        output = nw.computeOneStep()
        for unit in range(no_outputs):
            ys[unit][step] = output[0][unit]
    for unit in range(no_outputs):
        plt.plot(xs, ys[unit])
    plt.title('Output Layer outputs')
    plt.show()
# getOutputData(nw)
def getHiddenLayerOutput(nw):
    """Run the network for 200 steps and plot the hidden-layer activations
    over time."""
    no_hiddens = 11
    no_iters = 200
    xs = list(range(no_iters))
    ys = [[0 for _ in range(no_iters)] for _ in range(no_hiddens)]
    for step in xs:
        nw.computeOneStep()  # advance one step; only the hidden state is read
        hidden = nw.last_output_hidden
        for unit in range(no_hiddens):
            ys[unit][step] = hidden[0][unit]
    for unit in range(no_hiddens):
        plt.plot(xs, ys[unit])
    plt.title('Hidden Layer outputs')
    plt.show()
# Ad-hoc analysis entry point: build a randomly weighted network and run
# all four inspection/plot routines on it.
nw = NIPSNetwork(NIPSNetwork.generateRandomWeights())
# Alternative: analyse the best network of a previously saved population.
# pop = SafeData.loadPopulation('office_run3.json')
# nw = pop[0].nw1
hiddenWeights(nw)
weightsPerInput(nw)
getHiddenLayerOutput(nw)
getOutputData(nw)
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,465 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/RobotControl_client.py | '''
Created on Sep 12, 2019
@author: alica
'''
import opensim as osim # This error is here because of the virtual env
from osim.http.client import Client # This error is here because of the virtual env
from NIPSChallenge.RobotControl_local import RobotControlNipsLocal
class RobotControlNipsClient(RobotControlNipsLocal):
    '''
    Remote variant of the NIPS robot control: connects to the aicrowd
    grader service instead of a local simulator.
    Initiates the simulator and the connection.
    '''
    def __init__(self):
        # Settings
        self.remote_base = "http://osim-rl-grader.aicrowd.com/"
        # NOTE(review): hard-coded API key committed to source -- should be
        # moved to an environment variable / config and the key rotated.
        self.aicrowd_token = "a66245c8324e2d37b92f098a57ef3f99"  # use your aicrowd token
        # your aicrowd token (API KEY) can be found at your prorfile page at https://www.aicrowd.com
        self.client = Client(self.remote_base)
        # Create environment (performs the network handshake with the grader)
        self.observation = self.client.env_create(self.aicrowd_token, env_id='L2M2019Env')
        self.reward = 0
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,466 | BenteCodes/learnedwalking | refs/heads/master | /MA/Tests/RobotControlDummy.py | # 2 outputneurons try to be as far away from 0 as possbile after the run
from RobotControlAbstract import RobotControlAbstract
class RobotControlDummy(RobotControlAbstract):
    """Simulator stand-in for tests: never falls, records the last motor
    command and exposes two of its values via getEvalData (so two output
    neurons can be scored by their distance from zero)."""

    def __init__(self):
        self.more_motors = 0  # default

    def startSimulation(self):
        pass

    def stopSimulation(self):
        pass

    def robotFell(self):
        # the dummy robot is unconditionally stable
        return False

    def walkRobot(self, motor_values):
        # remember only the most recent command
        self.motor_values = motor_values

    def getEvalData(self):
        # fell-flag, a zero position, and the two monitored motor outputs
        return (self.robotFell(), [0, 0],
                [0, self.motor_values[0][5]],
                [0, self.motor_values[0][10]])

    # TODO this is dirty!
    def setMotorFlag(self, more_motors):
        self.more_motors = more_motors
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,467 | BenteCodes/learnedwalking | refs/heads/master | /NIPSChallenge/SyncedNetwork.py | '''
Created on 19.09.2019
@author: TKler
'''
from NIPSChallenge.NIPSNetwork import NIPSNetwork
import numpy as np
from Network3LayerAbstract import Network3LayerAbstract
class SyncedNetwork(Network3LayerAbstract):
    """Two NIPSNetwork instances sharing one weight vector, run in lockstep.

    The second sub-network has its oscillation pattern advanced by pi (per
    the helper's name), so the two halves of the output are phase-shifted.
    """

    def __init__(self, weights):
        self.weights = weights
        self.nw1 = NIPSNetwork(weights)
        self.nw2 = NIPSNetwork(weights)
        # Shift the second network half a period out of phase.
        self.nw2.simple_pattern.increasePhaseByPI()

    def computeOneStep(self):
        """Advance both sub-networks one tick; return their concatenated outputs."""
        first_half = self.nw1.computeOneStep()
        second_half = self.nw2.computeOneStep()
        return np.append(first_half, second_half)

    def resetHiddenLayer(self):
        """Reset the hidden state of both sub-networks."""
        self.nw1.resetHiddenLayer()
        self.nw2.resetHiddenLayer()

    def getWeightAt(self, index):
        """Both sub-networks share weights, so reading from nw1 suffices."""
        return self.nw1.getWeightAt(index)

    def takeInputFromSim(self, data):
        """Simulator feedback is ignored — this network runs open-loop."""
        pass

    @staticmethod
    def getNumberOfWeights():
        """Delegate: weight count equals a single NIPSNetwork's."""
        return NIPSNetwork.getNumberOfWeights()

    @staticmethod
    def generateRandomWeights():
        """Delegate random initialization to NIPSNetwork."""
        return NIPSNetwork.generateRandomWeights()
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,468 | BenteCodes/learnedwalking | refs/heads/master | /Network3LayerAbstract.py | '''
Created on 11.09.2019
@author: TKler
'''
from builtins import staticmethod
'''
Created on 11.09.2019
@author: TKler
'''
from abc import abstractmethod, ABC
import numpy as np
class Network3LayerAbstract(ABC):
    """Interface for three-layer walking networks.

    Concrete subclasses (e.g. SyncedNetwork, NetworkTemplate) must implement
    the four instance methods; the two static factory/introspection helpers
    should be shadowed with real implementations as well.
    """

    @abstractmethod
    def computeOneStep(self):
        """Advance the network one tick and return its output vector."""
        # BUG FIX: was `return NotImplementedError`, which silently returned
        # the exception *class* instead of signaling a missing override.
        raise NotImplementedError

    @abstractmethod
    def resetHiddenLayer(self):
        """Reset any internal/hidden state between evaluation runs."""
        raise NotImplementedError

    @abstractmethod
    def getWeightAt(self, index):
        """Return the weight stored at position *index*."""
        raise NotImplementedError

    @abstractmethod
    def takeInputFromSim(self, data):
        """Feed simulator sensor data back into the network."""
        raise NotImplementedError

    @staticmethod
    def getNumberOfWeights():
        """Number of weights the network expects; subclasses must shadow this."""
        raise NotImplementedError

    @staticmethod
    def generateRandomWeights():
        """Produce a random weight vector; subclasses must shadow this."""
        raise NotImplementedError
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,469 | BenteCodes/learnedwalking | refs/heads/master | /RobotControlAbstract.py | from abc import abstractmethod, ABC
class RobotControlAbstract(ABC):
    """Interface for robot/simulator controllers.

    Implementations (RobotControl, RobotControl_local, RobotControlDummy)
    drive the simulator and report evaluation data back to the GA.
    """

    @abstractmethod
    def __init__(self):
        # Concrete controllers establish their simulator connection here.
        pass

    @abstractmethod
    def startSimulation(self):
        """(Re)start a simulation episode."""
        # BUG FIX: was `return NotImplementedError` — returned the exception
        # class instead of raising it.
        raise NotImplementedError

    @abstractmethod
    def robotFell(self):
        """Return whether the robot has fallen in the current episode."""
        raise NotImplementedError

    @abstractmethod
    def walkRobot(self, motor_values):
        """Apply one step of motor commands to the robot."""
        raise NotImplementedError

    @abstractmethod
    def getEvalData(self):
        """Return the raw data needed to score the episode."""
        raise NotImplementedError
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,470 | BenteCodes/learnedwalking | refs/heads/master | /PopulationGeneratorAbstract.py | '''
Created on 11.09.2019
@author: TKler
'''
from abc import abstractmethod, ABC
class PopulationGeneratorAbstract(ABC):
    """Interface for GA population generators (initialization + succession)."""

    @abstractmethod
    def createNextGeneration(self, old_networks_sorted_by_fitness):
        """Build the next generation from the fitness-sorted previous one."""
        # BUG FIX: was `return NotImplementedError` — returned the exception
        # class instead of raising it.
        raise NotImplementedError

    @abstractmethod
    def initPopulation(self):
        """Create the initial (random) population."""
        raise NotImplementedError
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,471 | BenteCodes/learnedwalking | refs/heads/master | /MA/GeneticAlgorithm.py | '''
Created on 12 Sep 2019
@author: felix
'''
from MA.PopulationGenerator import PopulationGenerator
from MA.FitnessFunction import FitnessFunction
from MA.Tests.RobotControlDummy import RobotControlDummy
from GeneticAlgorithmTemplate import GeneticAlgorithmTemplate
class GeneticAlgorithm(GeneticAlgorithmTemplate):
    """GA wiring for the MA experiment: dummy robot control + walking fitness."""

    # Episode length and how often each individual is re-simulated.
    number_of_steps_in_simulator = 400
    simulator_repetitions = 3

    def __init__(self, popsize, mutation_rate, crossover_rate, iterations, motor_number_flag):
        super().checkParameters(popsize, mutation_rate, crossover_rate, iterations)
        self.max_iterations = iterations
        self.initRobotControl()
        self.initPopGen(popsize, mutation_rate, crossover_rate)
        self.init_population()
        self.initFitnessFunc()
        self.robot_control.setMotorFlag(motor_number_flag)

    def init_population(self):
        """Ask the population generator for the starting population."""
        self.population = self.pop_generator.initPopulation()

    def initPopGen(self, popsize, mutation_rate, crossover_rate):
        """Create the population generator with the GA hyper-parameters."""
        self.pop_generator = PopulationGenerator(popsize, mutation_rate, crossover_rate)

    def initRobotControl(self):
        # Uses the dummy controller; swap here to run against a real simulator.
        self.robot_control = RobotControlDummy()

    def initFitnessFunc(self):
        """Create the fitness function used by calcFitness."""
        self.fitness_function = FitnessFunction()

    def calcFitness(self, data):
        """Score one 4-tuple of evaluation data from getEvalFromSim()."""
        return self.fitness_function.getFitness(data[0], data[1], data[2], data[3])

    def getEvalFromSim(self):
        """Fetch raw evaluation data (fell-flag, start point, foot positions)."""
        fell, start_point, foot_right, foot_left = self.robot_control.getEvalData()
        return fell, start_point, foot_right, foot_left
| {"/MA/Tests/GeneticAlgorithm_Test.py": ["/MA/GeneticAlgorithm.py"], "/NIPSChallenge/ForewardmovementTest.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/RobotControl.py": ["/RobotControlAbstract.py"], "/MA/Tests/Network_Test.py": ["/MA/WalkingNetwork.py"], "/NIPSChallenge/NIPSNetwork.py": ["/NetworkTemplate.py"], "/NIPSChallenge/TestRun.py": ["/NIPSChallenge/GeneticAlgorithm.py", "/SafeData.py"], "/MA/Tests/SyncedNetwork.py": ["/NIPSChallenge/SyncedNetwork.py", "/NIPSChallenge/NIPSNetwork.py"], "/NIPSChallenge/RobotControl_local.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/PopGenNips.py": ["/PopulationGeneratorAbstract.py", "/NIPSChallenge/SyncedNetwork.py"], "/MA/Tests/FitnessFunction_Test.py": ["/MA/FitnessFunction.py"], "/NIPSChallenge/GeneticAlgorithm.py": ["/NIPSChallenge/PopGenNips.py", "/NIPSChallenge/RobotControl_local.py", "/NIPSChallenge/FitnessFunction.py", "/GeneticAlgorithmTemplate.py"], "/MA/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/SafeData.py": ["/NIPSChallenge/SyncedNetwork.py"], "/MA/WalkingNetwork.py": ["/NetworkTemplate.py"], "/NetworkTemplate.py": ["/Network3LayerAbstract.py"], "/NIPSChallenge/FitnessFunction.py": ["/FitnessFunctionAbstract.py"], "/MA/Tests/PopulationGenerator_test.py": ["/MA/WalkingNetwork.py"], "/GeneticAlgorithmTemplate.py": ["/SafeData.py"], "/NIPSChallenge/Analyser.py": ["/NIPSChallenge/NIPSNetwork.py", "/SafeData.py"], "/NIPSChallenge/RobotControl_client.py": ["/NIPSChallenge/RobotControl_local.py"], "/MA/Tests/RobotControlDummy.py": ["/RobotControlAbstract.py"], "/NIPSChallenge/SyncedNetwork.py": ["/NIPSChallenge/NIPSNetwork.py", "/Network3LayerAbstract.py"], "/MA/GeneticAlgorithm.py": ["/MA/FitnessFunction.py", "/MA/Tests/RobotControlDummy.py", "/GeneticAlgorithmTemplate.py"]} |
71,472 | goddessofpom/tornadowebsocket | refs/heads/master | /handlers.py | from group_sender import SenderMixin
import tornadoredis
import config
from asyncio import events
import tormysql
import json
import tornado.gen
from tornado.gen import coroutine
import tornado.web
from tormysql import DictCursor
import traceback
class QueryHandler(SenderMixin):
    """Answers "query"-type websocket messages; owns the redis client and mysql pool."""

    def __init__(self):
        # NOTE(review): spelled like a module constant but created per instance;
        # main.py constructs exactly one QueryHandler, so one pool exists in practice.
        CONNECTION_POOL = tornadoredis.ConnectionPool(max_connections=config.redis_config["max_connections"],
                                                      wait_for_available=True)
        self.redis = tornadoredis.Client(host=config.redis_config["host"], port=config.redis_config["port"],
                                         connection_pool=CONNECTION_POOL, selected_db=config.redis_config["db"])
        self.mysql = tormysql.ConnectionPool(
            max_connections = config.mysql_config["max_connections"], # max open connections
            idle_seconds = config.mysql_config["idle_seconds"], # connection idle timeout, 0 means no timeout
            wait_connection_timeout = config.mysql_config["wait_connection_timeout"], # wait connection timeout
            host = config.mysql_config["host"],
            port = config.mysql_config["port"],
            user = config.mysql_config["user"],
            passwd = config.mysql_config["passwd"],
            db = config.mysql_config["db"],
            charset = config.mysql_config["charset"]
        )

    def get_redis(self):
        """Return the shared tornado-redis client."""
        return self.redis

    def get_mysql(self):
        """Return the shared tormysql connection pool."""
        return self.mysql

    @coroutine
    def get_user_type(self, user_id):
        """Fetch the user_type column for *user_id*; returns the raw cursor row (a tuple).

        SECURITY(review): user_id is %-interpolated straight into the SQL string —
        switch to a parameterized query (cursor.execute(sql, (user_id,))).
        """
        with (yield self.mysql.Connection()) as conn:
            with conn.cursor() as cursor:
                yield cursor.execute("SELECT user_type FROM user WHERE id = %s" % user_id)
                datas = cursor.fetchone()
            yield conn.commit()
        return datas

    @coroutine
    def get_user_amount(self, user_id):
        """Fetch the amount column for *user_id* as a dict row (DictCursor).

        SECURITY(review): same string-interpolated SQL as get_user_type — parameterize.
        """
        with (yield self.mysql.Connection()) as conn:
            with conn.cursor(cursor_cls=DictCursor) as cursor:
                yield cursor.execute("SELECT amount FROM user WHERE id = %s" % user_id)
                datas = cursor.fetchone()
            yield conn.commit()
        # yield self.mysql.close()
        return datas
class NotifyHandler(SenderMixin):
    """Handles "notification"-type messages; all delivery logic comes from SenderMixin."""
    pass
| {"/handlers.py": ["/group_sender.py"], "/main.py": ["/group_sender.py", "/handlers.py", "/validators.py"]} |
71,473 | goddessofpom/tornadowebsocket | refs/heads/master | /group_sender.py | class GroupManager(object):
def __init__(self):
self.group = {}
def register(self, user):
group_name = user.get_argument("group_name")
user_id = int(user.get_argument("user_id"))
print(group_name,user_id)
if group_name in self.group.keys():
self.group[group_name][user_id] = user
else:
user_ws = {user_id: user}
self.group[group_name]= user_ws
print(self.group)
def unregister(self, user):
group_name = user.get_argument("group_name")
user_id = user.get_argument("user_id")
try:
self.group[group_name].pop(user_id)
except KeyError:
pass
def get_user(self, user_id, group_name):
return self.group[group_name][user_id]
def get_group_user(self, group_name):
return self.group[group_name].values()
def get_all_user(self):
all_users = []
for k, v in self.group.items():
all_users.extend(v.values())
return all_users
class SenderMixin(object):
    """Mixin providing fan-out and direct delivery over websocket handlers."""

    def broadcast(self, group, message):
        """Send *message* to every handler in the *group* iterable."""
        for recipient in group:
            recipient.write_message(message)

    def private_message(self, user, message):
        """Send *message* to a single handler."""
        user.write_message(message)
| {"/handlers.py": ["/group_sender.py"], "/main.py": ["/group_sender.py", "/handlers.py", "/validators.py"]} |
71,474 | goddessofpom/tornadowebsocket | refs/heads/master | /main.py | import tornado.web
import tornado.websocket
import tornado.httpserver
import tornado.ioloop
import tornado.options
import json
import time
import config
from group_sender import GroupManager
import logging
from handlers import QueryHandler, NotifyHandler
from validators import Validator
import tornado.gen
import traceback
class IndexPageHandler(tornado.web.RequestHandler):
    """Serves the demo/index page registered at '/'."""
    def get(self):
        # template_path is "." (see Application settings), so index.html is
        # resolved relative to the working directory.
        self.render('index.html')
class WebSocketHandler(tornado.websocket.WebSocketHandler):
    """Main websocket endpoint: token-authenticated connect, then a small
    JSON message protocol ("query" / "notification" messages dispatched to
    private / group / all recipients)."""

    def check_origin(self, origin):
        # Accept connections from any origin (no CORS restriction).
        return True

    @tornado.web.asynchronous
    @tornado.gen.engine
    def open(self, *args, **kwargs):
        """Authenticate the connecting client against a redis-stored token
        and register it with the group manager on success."""
        redis = self.application.query_handler.get_redis()
        user_id = self.get_argument("user_id")
        group_name = self.get_argument("group_name")
        user_token = self.get_argument("token")
        if user_id and group_name and user_token:
            # Token previously issued elsewhere and stored under this key.
            key = "WEBSOCKET_TOKEN_%s" % str(user_id)
            valid_token = yield tornado.gen.Task(redis.get, key)
            if str(valid_token) == str(user_token):
                self.application.manager.register(self)
            else:
                self.write_message(json.dumps({"error": "token invalid"}))
                self.close()
        else:
            self.write_message(json.dumps({"error": "not enough parameters"}))
            self.close()

    @tornado.web.asynchronous
    @tornado.gen.engine
    def on_message(self, msg):
        """Parse one JSON message, build a response per message_type, then
        dispatch it according to send_type (private/group/all)."""
        try:
            data = json.loads(msg)
        except:
            # Bad JSON: report it; validate(None) below returns an error dict
            # which is echoed back in the final else branch.
            data = None
            self.write_message(json.dumps({"detail":"invalid json"}))
        valid_result = self.application.validator.validate(data)
        if valid_result is True:
            if data["message_type"] == "query":
                handler = self.application.query_handler
                if data["message"] == "user_amount":
                    try:
                        user_id = data["args"]["user_id"]
                        message = yield self.application.query_handler.get_user_amount(user_id)
                    except KeyError:
                        message = {"detail": "empty user_id"}
                elif data["message"] == "current_ticker":
                    # Latest price is cached in a redis hash keyed by coin pair.
                    redis = handler.get_redis()
                    coinpair = data["args"]["coinpair"].replace("/", "_")
                    key = "PRICE_%s" % coinpair
                    price = yield tornado.gen.Task(redis.hget, key, "price")
                    message = {"price": price}
                else:
                    # Unknown query: message=None suppresses dispatch below.
                    message = None
            elif data["message_type"] == "notification":
                # NOTE(review): get_user_type is a coroutine but is NOT yielded
                # here, so user_type is a Future and `user_type == 3` is always
                # False — every sender passes this check. Confirm and fix
                # (likely `user_type = yield ...` plus unpacking the row tuple).
                user_type = self.application.query_handler.get_user_type(self.get_argument("user_id"))
                if user_type == 3:
                    message = None
                else:
                    try:
                        message = {"title": data["args"]["title"], "content": data["args"]["content"]}
                    except KeyError:
                        message = {"detail": "empty title or content"}
                handler = self.application.notify_handler
            else:
                message = None
                handler = None
            # Dispatch: all branches require a non-empty message and handler.
            if data["send_type"] == "private" and message and handler:
                try:
                    send_user_id = data["args"]["send_user_id"]
                    send_group_name = data["args"]["send_group_name"]
                except KeyError:
                    self.write_message(json.dumps({"detail": "empty send_user_id or send_group_name"}))
                try:
                    user = self.application.manager.get_user(send_user_id, send_group_name)
                    handler.private_message(user, message)
                except:
                    self.write_message(json.dumps({"detail": "send_user or send_group not exist"}))
            elif data["send_type"] == "group" and message and handler:
                try:
                    send_group_name = data["args"]["send_group_name"]
                except KeyError:
                    self.write_message(json.dumps({"detail": "empty group_name"}))
                try:
                    user = self.application.manager.get_group_user(send_group_name)
                    handler.broadcast(user, message)
                except:
                    self.write_message(json.dumps({"detail": "send_group not exist"}))
            elif data["send_type"] == "all" and message and handler:
                user = self.application.manager.get_all_user()
                handler.broadcast(user, message)
        else:
            # Validation failed: echo the {"detail": ...} diagnostic.
            self.write_message(json.dumps(valid_result))

    def on_close(self):
        # Drop this connection from the group registry.
        self.application.manager.unregister(self)
class Application(tornado.web.Application):
    """Wires the websocket app: group registry, message handlers, validator, routes."""

    def __init__(self):
        self.manager = GroupManager()
        self.query_handler = QueryHandler()
        self.notify_handler = NotifyHandler()
        self.validator = Validator()
        routes = [
            (r'/', IndexPageHandler),
            (r'/ws', WebSocketHandler),
        ]
        settings = {
            "template_path": ".",
            "log_function": config.log_func,
        }
        tornado.web.Application.__init__(self, handlers=routes, **settings)
if __name__ == "__main__":
    # Parse tornado's standard command-line flags (logging setup etc.).
    tornado.options.parse_command_line()
    ws_app = Application()
    server = tornado.httpserver.HTTPServer(ws_app)
    server.listen(8080)  # NOTE(review): port is hard-coded; no config entry visible here
    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        # Ctrl-C: stop the loop cleanly instead of dumping a traceback.
        tornado.ioloop.IOLoop.instance().stop()
71,475 | goddessofpom/tornadowebsocket | refs/heads/master | /validators.py |
import config
class Validator(object):
    """Validates incoming websocket message payloads.

    validate() returns True for a well-formed payload, otherwise a dict
    {"detail": ...} describing the first problem found.
    """

    def validate(self, data):
        """Check *data* (parsed JSON; may be None on a bad parse) against the schema."""
        try:
            message_type = data["message_type"]
            if message_type not in config.MESSAGE_TYPE:
                return {"detail": "invalid message_type"}
        # Narrowed from a bare except: TypeError covers data=None (the caller
        # passes None on invalid JSON), KeyError a missing key, AttributeError
        # a misconfigured config module.
        except (TypeError, KeyError, AttributeError):
            return {"detail": "empty message_type"}
        if "args" not in data.keys():
            return {"detail": "no args given"}
        if "message" not in data.keys():
            return {"detail": "no message given"}
        try:
            send_type = data["send_type"]
            if send_type not in config.SEND_TYPE:
                return {"detail": "invalid send_type"}
            elif send_type == "private":
                if "send_user_id" not in data["args"].keys():
                    return {"detail": "empty send_user_id"}
                if "send_group_name" not in data["args"].keys():
                    return {"detail": "empty send_group_name"}
            elif send_type == "group":
                if "send_group_name" not in data["args"].keys():
                    return {"detail": "empty send_group_name"}
        # AttributeError also covers a non-dict "args" value (no .keys()).
        except (TypeError, KeyError, AttributeError):
            return {"detail": "empty send_type"}
        return True
| {"/handlers.py": ["/group_sender.py"], "/main.py": ["/group_sender.py", "/handlers.py", "/validators.py"]} |
71,478 | cyones/ECI2019-Competencia_Despegar | refs/heads/master | /src/augmentator.py | from torch.utils.data import Dataset
import torchvision.transforms as trn
class Augmentator(Dataset):
    """Dataset wrapper that applies a mode-specific torchvision pipeline.

    :param dataset: underlying dataset yielding (PIL image, label)
    :param mode: one of 'train', 'valid', 'test' — selects the pipeline below
    :param indices: optional subset of the wrapped dataset's indices
    """

    def __init__(self, dataset, mode, indices=None):
        self.dataset = dataset
        if indices is None: self.indices = list(range(len(dataset)))
        else: self.indices = indices
        self.mode = mode

    def __getitem__(self, idx):
        # Map through the (possibly subset) index list, then augment.
        img, lbl = self.dataset[self.indices[idx]]
        if self.mode=='train':
            img = self.train_trans(img)
        if self.mode=='valid':
            img = self.valid_trans(img)
        if self.mode=='test':
            img = self.test_trans(img)
        return img, lbl

    def __len__(self):
        return len(self.indices)

    # Class-level pipelines, shared across instances.
    # Heavy geometric + color augmentation for training.
    train_aug = trn.Compose([
        trn.RandomAffine(15),
        trn.RandomResizedCrop(280, scale=(0.5, 1.5), ratio=(0.9,1.1)),
        trn.RandomHorizontalFlip(),
        trn.RandomGrayscale(0.2),
        trn.ColorJitter(0.25, 0.25, 0.25)])
    # NOTE(review): valid and test pipelines are identical and still stochastic
    # (grayscale/flip) — callers average several passes per image to compensate.
    valid_aug = trn.Compose([
        trn.RandomResizedCrop(280, scale=(1.0, 1.0), ratio=(1.0,1.0)),
        trn.RandomGrayscale(0.2),
        trn.RandomHorizontalFlip()])
    test_aug = trn.Compose([
        trn.RandomResizedCrop(280, scale=(1.0, 1.0), ratio=(1.0,1.0)),
        trn.RandomGrayscale(0.2),
        trn.RandomHorizontalFlip()])
    # ImageNet normalization, applied after every augmentation pipeline.
    tensorize = trn.Compose([
        trn.ToTensor(),
        trn.Normalize(
            mean=[0.485, 0.456, 0.406],
            std= [0.229, 0.224, 0.225])])
    train_trans = trn.Compose([train_aug, tensorize])
    valid_trans = trn.Compose([valid_aug, tensorize])
    test_trans = trn.Compose([test_aug, tensorize])
| {"/train_models.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/eval_test.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/repair_images.py": ["/src/dataset.py", "/src/augmentator.py"]} |
71,479 | cyones/ECI2019-Competencia_Despegar | refs/heads/master | /train_models.py | import torch as tr
import torch.nn as nn
import pandas as pd
import numpy as np
import os
from torchvision import models
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import balanced_accuracy_score
from torch.utils.data import Subset, DataLoader
from torch.optim.lr_scheduler import CyclicLR
from src.dataset import DDataset
from src.sampler import DSampler
from src.model import Model
from src.augmentator import Augmentator
# Deterministic cuDNN so seeded runs are reproducible.
tr.backends.cudnn.deterministic = True
tr.backends.cudnn.benchmark = False
batch_size = 24
n_classes = 16
ds = pd.read_csv("images/train.csv")
train = DDataset('images/train', files=ds['fileName'], labels=ds['tag'])
# Line-buffered so progress survives a crash.
logger = open("logfiles/trainer0.log", 'w', buffering=1)
dev = tr.device("cuda:0")
nmodel = 0
# 10-fold CV: one model per fold, saved as models/<fold>.pmt.
rskf = StratifiedKFold(n_splits=10, random_state=42, shuffle=True)
for train_idx, valid_idx in rskf.split(ds['tag'], y=ds['tag']):
    # Resume support: skip folds that already produced a checkpoint.
    if os.path.isfile("models/%d.pmt" % nmodel):
        print("Model %d already trained" % nmodel)
        nmodel += 1
        continue
    tr.cuda.empty_cache()
    # Per-fold seed so each model trains reproducibly.
    tr.manual_seed(nmodel)
    np.random.seed(nmodel)
    ftrain = Augmentator(train, mode='train', indices=train_idx)
    fvalid = Augmentator(train, mode='valid', indices=valid_idx)
    model = Model(n_classes).to(dev)
    # Warm-up: train only the head for the first epochs (unfrozen at epoch 4).
    model.freeze_resnet()
    criterion = nn.NLLLoss()
    optimizer = tr.optim.Adam(model.parameters())
    # NOTE(review): the Adam optimizer above is immediately overwritten by SGD
    # and never used — dead assignment; confirm before removing.
    optimizer = tr.optim.SGD(model.parameters(), lr=0.0001, momentum=0.9, weight_decay=0)
    lr_scheduler = CyclicLR(optimizer, 0.0001, 0.1, step_size_up=1024,
                            mode="exp_range", gamma=0.9, base_momentum=0.5)
    # Class-balanced sampling over the training fold.
    train_sampler = DSampler(list(ds['tag'][train_idx]), len(ftrain))
    train_loader = DataLoader(ftrain, batch_size=batch_size, sampler=train_sampler,
                              num_workers=8, pin_memory=True)
    valid_loader = DataLoader(fvalid, batch_size=batch_size, num_workers=8,
                              pin_memory=True)
    train_loss = 1
    train_acc = 0
    valid_acc = 0
    best_valid_acc = 0
    early_stop = 0
    epoch = 0
    # Train until validation accuracy fails to improve for 16 epochs.
    while early_stop < 16:
        if epoch == 4:
            # Head warm-up done: fine-tune the full backbone.
            tr.cuda.empty_cache()
            model.unfreeze_resnet()
        ib = 0
        model = model.train()
        for img, lbs in train_loader:
            optimizer.zero_grad()
            pred = model(img.to(dev))
            loss = criterion(pred, lbs.to(dev))
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            pred = tr.max(pred, 1)[1].cpu().detach().numpy()
            # Exponential moving averages for the progress display.
            new_train_acc = balanced_accuracy_score(lbs.numpy(), pred)
            train_acc = 0.1 * new_train_acc + 0.9 * train_acc
            train_loss = 0.1 * loss.item() + 0.9 * train_loss
            print('Model: %d, Epoch: %d, batch: %d, loss: %.4f, acc: %.4f' %
                  (nmodel, epoch, ib, train_loss, train_acc), end='\r')
            ib += 1
            del img, lbs, loss, pred
        print('Model: %d, Epoch: %d, batch: %d, loss: %.4f, acc: %.4f' %
              (nmodel, epoch, ib, train_loss, train_acc), end=', ')
        model = model.eval()
        pred = tr.LongTensor([])
        labels = tr.LongTensor([])
        for img, lbs in valid_loader:
            # Average 4 stochastic augmentation passes per validation batch
            # (the 'valid' pipeline is random — see Augmentator).
            lpred = tr.zeros(img.shape[0], 16)
            for r in range(4):
                lpred += tr.exp(model(img.to(dev)).detach().cpu()) / 4
            pred = tr.cat((pred, tr.max(lpred, 1)[1]))
            labels = tr.cat((labels, lbs))
        valid_acc = balanced_accuracy_score(labels.numpy(), pred.detach().numpy())
        print('Acc: %.4f' % (valid_acc), end=', ')
        # NOTE(review): no trailing "\n" is written, so all records land on one
        # log line — confirm whether that is intended.
        logger.write('%d, %d, %.4f, %.4f, %.4f' %
                     (nmodel, epoch, train_loss, train_acc, valid_acc))
        del img, lbs, labels, pred
        if valid_acc > best_valid_acc:
            # New best: reset patience and checkpoint.
            best_valid_acc = valid_acc
            early_stop = 0
            tr.save(model.state_dict(), 'models/%d.pmt' % nmodel)
            print('Improvement')
        else:
            early_stop += 1
            print('No improvement')
        epoch += 1
    nmodel += 1
| {"/train_models.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/eval_test.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/repair_images.py": ["/src/dataset.py", "/src/augmentator.py"]} |
71,480 | cyones/ECI2019-Competencia_Despegar | refs/heads/master | /eval_test.py | import torch as tr
import torch.nn as nn
import pandas as pd
import numpy as np
import os
from torch.utils.data import Subset, DataLoader
from src.dataset import DDataset
from src.sampler import DSampler
from src.model import Model
from src.augmentator import Augmentator
batch_size = 24
n_classes = 16
# Test-image ids; three known-corrupt files are excluded.
# NOTE(review): range(9739) here vs range(9738) in repair_images.py — confirm
# the true test-set size.
ids = list(range(9739))
ids.remove(1213)
ids.remove(3574)
ids.remove(6086)
test = Augmentator(DDataset('images/test', files=ids), mode='test')
test_loader = DataLoader(test, batch_size=batch_size, num_workers=8, pin_memory=True)
dev = tr.device("cuda:0")
# Accumulated class probabilities: 10 folds x 3 stochastic passes, equal weight.
test_preds = tr.zeros(len(test), n_classes)
for nmodel in range(10):
    print("Evaluating with model %d: [" % nmodel, end='')
    tr.cuda.empty_cache()
    tr.manual_seed(nmodel)
    np.random.seed(nmodel)
    model = Model(n_classes, pretrained=False)
    model.load_state_dict(tr.load('models/%d.pmt' % nmodel))
    model = model.to(dev).eval()
    for r in range(3):
        print("|", end='')
        pred = tr.Tensor(len(test), n_classes)
        ib = 0
        for img, _ in test_loader:
            # Fill the preallocated buffer batch by batch.
            pred[ib:(ib+batch_size)] = model(img.to(dev)).detach().cpu()
            ib += batch_size
        # Model emits log-probabilities: exp() before averaging.
        test_preds += tr.exp(pred) / (3 * 10)
    print("]")
idx_pred = np.argmax(test_preds, axis=1)
pd.DataFrame({'id' : ids, 'target' : idx_pred}).to_csv("preds/submission.csv",
                                                       index=False,
                                                       header=False)
| {"/train_models.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/eval_test.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/repair_images.py": ["/src/dataset.py", "/src/augmentator.py"]} |
71,481 | cyones/ECI2019-Competencia_Despegar | refs/heads/master | /src/model.py | import torch as tr
import torch.nn as nn
import pandas as pd
import numpy as np
from torchvision import models
class Model(nn.Module):
    """ResNeXt-101 backbone followed by a small log-softmax classification head."""

    def __init__(self, n_classes, pretrained=True):
        super(Model, self).__init__()
        self.resnet = models.resnext101_32x8d(pretrained=pretrained)
        # Head maps the backbone's 1000-dim output to n_classes log-probs.
        self.fc = nn.Sequential(
            nn.Dropout(0.1),
            nn.ReLU(),
            nn.BatchNorm1d(1000),
            nn.Linear(1000, n_classes),
            nn.LogSoftmax(dim=1),
        )

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        return self.fc(self.resnet(x))

    def freeze_resnet(self):
        """Stop gradients through the backbone, except its own final fc layer."""
        for p in self.resnet.parameters():
            p.requires_grad = False
        for p in self.resnet.fc.parameters():
            p.requires_grad = True

    def unfreeze_resnet(self):
        """Re-enable gradients for the backbone and the head."""
        for p in self.resnet.parameters():
            p.requires_grad = True
        for p in self.fc.parameters():
            p.requires_grad = True
| {"/train_models.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/eval_test.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/repair_images.py": ["/src/dataset.py", "/src/augmentator.py"]} |
71,482 | cyones/ECI2019-Competencia_Despegar | refs/heads/master | /repair_images.py | import os
import pandas as pd
from tqdm import tqdm
from src.dataset import DDataset
from src.augmentator import Augmentator
ds = pd.read_csv("images/train.csv")
train = DDataset('images/train', files=ds['fileName'], labels=ds['tag'])
ag_train = Augmentator(train, mode="test")
repaired = 0
# Any image that fails to load/transform is round-tripped through ImageMagick
# (jpg -> png -> jpg), which rewrites a clean file in place.
for i in tqdm(range(len(train))):
    try:
        img, lab = ag_train[i]
    except:
        img_name = os.path.join(train.img_dir, str(train.files[i])) + ".jpg"
        os.system("convert %s tmp.png" % img_name)
        os.system("convert tmp.png %s" % img_name)
        repaired += 1
print("%d train images repaired" % repaired)
# Same pass over the test set, minus three known-bad ids.
# NOTE(review): range(9738) here vs range(9739) in eval_test.py — confirm size.
ts_files = list(range(9738))
ts_files.remove(1213)
ts_files.remove(3574)
ts_files.remove(6086)
test = DDataset('images/test', files=ts_files)
ag_test = Augmentator(test, mode="test")
repaired = 0
for i in tqdm(range(len(test))):
    try:
        img, lab = ag_test[i]
    except:
        img_name = os.path.join(test.img_dir, str(test.files[i])) + ".jpg"
        os.system("convert %s tmp.png" % img_name)
        os.system("convert tmp.png %s" % img_name)
        repaired += 1
print("%d test images repaired" % repaired)
| {"/train_models.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/eval_test.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/repair_images.py": ["/src/dataset.py", "/src/augmentator.py"]} |
71,483 | cyones/ECI2019-Competencia_Despegar | refs/heads/master | /src/sampler.py | import torch as tr
from torch.utils.data.sampler import Sampler
class DSampler(Sampler):
    """Class-balanced sampler: draws indices with replacement, each index
    weighted inversely to its label's frequency.

    :param labels: per-example class labels (any hashable values)
    :param nsamples: number of indices yielded per epoch
    """

    def __init__(self, labels, nsamples):
        # Local import keeps this file's top-level dependencies unchanged.
        from collections import Counter
        self.nsamples = nsamples
        # Idiom: Counter replaces the manual index-based counting loop.
        label_to_count = Counter(labels)
        weights = [1.0 / label_to_count[label] for label in labels]
        self.weights = tr.DoubleTensor(weights)

    def __iter__(self):
        # multinomial with replacement implements weighted sampling.
        return iter(tr.multinomial(self.weights, self.nsamples, replacement=True))

    def __len__(self):
        return self.nsamples
| {"/train_models.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/eval_test.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/repair_images.py": ["/src/dataset.py", "/src/augmentator.py"]} |
71,484 | cyones/ECI2019-Competencia_Despegar | refs/heads/master | /src/dataset.py | import os
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms.functional import resize
class DDataset(Dataset):
    """Image dataset reading `<img_dir>/<file_id>.jpg`, with in-memory caching.

    NOTE(review): the cache keeps every decoded PIL image alive for the
    dataset's lifetime -- fine for small sets, memory-hungry for large ones.
    """

    def __init__(self, img_dir, files, labels=None):
        """
        :param img_dir: directory containing the .jpg images.
        :param files: sequence of file ids (stringified to build file names).
        :param labels: optional sequence aligned with `files`; when absent,
                       __getitem__ yields the sentinel label -1.
        """
        self.labels = labels
        self.img_dir = img_dir
        self.files = files
        self.cache = {}

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        if idx in self.cache:
            img = self.cache[idx]
        else:
            img_name = os.path.join(self.img_dir, str(self.files[idx])) + ".jpg"
            img = Image.open(img_name)
            if img.mode != 'RGB':
                # BUG FIX: Image.convert returns a NEW image; the original
                # discarded the result, leaving non-RGB images unconverted.
                img = img.convert('RGB')
            # Cap the short side at 560 px to bound memory/compute downstream.
            if min(img.size) > 560:
                img = resize(img, 560)
            self.cache[idx] = img
        lab = -1 if self.labels is None else self.labels[idx]
        return img, lab
| {"/train_models.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/eval_test.py": ["/src/dataset.py", "/src/sampler.py", "/src/model.py", "/src/augmentator.py"], "/repair_images.py": ["/src/dataset.py", "/src/augmentator.py"]} |
71,501 | SpaceWhale/doddlebot | refs/heads/master | /src/constants/app.py | """
:author: john.sosoka
:date: 5/10/2018
"""
import logging
"""
GENERAL APP CONSTANTS
"""
CONFIG_NAME = "doddle.config"
BOT_NAME = "doddle"
ROOT_LOGGING_IDENTITY = "doddle"
ENVIRONMENT_VARIABLE_BOT_ID = "BOT_ID"
ENVIRONMENT_VARIABLE_BOT_TOKEN = "SLACK_BOT_TOKEN"
HELP_COMMAND_TEXT = "help"
"""
LOGGING CONSTANTS
"""
LOG_NAME = "doddle.log"
LOG_CONFIG = "%s(asctime)s - %s(name)s - %s(levelname)s - %(message)s"
LOG_LEVEL = logging.DEBUG
SLACK_API_CLIENT_LOG_LEVEL = logging.DEBUG
"""
PLUGIN CONSTANTS
"""
RELATIVE_PLUGIN_DIRECTORY_PATH = "plugins/"
"""
MISC CONSTANTS
"""
SPLASH = """
___ __ ____ _______ __ ___ __
/ _ \___ ___/ /__/ / /__ / ___/ / ___ _/ /_/ _ )___ / /_
/ // / _ \/ _ / _ / / -_) / /__/ _ \/ _ `/ __/ _ / _ \/ __/
/____/\___/\_,_/\_,_/_/\__/ \___/_//_/\_,_/\__/____/\___/\__/
2018
Author:
* john.sosoka@protonmail.ch
==================================================================
==================================================================
"""
PYTHON_FILE = "*.py" | {"/doddle.py": ["/src/__init__.py", "/src/doddle_exceptions.py"]} |
71,502 | SpaceWhale/doddlebot | refs/heads/master | /src/utilities/commandParser.py | """
:author: john.sosoka
:date: 5/10/2018
"""
class commandParser:
    """Parses incoming slack messages into structured commands which are
    then sent to all registered plugins."""

    def parse_command(self, option, parts):
        """
        Match each known option against the words of a command; any word that
        is neither a matched value nor an option name is returned under a
        sequential 'wildcardN' key.

        :param option:
            dict -- plugin-supplied mapping of option name -> valid values
        :param parts:
            list -- space delimited words of the initial command
        :return:
            dict -- matched options & values plus wildcard entries
        :example:
            options = {'command': ['start', 'stop', 'restart'],
                       'target': ['machine1', 'machine2']}
            parts = ['start', 'machine1', 'meaninglessInput']
            print(parse_command(options, parts))
            >> {'wildcard0': 'meaninglessInput', 'command': 'start', 'target': 'machine1'}
        """
        parsed_commands = {}
        for name, valid_values in option.items():
            # BUG FIX: intersect the option's VALUE LIST with the words; the
            # original intersected the dict's own keys (`set(option)`), so no
            # option value ever matched.
            parsed_commands[name] = ''.join(set(valid_values).intersection(parts))
        known_values = list(parsed_commands.values())
        known_keys = list(parsed_commands.keys())
        # Words that are neither matched values nor option names are wildcards.
        leftovers = set(parsed_commands).symmetric_difference(parts)
        extras = [word for word in leftovers
                  if word not in known_values and word not in known_keys]
        # BUG FIX: the wildcard counter was never incremented, so multiple
        # extras all overwrote the same 'wildcard0' key.
        for i, word in enumerate(extras):
            parsed_commands['wildcard%d' % i] = word
        return parsed_commands
| {"/doddle.py": ["/src/__init__.py", "/src/doddle_exceptions.py"]} |
71,503 | SpaceWhale/doddlebot | refs/heads/master | /src/__init__.py | """
:author: john.sosoka
:date: 5/10/2018
""" | {"/doddle.py": ["/src/__init__.py", "/src/doddle_exceptions.py"]} |
71,504 | SpaceWhale/doddlebot | refs/heads/master | /doddle.py | """
:author: john.sosoka
:date: 5/10/2018
"""
# custom
from src import client
import src.doddle_exceptions as exceptions
from src.utilities import doddleUtil
from src.constants import app
from src.constants import slack_api_constants
# 3rd party
import logging
import sys
from os.path import dirname, basename, isfile, abspath
import os
import importlib
import glob
class Doddle():
    """Bootstrapper: configures logging, discovers/imports plugins and
    starts the slack client."""

    def __init__(self):
        # All attributes created here to keep the object's shape explicit.
        self.modules = []
        self.all_modules = []
        self.imported_modules = []
        # configure logging
        self.log = self._configure_logging()
        # prepare the config reader
        self.config_reader = doddleUtil.DoddleUtil()
        self.config_reader.configure()

    def start(self):
        """
        Start the bot:
          1. discover plugin candidates,
          2. import them (each plugin receives the client instance, which
             gives it access to chat bot features),
          3. connect the client to the slack api.
        :return:
            none -- changes application state
        """
        self.log.info(app.SPLASH)
        self.log.info("Preparing plugins to import")
        self._prepare_custom_plugin_import()
        sys.path.append(abspath(app.RELATIVE_PLUGIN_DIRECTORY_PATH))
        self.log.info("Found {0} plugin candidates.".format(len(self.modules)))
        self._sanitize_plugin_list()
        self.log.info("{0} Plugin candidates after sanitization".format(len(self.modules)))
        self.log.info("initializing slack client")
        doddle = client.Client(os.environ.get(app.ENVIRONMENT_VARIABLE_BOT_ID),
                               os.environ.get(app.ENVIRONMENT_VARIABLE_BOT_TOKEN),
                               self.config_reader.read_config())
        self.log.info("Importing plugins...")
        for mod in self.modules:
            # BUG FIX: the original called mod.__contains("init"), which is
            # name-mangled inside the class body and always raises
            # AttributeError; `path` was also only bound inside the `if`, so
            # the `finally` could hit an unbound local. `str` shadowed the
            # builtin and is renamed.
            saved_path = list(sys.path)
            try:
                if "init" not in mod:
                    mod_parts = mod.split("/")
                    sys.path.insert(0, mod_parts[0] + "/" + mod_parts[1])
                    self.imported_modules.append(
                        importlib.import_module(mod_parts[2][:-3]))
            finally:
                sys.path[:] = saved_path
        for mod in self.imported_modules:
            self.log.info("Loading plugin: " + mod.__name__)
            # Convention: each plugin module exposes a class of the same name.
            obj = getattr(mod, mod.__name__)
            obj(doddle)
        doddle.start()

    def _prepare_custom_plugin_import(self):
        """
        Build the list of plugin candidates (*.py files) from the plugin
        directory (see constants/app.RELATIVE_PLUGIN_DIRECTORY_PATH).
        :return:
        """
        self.log.info("preparing plugin candidates...")
        try:
            # BUG FIX: glob.globg is not a real function (typo for glob.glob);
            # os.path.join also fixes the missing separator after dirname().
            pattern = os.path.join(dirname(__file__),
                                   app.RELATIVE_PLUGIN_DIRECTORY_PATH,
                                   "*", app.PYTHON_FILE)
            self.modules = glob.glob(pattern)
            self.all_modules = [basename(f)[:-3] for f in self.modules if isfile(f)]
        except exceptions.PluginPrepareException:
            logging.error("Unable to prepare a list of plugin candidates. "
                          "The bot might be pretty boring without your plugins")

    def _sanitize_plugin_list(self):
        """
        Drop initialization and test files from the plugin candidate list.
        :return:
            None -- changes state.
        """
        logging.info("Sanitizing plugin list")
        # BUG FIX: the original mutated self.modules while iterating over it
        # (skipping elements) and tested `"test" in any` -- the builtin --
        # instead of the candidate path.
        kept = []
        for candidate in self.modules:
            if "__init__" in candidate or "test" in candidate:
                logging.debug("Removing plugin candidate: {0}".format(candidate))
            else:
                kept.append(candidate)
        self.modules = kept

    def _configure_logging(self):
        """
        Configure doddle logging.
        :see:
            /constants/app.LOG_CONFIG
            /constants/app.LOG_LEVEL
        :return:
            log -- logger
        """
        logging.basicConfig(format=app.LOG_CONFIG)
        log = logging.getLogger(app.ROOT_LOGGING_IDENTITY)
        log.setLevel(app.LOG_LEVEL)
        log_file = logging.FileHandler(app.LOG_NAME)
        # BUG FIX: the formatter was created but never attached to the handler.
        log_file.setFormatter(logging.Formatter(app.LOG_CONFIG))
        log.addHandler(log_file)
        logging.getLogger(slack_api_constants.SLACK_CLIENT_NAME).setLevel(app.SLACK_API_CLIENT_LOG_LEVEL)
        logging.getLogger(slack_api_constants.SLACK_CLIENT_NAME).addHandler(log_file)
        return log
if __name__ == "__main__":
    # Only start the bot when run as a script, not when this module is
    # imported (e.g. by tests or tooling).
    bot = Doddle()
    bot.start()
| {"/doddle.py": ["/src/__init__.py", "/src/doddle_exceptions.py"]} |
71,505 | SpaceWhale/doddlebot | refs/heads/master | /src/client.py | """
:author: john.sosoka
:date: 5/10/2018
This class is the slack client for the doddle bot and is the backbone for the entire framework.
Client gets passed into every imported plugin and provides functions needed to interface with the
slack api such as
:reply_to_channel:
Every custom plugin needs the following functions:
:register_plugin(self):
-- pass an instance of the plugin to the plugin registry. This adds the plugin to a list of plugins which get
commands broadcast to them via the :on_command: function.
:register_command(example, about):
-- register plugin information for the bots help reply.
:on_command(parts, channel):
-- The client will broadcast parts & channel to every registered plugin
* parts = a space-delimited list of words following the action character
* channnel = the slack_id (channel or user) which sent the message
"""
# custom
from utilities import doddleUtil
from utilities import commandParser
from constants import slack_api_constants
from constants import app
import doddle_exceptions
# 3rd party
import logging
import time
import json
from slackclient import SlackClient
log = logging.getLogger("doddle.src.Client")
class Client:
    """Slack client -- the backbone of the doddle framework.

    An instance is passed to every imported plugin. Plugins call
    register_plugin() / register_command() on it and receive parsed commands
    through their own on_command(channel, parts) callback, where `parts` is
    a space-delimited list of words following the action character and
    `channel` is the slack id (channel or user) the message came from.
    """

    def __init__(self, bot_id, token, actionCharacter, websocket_delay=1):
        """
        :param bot_id:
            str -- the bot's slack id
        :param token:
            str -- the slack api token
        :param actionCharacter:
            str -- leading character marking a command; with "!" the bot
            reacts to messages such as "!help"
        :param websocket_delay:
            int -- seconds between rtm api polls
        """
        # mandatory slackclient data
        self.bot_id = bot_id
        self.token = token
        self.slack_client = None  # the real SlackClient is created in start()
        # handle commands based on action character
        self.actionChar = actionCharacter
        # Handle @mentions if wanted
        self.at_bot = "<@{0}>".format(self.bot_id)
        # configure behavior
        self.websocket_delay = websocket_delay
        self.registered_plugins = []
        self.commands = {}
        self.channelDirectory = {}
        # BUG FIX: the commandParser module defines a class named
        # `commandParser`; there is no `util` in it.
        self.commandParser = commandParser.commandParser()
        # prepare the config reader
        self.config_reader = doddleUtil.DoddleUtil()
        self.config_reader.configure()
        # maintain state
        self.connected_status = False

    def start(self):
        """
        Connect to the slack rtm api and poll forever, broadcasting every
        qualifying message to the registered plugins. Retries on failure.
        """
        self.slack_client = SlackClient(self.token)
        if self.slack_client.rtm_connect():
            log.info("Connected to slack.")
            self._set_channel_directory()
            self.connected_status = True
            while True:
                command, channel = self._parse_slack_output(self.slack_client.rtm_read())
                if command and channel:
                    self._handle_command(command, channel)
                time.sleep(self.websocket_delay)
        else:
            self.connected_status = False
            log.error(app.BOT_NAME + "has disconnected...Attempting to reconnect")
            # NOTE(review): unbounded recursion if slack stays unreachable;
            # consider a retry delay / backoff here.
            self.start()

    def help(self, channel):
        """Reply with every registered command example and its description.

        BUG FIX: _parse_slack_output already called self.help(), but no such
        method existed, so any '<actionChar>help' message raised
        AttributeError.
        """
        listing = "\n".join("{0} -- {1}".format(example, about)
                            for example, about in self.commands.items())
        self.reply_to_channel(channel, listing)

    def get_channel_directory(self):
        """
        :return:
            dict -- human readable channel name -> slack channel id
        """
        return self.channelDirectory

    def reply_to_channel(self, channel, text, attatchments=None):
        """
        Post `text` to `channel`, optionally with slack message attachments.
        * See https://api.slack.com/docs/message-attachments

        :param channel:
            string -- the channel/user id to reply to
        :param text:
            string -- the message body
        :param attatchments:
            dict/json -- optional rich attachments (parameter name keeps its
            original spelling for caller compatibility)
        """
        # BUG FIX: the original shadowed the parameter with `attachments =
        # None` (so the serialization branch never ran) and then passed the
        # payload under the misspelled kwarg `attatchments`, which the slack
        # api ignores.
        attachment = json.dumps(attatchments) if attatchments else None
        self.slack_client.api_call(slack_api_constants.CALL_CHAT_POST_MESSAGE,
                                   channel=channel,
                                   text=text,
                                   as_user=True,
                                   attachments=attachment)

    def reply_to_thread(self):
        """
        TODO -- not implemented yet.
        """

    def reply_to_direct_message(self):
        """
        TODO -- not implemented yet.
        """

    def register_command(self, example, about):
        """
        Register a plugin command's instructions for the bot's help reply.

        :param example:
            string -- example of the command in use, e.g. "restart <machine>"
        :param about:
            string -- a description of the command
        :example:
            self.bot.register_command("restart <machine>", "restarts the target machine")
        """
        self.commands[example] = self.actionChar + about

    def register_plugin(self, plugin):
        """
        Register a plugin instance; it must expose on_command(channel, parts),
        through which it receives every broadcast command.

        :param plugin:
            object -- the plugin instance (plugins pass `self`)
        :example:
            bot.register_plugin(self)
        """
        self.registered_plugins.append(plugin)

    def get_option(self, section, option, default=None):
        """
        Read a single value from the bot's configuration file.

        :param section:
            string -- section of the config file
        :param option:
            string -- option within the section
        :param default:
            fallback value when the option is missing
        :return:
            object -- the configured value
        """
        try:
            return self.config_reader.read_config(section, option, default)
        except doddle_exceptions.ConfigOptionRetrievalException:
            log.error("Unable to read configuration {0} {1}".format(section,
                                                                    option))

    def parse_command(self, options, parts):
        """
        Parse a command against a plugin's option dictionary.
        see:
            utilities/commandParser
        :param options:
            dict -- possible options/values
        :param parts:
            list -- space delimited words from the original command
        :return:
            dict -- matched options plus wildcards
        """
        return self.commandParser.parse_command(options, parts)

    def _set_channel_directory(self):
        """
        Build the human-readable-name -> channel-id directory from the
        slack channels.list api call.
        """
        log.info("Building channel directory.")
        channels = self.slack_client.api_call(slack_api_constants.CALL_CHANNELS_LIST)
        try:
            for channel in channels[slack_api_constants.CHANNELS]:
                self.channelDirectory[channel['name']] = channel["id"]
            log.info("Channel directory built. {0} channels.".format(len(self.channelDirectory)))
        except doddle_exceptions.DirectoryCreateException:
            log.error("Unable to create channel directory.")

    def _parse_slack_output(self, rtm_output):
        """
        Scan raw rtm output for messages the bot should react to:
          - text starting with the action character (including "help"),
          - text @mentioning the bot.

        :param rtm_output:
            list -- raw messages from the slack rtm api
        :return:
            (command, channel) -- stripped command text and the originating
            channel id, or (None, None) when nothing qualifies
        """
        output_list = rtm_output
        if output_list and len(output_list) > 0:
            for output in output_list:
                try:
                    # Action character followed by the help keyword -> DM help
                    if output and slack_api_constants.MESSAGE_TEXT in output \
                            and self.actionChar in output[slack_api_constants.MESSAGE_TEXT][0]:
                        if app.HELP_COMMAND_TEXT in output[slack_api_constants.MESSAGE_TEXT]\
                                .split(self.actionChar)[1]\
                                .strip():
                            self.help(output[slack_api_constants.SLACK_USER])
                    # Action character -> return the command for broadcast
                    if output and slack_api_constants.MESSAGE_TEXT in output \
                            and self.actionChar in output[slack_api_constants.MESSAGE_TEXT][0]:
                        return output[slack_api_constants.MESSAGE_TEXT]\
                            .split(self.actionChar)[1]\
                            .strip(), output[slack_api_constants.SLACK_CHANNEL]
                    # @mention of the bot -> return the command for broadcast
                    elif output and slack_api_constants.MESSAGE_TEXT in output \
                            and self.at_bot in output[slack_api_constants.MESSAGE_TEXT]:
                        return output[slack_api_constants.MESSAGE_TEXT]\
                            .split(self.at_bot)[1]\
                            .strip(), output[slack_api_constants.SLACK_CHANNEL]
                except doddle_exceptions.CommandParseException:
                    log.error("Unable to parse rtm_output")
        return None, None

    def _handle_command(self, command, channel):
        """
        Split the command text into a space-delimited list of words and
        broadcast (channel, parts) to every registered plugin.
        See Also:
            utilities/commandParser
        :param command:
            string -- the text following the actionChar
        :param channel:
            string -- the channel id the command was sent from
        """
        log.debug("Handling command: {0} in channel: {1}".format(command,
                                                                 channel))
        parts = command.split()
        for plugin in self.registered_plugins:
            plugin.on_command(channel, parts)
| {"/doddle.py": ["/src/__init__.py", "/src/doddle_exceptions.py"]} |
71,506 | SpaceWhale/doddlebot | refs/heads/master | /src/constants/slack_api_constants.py | """
:author: john.sosoka
:date: 5/10/2018
"""
# Actions
# Slack Web API method names, used via SlackClient.api_call().
CALL_CHANNELS_LIST = "channels.list"
CALL_CHAT_POST_MESSAGE = "chat.postMessage"
# Terms
# Logger name used by the third-party slackclient package.
SLACK_CLIENT_NAME = "slackclient"
# Keys found in slack api / rtm payload dictionaries.
CHANNELS = "channels"
MESSAGE_TEXT = "text"
SLACK_CHANNEL = "channel"
SLACK_USER = "user"
71,507 | SpaceWhale/doddlebot | refs/heads/master | /src/doddle_exceptions.py | """
:author: john.sosoka
:date: 5/10/2018
Collection of custom doddle exceptions.
"""
class _DoddleException(Exception):
    """Shared base for all doddle exceptions: stores `value` and uses its
    repr for str().

    The four public exception classes below were identical boilerplate;
    hoisting the common __init__/__str__ removes the duplication while
    keeping every public class name and its behavior unchanged.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class DirectoryCreateException(_DoddleException):
    """
    Raised when the client is unable to build a directory of slack channels.
    """


class CommandParseException(_DoddleException):
    """
    Raised when the output from the slack rtm api cannot be parsed
    """


class PluginPrepareException(_DoddleException):
    """
    Raised when the application is unable to build a list of plugin candidates
    """


class ConfigOptionRetrievalException(_DoddleException):
    """
    Raised when a config option cannot be retrieved.
    """
| {"/doddle.py": ["/src/__init__.py", "/src/doddle_exceptions.py"]} |
71,508 | SpaceWhale/doddlebot | refs/heads/master | /src/utilities/doddleUtil.py | """
:author: john.sosoka
:date: 5/10/2018
"""
import os
import logging
import ConfigParser
log = logging.getLogger("doddle.util.doddleUtil")
class DoddleUtil(object):
    """
    The doddleUtil currently only handles loading & reading the configuration
    file for the bot.
    """

    def __init__(self):
        # BUG FIX: `app` was never imported in this module, so __init__
        # raised NameError. Imported locally to leave the module's import
        # block untouched.
        from constants import app
        self.config_path = os.path.abspath('data/' + app.CONFIG_NAME)
        log.info("Using config " + self.config_path)

    def configure(self):
        """
        Create the config parser and load the config file.
        :returns:
            None
        """
        self.bot_config = ConfigParser.ConfigParser()
        self.bot_config.read(self.config_path)

    def read_config(self, section):
        """
        Read one section of the configuration file.
        :param section:
            str -- a section name in the doddle config file
        :return:
            dict -- option -> value for that section ({} on read error)
        """
        section_dict = {}
        try:
            # BUG FIX: this method referenced self.appConfig, an attribute
            # that is never assigned anywhere (configure() creates
            # self.bot_config).
            options = self.bot_config.options(section)
        except IOError as err:
            log.error("encountered an error reading config, %s" % err)
            # BUG FIX: `options` would be unbound below after the error.
            return section_dict
        for option in options:
            try:
                section_dict[option] = self.bot_config.get(section, option)
                if section_dict[option] == -1:
                    log.debug("skipping option: %s" % option)
            except Exception:
                log.error("error reading option %s" % option)
        return section_dict

    def get_option(self, section, option, default=None):
        """
        Fetch a single option; when it is missing, persist `default` back to
        the config file and return it.
        :param section:
            str -- the relevant section of the config
        :param option:
            str -- the relevant subsection of the config
        :param default:
            fallback value, written to the config when the option is absent
        :return:
            the configured (or default) value
        """
        try:
            value = self.bot_config.get(section, option)
        except ConfigParser.NoOptionError as err:
            log.error("Encountered no option error %s" % err)
            log.info("No option provided, adding the default to the config, write and return default")
            # BUG FIX: with default=None, `value` was unbound at the return;
            # the write also used self.configfile, an attribute that does not
            # exist (self.config_path is the real one), and leaked the file
            # handle.
            value = default
            if default:
                self.bot_config.set(section, option, default)
                with open(self.config_path, "w") as config_file:
                    self.bot_config.write(config_file)
        return value
| {"/doddle.py": ["/src/__init__.py", "/src/doddle_exceptions.py"]} |
71,537 | mhallin/hesitate-py | refs/heads/master | /tests/django17/test_app/tests.py | from django.test import Client, TestCase
class RewriterTests(TestCase):
    """Smoke test: the index view responds OK, i.e. its (deliberately
    failing) assert did not fire under the rewriter."""

    def test_index_no_assert(self):
        response = Client().get('/')
        self.assertEqual(200, response.status_code)
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,538 | mhallin/hesitate-py | refs/heads/master | /tests/impl_futureimport.py | from __future__ import division
def fn():
    # Deliberately failing assertion: the rewriter test suite imports this
    # module through the hesitate hook with assertion probability 0, so the
    # assert is expected to be skipped. Do not "fix" this condition.
    assert 1 == 2
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,539 | mhallin/hesitate-py | refs/heads/master | /hesitate/__init__.py | # Driver must be included first to avoid recursive imports.
from . import driver
from .conf import set_initial_probability, \
set_target_timing, set_convergence_factor
from .rewriter import attach_hook
# Package version; setup.py reads this via __import__('hesitate').__version__.
__version__ = '0.0.2'
assert driver  # Dummy assertion to silence linting
# Explicit public API of the package.
__all__ = [
    'set_initial_probability', 'set_target_timing',
    'set_convergence_factor',
    'attach_hook',
]
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,540 | mhallin/hesitate-py | refs/heads/master | /hesitate/wrappers/django/__init__.py | import hesitate
from django.conf import settings
# Hesitate is enabled by default outside DEBUG; the HESITATE_ENABLED setting,
# when present, overrides that default.
_enable_hesitate = getattr(settings, 'HESITATE_ENABLED', not settings.DEBUG)

if _enable_hesitate:
    # Unset tuning knobs are passed as None, which attach_hook treats as
    # "keep the current configured value".
    _knobs = {
        'initial_probability': getattr(settings, 'HESITATE_INITIAL_PROBABILITY', None),
        'target_timing': getattr(settings, 'HESITATE_TARGET_TIMING', None),
        'convergence_factor': getattr(settings, 'HESITATE_CONVERGENCE_FACTOR', None),
    }
    hesitate.attach_hook(**_knobs)
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,541 | mhallin/hesitate-py | refs/heads/master | /hesitate/rewriter.py | import ast
import imp
import itertools
import os.path
import sys
from . import conf
class RewriterHook(object):
    # PEP 302 meta_path finder/loader: intercepts imports of pure-python
    # modules, rewrites their asserts (see rewrite_source) and loads the
    # rewritten code object in place of the original module.

    def __init__(self):
        # full module name -> (compiled code object, source path)
        self.loaded_modules = {}

    def find_module(self, full_name, path=None):
        if path and not isinstance(path, list):
            path = list(path)
        if path and len(path) == 1:
            # Single-entry package path: resolve the submodule's .py file
            # directly; a failed open means "not ours", defer to the default
            # import machinery.
            path = path[0]
            modpath = os.path.join(path, full_name.rpartition('.')[2] + '.py')
            desc = ('.py', 'r', imp.PY_SOURCE)
            try:
                fobj = open(modpath)
            except IOError:
                return None
        else:
            try:
                fobj, modpath, desc = imp.find_module(full_name, path)
            except ImportError:
                return None
        suffix, mode, modtype = desc
        try:
            if modtype == imp.PY_SOURCE:
                # Only plain source modules are rewritten; anything else
                # (extensions, bytecode, packages) is implicitly declined by
                # falling through to the None return.
                code = rewrite_source(fobj.read(), modpath)
                self.loaded_modules[full_name] = code, modpath
                return self
        finally:
            if fobj:
                fobj.close()

    def load_module(self, name):
        code, modpath = self.loaded_modules[name]
        mod = imp.new_module(name)
        mod.__file__ = modpath
        # Register in sys.modules BEFORE exec so recursive imports see it.
        sys.modules[name] = mod
        exec(code, mod.__dict__)
        return mod
def attach_hook(initial_probability=None,
                target_timing=None,
                convergence_factor=None):
    """Install the assert-rewriting import hook, optionally tuning conf.

    Arguments left as None keep their currently configured values.
    """
    knobs = (
        (initial_probability, conf.set_initial_probability),
        (target_timing, conf.set_target_timing),
        (convergence_factor, conf.set_convergence_factor),
    )
    for value, setter in knobs:
        if value is not None:
            setter(value)
    sys.meta_path.insert(0, RewriterHook())
def rewrite_source(source, modpath):
    """Parse `source`, rewrite its asserts, and compile the result.

    Returns None when the source does not parse (the caller then lets the
    default import machinery report the error).
    """
    try:
        tree = ast.parse(source)
    except SyntaxError:
        return None
    transformed = AssertionTransformer(modpath).visit(tree)
    return compile(transformed, modpath, 'exec')
class AssertionTransformer(ast.NodeTransformer):
    # Rewrites every `assert` statement into:
    #     if @hesitate_should_assert(path, line, col):
    #         with @hesitate_timed(path, line, col):
    #             assert ...
    # The driver helpers are imported under names containing '@' so they can
    # never collide with (or be shadowed by) identifiers in user code.
    ASSERTION_TEST_IMPORTED_NAME = '@hesitate_should_assert'
    ASSERTION_TIMER_IMPORTED_NAME = '@hesitate_timed'
    # ast.withitem exists from Python 3.3 on; older trees use the flat With.
    HAS_WITHITEM = hasattr(ast, 'withitem')

    def __init__(self, modpath):
        # modpath is embedded in every rewritten assert so the driver can key
        # its timing statistics on (path, lineno, col_offset).
        self.modpath = modpath

    def _is_docstring(self, node):
        return isinstance(node, ast.Expr) \
            and isinstance(node.value, ast.Str)

    def _is_future_import(self, node):
        return isinstance(node, ast.ImportFrom) \
            and node.level == 0 \
            and node.module == '__future__'

    def visit_Module(self, node):
        # The injected driver import must come after the module docstring and
        # any `from __future__ import ...` statements (which are only legal
        # at the very top of a module), but before any rewritten code.
        pre_nodes = list(itertools.takewhile(
            lambda node: (self._is_docstring(node)
                          or self._is_future_import(node)),
            node.body))
        rest_nodes = [self.visit(n) for n in node.body[len(pre_nodes):]]
        # from hesitate.driver import should_assert as @hesitate_should_assert, \
        #                             timed as @hesitate_timed
        importnode = ast.ImportFrom(
            module='hesitate.driver',
            names=[
                ast.alias(
                    name='should_assert',
                    asname=self.ASSERTION_TEST_IMPORTED_NAME),
                ast.alias(
                    name='timed',
                    asname=self.ASSERTION_TIMER_IMPORTED_NAME)],
            lineno=1,
            col_offset=0,
            level=0)
        if pre_nodes:
            importnode = ast.copy_location(importnode, pre_nodes[0])
        new_mod = ast.Module(
            body=pre_nodes + [importnode] + rest_nodes,
            lineno=1,
            col_offset=0)
        return new_mod

    def visit_Assert(self, node):
        # Constant arguments identifying this assert site.
        srcname_node = ast.copy_location(ast.Str(self.modpath), node)
        lineno_node = ast.copy_location(ast.Num(node.lineno), node)
        col_offset_node = ast.copy_location(ast.Num(node.col_offset), node)
        # @hesitate_should_assert(path, line, col)
        assertion_test_name = ast.copy_location(
            ast.Name(self.ASSERTION_TEST_IMPORTED_NAME, ast.Load()),
            node)
        func_call = ast.copy_location(
            ast.Call(
                func=assertion_test_name,
                args=[srcname_node, lineno_node, col_offset_node],
                keywords=[],
                starargs=None,
                kwargs=None),
            node)
        # @hesitate_timed(path, line, col) -- timing context manager
        timer_name = ast.copy_location(
            ast.Name(self.ASSERTION_TIMER_IMPORTED_NAME, ast.Load()),
            node)
        timer_call = ast.copy_location(
            ast.Call(
                func=timer_name,
                args=[srcname_node, lineno_node, col_offset_node],
                keywords=[],
                starargs=None,
                kwargs=None),
            node)
        # with @hesitate_timed(...): <original assert>
        with_node = ast.copy_location(
            self._make_with_node(timer_call, [node]),
            node)
        # if @hesitate_should_assert(...): <with block>
        new_node = ast.copy_location(
            ast.If(
                test=func_call,
                body=[with_node],
                orelse=[]),
            node)
        return new_node

    def _make_with_node(self, with_expr, body):
        # Build an ast.With in whichever shape this Python version expects.
        if self.HAS_WITHITEM:
            return ast.With(
                items=[ast.withitem(
                    context_expr=with_expr,
                    optional_vars=None)],
                body=body)
        else:
            return ast.With(
                context_expr=with_expr,
                optional_vars=None,
                body=body)
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,542 | mhallin/hesitate-py | refs/heads/master | /tests/test_rewriter.py | from unittest import TestCase
from hesitate import attach_hook, set_initial_probability
class RewriterTest(TestCase):
    """Import the fixture modules through the rewriting hook and call them.

    With the initial probability forced to 0.0, their deliberately false
    asserts must be skipped, so every fn() call returns without raising.
    """

    @classmethod
    def setUpClass(cls):
        attach_hook()
        set_initial_probability(0.0)

    def test_plain_file(self):
        from . import impl_plain
        impl_plain.fn()

    def test_docstring_file(self):
        from . import impl_docstring
        impl_docstring.fn()
        # The rewriter must keep the module docstring in place.
        self.assertEqual('Docstring', impl_docstring.__doc__)

    def test_futureimport_file(self):
        from . import impl_futureimport
        impl_futureimport.fn()
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,543 | mhallin/hesitate-py | refs/heads/master | /tests/impl_docstring.py | """Docstring"""
def fn():
    # Deliberately failing assertion: the rewriter test suite imports this
    # module with assertion probability 0, so the assert must be skipped.
    # Do not "fix" this condition.
    assert 1 == 2
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,544 | mhallin/hesitate-py | refs/heads/master | /tests/__init__.py | from unittest import TestSuite, TestLoader
from .test_rewriter import RewriterTest
def all_tests():
    """Assemble the package's full test suite (setup.py's test_suite)."""
    # TestSuite accepts an iterable of tests directly, which is equivalent
    # to creating an empty suite and calling addTests().
    return TestSuite(
        TestLoader().loadTestsFromTestCase(RewriterTest))
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,545 | mhallin/hesitate-py | refs/heads/master | /hesitate/driver.py | import time
import random
from contextlib import contextmanager
from . import conf
AVG_TIMINGS = {}
@contextmanager
def timed(modpath, lineno, col_offset):
    """Time the wrapped assertion and fold the duration into the running
    exponential moving average kept for this (module, line, column) site."""
    begin = time.perf_counter()
    try:
        yield
    finally:
        elapsed = time.perf_counter() - begin
        site = (modpath, lineno, col_offset)
        # The first observation seeds the average with itself.
        previous = AVG_TIMINGS.setdefault(site, elapsed)
        alpha = conf.CONVERGENCE_FACTOR
        AVG_TIMINGS[site] = alpha * elapsed + (1.0 - alpha) * previous
def should_assert(modpath, lineno, col_offset):
    """Probabilistically decide whether this assert site should execute,
    aiming its expected cost at conf.TARGET_TIMING per evaluation."""
    avg = AVG_TIMINGS.get((modpath, lineno, col_offset))
    if avg is None:
        # No timing data yet for this site -- use the configured default.
        threshold = conf.INITIAL_PROBABILITY
    else:
        threshold = conf.TARGET_TIMING / avg
    return random.random() < threshold
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,546 | mhallin/hesitate-py | refs/heads/master | /setup.py | from setuptools import setup, find_packages
# Pull the version straight from the package so it is defined in one place.
version = __import__('hesitate').__version__

setup(
    name='hesitate',
    version=version,
    description='A stochastic Design by Contract utility',
    url='https://github.com/mhallin/hesitate-py',
    author='Magnus Hallin',
    author_email='mhallin@gmail.com',
    license='BSD',
    # Ship the library only; the test packages stay out of the distribution.
    packages=find_packages(exclude=['tests']),
    test_suite='tests.all_tests',
)
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,547 | mhallin/hesitate-py | refs/heads/master | /tests/django17/django17/urls.py | from django.conf.urls import patterns, include, url
from django.contrib import admin
import test_app.views
# Routes for the django test project: admin plus a single index view whose
# assert is expected to be neutralized by the hesitate rewriter.
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', test_app.views.index),
)
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,548 | mhallin/hesitate-py | refs/heads/master | /hesitate/conf.py | INITIAL_PROBABILITY = 1.0
TARGET_TIMING = 0.1
CONVERGENCE_FACTOR = 0.7
def set_initial_probability(probability):
assert 0 <= probability <= 1
global INITIAL_PROBABILITY
INITIAL_PROBABILITY = probability
def set_target_timing(target_timing):
    """Set the module-wide target timing budget (must be non-negative)."""
    global TARGET_TIMING
    assert target_timing >= 0
    TARGET_TIMING = target_timing
def set_convergence_factor(convergence_factor):
    """Set the module-wide convergence factor (must lie in [0, 1])."""
    global CONVERGENCE_FACTOR
    assert 0 <= convergence_factor <= 1
    CONVERGENCE_FACTOR = convergence_factor
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,549 | mhallin/hesitate-py | refs/heads/master | /tests/django16/test_app/views.py | from django.http import HttpResponse
def index(request):
    # NOTE(review): deliberately failing assertion -- this is a test app for
    # the hesitate package, which probabilistically rewrites asserts, so the
    # failure is presumably intentional; confirm before "fixing".
    assert 1 == 2
    return HttpResponse('ok')
| {"/hesitate/__init__.py": ["/hesitate/conf.py", "/hesitate/rewriter.py"], "/hesitate/wrappers/django/__init__.py": ["/hesitate/__init__.py"], "/hesitate/rewriter.py": ["/hesitate/__init__.py"], "/tests/test_rewriter.py": ["/hesitate/__init__.py", "/tests/__init__.py"], "/tests/__init__.py": ["/tests/test_rewriter.py"], "/hesitate/driver.py": ["/hesitate/__init__.py"]} |
71,614 | konradcala/BluePrism-encryption | refs/heads/master | /security/blue_prism_aes.py | import logging
from base64 import b64decode, b64encode
from Cryptodome.Cipher import AES
from Cryptodome.Util.Padding import pad
class BluePrismAes:
    """AES-CBC helper compatible with Blue Prism credential encryption.

    Encrypted payloads are formatted as '<base64 IV>:<base64 ciphertext>'.
    """

    logger = logging.getLogger(__name__)

    def __init__(self, credentials_key):
        # credentials_key is the base64-encoded AES key.
        self.private_key = b64decode(credentials_key)

    def decrypt(self, encrypted_string: str) -> str:
        """Decrypt an '<iv>:<ciphertext>' payload and return the plaintext."""
        self.logger.info('Decrypting queue item data [%s]', encrypted_string)
        encrypted_tab = encrypted_string.split(':')
        iv = b64decode(encrypted_tab[0])
        encrypted_data = b64decode(encrypted_tab[1])
        cipher = AES.new(self.private_key, AES.MODE_CBC, iv)
        # NOTE(review): encrypt() applies PKCS#7 padding, but this strips
        # whitespace instead of calling Cryptodome's unpad(); presumably the
        # real Blue Prism data is space-padded -- confirm round-trip behavior
        # before changing either side.
        return cipher.decrypt(encrypted_data).decode('utf-8').strip()

    def encrypt(self, string_to_encrypt: str) -> str:
        """Encrypt plaintext and return it as '<base64 IV>:<base64 ciphertext>'."""
        self.logger.info('Encrypting queue item data [%s]', string_to_encrypt)
        # Omitting the IV makes the library generate a fresh random one.
        cipher = AES.new(self.private_key, AES.MODE_CBC)
        ct_bytes = cipher.encrypt(pad(string_to_encrypt.encode(), AES.block_size))
        iv = b64encode(cipher.iv).decode('utf-8')
        ct = b64encode(ct_bytes).decode('utf-8')
        return f'{iv}:{ct}'
| {"/test/security/test_blue_prism_aes.py": ["/security/blue_prism_aes.py"]} |
71,615 | konradcala/BluePrism-encryption | refs/heads/master | /test/security/test_blue_prism_aes.py | import logging
from unittest import TestCase
from security.blue_prism_aes import BluePrismAes
class TestBluePrismAes(TestCase):
    """Round-trip tests for BluePrismAes against recorded fixture data."""

    logging.basicConfig(level=logging.DEBUG)
    aes = BluePrismAes('LOm/OeAd4NpwKjA5CTgZGaRLqzXd0ii4a/IxYzYnGnY=')

    def test_decrypt(self):
        """A recorded ciphertext decrypts to the recorded plaintext."""
        with open('test/resources/security/encrypted_queue_item_data.txt') as fh:
            ciphertext = fh.read()
        with open('test/resources/security/decrypted_queue_item_data.xml') as fh:
            plaintext = fh.read()
        self.assertEqual(plaintext, self.aes.decrypt(ciphertext))

    def test_encrypt(self):
        """encrypt() output decrypts back to the original plaintext."""
        with open('test/resources/security/decrypted_queue_item_data.xml') as fh:
            plaintext = fh.read()
        round_tripped = self.aes.decrypt(self.aes.encrypt(plaintext))
        self.assertEqual(round_tripped, plaintext)
71,616 | lyndonrey85/Quotes | refs/heads/master | /apps/login/views.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .models import User
# from .models import course
from django.shortcuts import render, redirect, reverse
from django.contrib import messages
# Create your views here.
def index(request):
    """Render the combined registration/login landing page."""
    return render(request, 'login/index.html')
def register(request):
    """Handle the registration form POST; log the new user in on success."""
    result = User.objects.validate_registration(request.POST)
    # The manager returns a list of error strings on failure, a User on success.
    if isinstance(result, list):
        for error in result:
            messages.error(request, error)
        return redirect('/')
    request.session['user_id'] = result.id
    messages.success(request, "Successfully registered!")
    return redirect("quote:index")
def login(request):
    """Handle the login form POST; start a session on success."""
    result = User.objects.validate_login(request.POST)
    # The manager returns a list of error strings on failure, a User on success.
    if isinstance(result, list):
        for err in result:
            messages.error(request, err)
        return redirect('/')
    request.session['user_id'] = result.id
    messages.success(request, "Successfully logged in!")
    return redirect("quote:index")
def success(request):
    """Render the post-login page; bounce anonymous visitors to the index."""
    if 'user_id' not in request.session:
        return redirect('/')
    context = {
        'user': User.objects.get(id=request.session['user_id'])
    }
    return render(request, 'login/success.html', context)
def show(request, user_id):
    """Display a user's profile with the distinct quotes they have left.

    The original body could never render: it called `.objects` on a values()
    dict (AttributeError) and passed the undefined name `unique_desc` into
    the context (NameError). Both are fixed here.
    """
    # Imported locally to avoid a circular import between the login and
    # quote apps (quote.models imports login.models at module level).
    from ..quote.models import Quote
    user = User.objects.get(id=user_id)
    # Distinct quote ids referenced by this user's quotes_left relation.
    # NOTE(review): assumes each quotes_left row carries a 'quote' FK to
    # Quote -- confirm against the model that defines quotes_left.
    unique_ids = user.quotes_left.all().values("quote").distinct()
    unique_quote = [Quote.objects.get(id=entry["quote"]) for entry in unique_ids]
    context = {
        'user': user,
        'unique_quote_desc': unique_quote,
    }
    return render(request, 'login/show.html', context)
def logout(request):
    """Clear the entire session (logging the user out) and return home."""
    request.session.clear()
    return redirect('/')
| {"/apps/quote/views.py": ["/apps/quote/models.py"]} |
71,617 | lyndonrey85/Quotes | refs/heads/master | /apps/quote/models.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from..login.models import User
from django.db import models
# Create your models here.
class QuoteManager(models.Manager):
    """Custom manager providing form validation for Quote submissions."""

    def validate(self, post_data):
        """Validate POST data for a new quote.

        Returns a list of error strings; an empty list means the data is valid.
        """
        errors = []
        # NOTE(review): the original checked len(post_data['message']) < 5
        # here, duplicating the 30-char message rule below with a
        # contradictory threshold; the 5-char minimum presumably applies to
        # the 'quoted_by' field -- confirm against the form template.
        if len(post_data['quoted_by']) < 5:
            errors.append("Quoted by field must be 5 characters or more")
        if len(post_data['message']) < 30:
            errors.append("Message field must be 30 characters or more")
        return errors
class Quote(models.Model):
    # Name of the person the quote is attributed to.
    quoted_by = models.CharField(max_length=100)
    # The quote text itself.
    message = models.CharField(max_length=255)
    # User who submitted the quote; reverse accessor: user.quotes_created.
    creator = models.ForeignKey(User, related_name="quotes_created", default=1)
    # Users who favorited this quote; reverse accessor: user.favorites.
    user_favorites = models.ManyToManyField(User, related_name="favorites")
    # user_favorites = models.ForeignKey(User, related_name="favorite")
    created_at = models.DateTimeField(auto_now_add=True)
    objects = QuoteManager()
| {"/apps/quote/views.py": ["/apps/quote/models.py"]} |
71,618 | lyndonrey85/Quotes | refs/heads/master | /apps/quote/views.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .models import Quote
from ..login.models import User
from django.shortcuts import render, redirect
from django.contrib import messages
# Create your views here.
def index(request):
    """List all quotes plus the logged-in user's favorited quotes."""
    quotes = Quote.objects.all()
    current_user = User.objects.get(id=request.session["user_id"])
    user_favorites = current_user.favorites.all()
    # course_delete = Course.objects.all().delete()
    context = {
        "quotes": quotes,
        "favorites": user_favorites
        # "course_to_remove": course_remove
    }
    return render(request, 'quote/index.html', context)
def create(request):
    """Create a new quote owned by the logged-in user, then show the index."""
    current_user = User.objects.get(id=request.session["user_id"])
    # create() both builds and saves the row; no need to keep a reference
    # (the original bound it to an unused local).
    Quote.objects.create(
        quoted_by=request.POST["quoted_by"],
        message=request.POST["message"],
        creator=current_user,
    )
    return redirect("quote:index")
def delete(request, quote_id):
    """Delete the given quote and return to the index."""
    quote = Quote.objects.get(id=quote_id)
    # NOTE(review): current_user is never used, but the lookup raises when no
    # user is logged in, so it may act as an implicit auth guard -- confirm
    # before removing.
    current_user = User.objects.get(id=request.session["user_id"])
    quote.delete()
    # quote_remove.remove()
    return redirect("quote:index")
def favorite(request, quote_id):
    """Mark the given quote as a favorite of the logged-in user."""
    user = User.objects.get(id=request.session["user_id"])
    quote = Quote.objects.get(id=quote_id)
    quote.user_favorites.add(user)
    quote.save()
    return redirect("quote:index")
def show(request, quote_id):
    """Render the detail page for a single quote."""
    context = {"quote": Quote.objects.get(id=quote_id)}
    return render(request, "quote/show.html", context)
71,620 | EpiSci/SBL | refs/heads/master | /pomdp.py | import numpy as np
#This class represents a node and each node is a state in the model.
class sPOMDPNode():
    """A single latent state of an sPOMDP environment.

    The class-level attributes (O_S, A_S, State_Size, Alpha, Epsilon) are
    shared configuration and are assigned by the concrete environment
    classes before any node is instantiated.
    """
    O_S = None
    A_S = None
    State_Size = None
    Alpha = None
    Epsilon = None

    def __init__(self, Observation, Action_Dictionary):
        self.Observation = Observation
        # Maps each action to the index of its Alpha-probable successor state.
        self.Action_Dictionary = Action_Dictionary
        # For every action, a full transition distribution: Alpha mass on the
        # intended successor, the remainder spread over all other states.
        self.Transition_Dictionary = {}
        spread = (1 - self.Alpha) / (self.State_Size - 1)
        for action, target in self.Action_Dictionary.items():
            vector = np.full(self.State_Size, spread)
            vector[target] = self.Alpha
            self.Transition_Dictionary[action] = vector
        # Observations other than this state's own, used for noisy emission.
        self.Other_Observations = [o for o in self.O_S if o != self.Observation]

    def step_action(self, action: str):
        """Sample and return the index of the successor state under *action*."""
        return np.random.choice(np.arange(0, self.State_Size), p=self.Transition_Dictionary[action])

    def get_observation(self):
        """Emit this state's observation w.p. Epsilon, else a uniform other one."""
        if np.random.random() < self.Epsilon:
            return self.Observation
        pick = np.random.randint(low=0, high=len(self.Other_Observations))
        return self.Other_Observations[pick]
#This class combines all of the nodes into a model.
class sPOMDPModelExample():
    """Base class assembling sPOMDPNode states into a runnable environment."""

    def __init__(self):
        self.Node_Set = []   # environment states, indexed by position
        self.SDE_Set = []    # known stochastic distinguishing experiments

    def reset(self):
        """Teleport to a uniformly random state; return its (noisy) observation."""
        self.Current_State = np.random.choice(self.Node_Set)
        return self.Current_State.get_observation()

    def step(self, action: str):
        """Apply *action*, move to the sampled successor, and observe it."""
        next_idx = self.Current_State.step_action(action)
        self.Current_State = self.Node_Set[next_idx]
        return self.Current_State.get_observation()

    def random_step(self):
        """Take a uniformly random action; return (observation, action)."""
        action = np.random.choice(self.A_S)
        return self.step(action), action

    def get_SDE(self, first_obs=None):
        """Return all SDEs, or only those whose first observation is *first_obs*."""
        if first_obs is None:
            return self.SDE_Set
        return [sde for sde in self.SDE_Set if sde[0] == first_obs]

    def get_true_transition_probs(self):
        """Stack the nodes' transition vectors into an (|A|, |S|, |S|) array."""
        n_states = len(self.Node_Set)
        probs = np.zeros((len(self.A_S), n_states, n_states))
        for a_idx, action in enumerate(self.A_S):
            for s_idx, node in enumerate(self.Node_Set):
                probs[a_idx, s_idx, :] = node.Transition_Dictionary[action]
        return probs

    def get_observation_probs(self):
        """Build P(o|s): Epsilon for each node's true observation, noise elsewhere."""
        noise = (1 - self.Epsilon) / (len(self.O_S) - 1)
        probs = np.ones((len(self.O_S), len(self.Node_Set))) * noise
        for s_idx, node in enumerate(self.Node_Set):
            probs[self.O_S.index(node.Observation), s_idx] = self.Epsilon
        return probs
#This class extends the generic sPOMDP model. This model is the fully-built ae-Shape environment
class Example1(sPOMDPModelExample):
    """Fully built ae-Shape environment: the complete SDE set is known up front."""

    def __init__(self):
        sPOMDPModelExample.__init__(self)
        # Environment configuration.
        self.O_S = ["square", "diamond"]
        self.A_S = ["x", "y"]
        self.State_Size = 4
        self.Alpha = 0.99
        self.Epsilon = 0.99
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        # Already-known SDEs, one per latent state.
        self.SDE_Set = [
            ["square", "y", "diamond", "x", "square"],
            ["square", "y", "diamond", "x", "diamond"],
            ["diamond", "x", "square"],
            ["diamond", "x", "diamond"],
        ]
        # True transition structure (states 0-3).
        for obs, moves in [
            ("square", {"x": 1, "y": 2}),
            ("square", {"x": 1, "y": 3}),
            ("diamond", {"x": 0, "y": 0}),
            ("diamond", {"x": 2, "y": 1}),
        ]:
            self.Node_Set.append(sPOMDPNode(Observation=obs, Action_Dictionary=moves))
#This class extends the generic sPOMDP model. This model is the ae-Shape environment starting with initial observations.
class Example2(sPOMDPModelExample):
    """ae-Shape environment seeded with only the raw observation SDEs (Alpha=0.85)."""

    def __init__(self):
        sPOMDPModelExample.__init__(self)
        # Environment configuration.
        self.O_S = ["diamond", "square"]
        self.A_S = ["x", "y"]
        self.State_Size = 4
        self.Alpha = 0.85
        self.Epsilon = 0.99
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        # Initially only the bare observations are known as SDEs.
        self.SDE_Set = [[obs] for obs in self.O_S]
        # True transition structure (states 0-3).
        for obs, moves in [
            ("square", {"x": 1, "y": 2}),
            ("square", {"x": 1, "y": 3}),
            ("diamond", {"x": 0, "y": 0}),
            ("diamond", {"x": 2, "y": 1}),
        ]:
            self.Node_Set.append(sPOMDPNode(Observation=obs, Action_Dictionary=moves))
#This class extends the generic sPOMDP model. This model is the ae-Shape environment starting with initial observations. The values of alpha and epsilon are significantly lower than other environments
class Example22(sPOMDPModelExample):
    """ae-Shape with initial observations only and a much lower Alpha (0.75)."""

    def __init__(self):
        sPOMDPModelExample.__init__(self)
        # Environment configuration.
        self.O_S = ["diamond", "square"]
        self.A_S = ["x", "y"]
        self.State_Size = 4
        self.Alpha = 0.75
        self.Epsilon = 0.99
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        # Initially only the bare observations are known as SDEs.
        self.SDE_Set = [[obs] for obs in self.O_S]
        # True transition structure (states 0-3).
        for obs, moves in [
            ("square", {"x": 1, "y": 2}),
            ("square", {"x": 1, "y": 3}),
            ("diamond", {"x": 0, "y": 0}),
            ("diamond", {"x": 2, "y": 1}),
        ]:
            self.Node_Set.append(sPOMDPNode(Observation=obs, Action_Dictionary=moves))
#This class extends the generic sPOMDP model. This model is the ae-Little Prince environment starting with initial observations.
class Example3(sPOMDPModelExample):
    """ae-Little Prince environment seeded with only the raw observation SDEs."""

    def __init__(self):
        sPOMDPModelExample.__init__(self)
        # Environment configuration.
        self.O_S = ["rose", "volcano", "nothing"]
        self.A_S = ["b", "f", "t"]
        self.State_Size = 4
        self.Alpha = 0.85
        self.Epsilon = 0.99
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        # Initially only the bare observations are known as SDEs.
        self.SDE_Set = [[obs] for obs in self.O_S]
        # True transition structure (states 0-3).
        for obs, moves in [
            ("rose", {"f": 3, "b": 2, "t": 0}),
            ("volcano", {"f": 2, "b": 3, "t": 1}),
            ("nothing", {"f": 0, "b": 1, "t": 3}),
            ("nothing", {"f": 1, "b": 0, "t": 2}),
        ]:
            self.Node_Set.append(sPOMDPNode(Observation=obs, Action_Dictionary=moves))
#This class extends the generic sPOMDP model. This model is the fully built ae-Little Prince environment.
class Example32(sPOMDPModelExample):
    """Fully built ae-Little Prince environment (complete SDE set, Alpha=0.99)."""

    def __init__(self):
        sPOMDPModelExample.__init__(self)
        # Environment configuration.
        self.O_S = ["rose", "volcano", "nothing"]
        self.A_S = ["b", "f", "t"]
        self.State_Size = 4
        self.Alpha = 0.99
        self.Epsilon = 0.99
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        # Already-known SDEs; the two "nothing" states are told apart by 'b'.
        self.SDE_Set = [
            ["rose"],
            ["volcano"],
            ["nothing", "b", "volcano"],
            ["nothing", "b", "rose"],
        ]
        # True transition structure (states 0-3).
        for obs, moves in [
            ("rose", {"f": 3, "b": 2, "t": 0}),
            ("volcano", {"f": 2, "b": 3, "t": 1}),
            ("nothing", {"f": 0, "b": 1, "t": 3}),
            ("nothing", {"f": 1, "b": 0, "t": 2}),
        ]:
            self.Node_Set.append(sPOMDPNode(Observation=obs, Action_Dictionary=moves))
#This class extends the generic sPOMDP model. This model is the ae-1D Maze environment starting with initial observations.
class Example4(sPOMDPModelExample):
    """ae-1D Maze environment seeded with only the raw observation SDEs."""

    def __init__(self):
        sPOMDPModelExample.__init__(self)
        # Environment configuration.
        self.O_S = ["goal", "nothing"]
        self.A_S = ["east", "west"]
        self.State_Size = 4
        self.Alpha = 0.9
        self.Epsilon = 0.99
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        # Initially only the bare observations are known as SDEs.
        self.SDE_Set = [[obs] for obs in self.O_S]
        # True ring structure: goal, left, middle, right.
        for obs, moves in [
            ("goal", {"east": 1, "west": 3}),
            ("nothing", {"east": 2, "west": 0}),
            ("nothing", {"east": 3, "west": 1}),
            ("nothing", {"east": 0, "west": 2}),
        ]:
            self.Node_Set.append(sPOMDPNode(Observation=obs, Action_Dictionary=moves))
#This class extends the generic sPOMDP model. This model is the fully built ae-1D Maze environment.
class Example42(sPOMDPModelExample):
    """Fully built ae-1D Maze environment (complete SDE set, Alpha=0.99)."""

    def __init__(self):
        sPOMDPModelExample.__init__(self)
        # Environment configuration.
        self.O_S = ["goal", "nothing"]
        self.A_S = ["east", "west"]
        self.State_Size = 4
        self.Alpha = 0.99
        self.Epsilon = 0.99
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        # Already-known SDEs: "nothing" states are distinguished by how far
        # east the goal is.
        self.SDE_Set = [
            ["goal"],
            ["nothing", "east", "nothing", "east", "nothing"],
            ["nothing", "east", "nothing", "east", "goal"],
            ["nothing", "east", "goal"],
        ]
        # True ring structure: goal, left, middle, right.
        for obs, moves in [
            ("goal", {"east": 1, "west": 3}),
            ("nothing", {"east": 2, "west": 0}),
            ("nothing", {"east": 3, "west": 1}),
            ("nothing", {"east": 0, "west": 2}),
        ]:
            self.Node_Set.append(sPOMDPNode(Observation=obs, Action_Dictionary=moves))
#This class extends the generic sPOMDP model. This model is the fully built ae-Balance Beam environment. y actions lead forward, x falls off
class Example6(sPOMDPModelExample):
    """Fully built ae-Balance Beam environment: 'y' moves forward, 'x' falls off."""

    def __init__(self):
        sPOMDPModelExample.__init__(self)
        # Environment configuration.
        self.O_S = ["square", "diamond"]
        self.A_S = ["x", "y"]
        self.State_Size = 4
        self.Alpha = 0.99
        self.Epsilon = 0.99
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        # Already-known SDEs.
        self.SDE_Set = [
            ["square", "y", "diamond"],
            ["diamond", "y", "diamond"],
            ["diamond", "y", "square"],
            ["square", "y", "square"],
        ]
        # True transition structure: 'x' always drops back to state 0.
        for obs, moves in [
            ("square", {"x": 0, "y": 1}),
            ("diamond", {"x": 0, "y": 2}),
            ("diamond", {"x": 0, "y": 3}),
            ("square", {"x": 0, "y": 3}),
        ]:
            self.Node_Set.append(sPOMDPNode(Observation=obs, Action_Dictionary=moves))
#This class extends the generic sPOMDP model. This model is the ae-Balance Beam environment starting with initial observations. y actions lead forward, x falls off
class Example7(sPOMDPModelExample):
    """ae-Balance Beam environment seeded with only the raw observation SDEs."""

    def __init__(self):
        sPOMDPModelExample.__init__(self)
        # Environment configuration.
        self.O_S = ["square", "diamond"]
        self.A_S = ["x", "y"]
        self.State_Size = 4
        self.Alpha = 0.99
        self.Epsilon = 0.99
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        # Initially only the bare observations are known as SDEs.
        self.SDE_Set = [[obs] for obs in self.O_S]
        # True transition structure: 'x' always drops back to state 0.
        for obs, moves in [
            ("square", {"x": 0, "y": 1}),
            ("diamond", {"x": 0, "y": 2}),
            ("diamond", {"x": 0, "y": 3}),
            ("square", {"x": 0, "y": 3}),
        ]:
            self.Node_Set.append(sPOMDPNode(Observation=obs, Action_Dictionary=moves))
#Used in Algorithm 3 code as a generic model.
class genericModel(sPOMDPModelExample):
    """Generic environment wrapper used by the Algorithm-3 code.

    All configuration is supplied by the caller instead of being hard-coded.
    """

    def __init__(self, observationSet, actionSet, stateSize, SDE_Set, alpha, epsilon, environmentNodes):
        sPOMDPModelExample.__init__(self)
        self.O_S = observationSet
        self.A_S = actionSet
        self.State_Size = stateSize
        self.Alpha = alpha
        self.Epsilon = epsilon
        # Mirror the configuration onto the shared node class.
        for name in ("O_S", "A_S", "State_Size", "Alpha", "Epsilon"):
            setattr(sPOMDPNode, name, getattr(self, name))
        self.SDE_Set = SDE_Set
        # Environment states are provided ready-made by the caller.
        self.Node_Set = environmentNodes
# Calculate the gain of an environment given the transition probabilities and the one-step extension gammas
# Returns the associated entropy of each (m,a) pair and the associated gain
def calculateGain(env, Action_Probs, OneStep_Gammas):
    """Compute transition entropies and information gain for each (m, a) pair.

    Args:
        env: model providing SDE_Set (model states) and A_S (actions).
        Action_Probs: (|A|, |M|, |M|) transition probabilities T(m'|m,a).
        OneStep_Gammas: (|A|, |A|, |M|, |M|, |M|) one-step-extension counts,
            indexed [a, a', m, m', successor].

    Returns:
        (gainMA, entropyMA), both (|A|, |M|) arrays indexed [action, model state].
    """
    entropyMA = np.zeros((len(env.A_S), len(env.SDE_Set)))  # index as action, model number
    gainMA = np.zeros((len(env.A_S), len(env.SDE_Set)))  # index as action, model number
    # Normalize the one-step counts along the final axis into distributions.
    oneStep_TransitionProbs = OneStep_Gammas / np.reshape(np.repeat(np.sum(OneStep_Gammas, axis=4), len(env.SDE_Set), axis=3), OneStep_Gammas.shape)
    # Total times m' is entered from state m under action a, w.r.t. action a'.
    mSinglePrimeSum_aPrime = np.sum(OneStep_Gammas, axis=4)
    # Total times m' is entered from state m under action a.
    mSinglePrimeSum = np.sum(mSinglePrimeSum_aPrime, axis=0)
    # Total times m' is entered at all.
    mPrimeSum = np.sum(np.sum(mSinglePrimeSum, axis=0), axis=0)
    # Calculate the transition entropies H(T_{m,a}) and the gain values using
    # the one-step extension statistics.
    for mPrime_idx, mPrime in enumerate(env.SDE_Set):
        for aPrime_idx, aPrime in enumerate(env.A_S):
            transitionSetProbs = Action_Probs[aPrime_idx, mPrime_idx, :]
            # Entropy in base |M| so a uniform row has entropy exactly 1.
            transitionSetEntropy = np.sum(np.multiply(transitionSetProbs, (np.log(transitionSetProbs) / np.log(len(env.SDE_Set))))) * -1
            entropyMA[aPrime_idx, mPrime_idx] = transitionSetEntropy
            sigma = 0
            w_maSum = 0
            for a_idx, a in enumerate(env.A_S):
                for m_idx, m in enumerate(env.SDE_Set):
                    # Weight: fraction of all entries into m' contributed by (m, a).
                    w_ma = mSinglePrimeSum[a_idx, m_idx, mPrime_idx] / mPrimeSum[mPrime_idx]
                    w_maSum = w_maSum + w_ma
                    oneStepTransitionProb = oneStep_TransitionProbs[a_idx, aPrime_idx, m_idx, mPrime_idx, :]
                    oneStep_TransitionEntropy = np.sum(np.multiply(oneStepTransitionProb, (np.log(oneStepTransitionProb) / np.log(len(env.SDE_Set))))) * -1
                    sigma = (w_ma * oneStep_TransitionEntropy) + sigma
            gainMA[aPrime_idx, mPrime_idx] = entropyMA[aPrime_idx, mPrime_idx] - sigma
    # Optional CSV debug dump (disabled by default).
    printOneStep = False
    if printOneStep:
        import csv
        c = csv.writer(open("TestingFigure5April29Trial1.csv", "w"))
        c.writerow(["entropyMA"])
        c.writerow(entropyMA)
        c.writerow(["Gain: "])
        c.writerow(gainMA)
        c.writerow(["Action_Probs: "])
        for a1_idx, a1 in enumerate(env.A_S):
            for m1_idx, m1 in enumerate(env.SDE_Set):
                c.writerow(Action_Probs[a1_idx, m1_idx, :])
        for a1_idx, a1 in enumerate(env.A_S):
            for a2_idx, a2 in enumerate(env.A_S):
                for m1_idx, m1 in enumerate(env.SDE_Set):
                    for m2_idx, m2 in enumerate(env.SDE_Set):
                        c.writerow(["One-Step Transition Gamma: " + str(m1) + " " + str(a1) + " " + str(m2) + " " + str(a2) + " X"])
                        c.writerow(OneStep_Gammas[a1_idx, a2_idx, m1_idx, m2_idx, :])
                        # (Removed a stray dead assignment to w_ma here that
                        # reused stale loop indices from the gain computation.)
                        c.writerow(["Weight value that the transition m = " + str(m1) + " and a = " + str(a1) + "causes the transition into m' = " + str(m2) + ":"])
                        c.writerow([str(mSinglePrimeSum[a1_idx, m1_idx, m2_idx] / mPrimeSum[m2_idx])])
    # Optional stdout debug dump (disabled by default).
    printOneStepTransitions = False
    if printOneStepTransitions:
        print("One Step Transition Probs: ")
        for a1_idx, a1 in enumerate(env.A_S):
            for a2_idx, a2 in enumerate(env.A_S):
                for m1_idx, m1 in enumerate(env.SDE_Set):
                    for m2_idx, m2 in enumerate(env.SDE_Set):
                        print("One-Step Transition Probs: " + str(m1) + " " + str(a1) + " " + str(m2) + " " + str(a2) + " X")
                        # Fixed NameError: was misspelled 'OneStep_TransitoinProbs'.
                        print(oneStep_TransitionProbs[a1_idx, a2_idx, m1_idx, m2_idx, :])
        print("entropyMA")
        print(entropyMA)
        print("Gain: ")
        print(gainMA)
        print("Transition Probabilities: ")
        print(Action_Probs)
    return (gainMA, entropyMA)
# Calculate the Relative Error of a model as defined in Equation 6.4 (pg 122) of Collins' Thesis
# env holds the SDEs for the model
def calculateError(env, modelTransitionProbs, T, gammas):
doRelative = False
Current_Observation = env.reset()
SDE_List = env.get_SDE()
state_List = env.Node_Set
first_Observations_mod = [item[0] for item in SDE_List]
first_Observations_env = [item.Observation for item in state_List]
# Generate the transition probabilities for the environment
envTransitionProbs = env.get_true_transition_probs()
# Generate trajectory using environment
Full_Transition = [Current_Observation]
for num in range(0,T):
Current_Observation, random_action = env.random_step()
Full_Transition.append(random_action)
Full_Transition.append(Current_Observation)
# Generate a belief mask for each model state that indicates what the likelihood is of being in each model state given an observation
Obs_Belief_Mask_mod = np.zeros((len(env.O_S), len(SDE_List)))
for (o_idx, o) in enumerate(env.O_S):
SDE_Chance = np.zeros(len(SDE_List))
#Figure out how many SDEs correspond to the observation
num_Correspond = first_Observations_mod.count(o)
#Set the corresponding SDEs to 1 divided by that value
SDE_Chance[(np.array(first_Observations_mod) == o)] = 1/num_Correspond
Obs_Belief_Mask_mod[o_idx,:] = SDE_Chance
# Generate a belief mask for each env state that indicates what the likelihood is of being in each env state given an observation
Obs_Belief_Mask_env = np.zeros((len(env.O_S), len(state_List)))
for (o_idx, o) in enumerate(env.O_S):
Obs_Chance = np.zeros(len(state_List))
#Figure out how many states correspond to the observation
num_Correspond = first_Observations_env.count(o)
#Set the corresponding states to 1 divided by that value
Obs_Chance[(np.array(first_Observations_env) == o)] = 1/num_Correspond
Obs_Belief_Mask_env[o_idx,:] = Obs_Chance
# Generate P(o|m) matrix for model and environment
Obs_Probs_mod = np.ones((len(env.O_S), len(SDE_List))) * ((1 - env.Epsilon) / ((len(env.O_S)) - 1))
for (sde_idx, sde) in enumerate(SDE_List):
o_idx = env.O_S.index(sde[0])
Obs_Probs_mod[o_idx, sde_idx] = env.Epsilon
Obs_Probs_env = env.get_observation_probs()
# Generate starting belief states for environment and model using first observation
Observation = Full_Transition[0]
Observation_Idx = env.O_S.index(Observation)
Belief_State_mod = Obs_Belief_Mask_mod[Observation_Idx].copy()
Belief_State_env = Obs_Belief_Mask_env[Observation_Idx].copy()
# determine the weights for the error from the confidence
sum_of_row = np.sum(gammas, axis=2)
weights = len(SDE_List) * np.ones((len(env.A_S), len(SDE_List)))
weights = np.divide(weights, sum_of_row)
weights = 1 - weights
error = 0
Transition_Idx = 0
prev_error = 0
while Transition_Idx < len(Full_Transition)//2:
# update the belief states with the new action, observation pair
Observation = Full_Transition[Transition_Idx*2+2]
Observation_Idx = env.O_S.index(Observation)
Action = Full_Transition[Transition_Idx*2+1]
Belief_Mask_mod = Obs_Belief_Mask_mod[Observation_Idx]
Belief_Mask_env = Obs_Belief_Mask_env[Observation_Idx]
Model_Action_Idx = env.A_S.index(Action)
Belief_State_mod = np.dot(Belief_State_mod, modelTransitionProbs[Model_Action_Idx,:,:])
Belief_State_env = np.dot(Belief_State_env, envTransitionProbs[Model_Action_Idx,:,:])
Belief_State_mod = Belief_State_mod/np.sum(Belief_State_mod)
Belief_State_env = Belief_State_env/np.sum(Belief_State_env)
# Compute error for the current belief states
weight_vector = weights[env.A_S.index(Action), :]
error_vector = np.dot(Obs_Probs_mod, Belief_State_mod) - np.dot(Obs_Probs_env, Belief_State_env)
error = error + np.sqrt(error_vector.dot(error_vector))
Belief_State_mod = Belief_State_mod*Belief_Mask_mod
Belief_State_mod = Belief_State_mod/np.sum(Belief_State_mod)
Belief_State_env = Belief_State_env*Belief_Mask_env
Belief_State_env = Belief_State_env/np.sum(Belief_State_env)
prev_error = np.sqrt(error_vector.dot(error_vector))
Transition_Idx = Transition_Idx + 1
return error / T
# calculates the absolute error. Error=1 when incorrect SDEs or # of SDEs, otherwise it's the average difference of each transition / 2
# note: it is assummed that env.Node_Set contains a minimum representation of the true environment nodes
def calculateAbsoluteError(env, modelTransitionProbs):
    """Absolute error of the learned model against the true environment.

    Returns 1 when the model has the wrong number of states or its SDEs do
    not map one-to-one onto environment states; otherwise the mean absolute
    transition-probability difference, halved so each transition's
    contribution lies in [0, 1]. Assumes env.Node_Set is a minimal
    representation of the true environment.
    """
    num_of_env_states = len(env.Node_Set)
    if num_of_env_states != len(env.SDE_Set):
        return 1
    envTransitionProbs = env.get_true_transition_probs()
    # Generate P(o|m) matrix for environment
    Obs_Probs_env = env.get_observation_probs()
    # Check that each SDE corresponds to exactly one environment state by
    # simulating its action/observation sequence from every start state and
    # recording which start state explains it best.
    SDEToNode = []
    for SDE in env.SDE_Set:
        probabilities = np.zeros(len(env.Node_Set))
        for env_state_num in range(len(env.Node_Set)):
            # Start fully confident in the candidate state, weighted by the
            # probability of seeing the SDE's first observation there.
            Belief_State = np.zeros(len(env.Node_Set))
            Belief_State[env_state_num] = 1
            first_obs_index = env.O_S.index(SDE[0])
            Belief_State = np.multiply(Obs_Probs_env[first_obs_index, :], Belief_State)
            # Propagate through each (action, observation) pair of the SDE.
            Transition_Idx = 0
            while Transition_Idx < len(SDE) // 2:
                Observation = SDE[Transition_Idx * 2 + 2]
                Observation_Idx = env.O_S.index(Observation)
                Action = SDE[Transition_Idx * 2 + 1]
                Action_Idx = env.A_S.index(Action)
                Belief_State = np.dot(Belief_State, envTransitionProbs[Action_Idx, :, :])
                Belief_State = np.multiply(Obs_Probs_env[Observation_Idx, :], Belief_State)
                Transition_Idx = Transition_Idx + 1
            # Remaining mass = likelihood that this start state yields the SDE.
            probabilities[env_state_num] = np.sum(Belief_State)
        SDEToNode.append(probabilities.argmax())
    # Two SDEs resolving to the same environment state means the model is invalid.
    if len(SDEToNode) != len(set(SDEToNode)):
        return 1
    # SDEs are valid, so now calculate the absolute difference per transition / 2
    SDEToNode = np.array(SDEToNode)
    error = 0
    for a_idx in range(len(env.A_S)):
        # Reorder the true transition matrix rows/columns into model-state order.
        permutatedEnvTrans = envTransitionProbs[a_idx, SDEToNode]
        permutatedEnvTrans = permutatedEnvTrans[:, SDEToNode]
        # we divide by two so that way each transition diff is normalized to be between 0 and 1
        abs_difference = np.absolute(permutatedEnvTrans - modelTransitionProbs[a_idx, :, :]) / 2
        error = error + np.sum(np.sum(np.sum(abs_difference)))
    error = error / (len(env.A_S) * num_of_env_states)
    return error
| {"/generateGraphs.py": ["/pomdp.py"], "/test.py": ["/pomdp.py", "/spomdp.py"], "/spomdp.py": ["/pomdp.py", "/test.py"]} |
71,621 | EpiSci/SBL | refs/heads/master | /generateGraphs.py | import csv
import glob
import numpy as np
import re
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
from subprocess import check_call
from tempfile import NamedTemporaryFile
from pomdp import *
import codecs
import matplotlib.ticker as mtick
def generateGraphTest1(useFirstPoint,genAbsoluteError):
    """Plot Test 1 (posterior-update comparison) error curves for every environment found on disk.

    Reads per-trial CSV logs from ./Testing Data/Test1_v1 and Test1_v3, averages
    error-vs-actions curves across successful trials (absolute error < 1), and
    saves one errorbar figure per environment.

    :param useFirstPoint: if False, the iteration-0 data point of each model is skipped
    :param genAbsoluteError: if True plot 'Absolute Error:' rows, otherwise 'Error:' rows
    """
    # Discover which environment numbers have data by scanning the v1 file names.
    environments = []
    files = glob.glob("./Testing Data/Test1_v" + str(1) + "/*.csv")
    for file in files:
        env = re.search("env\d+", file).group()
        env_num = env[len("env"):]
        environments.append(env_num)
    environments = set(environments)
    figure_num = 1
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    # use list and not numpy array since we don't know how many iterations were done
    for env_num in environments:
        print("environment " + str(env_num))
        v1Data = []
        v2Data = []
        model_splits = []
        # Version 1 = frequency-dependent posterior, version 3 = frequency-independent.
        for versionNum in [1,3]:
            files = glob.glob("./Testing Data/Test1_v" + str(versionNum) + "/Test" + str(1) + "_v" + str(versionNum) + "_env" + str(env_num) + "*.csv")
            if len(files) == 0:
                continue
            data = []
            if versionNum == 1:
                data = v1Data
            else:
                data = v2Data
            validCount = 0
            totalCount = len(files)
            for filename in files:
                # find the returned model num
                # First pass: the rows after the '*' sentinel describe the final model;
                # the trial counts as successful only if its absolute error is < 1.
                finalModelNum = -1
                isValidSplitTrial = False
                with open(filename, mode='r') as csv_file:
                    csv_reader = csv.DictReader(csv_file)
                    foundFinal = False
                    for row in csv_reader:
                        if row['0'] == '*':
                            foundFinal = True
                            continue
                        if foundFinal is True and finalModelNum == -1:
                            temp = row['0']
                            finalModelNum = int(temp[len('Model Num '):])
                        if foundFinal is True and row['0'] == 'Absolute Error:':
                            absError = float(row['1'])
                            if absError < 1:
                                validCount = validCount+1
                                isValidSplitTrial = True
                if isValidSplitTrial:
                    # Second pass: collect [cumulative-action-count, error] pairs,
                    # offsetting iteration numbers by the actions spent in prior models.
                    with open(filename, mode='r') as csv_file:
                        csv_reader = csv.DictReader(csv_file)
                        iteration_num = 0
                        model_num = 0
                        offset_amount = 0
                        trialData = []
                        for row in csv_reader:
                            if model_num > finalModelNum:
                                break
                            if row['0'] == '*':
                                break
                            elif row['0'] == 'Model Num ' + str(model_num+1):
                                if iteration_num + offset_amount not in model_splits:
                                    model_splits.append(iteration_num + offset_amount)
                                model_num = model_num + 1
                                offset_amount = offset_amount + iteration_num + 1 # add the number of iterations from the last model + 1 (since we start counting at zero)
                            elif row['0'] == 'Iteration: ':
                                iteration_num = float(row['1'])
                            elif row['0'] == 'Error:' and genAbsoluteError is False:
                                if iteration_num == 0 and useFirstPoint is False:
                                    continue
                                trialData.append([iteration_num + offset_amount, float(row['1'])])
                            elif row['0'] == 'Absolute Error:' and genAbsoluteError:
                                if float(row['1']) < 1:
                                    if iteration_num == 0 and useFirstPoint is False:
                                        continue
                                    trialData.append([iteration_num + offset_amount, float(row['1'])])
                        data.append(trialData)
            print("Percent Trials Correct for Version " + str(versionNum) + " : " + str(validCount/totalCount))
        # NOTE(review): np.mean/np.std over axis 0 assumes every trial produced the
        # same number of data points — TODO confirm the logs guarantee this.
        v1Data = np.array(v1Data)
        if v1Data.size > 0: # Check to make sure at least one trial was successful
            v1Data_average = np.mean(v1Data, axis=0)
            v1Data_stdDev = np.std(v1Data, axis=0)
        v2Data = np.array(v2Data)
        if v2Data.size > 0: # Check to make sure at least one trial was successful
            v2Data_average = np.mean(v2Data, axis=0)
            v2Data_stdDev = np.std(v2Data, axis=0)
        plt.rcParams.update({'font.size': 16})
        plt.figure(figure_num)
        figure_num = figure_num + 1
        if v1Data.size > 0: # Check to make sure at least one trial was successful
            plt.errorbar(v1Data_average[:,0], v1Data_average[:,1],fmt='.',yerr=v1Data_stdDev[:,1],ecolor=colors[0],label="Freq. Dep.\nPosterior Update",color=colors[0],markersize=14,capsize=8)
        if v2Data.size > 0:
            plt.errorbar(v2Data_average[:,0], v2Data_average[:,1],fmt='.',yerr=v2Data_stdDev[:,1],ecolor=colors[1],label="Freq. Ind.\nPosterior Update",color=colors[1],markersize=14,capsize=8)
        # Vertical gray lines mark the action counts where the model was split.
        for num in range(len(model_splits)):
            split = model_splits[num]
            if num == 0:
                plt.axvline(x=split, color='gray', label="Model Split")
            else:
                plt.axvline(x=split, color='gray')
        plt.xlabel("Number of Actions Taken")
        yLabel = "Relative Error"
        errorType = "Rel. "
        errorFile = "Relative"
        if (genAbsoluteError):
            yLabel = "Absolute Error"
            errorType = "Abs. "
            errorFile = "Absolute"
        else:
            yLabel = "Relative Error"
            errorType = "Rel. "
            errorFile = "Relative"
        plt.ylabel(yLabel)
        plt.title("Posterior Update Comparison (" + errorType + "Model Error)\nFor " + str(getEnvironmentName(env_num)))
        leg = plt.legend(loc = 'lower left', fontsize=14)
        leg.get_frame().set_edgecolor('black')
        axes = plt.gca()
        axes.set_ylim([0,axes.get_ylim()[1]])
        axes.set_xlim([0,axes.get_xlim()[1]])
        plt.show()
        plt.savefig("Testing Data/Test1Env" + env_num + errorFile +"ErrorGraph.png", bbox_inches='tight')
def generateGraphTest2():
    """Plot Test 2 (navigation-policy comparison) bar charts of actions taken per model split.

    For each environment with data, reads the Test2_v1..v3 trial CSVs, keeps only
    trials whose final absolute error is < 1, records [model_split_number,
    iterations_in_that_model] pairs, and saves a seaborn bar chart per environment.
    """
    # use list and not numpy array since we don't know how many iterations were done
    environments = []
    files = glob.glob("./Testing Data/Test2_v" + str(1) + "/*.csv")
    for file in files:
        env = re.search("env\d+", file).group()
        env_num = env[len("env"):]
        environments.append(env_num)
    environments = set(environments)
    figure_num = 1
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    for env_num in environments:
        v1Data = []
        v2Data = []
        v3Data = []
        model_splits = []
        for versionNum in range(1, 4):
            files = glob.glob("./Testing Data/Test2_v" + str(versionNum) + "/Test" + str(2) + "_v" + str(versionNum) + "_env" + str(env_num) + "*.csv")
            data = []
            if versionNum == 1:
                data = v1Data
            elif versionNum == 2:
                data = v2Data
            else:
                data = v3Data
            for filename in files:
                # find the returned model num
                # First pass: locate the final model number and validate the trial.
                finalModelNum = -1
                isValidSplitTrial = False
                with open(filename, mode='r') as csv_file:
                    csv_reader = csv.DictReader(csv_file)
                    foundFinal = False
                    for row in csv_reader:
                        if row['0'] == '*':
                            foundFinal = True
                            continue
                        if foundFinal is True and finalModelNum == -1:
                            temp = row['0']
                            finalModelNum = int(temp[len('Model Num '):])
                        if foundFinal is True and row['0'] == 'Absolute Error:':
                            absError = float(row['1'])
                            if absError < 1:
                                isValidSplitTrial = True
                if not isValidSplitTrial:
                    continue
                # Second pass: one [model_num, iterations] entry per model split.
                with open(filename, mode='r') as csv_file:
                    iteration_num = 0
                    model_num = 0
                    offset_amount = 0
                    csv_reader = csv.DictReader(csv_file)
                    for row in csv_reader:
                        if model_num > finalModelNum:
                            break
                        if row['0'] == 'Model Num ' + str(model_num+1):
                            model_splits.append(iteration_num + offset_amount)
                            data.append([model_num, iteration_num])
                            model_num = model_num + 1
                            offset_amount = offset_amount + iteration_num + 1 # add the number of iterations from the last model + 1 (since we start counting at zero)
                        elif row['0'] == 'Iteration: ':
                            iteration_num = float(row['1'])
                    # NOTE(review): placement reconstructed — this appears intended to run
                    # once after the scan, recording the final model's iteration count.
                    if model_num == finalModelNum:
                        data.append([model_num, iteration_num])
        v1Data = np.array(v1Data)
        v2Data = np.array(v2Data)
        v1Data_average = np.mean(v1Data, axis=0)
        v2Data_average = np.mean(v2Data, axis=0)
        v1Data_stdDev = np.std(v1Data, axis=0)
        v2Data_stdDev = np.std(v2Data, axis=0)
        # import pdb; pdb.set_trace()
        print("-----")
        print(env_num)
        v1_sum = np.sum(v1Data, axis=0)
        v2_sum = np.sum(v2Data, axis=0)
        print(v1_sum)
        print(v2_sum)
        # NOTE(review): the divisor 10 is presumably the trial count per version — verify.
        v1_avg = v1_sum / 10
        v2_avg = v2_sum / 10
        print(v1_avg)
        print(v2_avg)
        print(v2_avg[1] / v1_avg[1])
        xData = []
        yData = []
        groupings = []
        if len(v3Data) > 0:
            v3Data = np.array(v3Data)
            v3Data_average = np.mean(v3Data, axis=0)
            v3Data_stdDev = np.std(v3Data, axis=0)
            xData = np.concatenate((v3Data[:,0], v1Data[:,0], v2Data[:,0]))
            yData = np.concatenate((v3Data[:,1], v1Data[:,1], v2Data[:,1]))
            groupings = np.concatenate((np.full(np.shape(v3Data[:,0]), "Collins with Posterior"), (np.full(np.shape(v1Data[:,0]), "Random Policy")), np.full(np.shape(v2Data[:,0]), "Navigation Policy")))
        else:
            xData = np.concatenate((v1Data[:,0], v2Data[:,0]))
            yData = np.concatenate((v1Data[:,1], v2Data[:,1]))
            groupings = np.concatenate(((np.full(np.shape(v1Data[:,0]), "Collins et al. Policy")), np.full(np.shape(v2Data[:,0]), "Proposed Navigation Policy")))
        plt.rcParams.update({'font.size': 16})
        plt.figure(figure_num)
        figure_num = figure_num + 1
        ax = sns.barplot(x=xData, y=yData, hue=groupings, capsize=0.1)
        plt.xlabel("Model Split Number")
        plt.ylabel("Number of Actions Taken")
        ax.set_title("Number of Actions Taken per Model Split \nFor " + getEnvironmentName(env_num))
        xlabels = ['{:,d}'.format(x) for x in ax.get_xticks()]
        ax.set_xticklabels(xlabels)
        ax.tick_params(axis='x')
        ldg = plt.legend(fontsize=11.5)
        ldg.get_frame().set_edgecolor('black')
        # plt.show()
        plt.savefig("Testing Data/Test2env" + env_num + ".png", bbox_inches='tight')
def generateGraphTest2_2(useFirstPoint,genAbsoluteError, environmentNum):
    """Scatter-plot a single Test 2 trial per policy version for one environment.

    Only the first matching CSV of each version (v1 = Collins et al. policy,
    v2 = proposed navigation policy) is plotted; model splits are drawn as
    per-version vertical lines. The figure is saved to Test2_actions.png.

    :param useFirstPoint: if False, the iteration-0 point of each model is skipped
    :param genAbsoluteError: plot 'Absolute Error:' rows when True, else 'Error:' rows
    :param environmentNum: environment number used to select the trial files
    """
    parameters = {'font.size': 16}
    plt.rcParams.update(parameters)
    # use list and not numpy array since we don't know how many iterations were done
    v1Data = []
    v2Data = []
    model_splits = []
    for versionNum in [1,2]:
        files = glob.glob("./Testing Data/Test2_v" + str(versionNum) + "/Test2_v" + str(versionNum) + "_env" + str(environmentNum) + "*.csv")
        if len(files) == 0:
            continue
        # Only one representative trial per version is plotted.
        files = [files[0]]
        data = []
        if versionNum == 1:
            data = v1Data
        else:
            data = v2Data
        validCount = 0
        totalCount = 0
        for filename in files:
            totalCount = totalCount + 1
            # First pass: find the final model number and validate the trial.
            finalModelNum = -1
            isValidSplitTrial = False
            with open(filename, mode='r') as csv_file:
                csv_reader = csv.DictReader(csv_file)
                foundFinal = False
                for row in csv_reader:
                    if row['0'] == '*':
                        foundFinal = True
                        continue
                    if foundFinal is True and finalModelNum == -1:
                        temp = row['0']
                        finalModelNum = int(temp[len('Model Num '):])
                    if foundFinal is True and row['0'] == 'Absolute Error:':
                        absError = float(row['1'])
                        if absError < 1:
                            validCount = validCount+1
                            isValidSplitTrial = True
            if isValidSplitTrial:
                # Second pass: gather [cumulative-action, error] pairs and this
                # trial's own list of split positions.
                with open(filename, mode='r') as csv_file:
                    csv_reader = csv.DictReader(csv_file)
                    iteration_num = 0
                    model_num = 0
                    offset_amount = 0
                    trialData = []
                    model_splits_row = []
                    for row in csv_reader:
                        if model_num > finalModelNum:
                            break
                        if row['0'] == '*':
                            break
                        elif row['0'] == 'Model Num ' + str(model_num+1):
                            if iteration_num + offset_amount not in model_splits and model_num+1 <= finalModelNum:
                                model_splits_row.append(iteration_num + offset_amount)
                            model_num = model_num + 1
                            offset_amount = offset_amount + iteration_num + 1 # add the number of iterations from the last model + 1 (since we start counting at zero)
                        elif row['0'] == 'Iteration: ':
                            iteration_num = float(row['1'])
                        elif row['0'] == 'Error:' and genAbsoluteError is False:
                            if iteration_num == 0 and useFirstPoint is False:
                                continue
                            trialData.append([iteration_num + offset_amount, float(row['1'])])
                        elif row['0'] == 'Absolute Error:' and genAbsoluteError:
                            if float(row['1']) < 1:
                                if iteration_num == 0 and useFirstPoint is False:
                                    continue
                                trialData.append([iteration_num + offset_amount, float(row['1'])])
                    data.append(trialData)
                    model_splits.append(model_splits_row)
        print("Percent Trials Correct for Version " + str(versionNum) + " : " + str(validCount/totalCount))
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    # import pdb; pdb.set_trace()
    v1Data = np.array(v1Data)
    if v1Data.size > 0: # Check to make sure at least one trial was successful
        v1Data_average = np.mean(v1Data, axis=0)
        v1Data_stdDev = np.std(v1Data, axis=0)
    v2Data = np.array(v2Data)
    if v2Data.size > 0: # Check to make sure at least one trial was successful
        v2Data_average = np.mean(v2Data, axis=0)
        v2Data_stdDev = np.std(v2Data, axis=0)
    if v1Data.size > 0: # Check to make sure at least one trial was successful
        print(v1Data_average.size)
        plt.scatter(v1Data_average[:,0], v1Data_average[:,1],label="Collins et al. Policy",c=colors[0])
    if v2Data.size > 0:
        plt.scatter(v2Data_average[:,0], v2Data_average[:,1],label="Proposed Navigation Policy",c=colors[1])
    # Draw each version's split positions with its own color/linestyle;
    # row 0 corresponds to v1 and row 1 to v2 (insertion order above).
    for row_num in range(len(model_splits)):
        row = model_splits[row_num]
        color = ''
        grouping = ''
        linestyle = ''
        if row_num == 0:
            color = colors[0]
            grouping = "Collins et al. Policy"
            linestyle = "-"
        else:
            color = colors[1]
            grouping = "Proposed Navigation Policy"
            linestyle = "--"
        for num in range(len(row)):
            split = model_splits[row_num][num]
            if num == 0:
                plt.axvline(x=split, color=color, label= grouping + "\nModel Split", linestyle=linestyle)
            else:
                plt.axvline(x=split, color=color, linestyle=linestyle)
    plt.xlabel("Number of Actions Taken")
    plt.ylabel("Error")
    ax = plt.gca()
    ax.set_title("Model Error vs. Number of Actions Taken \nFor " + getEnvironmentName(str(environmentNum)))
    leg = plt.legend(fontsize=12)
    leg.get_frame().set_edgecolor('black')
    if useFirstPoint:
        ax.set_ylim([0,1]) # make it so that the y axis starts at zero and goes to 1
    plt.savefig("Testing Data/Test2_actions.png", bbox_inches='tight')
    plt.show()
# Generate bar graph for varying alpha values
def generateGraphTest2_3(numTrialsPerAlpha):
    """Generate the varying-alpha bar charts for Test 2: error vs alpha, success rate vs alpha, and (success rate)/error vs alpha.

    Each trial CSV stores its alpha value on row index 2 and its final absolute
    errors after the '*' sentinel. Trials are grouped in runs of
    ``numTrialsPerAlpha`` consecutive rows per alpha value.

    :param numTrialsPerAlpha: number of trials recorded for each alpha value

    NOTE(review): ``groupingsAccuracy`` and ``ratioDividendv1`` are only assigned in
    the ``elif len(v1Data) > 0`` branch below but are used unconditionally when
    building the second and third figures — other branches would raise NameError.
    TODO confirm the intended data layout always exercises that branch.
    """
    # use list and not numpy array since we don't know how many iterations were done
    environments = []
    files = glob.glob("./Testing Data/Test2_v" + str(2) + "/*.csv")
    for file in files:
        env = re.search("env\d+", file).group()
        env_num = env[len("env"):]
        environments.append(env_num)
    environments = set(environments)
    figure_num = 1
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    prevAlphaVal = -1
    holder = []
    for env_num in environments:
        v1Data = []
        v2Data = []
        v3Data = []
        v1DataCorrect = []
        v2DataCorrect = []
        v3DataCorrect = []
        model_splits = []
        for versionNum in range(1, 4):
            files = glob.glob("./Testing Data/Test2_v" + str(versionNum) + "/Test" + str(2) + "_v" + str(versionNum) + "_env" + str(env_num) + "*.csv")
            totalTrials = len(files)
            data = []
            if versionNum == 1:
                data = v1Data
            elif versionNum == 2:
                data = v2Data
            else:
                data = v3Data
            for filename in files:
                # find the returned model num
                finalModelNum = -1
                isValidSplitTrial = False
                with open(filename, mode='r') as csv_file:
                    csv_reader = csv.DictReader(csv_file)
                    foundFinal = False
                    for row in csv_reader:
                        if row['0'] == '*':
                            foundFinal = True
                            continue
                        if foundFinal is True and finalModelNum == -1:
                            temp = row['0']
                            finalModelNum = int(temp[len('Model Num '):])
                        if foundFinal is True and row['0'] == 'Absolute Error:':
                            absError = float(row['1'])
                            if absError < 1:
                                isValidSplitTrial = True
                # if not isValidSplitTrial:
                #     continue
                # Second pass: pull the alpha value and every final-section
                # absolute error into ``data`` as [alpha, abs_error] pairs.
                alpha_val = 0
                with open(filename, mode='r') as csv_file:
                    row_number = 0
                    offset_amount = 0
                    end_row = float('inf')
                    csv_reader = csv.DictReader(csv_file)
                    for row in csv_reader:
                        if row_number == 2: # This is the row where the alpha value is stored
                            alpha_val = float(row['2'])
                            # print(alpha_val)
                        elif row['0'] == '*':
                            end_row = row_number
                        elif row['0'] == 'Absolute Error:' and row_number > end_row:
                            absErr = float(row['1'])
                            # if absErr <= 1:
                            data.append([alpha_val, absErr])
                            # if absErr < 1:
                            #     dataCorrect[alphaNum] = dataCorrect[alphaNum+1]
                        row_number = row_number + 1
            # if alpha_val != prevAlphaVal and prevAlphaVal != -1:
            #     data.append(np.array(holder))
            #     holder = []
            # holder.append([alpha_val, absErr])
            # prevAlphaVal = alpha_val
            # if model_num == finalModelNum:
            #     data.append([model_num, iteration_num])
            # data.append(np.array(holder))
        v1Data = np.array(v1Data)
        v2Data = np.array(v2Data)
        v2Data = v2Data[v2Data[:,0].argsort()] # sort based off of the x values (e.g. the alpha value)
        v2Data = np.flipud(v2Data)
        ratioDividendv2 = v2Data.copy()
        print(v2Data)
        print(v2Data)
        v1Data_average = np.mean(v1Data, axis=0)
        v2Data_average = []
        for alphaTrial in v2Data:
            v2Data_average.append(np.mean(alphaTrial, axis=0))
        v2Data_average = np.array(v2Data_average)
        # v2Data_average = np.mean(v2Data, axis=0)
        v1Data_stdDev = np.std(v1Data, axis=0)
        v2Data_stdDev = []
        for alphaTrial in v2Data:
            v2Data_stdDev.append(np.std(alphaTrial, axis=0))
        v2Data_stdDev = np.array(v2Data_stdDev)
        # v2Data_stdDev = np.std(v2Data, axis=0)
        # Per-alpha success rate: fraction of the numTrialsPerAlpha consecutive
        # trials whose absolute error is below 1.
        correctLocs2 = v2Data[:,1] < 1
        v2Accuracy = []
        i = 0
        while i < len(v2Data):
            j = 0
            alphaSum = 0
            while j < numTrialsPerAlpha:
                alphaSum = alphaSum + correctLocs2[i+j]
                j = j + 1
            v2Accuracy.append(alphaSum / numTrialsPerAlpha)
            i = i + numTrialsPerAlpha
        print(correctLocs2)
        print(v2Accuracy)
        v2Accuracy = np.array(v2Accuracy)
        v2Data = np.array([row for row in v2Data if row[1] < 1])
        print(v2Data)
        # import pdb; pdb.set_trace()
        print("-----")
        print(env_num)
        # v1_sum = np.sum(v1Data, axis=0)
        # v2_sum = np.sum(v2Data, axis=0)
        # # print(v1_sum)
        # print(v2_sum)
        # v1_avg = v1_sum / 10
        # v2_avg = v2_sum / 10
        # print(v1_avg)
        # print(v2_avg)
        # print(v2Data)
        # print(v2_avg[1] / v1_avg[1])
        xData = []
        yData = []
        groupings = []
        if len(v3Data) > 0 and len(v1Data) > 0:
            correctLocs1 = v1Data[:,1] < 1
            v1Accuracy = []
            i = 0
            while i < len(v1Data):
                j = 0
                alphaSum = 0
                while j < numTrialsPerAlpha:
                    alphaSum = alphaSum + correctLocs1[i+j]
                    j = j + 1
                v1Accuracy.append(alphaSum / numTrialsPerAlpha)
                i = i + numTrialsPerAlpha
            print(v1Accuracy)
            v1Data = np.array(v1Data)
            print(v1Data)
            correctLocs3 = v3Data[:,1] < 1
            v3Accuracy = []
            i = 0
            while i < len(v3Data):
                j = 0
                alphaSum = 0
                while j < numTrialsPerAlpha:
                    alphaSum = alphaSum + correctLocs3[i+j]
                    j = j + 1
                v3Accuracy.append(alphaSum / numTrialsPerAlpha)
                i = i + numTrialsPerAlpha
            print(v3Accuracy)
            v3Data = np.array(v3Data)
            print(v3Data)
            xData = np.concatenate((v3Data[:,0], v1Data[:,0], v2Data[:,0]))
            yData = np.concatenate((v3Data[:,1], v1Data[:,1], v2Data[:,1]))
            yDataAccuracy = np.concatenate((v3Accuracy, v1Accuracy, v2Accuracy))
            groupings = np.concatenate((np.full(np.shape(v3Data[:,0]), "Original Collins et al. Policy"), (np.full(np.shape(v1Data[:,0]), "Collins et al. Policy with Freq. Indep. Updates")), np.full(np.shape(v2Data[:,0]), "Proposed Naviagation Policy")))
        if len(v3Data) > 0:
            correctLocs3 = v3Data[:,1] < 1
            v3Accuracy = []
            i = 0
            while i < len(v3Data):
                j = 0
                alphaSum = 0
                while j < numTrialsPerAlpha:
                    alphaSum = alphaSum + correctLocs3[i+j]
                    j = j + 1
                v3Accuracy.append(alphaSum / numTrialsPerAlpha)
                i = i + numTrialsPerAlpha
            print(v3Accuracy)
            v3Data = np.array(v3Data)
            print(v3Data)
            v3Data = np.array(v3Data)
            print(v3Data)
            xData = np.concatenate((v3Data[:,0], v2Data[:,0]))
            yData = np.concatenate((v3Data[:,1], v2Data[:,1]))
            yDataAccuracy = np.concatenate((v3Accuracy, v2Accuracy))
            groupings = np.concatenate((np.full(np.shape(v3Data[:,0]), "Collins et al. Policy"), np.full(np.shape(v2Data[:,0]), "Proposed Navigation Policy")))
        elif len(v1Data) > 0:
            v1Data = v1Data[v1Data[:,0].argsort()] # sort based off of the x values (e.g. the alpha value)
            v1Data = np.flipud(v1Data)
            ratioDividendv1 = v1Data.copy()
            correctLocs1 = v1Data[:,1] < 1
            v1Accuracy = []
            i = 0
            while i < len(v1Data):
                j = 0
                alphaSum = 0
                while j < numTrialsPerAlpha:
                    alphaSum = alphaSum + correctLocs1[i+j]
                    j = j + 1
                v1Accuracy.append(alphaSum / numTrialsPerAlpha)
                i = i + numTrialsPerAlpha
            print(v1Accuracy)
            v1Accuracy = np.array(v1Accuracy)
            # Parse v1Data and remove any values that have an absolute error of 1
            v1Data = np.array([row for row in v1Data if row[1] < 1])
            v1Data = np.array(v1Data)
            print(v1Data)
            xData = np.concatenate((v1Data[:,0], v2Data[:,0]))
            yData = np.concatenate((v1Data[:,1], v2Data[:,1]))
            yDataAccuracy = np.concatenate((v1Accuracy, v2Accuracy))
            groupings = np.concatenate((np.full(np.shape(v1Data[:,0]), "Collins et al. Policy"), np.full(np.shape(v2Data[:,0]), "Proposed Navigation Policy")))
            groupingsAccuracy = np.concatenate((np.full(np.shape(v1Accuracy), "Collins et al. Policy"), np.full(np.shape(v2Accuracy), "Proposed Navigation Policy")))
        else:
            xData = v2Data[:,0]
            print(xData)
            yData = v2Data[:,1]
            groupings = (np.full(np.shape(v2Data[:,0]), "Proposed Navigation Policy"))
        # =========================== Error-vs-alpha bar chart
        plt.rcParams.update({'font.size': 16})
        plt.figure(figure_num)
        figure_num = figure_num + 1
        print(xData)
        ax = sns.barplot(x=xData, y=yData, hue=groupings, capsize=0.1)
        uniqeXVals = np.unique(xData)
        plt.xlabel("Alpha Value")
        plt.ylabel("Error")
        ax.set_title("Model Transition Probability Error Versus Alpha \nFor " + getEnvironmentName(env_num))
        # xlabels = ['{:,d}'.format(x) for x in ax.get_xticks()]
        xlabels = [x for x in uniqeXVals]
        ax.set_xticklabels(xlabels)
        ax.tick_params(axis='x')
        ax.invert_xaxis()
        ldg = plt.legend(fontsize=11.5)
        ldg.get_frame().set_edgecolor('black')
        # plt.show()
        plt.savefig("Testing Data/Test2ErrorAlphaVarying_env" + env_num + ".png", bbox_inches='tight')
        # =========================== Make the histogram plot for accuracy
        plt.rcParams.update({'font.size': 16})
        plt.figure(figure_num)
        figure_num = figure_num + 1
        xDataAccuracy = (np.unique(xData))
        xDataAccuracy = np.append(xDataAccuracy, xDataAccuracy)
        xDataAccuracy = np.flipud(xDataAccuracy)
        print(xDataAccuracy)
        print(yDataAccuracy)
        print(groupingsAccuracy)
        ax = sns.barplot(x=xDataAccuracy, y=yDataAccuracy, hue=groupingsAccuracy, capsize=0.1)
        uniqeXVals = np.unique(xData)
        plt.xlabel("Alpha Value")
        plt.ylabel("Success Rate")
        ax.set_title("Successful Model Generation Versus Alpha \nFor " + getEnvironmentName(env_num))
        # xlabels = ['{:,d}'.format(x) for x in ax.get_xticks()]
        xlabels = [x for x in uniqeXVals]
        ax.set_xticklabels(xlabels)
        ax.tick_params(axis='x')
        ax.invert_xaxis()
        ldg = plt.legend(fontsize=11.5)
        ldg.get_frame().set_edgecolor('black')
        # plt.show()
        plt.savefig("Testing Data/Test2AccuracyAlphaVarying_env" + env_num + ".png", bbox_inches='tight')
        # =========================== Make the plot for success/error plot
        plt.rcParams.update({'font.size': 16})
        plt.figure(figure_num)
        figure_num = figure_num + 1
        ratioDividend = np.concatenate((ratioDividendv1, ratioDividendv2))
        ratioDividend = ratioDividend[:,1]
        appendedAccuracy = np.array([])
        for i in range(len(yDataAccuracy)):
            appendedAccuracy = np.append(appendedAccuracy, np.repeat(yDataAccuracy[i], yDataAccuracy[i]*10))
        newRatio = []
        for i in range(len(ratioDividend)):
            if ratioDividend[i] < 1:
                newRatio.append(ratioDividend[i])
        ratioDividend = np.array(newRatio)
        print("!!!!!!!!!!!")
        print(appendedAccuracy)
        print("##########")
        print(ratioDividend)
        print("^^^^^^^^^^^^")
        yDataRatio = appendedAccuracy/ratioDividend
        # yDataRatio[ratioDividend == 1] = 0 # Don't count the trials that have an absolue error of 1
        print(xDataAccuracy)
        print(yDataRatio)
        print(groupings)
        ax = sns.barplot(x=xData, y=yDataRatio, hue=groupings, capsize=0.1)
        uniqeXVals = np.unique(xData)
        plt.xlabel("Alpha Value")
        plt.ylabel("(Success Rate) / Error")
        ax.set_title("Normalized Success Rate and Error Versus Alpha \nFor " + getEnvironmentName(env_num))
        # xlabels = ['{:,d}'.format(x) for x in ax.get_xticks()]
        xlabels = [x for x in uniqeXVals]
        ax.set_xticklabels(xlabels)
        ax.tick_params(axis='x')
        ax.set_xlim(-0.5, 3.5)
        ax.set_ylim(0, 12)
        ax.invert_xaxis()
        ldg = plt.legend(fontsize=11.5)
        ldg.get_frame().set_edgecolor('black')
        # plt.show()
        plt.savefig("Testing Data/Test2SuccessOverErrorAlphaVarying_env" + env_num + ".png", bbox_inches='tight')
def generateGraphTest3(useFirstPoint,genAbsoluteError):
    """Plot Test 3 (SDE-generation comparison) error curves for every environment found on disk.

    Same pipeline as generateGraphTest1, but reads Test3_v1 (Collins et al. SDE
    generation) and Test3_v3 (revised SDE generation) logs and saves one
    errorbar figure per environment.

    :param useFirstPoint: if False, the iteration-0 data point of each model is skipped
    :param genAbsoluteError: if True plot 'Absolute Error:' rows, otherwise 'Error:' rows
    """
    environments = []
    files = glob.glob("./Testing Data/Test3_v" + str(1) + "/*.csv")
    for file in files:
        env = re.search("env\d+", file).group()
        env_num = env[len("env"):]
        environments.append(env_num)
    environments = set(environments)
    figure_num = 1
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    for env_num in environments:
        print(env_num)
        # use list and not numpy array since we don't know how many iterations were done
        v1Data = []
        v2Data = []
        model_splits = []
        for versionNum in [1,3]:
            files = glob.glob("./Testing Data/Test3_v" + str(versionNum) + "/Test" + str(3) + "_v" + str(versionNum) + "_env" + str(env_num) + "*.csv")
            if len(files) == 0:
                continue
            firstFile = files[0]  # NOTE(review): assigned but never used
            data = []
            if versionNum == 1:
                data = v1Data
            else:
                data = v2Data
            validCount = 0
            totalCount = len(files)
            for filename in files:
                # find the returned model num
                # First pass: validate the trial against the final-section error.
                finalModelNum = -1
                isValidSplitTrial = False
                with open(filename, mode='r') as csv_file:
                    csv_reader = csv.DictReader(csv_file)
                    foundFinal = False
                    for row in csv_reader:
                        if row['0'] == '*':
                            foundFinal = True
                            continue
                        if foundFinal is True and finalModelNum == -1:
                            temp = row['0']
                            finalModelNum = int(temp[len('Model Num '):])
                        if foundFinal is True and row['0'] == 'Absolute Error:':
                            absError = float(row['1'])
                            if absError < 1:
                                validCount = validCount+1
                                isValidSplitTrial = True
                if isValidSplitTrial:
                    # Second pass: collect [cumulative-action, error] pairs.
                    with open(filename, mode='r') as csv_file:
                        csv_reader = csv.DictReader(csv_file)
                        iteration_num = 0
                        model_num = 0
                        offset_amount = 0
                        trialData = []
                        for row in csv_reader:
                            if model_num > finalModelNum:
                                break
                            if row['0'] == '*':
                                break
                            elif row['0'] == 'Model Num ' + str(model_num+1):
                                if iteration_num + offset_amount not in model_splits:
                                    model_splits.append(iteration_num + offset_amount)
                                model_num = model_num + 1
                                offset_amount = offset_amount + iteration_num + 1 # add the number of iterations from the last model + 1 (since we start counting at zero)
                            elif row['0'] == 'Iteration: ':
                                iteration_num = float(row['1'])
                            elif row['0'] == 'Error:' and genAbsoluteError is False:
                                if iteration_num == 0 and useFirstPoint is False:
                                    continue
                                trialData.append([iteration_num + offset_amount, float(row['1'])])
                            elif row['0'] == 'Absolute Error:' and genAbsoluteError:
                                if float(row['1']) < 1:
                                    if iteration_num == 0 and useFirstPoint is False:
                                        continue
                                    trialData.append([iteration_num + offset_amount, float(row['1'])])
                        data.append(trialData)
            print("Percent Trials Correct for Version " + str(versionNum) + " : " + str(validCount/totalCount))
        v1Data = np.array(v1Data)
        if v1Data.size > 0: # Check to make sure at least one trial was successful
            v1Data_average = np.mean(v1Data, axis=0)
            v1Data_stdDev = np.std(v1Data, axis=0)
        v2Data = np.array(v2Data)
        if v2Data.size > 0: # Check to make sure at least one trial was successful
            v2Data_average = np.mean(v2Data, axis=0)
            v2Data_stdDev = np.std(v2Data, axis=0)
        plt.rcParams.update({'font.size': 16})
        plt.figure(figure_num)
        figure_num = figure_num + 1
        if v1Data.size > 0: # Check to make sure at least one trial was successful
            print(v1Data_average.size)
            plt.errorbar(v1Data_average[:,0], v1Data_average[:,1],fmt='.',yerr=v1Data_stdDev[:,1],ecolor=colors[0],label="Collins et al. \nSDE Generation",color=colors[0],markersize=10,capsize=5)
        if v2Data.size > 0:
            plt.errorbar(v2Data_average[:,0], v2Data_average[:,1],fmt='.',yerr=v2Data_stdDev[:,1],ecolor=colors[1],label="Revised \nSDE Generation",color=colors[1],markersize=10,capsize=5)
        for num in range(len(model_splits)):
            split = model_splits[num]
            if num == 0:
                plt.axvline(x=split, color='gray', label="Model Split")
            else:
                plt.axvline(x=split, color='gray')
        plt.xlabel("Number of Actions Taken")
        yLabel = "Relative Error"
        errorType = "Rel. "
        errorFile = "Relative"
        if (genAbsoluteError):
            yLabel = "Absolute Error"
            errorType = "Abs. "
            errorFile = "Absolute"
        else:
            yLabel = "Relative Error"
            errorType = "Rel. "
            errorFile = "Relative"
        plt.ylabel(yLabel)
        plt.title("SDE Splitting Comparison (" + errorType + "Model Error)\nFor " + str(getEnvironmentName(env_num)))
        leg = plt.legend(loc = 'lower left', fontsize=14)
        leg.get_frame().set_edgecolor('black')
        axes = plt.gca()
        axes.set_ylim([0,axes.get_ylim()[1]])
        axes.set_xlim([0,axes.get_xlim()[1]])
        plt.show()
        plt.savefig("Testing Data/Test3Env" + env_num + errorFile +"ErrorGraph.png", bbox_inches='tight')
def getEnvironmentName(env_num):
    """Return the human-readable display name for an environment number.

    :param env_num: environment number as a string (e.g. "2")
    :return: the named environment title, or "Environment <n>" for unknown numbers
    """
    display_names = {
        "2": "αϵ-Shape Environment",
        "3": "αϵ-Little Prince Environment",
        "4": "αϵ-1D Maze Environment",
        "7": "αϵ-Balance Beam Environment",
    }
    return display_names.get(env_num, "Environment " + env_num)
def getModelGraph(env_num, SDE_Set, A_S, transitionProbs, filename):
    """
    Render the learned model as a graphviz diagram, save it to `filename`, and display it.

    Each SDE becomes a node (shaped by its first observation); for every
    (model state, action) pair one edge is drawn to the most likely successor
    state, labeled with the action and its probability. The DOT source is
    written to a temporary file and `dot` is invoked on it.
    *`graphviz` needs to be installed, before usage of this method.*

    :param env_num: environment number; 6 and 1 get hand-tuned rank layouts
    :param SDE_Set: list of SDEs, each a sequence of observation/action strings
    :param A_S: list of action names
    :param transitionProbs: array indexed [action, model_state, next_state]
    :param filename: output image path (suffix determines the image type)
    """
    import os  # local import: only needed for temp-file cleanup

    lines = []
    lines.append("digraph tree {")
    # add the nodes
    for sde_idx in range(len(SDE_Set)):
        sde = SDE_Set[sde_idx]
        line = ' "' + str(sde_idx) + '" ['
        # Build the node label, abbreviating the square/diamond observations.
        sde_str = "\n("
        for m_a in sde:
            if m_a == "square":
                sde_str = sde_str + "□,"
            elif m_a == "diamond":
                sde_str = sde_str + "◊,"
            else:
                sde_str = sde_str + m_a + ","
        sde_str = sde_str[:-1] + ')' # -1 to get rid of comma
        label = str(sde_idx) + sde_str
        line = line + 'label="' + label + '"'
        # Node shape mirrors the SDE's initial observation.
        if sde[0] == "square":
            line = line + ', shape="square"'
        elif sde[0] == "diamond":
            line = line + ', shape="diamond", height=1.7'
        elif sde[0] == "volcano":
            line = line + ', shape="trapezium"'
        elif sde[0] == "rose":
            line = line + ', shape="polygon", sides=7'
        elif sde[0] == "goal":
            line = line + ', shape="square"'
        line = line + ', style=bold'
        line = line + ', fontname="Times-Bold"'
        line = line + '];'
        lines.append(line)
    # do the ranks (manual layout hints for specific environments)
    if env_num == 6:
        for sde_idx in range(len(SDE_Set)):
            line = ""
            if sde_idx == 1: # we'll do this one when sde_idx == 2
                continue
            elif sde_idx == 2:
                line = ' { rank=min; "1"; "2"; }'
            else:
                rank = "same"
                if sde_idx == 3:
                    rank = "source"
                elif sde_idx == 0:
                    rank = "sink"
                line = ' { rank=' + rank + '; ' + '"' + str(sde_idx) + '"; }'
            lines.append(line)
    elif env_num == 1:
        for sde_idx in range(len(SDE_Set)):
            line = ""
            if sde_idx == 3: # we'll do this one when sde_idx == 2
                line = ' { rank=same; "1"; "0"; }'
            elif sde_idx == 2:
                line = ' { rank=same; "2"; "3"; }'
            else:
                continue
            lines.append(line)
    # add the edges: one per (state, action), pointing at the argmax successor
    for m_idx in range(len(SDE_Set)):
        for a_idx in range(len(A_S)):
            row = transitionProbs[a_idx, m_idx, :]
            m_p_idx = np.argmax(row)
            probability = np.max(row)
            line = ' "' + str(m_idx) + '" -> "' + str(m_p_idx) + '" '
            line = line + '[label=" ' + A_S[a_idx] + '\n '+ str(probability) + '"'
            line = line + ', style=bold'
            line = line + ', fontname="Times-Bold"'
            line = line + '];'
            lines.append(line)
    lines.append("}")
    # now write the file; close it before invoking dot so the render also
    # works on platforms that lock open files (e.g. Windows)
    with codecs.open("dotfile.dot", "w", "utf-8") as dotfile:
        dotfilename = dotfile.name
        for line in lines:
            dotfile.write("%s\n" % line)
    # dot dotfile.dot -T png -o <filename>
    cmd = ["dot", dotfilename, "-T", "png", "-o", filename]
    check_call(cmd)
    try:
        # Fix: `remove` was previously an undefined name; the resulting
        # NameError was silently swallowed and the temp file never deleted.
        os.remove(dotfilename)
    except OSError:
        print('Could not remove temporary file %s' % dotfilename)
    img = mpimg.imread(filename)
    plt.imshow(img)
    plt.show()
def getPercentAccurate(environmentNum):
    """Print, per policy version (1 and 2), the fraction of Test 2 trials that
    produced an accurate final model (absolute error < 1) for one environment.

    :param environmentNum: environment number used to select the trial CSVs
    """
    for versionNum in [1, 2]:
        pattern = "./Testing Data/Test2_v" + str(versionNum) + "/Test2_v" + str(versionNum) + "_env" + str(environmentNum) + "*.csv"
        trial_files = glob.glob(pattern)
        if not trial_files:
            continue
        validCount = 0
        totalCount = 0
        for trial_path in trial_files:
            totalCount = totalCount + 1
            final_model_num = -1
            with open(trial_path, mode='r') as handle:
                reader = csv.DictReader(handle)
                in_final_section = False
                for record in reader:
                    key = record['0']
                    if key == '*':
                        # everything after '*' describes the final model
                        in_final_section = True
                        continue
                    if in_final_section and final_model_num == -1:
                        final_model_num = int(key[len('Model Num '):])
                    if in_final_section and key == 'Absolute Error:':
                        if float(record['1']) < 1:
                            validCount = validCount + 1
        print("Percent Trials Correct for Version " + str(versionNum) + " : " + str(validCount/totalCount))
if __name__ == "__main__":
    # Each call below regenerates one family of figures; uncomment as needed.
    # Test 1 (Testing Transition Posterior Update Equation)
    # Relative Error
    # generateGraphTest1(False,False)
    # Test 1 (Testing Transition Posterior Update Equation)
    # Absolute Error
    # generateGraphTest1(False,True)
    # Test 2 (Testing Agent Autonomous Navigation Algorithm)
    # Bar Graphs for Relative Error
    # generateGraphTest2()
    # Test 2 (Testing Agent Autonomous Navigation Algorithm)
    # Scatter Plot for Single Trial Comparison, Shape Environment
    # generateGraphTest2_2(False, False, 2)
    # Test 2 (Testing Agent Autonomous Navigation Algorithm)
    # Bar Graph for Varying Alpha
    generateGraphTest2_3(10)
    # Test 3 (Testing SDE Generation Algorithms)
    # Relative Error
    # generateGraphTest3(False,False)
    # Test 3 (Testing SDE Generation Algorithms)
    # Absolute Error
    # generateGraphTest3(False,True)
| {"/generateGraphs.py": ["/pomdp.py"], "/test.py": ["/pomdp.py", "/spomdp.py"], "/spomdp.py": ["/pomdp.py", "/test.py"]} |
71,622 | EpiSci/SBL | refs/heads/master | /test.py | from pomdp import *
import datetime
import spomdp
#Writes a numpy matrix to an xls file. Returns the last row the matrix was written on. Currently supports only 3D numpy matrices.
def writeNumpyMatrixToFile(sheet, matrix, row=0,col=0):
    """Write a 3D numpy matrix to an xls worksheet, one 2D submatrix at a time.

    A blank spreadsheet row is left between consecutive submatrices.

    :param sheet: worksheet-like object exposing write(row, col, value)
    :param matrix: 3D numpy array to dump
    :param row: first spreadsheet row to write into
    :param col: first spreadsheet column to write into
    :return: the next free row after everything written
    """
    dims = matrix.shape
    next_row = row
    for plane in range(dims[0]):
        for line_idx in range(dims[1]):
            for offset in range(dims[2]):
                sheet.write(next_row, col + offset, matrix[plane][line_idx][offset])
            next_row += 1
        # leave one empty row between submatrices
        next_row += 1
    return next_row
# Write a 3D numpy matrix to a csv writer `c`: one csv row per innermost
# vector, with an empty row separating consecutive 2D submatrices.
def writeNumpyMatrixToCSV(c, matrix):
    """Dump the 3D `matrix` through csv writer `c`, slice by slice."""
    d0, d1, _ = matrix.shape
    for i in range(d0):
        for j in range(d1):
            c.writerow(matrix[i][j][:])
        c.writerow([])  # blank separator between submatrices
# Test 1, version 1 (SBLTests.docx): Collins-style column updates
# (frequency-dependent transition posterior update equation).
def test1_v1(filename,env):
    """Run Test 1 v1 on `env`, logging results to `filename`."""
    spomdp.psblLearning(
        env,
        25000,   # numActionsPerExperiment
        0.5,     # explore (Collins' default when random actions are not inserted)
        0,       # patience
        0.01,    # gainThresh: gain required before the model splits
        False,   # insertRandActions
        True,    # writeToFile
        filename,
        False,   # useUpdatedPosterior (Collins' original update)
        False,   # revisedSplitting
        False,   # haveControl
        None,    # confidenceFactor
        0.75)    # localization_threshold
# Test 1, version 3 (SBLTests.docx): Collins-style column updates with the
# updated (frequency-independent) posterior function enabled.
def test1_v3(filename,env):
    """Run Test 1 v3 on `env`, logging results to `filename`."""
    spomdp.psblLearning(
        env,
        25000,   # numActionsPerExperiment
        0.5,     # explore (Collins' default when random actions are not inserted)
        0,       # patience
        0.01,    # gainThresh: gain required before the model splits
        False,   # insertRandActions
        True,    # writeToFile
        filename,
        True,    # useUpdatedPosterior (proposed frequency-independent update)
        False,   # revisedSplitting
        False,   # haveControl
        None,    # confidenceFactor
        0.75)    # localization_threshold
# Test 2, version 1 (SBLTests.docx): random actions only (no agent control).
def test2_v1(filename,env):
    """Run Test 2 v1 on `env`, logging results to `filename`."""
    spomdp.psblLearning(
        env,
        75000,   # numActionsPerExperiment
        0.5,     # explore (Collins' default when random actions are not inserted)
        0,       # patience
        0.01,    # gainThresh: gain required before the model splits
        False,   # insertRandActions
        True,    # writeToFile
        filename,
        True,    # useUpdatedPosterior (frequency-independent update)
        False,   # revisedSplitting
        False,   # haveControl: random exploration only
        250,     # confidenceFactor
        0.75)    # localization_threshold
# Test 2, version 2 (SBLTests.docx): with agent control enabled.
def test2_v2(filename,env):
    """Run Test 2 v2 on `env`, logging results to `filename`."""
    spomdp.psblLearning(
        env,
        75000,   # numActionsPerExperiment
        0.5,     # explore (Collins' default when random actions are not inserted)
        0,       # patience
        0.01,    # gainThresh: gain required before the model splits
        False,   # insertRandActions
        True,    # writeToFile
        filename,
        True,    # useUpdatedPosterior (frequency-independent update)
        False,   # revisedSplitting
        True,    # haveControl: agent steers toward unlearned transitions
        250,     # confidenceFactor
        0.75)    # localization_threshold
# Test 2, version 3 (SBLTests.docx): frequency-dependent posterior update,
# no agent control.
def test2_v3(filename,env):
    """Run Test 2 v3 on `env`, logging results to `filename`."""
    spomdp.psblLearning(
        env,
        75000,   # numActionsPerExperiment
        0.5,     # explore (Collins' default when random actions are not inserted)
        0,       # patience
        0.01,    # gainThresh: gain required before the model splits
        False,   # insertRandActions
        True,    # writeToFile
        filename,
        False,   # useUpdatedPosterior (frequency-dependent update)
        False,   # revisedSplitting
        False,   # haveControl
        250,     # confidenceFactor
        0.75)    # localization_threshold
# Test 3, version 1 (SBLTests.docx): Collins' method of SDE splitting.
def test3_v1(filename,env):
    """Run Test 3 v1 on `env`, logging results to `filename`."""
    spomdp.psblLearning(
        env,
        25000,   # numActionsPerExperiment
        0.5,     # explore (Collins' default when random actions are not inserted)
        0,       # patience
        0.05,    # gainThresh: gain required before the model splits
        False,   # insertRandActions
        True,    # writeToFile
        filename,
        False,   # useUpdatedPosterior
        False,   # revisedSplitting (Collins' original splitting)
        False,   # haveControl
        None,    # confidenceFactor
        0.75)    # localization_threshold
# Test 3, version 3 (SBLTests.docx): improved SDE splitting with
# "safety checks" enabled.
def test3_v3(filename,env):
    """Run Test 3 v3 on `env`, logging results to `filename`."""
    spomdp.psblLearning(
        env,
        25000,   # numActionsPerExperiment
        0.5,     # explore (Collins' default when random actions are not inserted)
        0,       # patience
        0.05,    # gainThresh: gain required before the model splits
        False,   # insertRandActions
        True,    # writeToFile
        filename,
        True,    # useUpdatedPosterior
        True,    # revisedSplitting (proposed splitting with safety checks)
        False,   # haveControl
        None,    # confidenceFactor
        0.75)    # localization_threshold
if __name__ == "__main__":
    # ----------BEGIN USER DEFINED TESTING PARAMETERS----------
    # testNum: which test type to run (number only):
    #   1 = transition posteriors update, 2 = agent navigation, 3 = invalid SDE splitting
    testNum = 2
    # versionNum: which variant of the chosen test to run (number only).
    #   Test 1: 1 = frequency-dependent update, 3 = frequency-independent update
    #   Test 2: 1 = freq-independent w/o control, 2 = freq-independent w/ control,
    #           3 = freq-dependent w/o control
    #   Test 3: 1 = previous SDE generation, 3 = proposed generation with safety checks
    versionNum = 2
    # envNum: environment id — 1/2 Shape (built / initial obs),
    # 3/32 Little Prince (initial obs / built), 4/42 1D Maze (initial obs / built),
    # 6/7 Balance Beam (built / initial obs)
    envNum = 2
    # numSubTests: number of consecutive runs
    numSubTests = 10
    # ----------END USER DEFINED TESTING PARAMETERS----------
    testString = "test" + str(testNum) + "_v" + str(versionNum)
    envString = "Example" + str(envNum)
    date = datetime.datetime.today()
    for subTest in range(numSubTests):
        filename = ("Testing Data/Test" + str(testNum) + "_v" + str(versionNum)
                    + "_env" + str(envNum) + "_" + str(date.month) + "_" + str(date.day)
                    + "_" + str(date.hour) + "_" + str(date.minute) + "_" + str(subTest) + ".csv")
        print(filename)
        env = locals()[envString]()             # environment classes come in via `from pomdp import *`
        locals()[testString](filename, env)     # dispatch to the matching testN_vM above
| {"/generateGraphs.py": ["/pomdp.py"], "/test.py": ["/pomdp.py", "/spomdp.py"], "/spomdp.py": ["/pomdp.py", "/test.py"]} |
71,623 | EpiSci/SBL | refs/heads/master | /spomdp.py | import numpy as np
import copy
from scipy.stats import dirichlet, entropy
import random
import csv
import git
import pomdp
import test
import networkx as nx
from anytree import Node, LevelOrderGroupIter
from anytree.exporter import UniqueDotExporter
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from subprocess import check_call
# Helper class to be used with the trie in the model
class TrieNode():
    """A trie node holding one symbol (`val`) and its child nodes (`leaves`).

    Fix: the original signature used a mutable default argument (`lfs=[]`),
    so every node constructed without an explicit list shared ONE leaves
    list — appending a child to any such node appended it to all of them.
    """
    def __init__(self, value=None, lfs=None):
        # Fresh list per instance unless the caller supplies one
        # (backward compatible: explicit lists behave exactly as before).
        self.leaves = [] if lfs is None else lfs
        self.val = value
# Given the head of a trie, find the largest consistent sequence that matches
# one or more of the paths along the trie (See Figure 6.9)
def largestConsistentSequence(head, sequence):
    """Return the longest prefix of `sequence` that traces a path of child
    values starting below `head`; [] when nothing matches or `sequence` is empty.

    Fixes: `if not not sequence` replaced with the direct truthiness test, and
    the local previously named `list` (shadowing the builtin) renamed.
    """
    if sequence:
        for leaf in head.leaves:
            if leaf.val == sequence[0]:
                matched = largestConsistentSequence(leaf, sequence[1:])
                matched.insert(0, leaf.val)
                return matched
    # Either head has no matching child or the sequence is exhausted:
    # the consistent prefix ends here.
    return []
def insertSequence(head, sequence):
    """Insert `sequence` into the trie rooted at `head`, reusing any existing
    matching prefix and creating new nodes only where the path diverges."""
    if not sequence:
        return None
    # Descend into an existing child when one already carries the next symbol.
    for child in head.leaves:
        if child.val == sequence[0]:
            insertSequence(child, sequence[1:])
            return None
    # No matching child: create one and continue inserting the remainder.
    node = TrieNode(sequence[0], [])
    head.leaves.append(node)
    insertSequence(node, sequence[1:])
    return None
def printTrie(head):
    """Print a trie depth-first: each node's value, then its children."""
    print("Head node is " + str(head.val))
    for idx, child in enumerate(head.leaves):
        print("Child " + str(idx) + " of head " + str(head.val) + " has value " + str(child.val))
        printTrie(child)
class CollinsModel():
    """sPOMDP model state in the style of Collins' dissertation.

    Holds the Dirichlet transition pseudo-counts (TCounts, OneTCounts), the
    current belief state over model states (SDEs), and the bounded
    action/observation/belief histories consumed by Algorithms 13-17.
    """
    # Algorithm 11: Initialize sPOMDP model
    def __init__(self, environment, firstObservation, minimumGain):
        # environment: sPOMDP environment exposing SDE_Set, A_S, O_S (and step/reset)
        # firstObservation: observation produced by the environment's reset
        # minimumGain: gain threshold consulted when deciding whether to split states
        self.env = environment
        # Initialize the trie
        self.trieHead = TrieNode(None,[])
        for sde in self.env.SDE_Set:
            insertSequence(self.trieHead,sde)
        #The instance variable self.env has a current model associated with it. Thus lines 5 through 14 are unecessary (lines 12 and 13 will be addressed below).
        #Note: lines 12 and 13 set the belief state to be 1 at the current observation
        sdeFirstObservations = [sde[0] for sde in self.env.SDE_Set]
        # Uniform belief over the model states whose SDE begins with firstObservation.
        self.beliefState = [1 if val == firstObservation else 0 for val in sdeFirstObservations]
        self.beliefState = self.beliefState / np.sum(self.beliefState)
        # Note: self.TCounts is of shape (a,m,m') and not (m,a,m') for consistency
        self.TCounts = np.ones((len(self.env.A_S),len(self.env.SDE_Set),len(self.env.SDE_Set)))
        #Note: using a (a,a',m,m',m'') matrix instead of a counter list for efficiency
        self.OneTCounts = np.ones((len(self.env.A_S),len(self.env.A_S),len(self.env.SDE_Set),len(self.env.SDE_Set),len(self.env.SDE_Set)))
        #Note: not using M.T, M.OneT, and Algorithm 12 as these can be determined more efficiently by using dirichlet distributions
        self.actionHistory = []
        self.observationHistory = []
        self.observationHistory.append(firstObservation)
        self.beliefHistory = []
        self.beliefHistory.append(copy.deepcopy(self.beliefState))
        self.minGain = minimumGain
        self.endEarly = False

    # Reinitialize a model (after the new SDEs have been inserted)
    def reinitializeModel(self):
        """Reset counts, belief and histories after the SDE set has changed,
        keeping only the most recent observation so localization restarts."""
        self.TCounts = np.ones((len(self.env.A_S),len(self.env.SDE_Set),len(self.env.SDE_Set)))
        self.OneTCounts = np.ones((len(self.env.A_S),len(self.env.A_S),len(self.env.SDE_Set),len(self.env.SDE_Set),len(self.env.SDE_Set)))
        # NOTE(review): this zeros array is immediately replaced by the list
        # comprehension below; it has no lasting effect.
        self.beliefState = np.zeros([1,len(self.env.O_S)])
        sdeFirstObservations = [sde[0] for sde in self.env.SDE_Set]
        # Re-localize: uniform belief over states consistent with the last observation.
        self.beliefState = [1 if val == self.observationHistory[-1] else 0 for val in sdeFirstObservations]
        self.beliefState = self.beliefState / np.sum(self.beliefState)
        self.beliefHistory = []
        self.beliefHistory.append(copy.deepcopy(self.beliefState))
        self.actionHistory = []
        prevOb = self.observationHistory[-1]
        self.observationHistory = [prevOb]
# Algorithm 10: PSBL Learning of SPOMDP Models
def psblLearning(env, numActions, explore, patience,minGain, insertRandActions, writeToFile, filename,useUpdatedPosterior, revisedSplitting, haveControl, confidence_factor, localization_threshold):
    """Learn an sPOMDP model by surprise-based splitting (Algorithm 10).

    Repeatedly gathers experience (up to `numActions` steps per candidate
    model), updates transition posteriors (Algorithms 13-17), and splits
    model states (Algorithm 18) until no split is found or `patience`
    consecutive splits fail to lower the surprise. Returns the model with
    the lowest surprise observed.

    NOTE(review): several c.writerow(...) calls below are NOT guarded by
    `writeToFile`; with writeToFile=False `c` is unbound and they would raise
    NameError. All current callers (test.py) pass writeToFile=True.
    NOTE(review): the final summary uses loop variable `i`, which is unbound
    if numActions == 0.
    """
    prevOb = env.reset()
    model = CollinsModel(env,prevOb,minGain)
    minSurpriseModel = None
    minSurprise = float("inf")
    splitsSinceMin = 0
    policy = []
    foundSplit = True
    modelNum = 0
    if writeToFile:
        c = csv.writer(open(filename, "w", newline=''))
        # Fixed header row "0..29" so csv.DictReader downstream (generateGraphs.py)
        # can address columns by stringified index ('0', '1', ...).
        c.writerow([x for x in range(30)])
        # Write git repo sha
        repo = git.Repo(search_parent_directories=True)
        sha = repo.head.object.hexsha
        c.writerow(["github Code Version (SHA):", sha])
        # Write the training parameters into the .csv file
        parameterNames = ["Environment Observations","Environment Actions","alpha","epsilon", "numActions","explore","patience","gainThresh", "insertRandActions","useUpdatedPosterior", "revisedSplitting", "haveControl", "confidence_factor"]
        parameterVals = [model.env.O_S, model.env.A_S, model.env.Alpha, model.env.Epsilon, numActions, explore, patience, minGain, insertRandActions, useUpdatedPosterior, revisedSplitting, haveControl, confidence_factor]
        c.writerow(parameterNames)
        c.writerow(parameterVals)
    # Developer switches: record the generated action trajectory to disk
    # (genTraj) or replay a previously saved one (useTraj).
    genTraj = False
    useTraj = False
    if genTraj:
        traj = np.zeros([numActions,1],dtype=object)
    minSurpriseModelNum = 0
    while foundSplit:
        if useTraj:
            numpyTraj = np.load("Testing Data/traj" + str(modelNum) + ".npy")
            policy = [numpyTraj[i][0] for i in range(numpyTraj.size)]
        performed_experiment = False
        for i in range(numActions):
            if confidence_factor is not None:
                # if we have performed all experiments or there is no place that we can reliably get to do perform an experiment then terminate
                if ((np.min(np.sum(model.TCounts, axis=2)) / len(model.env.SDE_Set)) >= confidence_factor) or (haveControl is True and getPathToExperiment(model, calcTransitionProbabilities(model), np.argmax(model.beliefState), confidence_factor, localization_threshold) is None and np.max(computeGains(model)) > model.minGain):
                    print("Finished early on iteration number " + str(i))
                    if((np.min(np.sum(model.TCounts, axis=2)) / len(model.env.SDE_Set)) >= confidence_factor):
                        print("Performed all necessary experiments")
                    else:
                        print("Couldn't reach all states confidently to perform all of the requested experiments")
                    model.endEarly = False
                    if writeToFile:
                        # Record the final error statistics before stopping early.
                        modelTransitionProbs = calcTransitionProbabilities(model)
                        iterError = pomdp.calculateError(model.env, modelTransitionProbs, 10000, model.TCounts)
                        iterAbsError = pomdp.calculateAbsoluteError(model.env, modelTransitionProbs)
                        c.writerow(["Iteration: ", i])
                        c.writerow(["Error:", iterError])
                        c.writerow(["Absolute Error:", iterAbsError])
                        c.writerow(["Transition Probabilities"])
                        test.writeNumpyMatrixToCSV(c, modelTransitionProbs)
                    break
            if i % 1000 == 0:
                print(i)
            # Periodic progress dump every 2500 steps and on the last step.
            # NOTE(review): not guarded by writeToFile (see docstring).
            if i % 2500 == 0 or i == numActions - 1:
                if i == 0:
                    # New candidate model: emit its header section once.
                    c.writerow([])
                    c.writerow(["Model Num " + str(modelNum)])
                    c.writerow(["Model States: "])
                    c.writerow(env.SDE_Set)
                modelTransitionProbs = calcTransitionProbabilities(model)
                iterError = pomdp.calculateError(model.env, modelTransitionProbs, 10000, model.TCounts)
                iterAbsError = pomdp.calculateAbsoluteError(model.env, modelTransitionProbs)
                c.writerow(["Iteration: ", i])
                c.writerow(["Error:", iterError])
                c.writerow(["Absolute Error:", iterAbsError])
                c.writerow(["Transition Probabilities"])
                test.writeNumpyMatrixToCSV(c, modelTransitionProbs)
                c.writerow(["Transition Gammas"])
                test.writeNumpyMatrixToCSV(c, model.TCounts)
            if not policy:
                # Add actions of an SDE to the policy or random actions. This will also add a random action between SDEs if insertRandActions is enabled
                (policy, performed_experiment) = updatePolicy(model, explore, prevOb, insertRandActions, haveControl, confidence_factor, performed_experiment, localization_threshold)
            if genTraj:
                for (actionIdx,action) in enumerate(policy):
                    if actionIdx + i < numActions:
                        traj[i + actionIdx] = action
            action = policy.pop(0)
            nextOb = model.env.step(action)
            # Algorithm 13:
            updateModelParameters(model, action, prevOb, nextOb, useUpdatedPosterior)
            prevOb = nextOb
        if genTraj:
            np.save("Testing Data/traj" + str(modelNum) + ".npy",traj)
        newSurprise = computeSurprise(model)
        print("Transition Probabilities:")
        print(calcTransitionProbabilities(model))
        print("Surprise:")
        print(newSurprise)
        # Track the lowest-surprise model; give up after `patience` consecutive
        # splits that fail to improve on it.
        if newSurprise < minSurprise:
            minSurpriseModelNum = modelNum
            minSurprise = newSurprise
            minSurpriseModel = copy.deepcopy(model)
            splitsSinceMin = 0
        else:
            splitsSinceMin = splitsSinceMin + 1
        if splitsSinceMin > patience:
            print("TCounts: ")
            print(model.TCounts)
            print("Stopped model splitting due to a lack of patience.")
            break
        #Algorithm 18
        foundSplit = trySplit(model, revisedSplitting)
        modelNum = modelNum + 1
    #make it clear which model was returned
    # NOTE: writerow on a bare string writes one '*' per character/column;
    # generateGraphs.py detects this row (row['0'] == '*') as the marker that
    # the final (returned) model section follows.
    c.writerow("***********")
    c.writerow(["Model Num " + str(minSurpriseModelNum)])
    c.writerow(["Model States: "])
    c.writerow(minSurpriseModel.env.SDE_Set)
    modelTransitionProbs = calcTransitionProbabilities(minSurpriseModel)
    iterError = pomdp.calculateError(minSurpriseModel.env, modelTransitionProbs, 10000, minSurpriseModel.TCounts)
    iterAbsError = pomdp.calculateAbsoluteError(minSurpriseModel.env, modelTransitionProbs)
    c.writerow(["Iteration: ", i])
    c.writerow(["Error:", iterError])
    c.writerow(["Absolute Error:", iterAbsError])
    c.writerow(["Transition Probabilities"])
    test.writeNumpyMatrixToCSV(c, modelTransitionProbs)
    c.writerow(["Transition Gammas"])
    test.writeNumpyMatrixToCSV(c, minSurpriseModel.TCounts)
    return minSurpriseModel
# Helper Function for Algorithm 10
def updatePolicy(model,explore,prevObservation,insertRandActions, haveControl, confidence_factor, performed_experiment, localization_threshold):
    """Return (policy, performed_experiment): the next action sequence to
    execute. With haveControl, tries to steer toward under-explored
    transitions; otherwise uses epsilon-greedy SDE-vs-random selection."""
    if haveControl is True:
        # Only use the controlled policy when an experiment state is reliably
        # reachable; otherwise fall back to the uncontrolled policy below.
        if getPathToExperiment(model, calcTransitionProbabilities(model), np.argmax(model.beliefState), confidence_factor, localization_threshold) is not None:
            return haveControlPolicy(model, prevObservation, confidence_factor, performed_experiment, localization_threshold)
        else:
            (temp, _) = updatePolicy(model, explore, prevObservation, insertRandActions, False, confidence_factor, performed_experiment, localization_threshold)
            return (temp, performed_experiment)
    random_sample = np.random.random()
    # Candidate policy: action sequence of a random SDE consistent with the
    # most recent observation.
    matchingSDEs = model.env.get_SDE(prevObservation)
    randSDE = random.choice(matchingSDEs)
    policy = randSDE[1::2] # Need to pull every other value since both observations and actions are stored in the SDE, but only a policy should be returned
    if insertRandActions:
        policy.append(random.choice(model.env.A_S))
    # Epsilon-greedy: execute the SDE policy with probability (1 - explore).
    if random_sample > explore and not not policy:
        return (policy, performed_experiment)
    else:
        return (random.choices(model.env.A_S, k=max(1,len(policy))), performed_experiment) # Use max of 1 or the policy length to make sure at least one action is returned
# Helper Function for Algorithm 10
def computeSurprise(model):
    """Surprise = visit-weighted average of the normalized entropies of the
    model's transition distributions (0 for a deterministic model)."""
    zeta = np.sum(model.TCounts, axis=2)   # visit mass per (action, state) pair
    psi = np.sum(zeta)                     # total visit mass
    n_states = len(model.env.SDE_Set)
    total = 0
    for m in range(n_states):
        for a in range(len(model.env.A_S)):
            weight = zeta[a][m] / psi
            total = total + weight * entropy(dirichlet.mean(model.TCounts[a, m, :]), base=n_states)
    return total
#Algorithm 13: Update sPOMDP Model Parameters
def updateModelParameters(model, a, prevOb, nextOb, useUpdatedPosterior):
    """Record one (action a, observation nextOb) step, then update transition
    posteriors over a sliding history window and the current belief state."""
    model.actionHistory.append(a)
    model.observationHistory.append(nextOb)
    # Interleave observations and actions: [o0, a0, o1, a1, ...]
    history = [val for pair in zip(model.observationHistory,model.actionHistory) for val in pair]
    #Note: the previous line will only work for lists of the same length. Since the observation history has one more element, we need to append the nextOb to the end of the history
    history.append(nextOb)
    maxOutcomeLength = max([len(sde) for sde in model.env.SDE_Set])
    # Only update the posteriors once enough history exists to smooth beliefs.
    if len(history) >= maxOutcomeLength + 6:
        # Algorithm 15
        model.beliefHistory = smoothBeliefHistory(model,history, model.beliefHistory)
        # Algorithm 16
        # updateTransitionFunctionPosteriors(model, a, nextOb)
        updateTransitionFunctionPosteriors(model, model.actionHistory[0], model.observationHistory[1],useUpdatedPosterior)
        # Algorithm 17
        updateOneStepFunctionPosteriors(model, history, useUpdatedPosterior)
        # Slide the window: drop the oldest action/observation pair.
        model.actionHistory.pop(0)
        model.observationHistory.pop(0)
    # Algorithm 14
    model.beliefState = updateBeliefState(model, model.beliefState, a, nextOb)
    model.beliefHistory.append(copy.deepcopy(model.beliefState))
    # Keep the belief history one entry longer than the action history.
    if len(model.beliefHistory) > len(model.actionHistory) + 1:
        model.beliefHistory.pop(0)
# Algorithm 14: sPOMDP Belief Update
def updateBeliefState(model, b, a, o):
    """Bayes-filter belief update: propagate belief `b` through action `a`
    (using posterior-mean transition probabilities), mask by observation `o`,
    and renormalize. Returns the new belief vector."""
    a_idx = model.env.A_S.index(a)
    n = len(b)
    # joint[m][m'] = P(m' | m, a) * b[m]
    joint = np.zeros([n, n])
    for m in range(n):
        trans_row = dirichlet.mean(model.TCounts[a_idx, m, :])
        for mp in range(n):
            joint[m][mp] = trans_row[mp] * b[m]
    # Marginalize out the source state.
    b_new = joint.sum(axis=0)
    # Zero out model states whose first observation disagrees with o.
    for idx, sde in enumerate(model.env.SDE_Set):
        if sde[0] != o:
            b_new[idx] = 0
    return b_new / np.sum(b_new)
#Algorithm 15: Smooth Belief History
def smoothBeliefHistory(model, history, beliefHistory):
    """Sharpen the first three belief-history entries: keep probability mass
    only on model states consistent with the longest trie-matching prefix of
    the observed history at each step, then renormalize. Returns the
    (mutated) beliefHistory."""
    for step in range(3):
        original = copy.deepcopy(beliefHistory[step])
        # Longest observation/action prefix (from step `step` onward) that
        # traces a path in the SDE trie.
        prefix = largestConsistentSequence(model.trieHead, history[2 * step:])
        # SDEs whose beginning agrees with that prefix remain plausible.
        consistent = [sde for sde in model.env.SDE_Set if sde[:len(prefix)] == prefix]
        smoothed = np.zeros(len(beliefHistory[step]))
        for sde in consistent:
            idx = model.env.SDE_Set.index(sde)
            smoothed[idx] = original[idx]
        # Renormalize the surviving mass.
        beliefHistory[step] = smoothed / np.sum(smoothed)
    return beliefHistory
#Algorithm 16: Transition Function Posteriors Update
def updateTransitionFunctionPosteriors(model, a, o, useUpdatedPosterior):
    """Fold one (action a, observation o) step into the Dirichlet transition
    pseudo-counts model.TCounts, weighting each (m, m') pair by the smoothed
    belief history."""
    a_index = model.env.A_S.index(a)
    counts = np.zeros([len(model.beliefState),len(model.beliefState)])
    totalCounts = 0
    for (m_idx, m) in enumerate(model.env.SDE_Set):
        for (mp_idx, m_prime) in enumerate(model.env.SDE_Set):
            # multFactor = int(m_prime[0] == o)
            multFactor = model.beliefHistory[1][mp_idx] #Note: this is an alternative way of calculating multFactor that is supposed to be better in practice. See Section 6.3.4.3 in Collins' dissertation.
            counts[m_idx][mp_idx] = multFactor * (dirichlet.mean(model.TCounts[a_index, m_idx, :])[mp_idx]) * model.beliefHistory[0][m_idx]
            totalCounts = totalCounts + counts[m_idx][mp_idx]
    if useUpdatedPosterior:
        # Frequency-independent variant: attribute the step only to the single
        # most likely source state; on a tie, skip the update entirely.
        max_rows = np.argwhere(np.array(model.beliefHistory[0]) == np.amax(np.array(model.beliefHistory[0])))
        if max_rows.size != 1:
            counts[:,:] = 0
        else:
            max_row = max_rows[0]
            counts[np.arange(len(model.env.SDE_Set)) != max_row, :] = 0
        if totalCounts == 0:
            # Debug dump before aborting on a degenerate posterior.
            # NOTE(review): if the tie branch above was taken, max_row is
            # unbound here and print(max_row) would raise NameError.
            print(counts)
            print(max_row)
            print(model.beliefHistory[0])
            print(model.beliefHistory[1])
            print(counts)
            print(a)
            print(o)
            print(model.env.SDE_Set)
            print(model.TCounts)
            exit()
    # Normalize by the PRE-masking total, then accumulate into the posteriors.
    for m_idx in range(len(model.beliefState)):
        for mp_idx in range(len(model.beliefState)):
            counts[m_idx][mp_idx] = counts[m_idx][mp_idx] / totalCounts
            model.TCounts[a_index][m_idx][mp_idx] = model.TCounts[a_index][m_idx][mp_idx] + counts[m_idx][mp_idx]
    #Note: Not necessary to do updateTransitionProbabilities (Algorithm 12) since this is handled by the dirichlet distributions
# Algorithm 17: One Step Transition Function Posteriors Update
def updateOneStepFunctionPosteriors(model, history, useUpdatedPosterior):
    """Fold one two-step fragment (o, a, o', a', o'') of the history into the
    one-step Dirichlet pseudo-counts model.OneTCounts, weighting each
    (m, m', m'') triple by the smoothed belief history.

    Fix: the useUpdatedPosterior masking previously did
    `counts[mask1, mask2, :] = 0`, which PAIRS the two boolean masks
    (NumPy fancy indexing zips them elementwise) instead of keeping only the
    (max_row, max_row2) slice. The two axes are now masked independently.
    """
    o = history[0]
    a = history[1]
    o_prime = history[2]
    a_prime = history[3]
    o_dprime = history[4]
    a_index = model.env.A_S.index(a)
    ap_index = model.env.A_S.index(a_prime)
    counts = np.zeros([len(model.beliefState),len(model.beliefState),len(model.beliefState)])
    totalCounts = 0
    for (m_idx,m) in enumerate(model.env.SDE_Set):
        for (mp_idx,mp) in enumerate(model.env.SDE_Set):
            for (mdp_idx,mdp) in enumerate(model.env.SDE_Set):
                # multFactor1 = int(mp[0] == o) #BUG: Collins pseudocode uses these masks. However, o and o' correspond to m and m' respectively, not m' and m".
                # multFactor2 = int(mdp[0] == o_prime)
                multFactor1 = model.beliefHistory[1][mp_idx]
                multFactor2 = model.beliefHistory[2][mdp_idx]
                counts[m_idx][mp_idx][mdp_idx] = multFactor1 * multFactor2 * (dirichlet.mean(model.TCounts[ap_index, mp_idx, :])[mdp_idx]) * (dirichlet.mean(model.TCounts[a_index, m_idx, :])[mp_idx]) * model.beliefHistory[0][m_idx]
                totalCounts = totalCounts + counts[m_idx][mp_idx][mdp_idx]
    if useUpdatedPosterior:
        # Frequency-independent variant: attribute the fragment only to the
        # single most likely (source, intermediate) state pair; skip on ties.
        max_rows = np.argwhere(np.array(model.beliefHistory[0]) == np.amax(np.array(model.beliefHistory[0])))
        max_rows2 = np.argwhere(np.array(model.beliefHistory[1]) == np.amax(np.array(model.beliefHistory[1])))
        if max_rows.size != 1 or max_rows2.size != 1:
            counts[:, :, :] = 0
        else:
            max_row = max_rows[0]
            max_row2 = max_rows2[0]
            # FIX: mask each axis on its own so only counts[max_row, max_row2, :]
            # survives (the old paired indexing zeroed only zipped positions).
            counts[np.arange(len(model.env.SDE_Set)) != max_row, :, :] = 0
            counts[:, np.arange(len(model.env.SDE_Set)) != max_row2, :] = 0
    # Normalize by the PRE-masking total (matching Algorithm 16) and accumulate.
    for m in range(len(model.env.SDE_Set)):
        for mp in range(len(model.env.SDE_Set)):
            for mdp in range(len(model.env.SDE_Set)):
                counts[m][mp][mdp] = counts[m][mp][mdp] / totalCounts
                model.OneTCounts[a_index][ap_index][m][mp][mdp] = model.OneTCounts[a_index][ap_index][m][mp][mdp] + counts[m][mp][mdp]
    #Note: Not necessary to do updateOneStepProbabilities (analagous to Algorithm 12) since this is handled by the dirichlet distributions
# Algorithm 18: sPOMDP Model State Splitting
def trySplit(model, revisedSplitting):
    """Attempt one model-state split (Algorithm 18).

    Orders candidate (state, action) pairs by SDE length then descending gain,
    and splits the first pair whose gain exceeds model.minGain into the two
    most likely successor outcomes. Returns True iff the SDE set changed
    (the model is reinitialized in that case).
    """
    G_ma = computeGains(model)
    G = []
    # Generate the list G that is used to order the model splitting
    mTrajLengths = [len(sde) for sde in model.env.SDE_Set]
    sortedIndexes = sorted(range(len(mTrajLengths)),key=mTrajLengths.__getitem__)
    for length in set(mTrajLengths):
        tripletStorage = []
        gainStorage = []
        for m in [i for i in sortedIndexes if mTrajLengths[i] == length]:
            # Get all of the relevant gains
            for a in range(len(model.env.A_S)):
                tripletStorage.append(((model.env.SDE_Set[m],model.env.A_S[a]),G_ma[a][m]))
                gainStorage.append(G_ma[a][m])
        sortedGainIndexes = sorted(range(len(gainStorage)),key=gainStorage.__getitem__)
        for index in sortedGainIndexes[::-1]: # Note: using the reverse of the list since sorted goes in ascending order of the gains
            G.append(tripletStorage[index])
    print("G")
    print(G)
    for gs in G:
        state = gs[0][0]
        action = gs[0][1]
        gainValue = gs[1]
        if revisedSplitting:
            # Safety check: never generate an SDE whose first action conflicts
            # with an existing SDE sharing the same first observation.
            firstOb = [sde[0] for sde in model.env.SDE_Set]
            skipGainPair = False
            for (obNum, ob) in enumerate(firstOb):
                if ob == state[0] and len(model.env.SDE_Set[obNum]) > 1: #Check the first observation of each SDE and, if it has a different first action than the variable "action", then skip as this would generate an invalid SDE
                    if model.env.SDE_Set[obNum][1] != action:
                        skipGainPair = True
                        break
            if skipGainPair:
                continue
        if gainValue > model.minGain:
            #Set m1 and m2 to be the two most likely states that are transitioned into from state m taking action a
            m_index = model.env.SDE_Set.index(state)
            a_index = model.env.A_S.index(action)
            transitionSetProbs = dirichlet.mean(model.TCounts[a_index, m_index, :])
            orderedVals = copy.deepcopy(transitionSetProbs)
            orderedVals.sort()
            prob1 = orderedVals[-1] #largest probability
            prob2 = orderedVals[-2] #second largest probability
            sde1_idx = np.where(transitionSetProbs == prob1)[0][0]
            if np.where(transitionSetProbs == prob1)[0].size > 1: # In this case, the most likely probability actually occurs twice (e.g. a 50-50 transition split)
                sde2_idx = np.where(transitionSetProbs == prob1)[0][1]
            else:
                sde2_idx = np.where(transitionSetProbs == prob2)[0][0]
            m1 = model.env.get_SDE()[sde1_idx]
            m2 = model.env.get_SDE()[sde2_idx]
            # Candidate SDEs: first observation of `state`, then `action`,
            # prepended to each of the two most likely successor SDEs.
            newOutcome1 = copy.deepcopy(m1)
            newOutcome1.insert(0,action)
            newOutcome1.insert(0,state[0])
            newOutcome2 = copy.deepcopy(m2)
            newOutcome2.insert(0,action)
            newOutcome2.insert(0,state[0])
            outcomesToAdd = []
            if newOutcome1 not in model.env.SDE_Set:
                outcomesToAdd.append(newOutcome1)
                insertSequence(model.trieHead,newOutcome1)
            if newOutcome2 not in model.env.SDE_Set:
                outcomesToAdd.append(newOutcome2)
                insertSequence(model.trieHead,newOutcome2)
            #Note: Not updating model.MaxOutcomeLength as this is generated dynamically when needed in Algorithm 13
            if len(outcomesToAdd) > 1:
                # Note: The modelState class is not used in this implementation so making a new modelState instance is not necessary.
                model.env.SDE_Set.append(newOutcome1)
                model.env.SDE_Set.append(newOutcome2)
                model.env.SDE_Set.remove(state)
                # Due to an issue in Collins pseudocode, the inserted SDEs are not guaranteed to correctly replace the older SDEs. Thus, if we are removing an SDE, it is best to rebuild the trie
                model.trieHead = TrieNode(None,[])
                for sde in model.env.SDE_Set:
                    insertSequence(model.trieHead, sde)
                print("Split the model. New States: ")
                print(model.env.SDE_Set)
                model.reinitializeModel()
                return True
            elif len(outcomesToAdd) == 1:
                model.env.SDE_Set.append(outcomesToAdd[0])
                print("Split the model. New States: ")
                print(model.env.SDE_Set)
                model.reinitializeModel()
                return True
    return False
# Compute gains according to equation 6.10 (Helper Function for Algorithm 18)
def computeGains(model):
    """Gain G[a'][m']: entropy of the one-step transition from (m', a') minus
    the visit-weighted entropy of the two-step refinements — how much the
    extra context (m, a) would reduce uncertainty."""
    nS = len(model.env.SDE_Set)
    nA = len(model.env.A_S)
    G = np.zeros([nA, nS])
    # Visit statistics feeding the w_ma weights.
    perAPrime = np.sum(model.OneTCounts, axis=4)      # entries of m' from (m, a), per a'
    enterCounts = np.sum(perAPrime, axis=0)           # entries of m' from (m, a)
    totalEnter = np.sum(np.sum(enterCounts, axis=0), axis=0)  # total entries per m'
    for mp in range(nS):
        for ap in range(nA):
            weighted_entropy = 0
            for m in range(nS):
                for a in range(nA):
                    w = enterCounts[a, m, mp] / totalEnter[mp]
                    weighted_entropy = weighted_entropy + w * entropy(dirichlet.mean(model.OneTCounts[a, ap, m, mp, :]), base=nS)
            G[ap][mp] = entropy(dirichlet.mean(model.TCounts[ap, mp, :]), base=nS) - weighted_entropy
    return G
# Helper function to calculate the transition probabilities
def calcTransitionProbabilities(model):
    """Return the (|A|, |M|, |M|) matrix of posterior-mean transition
    probabilities implied by the Dirichlet counts in model.TCounts."""
    nA = len(model.env.A_S)
    nS = len(model.env.SDE_Set)
    probs = np.zeros([nA, nS, nS])
    for a_idx in range(nA):
        for m_idx in range(nS):
            probs[a_idx][m_idx][:] = np.array(dirichlet.mean(model.TCounts[a_idx, m_idx, :]))
    return probs
def haveControlPolicy(model, prevObservation, confidence_factor, performed_experiment, localization_threshold):
    """Controlled exploration policy: localize when the belief is too diffuse
    (or right after an experiment), otherwise run or navigate toward an
    under-explored (state, action) "experiment".

    Returns (action_list, performed_experiment).

    NOTE(review): np.seterr and warnings.filterwarnings('error') mutate
    process-wide state on every call; floating-point warnings anywhere in the
    process become exceptions after this runs.
    """
    np.seterr(all='warn')
    import warnings
    warnings.filterwarnings('error')
    #<<New Work: Controlling the agent while generating the trajectory. This allows the agent to prioritize performing transitions it has yet to confidently learn>>
    new_Full_Transition = []
    transitionProbs = calcTransitionProbabilities(model)
    # perform localization if unsure of where we are
    nonzero_values = np.count_nonzero(model.beliefState)
    if performed_experiment is True or (nonzero_values > 1 and entropy(model.beliefState, base=nonzero_values) > localization_threshold):
        # Execute a random SDE consistent with the last observation to localize.
        Matching_SDE = model.env.get_SDE(prevObservation)
        Chosen_SDE = np.array(Matching_SDE[np.random.randint(low = 0, high = len(Matching_SDE))])
        # Every other entry of an SDE is an action (odd indices).
        Chosen_SDE_Actions = Chosen_SDE[np.arange(start=1, stop = len(Chosen_SDE), step= 2, dtype=int)]
        for action in Chosen_SDE_Actions:
            new_Full_Transition.append(action)
        if performed_experiment is True:
            # now need to get ourselves to a random state (in case there's latent states)
            # choose a random number of actions that could get us to any of our model states
            rand_actions = random.choices(model.env.A_S, k=max(1,len(model.env.SDE_Set) - 1))
            for action in rand_actions:
                new_Full_Transition.append(action)
            performed_experiment = False
    else: # try to perform experiments so that we learn what we don't know
        # perform experiment if we're in a place where we can
        current_state = np.argmax(model.beliefState)
        for action_idx in range(len(model.env.A_S)):
            # An action is an "experiment" while its average pseudo-count is
            # still below the requested confidence factor.
            if np.sum(model.TCounts[action_idx, current_state]) / len(model.env.SDE_Set) < confidence_factor:
                action = model.env.A_S[action_idx]
                new_Full_Transition.append(action)
                performed_experiment = True
                break
        # if not in a state of interest, try to go to a state of interest
        if performed_experiment is False:
            path = getPathToExperiment(model, transitionProbs, current_state, confidence_factor, localization_threshold)
            if len(path) == 0:
                print("Error: We're already at an experiment state")
                return (new_Full_Transition, performed_experiment)
            else:
                # Take only the first step; the plan is recomputed next call.
                new_Full_Transition.append(path[0])
    return (new_Full_Transition, performed_experiment)
# returns a path to a state where experimentation needs to be done
# returns empty list if already at state where experiment can be performed
# returns None if it has done experimentation for all reliably reachable nodes
def getPathToExperiment(model, transitionProbs, current_state, confidence_factor, localization_threshold):
    """Breadth-first search for a path to a state needing experimentation.

    Returns the action list leading to the highest-reward under-explored
    state (excluding the final experiment action), an empty list if
    current_state already needs experimentation, or None when every
    (action, state) pair is explored above the confidence threshold or no
    such state is reliably reachable.
    """
    confidences = np.sum(model.TCounts, axis=2) / len(model.env.SDE_Set)
    # Get the states of interest. Use index 1 so we get states (as opposed to
    # actions), and unique so a state needing 2+ actions isn't repeated.
    states_of_interest = np.unique(np.array(np.where(confidences < confidence_factor))[1, :])
    if states_of_interest.size == 0:
        # Everything is explored above the threshold; nothing to do.
        # (Removed a leftover `import pdb; pdb.set_trace()` breakpoint that
        # would hang any non-interactive run before this return.)
        return None
    if current_state in states_of_interest:
        return []
    max_depth = len(model.env.SDE_Set)
    root = Node(str(current_state), reward=0, probability=1, actions=[])
    prev_lvl_nodes = [root]
    depth = 1
    bestNode = None
    while depth <= max_depth:
        added_nodes = []
        for node in prev_lvl_nodes:
            # A non-zero reward means this branch already reached an
            # experiment state; don't expand past it.
            if node.reward != 0:
                continue
            m_idx = int(node.name)
            for action_idx in range(len(model.env.A_S)):
                a = model.env.A_S[action_idx]
                row = transitionProbs[action_idx, m_idx, :]
                new_actions = node.actions.copy()
                new_actions.append(a)
                new_probability = np.amax(row) * node.probability
                if confidences[action_idx, m_idx] >= confidence_factor:
                    nonzero_values = np.count_nonzero(row)
                    # Only follow transitions that are near-deterministic:
                    # a single nonzero entry, or low-entropy distribution.
                    if nonzero_values == 1 or (nonzero_values > 1 and entropy(row, base=nonzero_values) < localization_threshold):
                        if np.count_nonzero(row == np.amax(row)) == 1:  # make sure only one max trans prob
                            not_in_ancestors = True
                            for ancestor in node.ancestors:
                                if ancestor.name == str(np.argmax(row)):  # we've already been to this node on our path
                                    not_in_ancestors = False
                                    break
                            if not_in_ancestors is True:
                                added_nodes.append(Node(str(np.argmax(row)), parent=node, reward=0, probability=new_probability, actions=new_actions))
                else:  # need to do experimentation from here
                    reward = confidences[action_idx, m_idx] / (confidence_factor - 1)
                    reward = reward * node.probability
                    added_nodes.append(Node(str(np.argmax(row)), parent=node, reward=reward, probability=new_probability, actions=new_actions))
                    if bestNode is None or reward > bestNode.reward:
                        bestNode = added_nodes[-1]
        if len(added_nodes) == 0:
            break
        prev_lvl_nodes = added_nodes
        depth = depth + 1
    if bestNode is None:  # never found a place to do an experiment from
        return None
    else:
        return bestNode.actions[:-1]  # don't return the last action as it's the experiment
def drawGraph(model, root, bestNode):
    """Render the search tree rooted at *root* to Test2_graph.png and show it.

    Nodes are labelled with their state index, the state's SDE symbols, and
    the node reward; the best experiment node and its incoming edge are
    highlighted in green, and leaf experiment nodes are dashed.
    Requires the graphviz `dot` binary on PATH.
    """
    def nodeattrfunc(n):
        # Build the DOT attribute list for one node.
        toReturn = ""
        roundedReward = round(n.reward, 3)
        sde_str = "\n("
        for m_a in model.env.SDE_Set[int(n.name)]:
            if m_a == "square":
                sde_str = sde_str + "□,"
            elif m_a == "diamond":
                sde_str = sde_str + "◊,"
            else:
                sde_str = sde_str + m_a + ","
        sde_str = sde_str[:-1] + ')'  # -1 to get rid of comma
        if n == bestNode:
            toReturn = toReturn + 'color=forestgreen, fontcolor=black, fontname="Times-Bold", '
        else:
            toReturn = toReturn + 'fontname="Times-Roman", '
        toReturn = toReturn + 'label="' + n.name + sde_str + '\nR=' + str(roundedReward) + '"'
        if n.is_leaf and n.reward != 0:
            toReturn = toReturn + ', style=dashed'
        return toReturn
    def edgeattrfunc(n, child):
        # Build the DOT attribute list for the edge into *child*.
        toReturn = ""
        if child == bestNode:
            # FIX: trailing ', ' was missing, which glued these attributes to
            # the following label= and produced malformed DOT output.
            toReturn = toReturn + 'color=forestgreen, fontcolor=black, fontname="Times-Bold", '
        toReturn = toReturn + 'label=" %s"' % (child.actions[-1])
        if child.is_leaf and child.reward != 0:
            toReturn = toReturn + ', style=dashed'
        return toReturn
    UniqueDotExporter(root, edgeattrfunc=edgeattrfunc, nodeattrfunc=nodeattrfunc).to_dotfile("Test2_graph.dot")
    cmd = ["dot", "-Tpng", "Test2_graph.dot", "-o", "Test2_graph.png"]
    check_call(cmd)
    img = mpimg.imread("Test2_graph.png")
    imgplot = plt.imshow(img)
    plt.show()
| {"/generateGraphs.py": ["/pomdp.py"], "/test.py": ["/pomdp.py", "/spomdp.py"], "/spomdp.py": ["/pomdp.py", "/test.py"]} |
71,626 | Sayn650/bosunogi_bot | refs/heads/master | /config.py | token = '1887790390:AAGK7HDvt0nvcelxkMGB9XYlgFJEwDwQZk4' | {"/bot.py": ["/config.py", "/parsers.py"]} |
71,627 | Sayn650/bosunogi_bot | refs/heads/master | /bot.py | import time
import telebot
from config import token
from parsers import parser
from time import sleep
bot = telebot.TeleBot(token)
# --- module-level conversation state, shared by the handlers below ---
name = ''  # current search query (product name or brand)
price_category = 0  # maximum price filter; 0 means "no limit"
l = 0  # start index of the results slice currently shown
k = 10  # end index of the results slice (page size is 10)
now_prod = 0  # count of products already sent; used by /continue to resume
flag = False  # set True by /break to stop sending the current page
@bot.message_handler(content_types=['text'])
def search_producte(message):
    """Top-level command dispatcher for the bot.

    Handles /start, /help, /next_page, /continue, /clear and /break.
    Conversation state lives in module-level globals because telebot
    handlers are plain callbacks.
    """
    global k
    global l
    global name
    global flag
    global now_prod
    if message.text.lower() == '/start':
        bot.send_message(message.from_user.id, 'Пожалуйста введите название или бренд для поиска.')
        bot.register_next_step_handler(message, get_dis)
    elif message.text.lower() == '/help':
        # FIX: the help text previously advertised the misspelled command
        # '/contnue', which the dispatcher does not recognise.
        bot.send_message(message.from_user.id, 'Чтобы начать поиск введите /start (начать новый поиск),вводите название или бренд,\nвведите /help для информационной сводки,\nвведя /next_page выводит следующие 10 значений(вторым сообщением вводите Ok),\nкоманда /clear очищает поиск,\nкоманда /break завершает поиск,\n/continue выводит с того места где остановились')
    elif message.text.lower() == '/next_page':
        flag = False
        l += 10
        k += 10
        bot.register_next_step_handler(message, get_data)
    elif message.text.lower() == '/continue':
        flag = False
        l = now_prod
        # FIX: k was left unchanged here, so once now_prod >= k the slice
        # kl[l:k] in get_data was empty and /continue showed nothing.
        # Keep the page window 10 items wide from the resume point.
        k = l + 10
        bot.register_next_step_handler(message, get_data)
    elif message.text.lower() == '/clear':
        name = ''
        l = 0
        k = 10
    elif message.text.lower() == '/break':
        flag = True
def get_dis(message):
    """Remember the user's search query, then ask for a price category."""
    global name
    global price_category
    name = message.text
    bot.send_message(message.from_user.id, 'Ценовая категория(вторым сообщением Ok)')
    bot.register_next_step_handler(message, prices)
def prices(message):
    """Parse the price limit; '/pass' (or invalid input) means "no limit".

    Sets the module-level price_category and chains to get_data.
    """
    global price_category
    pr_st = message.text.lower()
    if pr_st == '/pass':
        price_category = 0
    else:
        try:
            price_category = int(pr_st)
        except ValueError:
            # FIX: non-numeric input used to raise and kill the handler
            # chain; fall back to "no limit" instead.
            price_category = 0
    bot.register_next_step_handler(message, get_data)
def get_data(message):
    # Fetch products matching the stored query via parsers.parser and send
    # the current page (slice [l:k]) to the chat, one photo + text per item.
    global name
    global l
    global k
    global flag
    global now_prod
    # NOTE(review): 'загрузгу' looks like a typo for 'загрузку' in the
    # user-facing message below; left unchanged here.
    bot.send_message(message.from_user.id,'Начинаю загрузгу данных!')
    if price_category == 0:
        # 0 means "no price filter": pass an effectively unlimited cap.
        kl = parser(name,pr = 10000000)
    else:
        kl = parser(name,pr=price_category)
    bot.send_message(message.from_user.id,'Загрузка данных закончена!')
    sleep(5)
    for i in kl[l:k]:
        image = i['image']
        body = i['body']
        bot.send_photo(message.from_user.id,image)
        bot.send_message(message.from_user.id,body)
        if flag:
            # /break was issued: stop sending the remaining products.
            break
        else:
            # Count delivered products so /continue can resume from here.
            now_prod += 1
        sleep(15)  # throttle sends — presumably to stay under Telegram rate limits; TODO confirm
    bot.send_message(message.from_user.id,'Закончил')
# Start long polling; none_stop keeps the bot running through API errors.
bot.polling(none_stop=True,timeout=60)
| {"/bot.py": ["/config.py", "/parsers.py"]} |
71,628 | Sayn650/bosunogi_bot | refs/heads/master | /parsers.py | import requests,json
from requests.api import patch
def parser(name, pr):
    """Scrape Rive Gauche search results for *name*, keeping items priced <= pr.

    Returns a list of dicts with keys 'image' (photo URL) and 'body'
    (formatted multi-line product description).
    """
    # FIX: the original URL contained '¤tPage' — HTML-entity mojibake of
    # '&currentPage' ('&curren;' renders as ¤) — so the page parameter was
    # broken, and the loop re-fetched page 0 on every iteration. Build the
    # query with `params=` so the page number is set per request and the
    # search text is URL-encoded.
    base = 'https://api.rivegauche.ru/rg/v1/newRG/products/search'
    params = {
        'fields': 'FULL',
        'currentPage': 0,
        'pageSize': 24,
        'text': name,
        'tag': '1272867863968984',
    }
    first = requests.get(base, params=params).json()
    length_pages = first['pagination']['totalPages']
    ls = list()
    for page in range(length_pages):
        if page == 0:
            data_json = first  # already fetched above; don't hit the API twice
        else:
            params['currentPage'] = page
            data_json = requests.get(base, params=params).json()
        # FIX: iterating range(page_size - 1) skipped the last result of every
        # page and raised IndexError on short final pages; iterate the actual
        # result list instead.
        for item in data_json['results']:
            price = item['price']['value']
            if price > pr:
                continue
            name_prod = item['name']
            url_ad = 'https://rivegauche.ru' + item['url']
            description = item.get('description', '')
            stock = item['stock']['stockLevelStatus']
            price_valut = item['price']['currencyIso']
            image = 'https://api.rivegauche.ru' + item['listingImage']['url']
            ds = name_prod + '\n' + url_ad + '\n' + str(price) + ' ' + price_valut + '\n' + stock + '\n' + description
            ls.append({'image': image, 'body': ds})
    return ls
| {"/bot.py": ["/config.py", "/parsers.py"]} |
71,629 | thirdpin-hackaton/telegram-bot | refs/heads/master | /mbdevs/trafflight.py | #!/usr/bin/env python
import time
import enum
import minimalmodbus
import serial.tools.list_ports
from collections import namedtuple
from pykka import ThreadingActor
from functools import partial
from .common import Logger, find_device
from .exceptions import ComDeviceNotFound
from .modbus import FunctionalCodes, Register, Modbus, Action, ModbusUser
# Register map for the traffic light: one coil per lamp plus one coil per
# lamp's GPIO configuration bit.
TrafficLightRegs = namedtuple(
    'TrafficLightRegs',
    ['red', 'yellow', 'green', 'red_config', 'yellow_config', 'green_config'])
# Coil addresses 8-10 drive the lamps; 0-2 configure the corresponding pins
# as outputs (written by TrafficLight.initialize_gpio).
REGS = TrafficLightRegs(
    red=Register(
        name="Red light",
        addr=8,
        func_code=FunctionalCodes.COIL,
        count=1,
        value_type=bool,
        unit=''),
    yellow=Register(
        name="Yellow light",
        addr=9,
        func_code=FunctionalCodes.COIL,
        count=1,
        value_type=bool,
        unit=''),
    green=Register(
        name="Green light",
        addr=10,
        func_code=FunctionalCodes.COIL,
        count=1,
        value_type=bool,
        unit=''),
    red_config=Register(
        name="Red light config",
        addr=0,
        func_code=FunctionalCodes.COIL,
        count=1,
        value_type=bool,
        unit=''),
    yellow_config=Register(
        name="Yellow light config",
        addr=1,
        func_code=FunctionalCodes.COIL,
        count=1,
        value_type=bool,
        unit=''),
    green_config=Register(
        name="Green light config",
        addr=2,
        func_code=FunctionalCodes.COIL,
        count=1,
        value_type=bool,
        unit=''),
    )
class TrafficLightDeviceNotFound(ComDeviceNotFound):
    """Raised when the traffic light's COM device cannot be located."""
    pass
class TrafficLight(ModbusUser, ThreadingActor):
    """Pykka actor driving a three-lamp Modbus traffic light.

    Actor messages are dicts carrying an 'action' key (a TrafficLight.Action)
    plus the keyword arguments of the matching handler, e.g.
    ``{'action': Action.ON, 'color': Color.RED}``.
    """

    class Action(enum.Enum):
        # Operation requested via an actor message.
        ON = 1
        OFF = 0
        SEQUENCE = 2
        TOGGLE = 3

    class State(enum.Enum):
        ON = 1
        OFF = 0

    class Color(enum.Enum):
        # Values 8-10 mirror the lamp coil addresses (see REGS);
        # ALL is a pseudo-color meaning "every lamp at once".
        RED = 8
        YELLOW = 9
        GREEN = 10
        ALL = -1

    @classmethod
    def from_vid_pid(cls, vip, pid, dev_addr=2):
        """Locate the serial device by USB VID/PID and start the actor on it."""
        Logger.for_name(__name__).info("Device search...")
        dev = find_device(vip, pid)
        return cls.start(dev.device, dev_addr)

    def __init__(self, port, dev_addr):
        """Open a Modbus RTU connection on *port* and switch all lamps off.

        :param port: serial port path/name of the traffic light device
        :param dev_addr: Modbus slave address of the device
        """
        ThreadingActor.__init__(self)
        self._log = Logger.for_name(__name__)
        # Shadow copy of each lamp's last commanded state.
        self.states = {
            TrafficLight.Color.RED: TrafficLight.State.OFF,
            TrafficLight.Color.GREEN: TrafficLight.State.OFF,
            TrafficLight.Color.YELLOW: TrafficLight.State.OFF,
        }
        try:
            ModbusUser.__init__(
                self, minimalmodbus.Instrument(
                    str(port), dev_addr, mode='rtu'))
        except Exception as e:
            self._log.error(str(e), exc_info=True)
            raise e
        self.initialize_gpio()
        self.all(TrafficLight.State.OFF)

    def on_receive(self, msg):
        """Pykka entry point: pop the 'action' key and dispatch the rest."""
        action = msg.pop('action')
        self._match_action(action, **msg)

    def _reg(self, color):
        """Map a Color to its lamp coil register."""
        return {
            TrafficLight.Color.RED: REGS.red,
            TrafficLight.Color.GREEN: REGS.green,
            TrafficLight.Color.YELLOW: REGS.yellow
        }[color]

    def _match_action(self, action, **kwarg):
        """Dispatch *action* to its handler; log (don't propagate) failures."""
        try:
            {
                TrafficLight.Action.OFF: partial(self.turn_off, **kwarg),
                TrafficLight.Action.ON: partial(self.turn_on, **kwarg),
                TrafficLight.Action.TOGGLE: partial(self.toggle, **kwarg),
                TrafficLight.Action.SEQUENCE: partial(self.sequence, **kwarg)
            }[action]()
        except Exception:
            # FIX: this was a bare ``except:`` logging an empty info-level
            # message, which also swallowed SystemExit/KeyboardInterrupt and
            # hid real errors. Catch Exception and log at error level.
            self._log.error("Traffic light action failed", exc_info=True)

    def initialize_gpio(self):
        """Configure the three lamp pins as outputs."""
        self._write_reg(REGS.red_config, 1)
        self._write_reg(REGS.yellow_config, 1)
        self._write_reg(REGS.green_config, 1)

    def _turn(self, color, state):
        """Write the coil for *color*: 1 when ON, 0 otherwise."""
        reg = self._reg(color)
        if state == TrafficLight.State.ON:
            self._write_reg(reg, 1)
        else:
            self._write_reg(reg, 0)

    def all(self, state):
        """Set every lamp to *state*."""
        self.green(state)
        self.yellow(state)
        self.red(state)

    def green(self, state):
        """Set the green lamp to *state* and record it."""
        self.states[TrafficLight.Color.GREEN] = state
        self._turn(TrafficLight.Color.GREEN, state)

    def yellow(self, state):
        """Set the yellow lamp to *state* and record it."""
        self.states[TrafficLight.Color.YELLOW] = state
        self._turn(TrafficLight.Color.YELLOW, state)

    def red(self, state):
        """Set the red lamp to *state* and record it."""
        self.states[TrafficLight.Color.RED] = state
        self._turn(TrafficLight.Color.RED, state)

    def turn_on(self, color):
        """Switch *color* on (Color.ALL switches every lamp on)."""
        if color == TrafficLight.Color.ALL:
            self.all(TrafficLight.State.ON)
        else:
            self.states[color] = TrafficLight.State.ON
            self._turn(color, TrafficLight.State.ON)

    def turn_off(self, color):
        """Switch *color* off (Color.ALL switches every lamp off)."""
        if color == TrafficLight.Color.ALL:
            self.all(TrafficLight.State.OFF)
        else:
            self.states[color] = TrafficLight.State.OFF
            self._turn(color, TrafficLight.State.OFF)

    def toggle(self, colors):
        """Invert each lamp in *colors*; non-Color entries are ignored.

        NOTE(review): unlike sequence(), this does not update self.states —
        presumably intentional so repeated toggles blink against the recorded
        state; confirm before relying on states here.
        """
        for color in colors:
            if isinstance(color, TrafficLight.Color):
                if self.states[color] == TrafficLight.State.ON:
                    self._turn(color, TrafficLight.State.OFF)
                else:
                    self._turn(color, TrafficLight.State.ON)

    def sequence(self, sleep_time, colors):
        """Invert each lamp in *colors* in order, pausing *sleep_time* seconds
        between steps; the recorded state is updated for each lamp."""
        for color in colors:
            if isinstance(color, TrafficLight.Color):
                if self.states[color] == TrafficLight.State.ON:
                    state = TrafficLight.State.OFF
                else:
                    state = TrafficLight.State.ON
                self._turn(color, state)
                self.states[color] = state
                time.sleep(sleep_time)
| {"/mbdevs/trafflight.py": ["/mbdevs/common.py", "/mbdevs/exceptions.py", "/mbdevs/modbus.py"], "/bot.py": ["/mbdevs/dooropener.py", "/mbdevs/trafflight.py", "/mbdevs/emergency.py", "/mbdevs/__init__.py", "/mbdevs/ivitmrs.py"], "/mbdevs/emergency.py": ["/mbdevs/common.py", "/mbdevs/exceptions.py", "/mbdevs/modbus.py"], "/mbdevs/dooropener.py": ["/mbdevs/common.py", "/mbdevs/exceptions.py", "/mbdevs/modbus.py"], "/mbdevs/ivitmrs.py": ["/mbdevs/common.py", "/mbdevs/modbus.py"], "/mbdevs/common.py": ["/mbdevs/exceptions.py"], "/mbdevs/modbus.py": ["/mbdevs/common.py", "/mbdevs/exceptions.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.