repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
nerdynick/PySQLPool | src/PySQLPool/query.py | PySQLQuery.queryMulti | python | def queryMulti(self, queries):
self.lastError = None
self.affectedRows = 0
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
#Execute query and store results
cursor = self.conn.getCursor()
for query in queries:
self.conn.query = query
if query.__class__ == [].__class__:
self.affectedRows += cursor.execute(query[0], query[1])
else:
self.affectedRows += cursor.execute(query)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows | Execute a series of Deletes,Inserts, & Updates in the Queires List
@author: Nick Verbeck
@since: 9/7/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/query.py#L197-L231 | [
"def _GetConnection(self):\n\t\"\"\"\n\tRetieves a prelocked connection from the Pool\n\n\t@author: Nick Verbeck\n\t@since: 9/7/2008\n\t\"\"\"\n\t#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,\n\t#we wait 1 second and try again.\n\t#The Connection is returned locked to be thread safe\n\twhile self.conn is None:\n\t\tself.conn = Pool().GetConnection(self.connInfo)\n\t\tif self.conn is not None:\n\t\t\tbreak\n\t\telse:\n\t\t\ttime.sleep(1)\n",
"def _ReturnConnection(self):\n\t\"\"\"\n\tReturns a connection back to the pool\n\n\t@author: Nick Verbeck\n\t@since: 9/7/2008\n\t\"\"\"\n\tif self.conn is not None:\n\t\tif self.connInfo.commitOnEnd is True or self.commitOnEnd is True:\n\t\t\tself.conn.Commit()\n\n\t\tPool().returnConnection(self.conn)\n\t\tself.conn = None\n"
] | class PySQLQuery(object):
"""
Front-End class used for interaction with the PySQLPool core
This class is used to execute queries and to request a currently open connection from the pool.
If no open connections exist a new one is created by the pool.
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, PySQLConnectionObj, commitOnEnd = False):
"""
Constructor for PySQLQuery Class
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connInfo = PySQLConnectionObj
self.record = {}
self.rowcount = 0
self.affectedRows = None
#The Real Connection to the DB
self.conn = None
self.lastError = None
self.lastInsertID = None
def __del__(self):
"""
On destruct make sure the current connection is returned back to the pool for use later
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.conn is not None:
self._ReturnConnection()
def __enter__(self):
"""
Starts transaction, used with the 'with' statement.
@author: Denis Malinovsky
@since: 5/21/2010
"""
self.Query('START TRANSACTION')
log.logger.info('Starting Transaction')
def __exit__(self, exc_type, exc_value, traceback):
"""
Commits transaction, if no exception was raised.
@author: Denis Malinovsky
@since: 5/21/2010
"""
if exc_type is None:
self.Query('COMMIT')
log.logger.info('Commiting Transaction')
else:
self.Query('ROLLBACK')
log.logger.info('Rolling Back Transaction')
#TODO: In the future lets decorate all our query calls with a connection fetching and releasing handler. Help to centralize all this logic for use in transactions in the future.
def query(self, query, args=None):
"""
Execute the passed in query against the database
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
log.logger.debug('Running query "%s" with args "%s"', query, args)
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.lastInsertID = self.conn.connection.insert_id()
self.rowcount = cursor.rowcount
log.logger.debug('Query Resulted in %s affected rows, %s rows returned, %s last insert id', self.affectedRows, self.lastInsertID, self.rowcount)
self.record = cursor.fetchall()
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
execute = Query = query
def queryOne(self, query, args=None):
"""
Execute the passed in query against the database.
Uses a Generator & fetchone to reduce your process memory size.
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.conn.updateCheckTime()
while 1:
row = cursor.fetchone()
if row is None:
break
else:
self.record = row
yield row
self.rowcount = cursor.rowcount
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
raise StopIteration
executeOne = QueryOne = queryOne
def queryMany(self, query, args):
"""
Executes a series of the same Insert Statments
Each tuple in the args list will be applied to the query and executed.
This is the equivilant of MySQLDB.cursor.executemany()
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = None
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.executemany(query, args)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMany = queryMany
def queryMulti(self, queries):
"""
Execute a series of Deletes,Inserts, & Updates in the Queires List
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = 0
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
#Execute query and store results
cursor = self.conn.getCursor()
for query in queries:
self.conn.query = query
if query.__class__ == [].__class__:
self.affectedRows += cursor.execute(query[0], query[1])
else:
self.affectedRows += cursor.execute(query)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMulti = queryMulti
def _GetConnection(self):
"""
Retieves a prelocked connection from the Pool
@author: Nick Verbeck
@since: 9/7/2008
"""
#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,
#we wait 1 second and try again.
#The Connection is returned locked to be thread safe
while self.conn is None:
self.conn = Pool().GetConnection(self.connInfo)
if self.conn is not None:
break
else:
time.sleep(1)
def _ReturnConnection(self):
"""
Returns a connection back to the pool
@author: Nick Verbeck
@since: 9/7/2008
"""
if self.conn is not None:
if self.connInfo.commitOnEnd is True or self.commitOnEnd is True:
self.conn.Commit()
Pool().returnConnection(self.conn)
self.conn = None
def escape_string(self, string):
"""
This is just an adapter function to allow previus users of MySQLdb.
To be familier with there names of functions.
@see: escapeString
"""
return MySQLdb.escape_string(string)
def escapeString(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape_string()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escapeString(string)
def escape(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escape(string)
|
nerdynick/PySQLPool | src/PySQLPool/query.py | PySQLQuery._GetConnection | python | def _GetConnection(self):
#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,
#we wait 1 second and try again.
#The Connection is returned locked to be thread safe
while self.conn is None:
self.conn = Pool().GetConnection(self.connInfo)
if self.conn is not None:
break
else:
time.sleep(1) | Retieves a prelocked connection from the Pool
@author: Nick Verbeck
@since: 9/7/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/query.py#L234-L249 | null | class PySQLQuery(object):
"""
Front-End class used for interaction with the PySQLPool core
This class is used to execute queries and to request a currently open connection from the pool.
If no open connections exist a new one is created by the pool.
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, PySQLConnectionObj, commitOnEnd = False):
"""
Constructor for PySQLQuery Class
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connInfo = PySQLConnectionObj
self.record = {}
self.rowcount = 0
self.affectedRows = None
#The Real Connection to the DB
self.conn = None
self.lastError = None
self.lastInsertID = None
def __del__(self):
"""
On destruct make sure the current connection is returned back to the pool for use later
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.conn is not None:
self._ReturnConnection()
def __enter__(self):
"""
Starts transaction, used with the 'with' statement.
@author: Denis Malinovsky
@since: 5/21/2010
"""
self.Query('START TRANSACTION')
log.logger.info('Starting Transaction')
def __exit__(self, exc_type, exc_value, traceback):
"""
Commits transaction, if no exception was raised.
@author: Denis Malinovsky
@since: 5/21/2010
"""
if exc_type is None:
self.Query('COMMIT')
log.logger.info('Commiting Transaction')
else:
self.Query('ROLLBACK')
log.logger.info('Rolling Back Transaction')
#TODO: In the future lets decorate all our query calls with a connection fetching and releasing handler. Help to centralize all this logic for use in transactions in the future.
def query(self, query, args=None):
"""
Execute the passed in query against the database
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
log.logger.debug('Running query "%s" with args "%s"', query, args)
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.lastInsertID = self.conn.connection.insert_id()
self.rowcount = cursor.rowcount
log.logger.debug('Query Resulted in %s affected rows, %s rows returned, %s last insert id', self.affectedRows, self.lastInsertID, self.rowcount)
self.record = cursor.fetchall()
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
execute = Query = query
def queryOne(self, query, args=None):
"""
Execute the passed in query against the database.
Uses a Generator & fetchone to reduce your process memory size.
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.conn.updateCheckTime()
while 1:
row = cursor.fetchone()
if row is None:
break
else:
self.record = row
yield row
self.rowcount = cursor.rowcount
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
raise StopIteration
executeOne = QueryOne = queryOne
def queryMany(self, query, args):
"""
Executes a series of the same Insert Statments
Each tuple in the args list will be applied to the query and executed.
This is the equivilant of MySQLDB.cursor.executemany()
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = None
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.executemany(query, args)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMany = queryMany
def queryMulti(self, queries):
"""
Execute a series of Deletes,Inserts, & Updates in the Queires List
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = 0
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
#Execute query and store results
cursor = self.conn.getCursor()
for query in queries:
self.conn.query = query
if query.__class__ == [].__class__:
self.affectedRows += cursor.execute(query[0], query[1])
else:
self.affectedRows += cursor.execute(query)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMulti = queryMulti
def _GetConnection(self):
"""
Retieves a prelocked connection from the Pool
@author: Nick Verbeck
@since: 9/7/2008
"""
#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,
#we wait 1 second and try again.
#The Connection is returned locked to be thread safe
while self.conn is None:
self.conn = Pool().GetConnection(self.connInfo)
if self.conn is not None:
break
else:
time.sleep(1)
def _ReturnConnection(self):
"""
Returns a connection back to the pool
@author: Nick Verbeck
@since: 9/7/2008
"""
if self.conn is not None:
if self.connInfo.commitOnEnd is True or self.commitOnEnd is True:
self.conn.Commit()
Pool().returnConnection(self.conn)
self.conn = None
def escape_string(self, string):
"""
This is just an adapter function to allow previus users of MySQLdb.
To be familier with there names of functions.
@see: escapeString
"""
return MySQLdb.escape_string(string)
def escapeString(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape_string()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escapeString(string)
def escape(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escape(string)
|
nerdynick/PySQLPool | src/PySQLPool/query.py | PySQLQuery._ReturnConnection | python | def _ReturnConnection(self):
if self.conn is not None:
if self.connInfo.commitOnEnd is True or self.commitOnEnd is True:
self.conn.Commit()
Pool().returnConnection(self.conn)
self.conn = None | Returns a connection back to the pool
@author: Nick Verbeck
@since: 9/7/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/query.py#L251-L263 | null | class PySQLQuery(object):
"""
Front-End class used for interaction with the PySQLPool core
This class is used to execute queries and to request a currently open connection from the pool.
If no open connections exist a new one is created by the pool.
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, PySQLConnectionObj, commitOnEnd = False):
"""
Constructor for PySQLQuery Class
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connInfo = PySQLConnectionObj
self.record = {}
self.rowcount = 0
self.affectedRows = None
#The Real Connection to the DB
self.conn = None
self.lastError = None
self.lastInsertID = None
def __del__(self):
"""
On destruct make sure the current connection is returned back to the pool for use later
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.conn is not None:
self._ReturnConnection()
def __enter__(self):
"""
Starts transaction, used with the 'with' statement.
@author: Denis Malinovsky
@since: 5/21/2010
"""
self.Query('START TRANSACTION')
log.logger.info('Starting Transaction')
def __exit__(self, exc_type, exc_value, traceback):
"""
Commits transaction, if no exception was raised.
@author: Denis Malinovsky
@since: 5/21/2010
"""
if exc_type is None:
self.Query('COMMIT')
log.logger.info('Commiting Transaction')
else:
self.Query('ROLLBACK')
log.logger.info('Rolling Back Transaction')
#TODO: In the future lets decorate all our query calls with a connection fetching and releasing handler. Help to centralize all this logic for use in transactions in the future.
def query(self, query, args=None):
"""
Execute the passed in query against the database
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
log.logger.debug('Running query "%s" with args "%s"', query, args)
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.lastInsertID = self.conn.connection.insert_id()
self.rowcount = cursor.rowcount
log.logger.debug('Query Resulted in %s affected rows, %s rows returned, %s last insert id', self.affectedRows, self.lastInsertID, self.rowcount)
self.record = cursor.fetchall()
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
execute = Query = query
def queryOne(self, query, args=None):
"""
Execute the passed in query against the database.
Uses a Generator & fetchone to reduce your process memory size.
@param query: MySQL Query to execute. %s or %(key)s will be replaced by parameter args sequence
@param args: Sequence of value to replace in your query. A mapping may also be used but your query must use %(key)s
@author: Nick Verbeck
@since: 5/12/2008
"""
self.affectedRows = None
self.lastError = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query
cursor = self.conn.getCursor()
self.affectedRows = cursor.execute(query, args)
self.conn.updateCheckTime()
while 1:
row = cursor.fetchone()
if row is None:
break
else:
self.record = row
yield row
self.rowcount = cursor.rowcount
except Exception, e:
self.lastError = e
self.affectedRows = None
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
raise StopIteration
executeOne = QueryOne = queryOne
def queryMany(self, query, args):
"""
Executes a series of the same Insert Statments
Each tuple in the args list will be applied to the query and executed.
This is the equivilant of MySQLDB.cursor.executemany()
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = None
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.executemany(query, args)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMany = queryMany
def queryMulti(self, queries):
"""
Execute a series of Deletes,Inserts, & Updates in the Queires List
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = 0
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
#Execute query and store results
cursor = self.conn.getCursor()
for query in queries:
self.conn.query = query
if query.__class__ == [].__class__:
self.affectedRows += cursor.execute(query[0], query[1])
else:
self.affectedRows += cursor.execute(query)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
executeMulti = queryMulti
def _GetConnection(self):
"""
Retieves a prelocked connection from the Pool
@author: Nick Verbeck
@since: 9/7/2008
"""
#Attempt to get a connection. If all connections are in use and we have reached the max number of connections,
#we wait 1 second and try again.
#The Connection is returned locked to be thread safe
while self.conn is None:
self.conn = Pool().GetConnection(self.connInfo)
if self.conn is not None:
break
else:
time.sleep(1)
def _ReturnConnection(self):
"""
Returns a connection back to the pool
@author: Nick Verbeck
@since: 9/7/2008
"""
if self.conn is not None:
if self.connInfo.commitOnEnd is True or self.commitOnEnd is True:
self.conn.Commit()
Pool().returnConnection(self.conn)
self.conn = None
def escape_string(self, string):
"""
This is just an adapter function to allow previus users of MySQLdb.
To be familier with there names of functions.
@see: escapeString
"""
return MySQLdb.escape_string(string)
def escapeString(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape_string()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escapeString(string)
def escape(self, string):
"""
Escapes a string for use in a query
This is the equivilate and MySQLdb.escape()
@author: Nick Verbeck
@since: 9/7/2008
"""
return MySQLdb.escape(string)
|
nerdynick/PySQLPool | src/PySQLPool/connection.py | ConnectionManager.lock | python | def lock(self, block=True):
self._locked = True
return self._lock.acquire(block) | Lock connection from being used else where | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/connection.py#L117-L122 | null | class ConnectionManager(object):
"""
Physical Connection manager
Used to manage the physical MySQL connection and the thread safe locks on that connection
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, connectionInfo):
"""
Constructor for ConnectionManager
@param connectionInfo: Connection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connectionInfo = connectionInfo
self.connection = None
#Lock management
self._lock = Semaphore()
self._locked = False
self.activeConnections = 0
self.query = None
self.lastConnectionCheck = None
def release(self):
"""
Release the connection lock
"""
if self._locked is True:
self._locked = False
self._lock.release()
def is_locked(self):
"""
Returns the status of this connection
"""
return self._locked
def getCursor(self):
"""
Get a Dictionary Cursor for executing queries
"""
if self.connection is None:
self.Connect()
return self.connection.cursor(MySQLdb.cursors.DictCursor)
def _updateCheckTime(self):
"""
Updates the connection check timestamp
"""
self.lastConnectionCheck = datetime.datetime.now()
def Connect(self):
"""
Creates a new physical connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
self.connection = MySQLdb.connect(*[], **self.connectionInfo.info)
if self.connectionInfo.commitOnEnd is True:
self.connection.autocommit()
self._updateCheckTime()
def autoCommit(self, autocommit):
self.connectionInfo.commitOnEnd = autocommit
if autocommit is True and self.connection is not None:
self.connection.autocommit()
def ReConnect(self):
"""
Attempts to close current connection if open and re-opens a new connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
self.Close()
self.Connect()
def TestConnection(self, forceCheck = False):
"""
Tests the current physical connection if it is open and hasn't timed out
@return: boolean True is connection is open, False if connection is closed
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
return False
elif forceCheck is True or (datetime.datetime.now() - self.lastConnectionCheck) >= connection_timeout:
try:
#TODO: Find a better way to test if connection is open
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select current_user')
self._updateCheckTime()
return True
except Exception, e:
self.connection.close()
self.connection = None
return False
else:
return True
def being(self):
"""
Being a Transaction
@author: Nick Verbeck
@since: 5/14/2011
"""
try:
if self.connection is not None:
self.lock()
c = self.getCursor()
c.execute('BEGIN;')
c.close()
except Exception, e:
pass
def commit(self):
"""
Commit MySQL Transaction to database.
MySQLDB: If the database and the tables support transactions,
this commits the current transaction; otherwise
this method successfully does nothing.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.commit()
self._updateCheckTime()
self.release()
except Exception, e:
pass
Commit = commit
def rollback(self):
"""
Rollback MySQL Transaction to database.
MySQLDB: If the database and tables support transactions, this rolls
back (cancels) the current transaction; otherwise a
NotSupportedError is raised.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.rollback()
self._updateCheckTime()
self.release()
except Exception, e:
pass
def Close(self):
"""
Commits and closes the current connection
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is not None:
try:
self.connection.commit()
self.connection.close()
self.connection = None
except Exception, e:
pass
|
nerdynick/PySQLPool | src/PySQLPool/connection.py | ConnectionManager.release | python | def release(self):
if self._locked is True:
self._locked = False
self._lock.release() | Release the connection lock | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/connection.py#L124-L130 | null | class ConnectionManager(object):
"""
Physical Connection manager
Used to manage the physical MySQL connection and the thread safe locks on that connection
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, connectionInfo):
"""
Constructor for ConnectionManager
@param connectionInfo: Connection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connectionInfo = connectionInfo
self.connection = None
#Lock management
self._lock = Semaphore()
self._locked = False
self.activeConnections = 0
self.query = None
self.lastConnectionCheck = None
def lock(self, block=True):
"""
Lock connection from being used else where
"""
self._locked = True
return self._lock.acquire(block)
def is_locked(self):
"""
Returns the status of this connection
"""
return self._locked
def getCursor(self):
"""
Get a Dictionary Cursor for executing queries
"""
if self.connection is None:
self.Connect()
return self.connection.cursor(MySQLdb.cursors.DictCursor)
def _updateCheckTime(self):
"""
Updates the connection check timestamp
"""
self.lastConnectionCheck = datetime.datetime.now()
def Connect(self):
"""
Creates a new physical connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
self.connection = MySQLdb.connect(*[], **self.connectionInfo.info)
if self.connectionInfo.commitOnEnd is True:
self.connection.autocommit()
self._updateCheckTime()
def autoCommit(self, autocommit):
self.connectionInfo.commitOnEnd = autocommit
if autocommit is True and self.connection is not None:
self.connection.autocommit()
def ReConnect(self):
"""
Attempts to close current connection if open and re-opens a new connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
self.Close()
self.Connect()
def TestConnection(self, forceCheck = False):
"""
Tests the current physical connection if it is open and hasn't timed out
@return: boolean True is connection is open, False if connection is closed
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
return False
elif forceCheck is True or (datetime.datetime.now() - self.lastConnectionCheck) >= connection_timeout:
try:
#TODO: Find a better way to test if connection is open
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select current_user')
self._updateCheckTime()
return True
except Exception, e:
self.connection.close()
self.connection = None
return False
else:
return True
def being(self):
"""
Being a Transaction
@author: Nick Verbeck
@since: 5/14/2011
"""
try:
if self.connection is not None:
self.lock()
c = self.getCursor()
c.execute('BEGIN;')
c.close()
except Exception, e:
pass
def commit(self):
"""
Commit MySQL Transaction to database.
MySQLDB: If the database and the tables support transactions,
this commits the current transaction; otherwise
this method successfully does nothing.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.commit()
self._updateCheckTime()
self.release()
except Exception, e:
pass
Commit = commit
def rollback(self):
"""
Rollback MySQL Transaction to database.
MySQLDB: If the database and tables support transactions, this rolls
back (cancels) the current transaction; otherwise a
NotSupportedError is raised.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.rollback()
self._updateCheckTime()
self.release()
except Exception, e:
pass
def Close(self):
"""
Commits and closes the current connection
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is not None:
try:
self.connection.commit()
self.connection.close()
self.connection = None
except Exception, e:
pass
|
nerdynick/PySQLPool | src/PySQLPool/connection.py | ConnectionManager.getCursor | python | def getCursor(self):
if self.connection is None:
self.Connect()
return self.connection.cursor(MySQLdb.cursors.DictCursor) | Get a Dictionary Cursor for executing queries | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/connection.py#L138-L145 | [
"def Connect(self):\n\t\"\"\"\n\tCreates a new physical connection to the database\n\n\t@author: Nick Verbeck\n\t@since: 5/12/2008\n\t\"\"\"\n\tif self.connection is None:\n\t\tself.connection = MySQLdb.connect(*[], **self.connectionInfo.info)\n\n\tif self.connectionInfo.commitOnEnd is True:\n\t\tself.connection.autocommit()\n\n\tself._updateCheckTime()\n"
] | class ConnectionManager(object):
"""
Physical Connection manager
Used to manage the physical MySQL connection and the thread safe locks on that connection
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, connectionInfo):
"""
Constructor for ConnectionManager
@param connectionInfo: Connection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connectionInfo = connectionInfo
self.connection = None
#Lock management
self._lock = Semaphore()
self._locked = False
self.activeConnections = 0
self.query = None
self.lastConnectionCheck = None
def lock(self, block=True):
"""
Lock connection from being used else where
"""
self._locked = True
return self._lock.acquire(block)
def release(self):
"""
Release the connection lock
"""
if self._locked is True:
self._locked = False
self._lock.release()
def is_locked(self):
"""
Returns the status of this connection
"""
return self._locked
def getCursor(self):
"""
Get a Dictionary Cursor for executing queries
"""
if self.connection is None:
self.Connect()
return self.connection.cursor(MySQLdb.cursors.DictCursor)
def _updateCheckTime(self):
"""
Updates the connection check timestamp
"""
self.lastConnectionCheck = datetime.datetime.now()
def Connect(self):
"""
Creates a new physical connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
self.connection = MySQLdb.connect(*[], **self.connectionInfo.info)
if self.connectionInfo.commitOnEnd is True:
self.connection.autocommit()
self._updateCheckTime()
def autoCommit(self, autocommit):
self.connectionInfo.commitOnEnd = autocommit
if autocommit is True and self.connection is not None:
self.connection.autocommit()
def ReConnect(self):
"""
Attempts to close current connection if open and re-opens a new connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
self.Close()
self.Connect()
def TestConnection(self, forceCheck = False):
"""
Tests the current physical connection if it is open and hasn't timed out
@return: boolean True is connection is open, False if connection is closed
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
return False
elif forceCheck is True or (datetime.datetime.now() - self.lastConnectionCheck) >= connection_timeout:
try:
#TODO: Find a better way to test if connection is open
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select current_user')
self._updateCheckTime()
return True
except Exception, e:
self.connection.close()
self.connection = None
return False
else:
return True
def being(self):
"""
Being a Transaction
@author: Nick Verbeck
@since: 5/14/2011
"""
try:
if self.connection is not None:
self.lock()
c = self.getCursor()
c.execute('BEGIN;')
c.close()
except Exception, e:
pass
def commit(self):
"""
Commit MySQL Transaction to database.
MySQLDB: If the database and the tables support transactions,
this commits the current transaction; otherwise
this method successfully does nothing.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.commit()
self._updateCheckTime()
self.release()
except Exception, e:
pass
Commit = commit
def rollback(self):
"""
Rollback MySQL Transaction to database.
MySQLDB: If the database and tables support transactions, this rolls
back (cancels) the current transaction; otherwise a
NotSupportedError is raised.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.rollback()
self._updateCheckTime()
self.release()
except Exception, e:
pass
def Close(self):
"""
Commits and closes the current connection
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is not None:
try:
self.connection.commit()
self.connection.close()
self.connection = None
except Exception, e:
pass
|
nerdynick/PySQLPool | src/PySQLPool/connection.py | ConnectionManager.Connect | python | def Connect(self):
if self.connection is None:
self.connection = MySQLdb.connect(*[], **self.connectionInfo.info)
if self.connectionInfo.commitOnEnd is True:
self.connection.autocommit()
self._updateCheckTime() | Creates a new physical connection to the database
@author: Nick Verbeck
@since: 5/12/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/connection.py#L153-L166 | [
"def _updateCheckTime(self):\n\t\"\"\"\n\tUpdates the connection check timestamp\n\t\"\"\"\n\tself.lastConnectionCheck = datetime.datetime.now()\n"
] | class ConnectionManager(object):
"""
Physical Connection manager
Used to manage the physical MySQL connection and the thread safe locks on that connection
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, connectionInfo):
"""
Constructor for ConnectionManager
@param connectionInfo: Connection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connectionInfo = connectionInfo
self.connection = None
#Lock management
self._lock = Semaphore()
self._locked = False
self.activeConnections = 0
self.query = None
self.lastConnectionCheck = None
def lock(self, block=True):
"""
Lock connection from being used else where
"""
self._locked = True
return self._lock.acquire(block)
def release(self):
"""
Release the connection lock
"""
if self._locked is True:
self._locked = False
self._lock.release()
def is_locked(self):
"""
Returns the status of this connection
"""
return self._locked
def getCursor(self):
"""
Get a Dictionary Cursor for executing queries
"""
if self.connection is None:
self.Connect()
return self.connection.cursor(MySQLdb.cursors.DictCursor)
def _updateCheckTime(self):
"""
Updates the connection check timestamp
"""
self.lastConnectionCheck = datetime.datetime.now()
def Connect(self):
"""
Creates a new physical connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
self.connection = MySQLdb.connect(*[], **self.connectionInfo.info)
if self.connectionInfo.commitOnEnd is True:
self.connection.autocommit()
self._updateCheckTime()
def autoCommit(self, autocommit):
self.connectionInfo.commitOnEnd = autocommit
if autocommit is True and self.connection is not None:
self.connection.autocommit()
def ReConnect(self):
"""
Attempts to close current connection if open and re-opens a new connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
self.Close()
self.Connect()
def TestConnection(self, forceCheck = False):
"""
Tests the current physical connection if it is open and hasn't timed out
@return: boolean True is connection is open, False if connection is closed
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
return False
elif forceCheck is True or (datetime.datetime.now() - self.lastConnectionCheck) >= connection_timeout:
try:
#TODO: Find a better way to test if connection is open
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select current_user')
self._updateCheckTime()
return True
except Exception, e:
self.connection.close()
self.connection = None
return False
else:
return True
def being(self):
"""
Being a Transaction
@author: Nick Verbeck
@since: 5/14/2011
"""
try:
if self.connection is not None:
self.lock()
c = self.getCursor()
c.execute('BEGIN;')
c.close()
except Exception, e:
pass
def commit(self):
"""
Commit MySQL Transaction to database.
MySQLDB: If the database and the tables support transactions,
this commits the current transaction; otherwise
this method successfully does nothing.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.commit()
self._updateCheckTime()
self.release()
except Exception, e:
pass
Commit = commit
def rollback(self):
"""
Rollback MySQL Transaction to database.
MySQLDB: If the database and tables support transactions, this rolls
back (cancels) the current transaction; otherwise a
NotSupportedError is raised.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.rollback()
self._updateCheckTime()
self.release()
except Exception, e:
pass
def Close(self):
"""
Commits and closes the current connection
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is not None:
try:
self.connection.commit()
self.connection.close()
self.connection = None
except Exception, e:
pass
|
nerdynick/PySQLPool | src/PySQLPool/connection.py | ConnectionManager.being | python | def being(self):
try:
if self.connection is not None:
self.lock()
c = self.getCursor()
c.execute('BEGIN;')
c.close()
except Exception, e:
pass | Being a Transaction
@author: Nick Verbeck
@since: 5/14/2011 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/connection.py#L207-L221 | [
"def lock(self, block=True):\n\t\"\"\"\n\tLock connection from being used else where\n\t\"\"\"\n\tself._locked = True\n\treturn self._lock.acquire(block)\n",
"def getCursor(self):\n\t\"\"\"\n\tGet a Dictionary Cursor for executing queries\n\t\"\"\"\n\tif self.connection is None:\n\t\tself.Connect()\n\n\treturn self.connection.cursor(MySQLdb.cursors.DictCursor)\n"
] | class ConnectionManager(object):
"""
Physical Connection manager
Used to manage the physical MySQL connection and the thread safe locks on that connection
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, connectionInfo):
"""
Constructor for ConnectionManager
@param connectionInfo: Connection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connectionInfo = connectionInfo
self.connection = None
#Lock management
self._lock = Semaphore()
self._locked = False
self.activeConnections = 0
self.query = None
self.lastConnectionCheck = None
def lock(self, block=True):
"""
Lock connection from being used else where
"""
self._locked = True
return self._lock.acquire(block)
def release(self):
"""
Release the connection lock
"""
if self._locked is True:
self._locked = False
self._lock.release()
def is_locked(self):
"""
Returns the status of this connection
"""
return self._locked
def getCursor(self):
"""
Get a Dictionary Cursor for executing queries
"""
if self.connection is None:
self.Connect()
return self.connection.cursor(MySQLdb.cursors.DictCursor)
def _updateCheckTime(self):
"""
Updates the connection check timestamp
"""
self.lastConnectionCheck = datetime.datetime.now()
def Connect(self):
"""
Creates a new physical connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
self.connection = MySQLdb.connect(*[], **self.connectionInfo.info)
if self.connectionInfo.commitOnEnd is True:
self.connection.autocommit()
self._updateCheckTime()
def autoCommit(self, autocommit):
self.connectionInfo.commitOnEnd = autocommit
if autocommit is True and self.connection is not None:
self.connection.autocommit()
def ReConnect(self):
"""
Attempts to close current connection if open and re-opens a new connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
self.Close()
self.Connect()
def TestConnection(self, forceCheck = False):
"""
Tests the current physical connection if it is open and hasn't timed out
@return: boolean True is connection is open, False if connection is closed
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
return False
elif forceCheck is True or (datetime.datetime.now() - self.lastConnectionCheck) >= connection_timeout:
try:
#TODO: Find a better way to test if connection is open
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select current_user')
self._updateCheckTime()
return True
except Exception, e:
self.connection.close()
self.connection = None
return False
else:
return True
def being(self):
"""
Being a Transaction
@author: Nick Verbeck
@since: 5/14/2011
"""
try:
if self.connection is not None:
self.lock()
c = self.getCursor()
c.execute('BEGIN;')
c.close()
except Exception, e:
pass
def commit(self):
"""
Commit MySQL Transaction to database.
MySQLDB: If the database and the tables support transactions,
this commits the current transaction; otherwise
this method successfully does nothing.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.commit()
self._updateCheckTime()
self.release()
except Exception, e:
pass
Commit = commit
def rollback(self):
"""
Rollback MySQL Transaction to database.
MySQLDB: If the database and tables support transactions, this rolls
back (cancels) the current transaction; otherwise a
NotSupportedError is raised.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.rollback()
self._updateCheckTime()
self.release()
except Exception, e:
pass
def Close(self):
"""
Commits and closes the current connection
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is not None:
try:
self.connection.commit()
self.connection.close()
self.connection = None
except Exception, e:
pass
|
nerdynick/PySQLPool | src/PySQLPool/connection.py | ConnectionManager.commit | python | def commit(self):
try:
if self.connection is not None:
self.connection.commit()
self._updateCheckTime()
self.release()
except Exception, e:
pass | Commit MySQL Transaction to database.
MySQLDB: If the database and the tables support transactions,
this commits the current transaction; otherwise
this method successfully does nothing.
@author: Nick Verbeck
@since: 5/12/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/connection.py#L224-L240 | [
"def release(self):\n\t\"\"\"\n\tRelease the connection lock\n\t\"\"\"\n\tif self._locked is True:\n\t\tself._locked = False\n\t\tself._lock.release()\n",
"def _updateCheckTime(self):\n\t\"\"\"\n\tUpdates the connection check timestamp\n\t\"\"\"\n\tself.lastConnectionCheck = datetime.datetime.now()\n"
] | class ConnectionManager(object):
"""
Physical Connection manager
Used to manage the physical MySQL connection and the thread safe locks on that connection
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, connectionInfo):
"""
Constructor for ConnectionManager
@param connectionInfo: Connection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connectionInfo = connectionInfo
self.connection = None
#Lock management
self._lock = Semaphore()
self._locked = False
self.activeConnections = 0
self.query = None
self.lastConnectionCheck = None
def lock(self, block=True):
"""
Lock connection from being used else where
"""
self._locked = True
return self._lock.acquire(block)
def release(self):
"""
Release the connection lock
"""
if self._locked is True:
self._locked = False
self._lock.release()
def is_locked(self):
"""
Returns the status of this connection
"""
return self._locked
def getCursor(self):
"""
Get a Dictionary Cursor for executing queries
"""
if self.connection is None:
self.Connect()
return self.connection.cursor(MySQLdb.cursors.DictCursor)
def _updateCheckTime(self):
"""
Updates the connection check timestamp
"""
self.lastConnectionCheck = datetime.datetime.now()
def Connect(self):
"""
Creates a new physical connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
self.connection = MySQLdb.connect(*[], **self.connectionInfo.info)
if self.connectionInfo.commitOnEnd is True:
self.connection.autocommit()
self._updateCheckTime()
def autoCommit(self, autocommit):
self.connectionInfo.commitOnEnd = autocommit
if autocommit is True and self.connection is not None:
self.connection.autocommit()
def ReConnect(self):
"""
Attempts to close current connection if open and re-opens a new connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
self.Close()
self.Connect()
def TestConnection(self, forceCheck = False):
"""
Tests the current physical connection if it is open and hasn't timed out
@return: boolean True is connection is open, False if connection is closed
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
return False
elif forceCheck is True or (datetime.datetime.now() - self.lastConnectionCheck) >= connection_timeout:
try:
#TODO: Find a better way to test if connection is open
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select current_user')
self._updateCheckTime()
return True
except Exception, e:
self.connection.close()
self.connection = None
return False
else:
return True
def being(self):
"""
Being a Transaction
@author: Nick Verbeck
@since: 5/14/2011
"""
try:
if self.connection is not None:
self.lock()
c = self.getCursor()
c.execute('BEGIN;')
c.close()
except Exception, e:
pass
def commit(self):
"""
Commit MySQL Transaction to database.
MySQLDB: If the database and the tables support transactions,
this commits the current transaction; otherwise
this method successfully does nothing.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.commit()
self._updateCheckTime()
self.release()
except Exception, e:
pass
Commit = commit
def rollback(self):
"""
Rollback MySQL Transaction to database.
MySQLDB: If the database and tables support transactions, this rolls
back (cancels) the current transaction; otherwise a
NotSupportedError is raised.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.rollback()
self._updateCheckTime()
self.release()
except Exception, e:
pass
def Close(self):
"""
Commits and closes the current connection
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is not None:
try:
self.connection.commit()
self.connection.close()
self.connection = None
except Exception, e:
pass
|
nerdynick/PySQLPool | src/PySQLPool/connection.py | ConnectionManager.rollback | python | def rollback(self):
try:
if self.connection is not None:
self.connection.rollback()
self._updateCheckTime()
self.release()
except Exception, e:
pass | Rollback MySQL Transaction to database.
MySQLDB: If the database and tables support transactions, this rolls
back (cancels) the current transaction; otherwise a
NotSupportedError is raised.
@author: Nick Verbeck
@since: 5/12/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/connection.py#L243-L259 | [
"def release(self):\n\t\"\"\"\n\tRelease the connection lock\n\t\"\"\"\n\tif self._locked is True:\n\t\tself._locked = False\n\t\tself._lock.release()\n",
"def _updateCheckTime(self):\n\t\"\"\"\n\tUpdates the connection check timestamp\n\t\"\"\"\n\tself.lastConnectionCheck = datetime.datetime.now()\n"
] | class ConnectionManager(object):
"""
Physical Connection manager
Used to manage the physical MySQL connection and the thread safe locks on that connection
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, connectionInfo):
"""
Constructor for ConnectionManager
@param connectionInfo: Connection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connectionInfo = connectionInfo
self.connection = None
#Lock management
self._lock = Semaphore()
self._locked = False
self.activeConnections = 0
self.query = None
self.lastConnectionCheck = None
def lock(self, block=True):
"""
Lock connection from being used else where
"""
self._locked = True
return self._lock.acquire(block)
def release(self):
"""
Release the connection lock
"""
if self._locked is True:
self._locked = False
self._lock.release()
def is_locked(self):
"""
Returns the status of this connection
"""
return self._locked
def getCursor(self):
"""
Get a Dictionary Cursor for executing queries
"""
if self.connection is None:
self.Connect()
return self.connection.cursor(MySQLdb.cursors.DictCursor)
def _updateCheckTime(self):
"""
Updates the connection check timestamp
"""
self.lastConnectionCheck = datetime.datetime.now()
def Connect(self):
"""
Creates a new physical connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
self.connection = MySQLdb.connect(*[], **self.connectionInfo.info)
if self.connectionInfo.commitOnEnd is True:
self.connection.autocommit()
self._updateCheckTime()
def autoCommit(self, autocommit):
self.connectionInfo.commitOnEnd = autocommit
if autocommit is True and self.connection is not None:
self.connection.autocommit()
def ReConnect(self):
"""
Attempts to close current connection if open and re-opens a new connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
self.Close()
self.Connect()
def TestConnection(self, forceCheck = False):
"""
Tests the current physical connection if it is open and hasn't timed out
@return: boolean True is connection is open, False if connection is closed
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
return False
elif forceCheck is True or (datetime.datetime.now() - self.lastConnectionCheck) >= connection_timeout:
try:
#TODO: Find a better way to test if connection is open
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select current_user')
self._updateCheckTime()
return True
except Exception, e:
self.connection.close()
self.connection = None
return False
else:
return True
def being(self):
"""
Being a Transaction
@author: Nick Verbeck
@since: 5/14/2011
"""
try:
if self.connection is not None:
self.lock()
c = self.getCursor()
c.execute('BEGIN;')
c.close()
except Exception, e:
pass
def commit(self):
"""
Commit MySQL Transaction to database.
MySQLDB: If the database and the tables support transactions,
this commits the current transaction; otherwise
this method successfully does nothing.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.commit()
self._updateCheckTime()
self.release()
except Exception, e:
pass
Commit = commit
def rollback(self):
"""
Rollback MySQL Transaction to database.
MySQLDB: If the database and tables support transactions, this rolls
back (cancels) the current transaction; otherwise a
NotSupportedError is raised.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.rollback()
self._updateCheckTime()
self.release()
except Exception, e:
pass
def Close(self):
"""
Commits and closes the current connection
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is not None:
try:
self.connection.commit()
self.connection.close()
self.connection = None
except Exception, e:
pass
|
nerdynick/PySQLPool | src/PySQLPool/connection.py | ConnectionManager.Close | python | def Close(self):
if self.connection is not None:
try:
self.connection.commit()
self.connection.close()
self.connection = None
except Exception, e:
pass | Commits and closes the current connection
@author: Nick Verbeck
@since: 5/12/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/connection.py#L261-L274 | null | class ConnectionManager(object):
"""
Physical Connection manager
Used to manage the physical MySQL connection and the thread safe locks on that connection
@author: Nick Verbeck
@since: 5/12/2008
@version: 0.1
"""
def __init__(self, connectionInfo):
"""
Constructor for ConnectionManager
@param connectionInfo: Connection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
self.connectionInfo = connectionInfo
self.connection = None
#Lock management
self._lock = Semaphore()
self._locked = False
self.activeConnections = 0
self.query = None
self.lastConnectionCheck = None
def lock(self, block=True):
"""
Lock connection from being used else where
"""
self._locked = True
return self._lock.acquire(block)
def release(self):
"""
Release the connection lock
"""
if self._locked is True:
self._locked = False
self._lock.release()
def is_locked(self):
"""
Returns the status of this connection
"""
return self._locked
def getCursor(self):
"""
Get a Dictionary Cursor for executing queries
"""
if self.connection is None:
self.Connect()
return self.connection.cursor(MySQLdb.cursors.DictCursor)
def _updateCheckTime(self):
"""
Updates the connection check timestamp
"""
self.lastConnectionCheck = datetime.datetime.now()
def Connect(self):
"""
Creates a new physical connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
self.connection = MySQLdb.connect(*[], **self.connectionInfo.info)
if self.connectionInfo.commitOnEnd is True:
self.connection.autocommit()
self._updateCheckTime()
def autoCommit(self, autocommit):
self.connectionInfo.commitOnEnd = autocommit
if autocommit is True and self.connection is not None:
self.connection.autocommit()
def ReConnect(self):
"""
Attempts to close current connection if open and re-opens a new connection to the database
@author: Nick Verbeck
@since: 5/12/2008
"""
self.Close()
self.Connect()
def TestConnection(self, forceCheck = False):
"""
Tests the current physical connection if it is open and hasn't timed out
@return: boolean True is connection is open, False if connection is closed
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is None:
return False
elif forceCheck is True or (datetime.datetime.now() - self.lastConnectionCheck) >= connection_timeout:
try:
#TODO: Find a better way to test if connection is open
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select current_user')
self._updateCheckTime()
return True
except Exception, e:
self.connection.close()
self.connection = None
return False
else:
return True
def being(self):
"""
Being a Transaction
@author: Nick Verbeck
@since: 5/14/2011
"""
try:
if self.connection is not None:
self.lock()
c = self.getCursor()
c.execute('BEGIN;')
c.close()
except Exception, e:
pass
def commit(self):
"""
Commit MySQL Transaction to database.
MySQLDB: If the database and the tables support transactions,
this commits the current transaction; otherwise
this method successfully does nothing.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.commit()
self._updateCheckTime()
self.release()
except Exception, e:
pass
Commit = commit
def rollback(self):
"""
Rollback MySQL Transaction to database.
MySQLDB: If the database and tables support transactions, this rolls
back (cancels) the current transaction; otherwise a
NotSupportedError is raised.
@author: Nick Verbeck
@since: 5/12/2008
"""
try:
if self.connection is not None:
self.connection.rollback()
self._updateCheckTime()
self.release()
except Exception, e:
pass
def Close(self):
"""
Commits and closes the current connection
@author: Nick Verbeck
@since: 5/12/2008
"""
if self.connection is not None:
try:
self.connection.commit()
self.connection.close()
self.connection = None
except Exception, e:
pass
|
nerdynick/PySQLPool | src/PySQLPool/pool.py | Pool.Terminate | python | def Terminate(self):
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.Close()
except Exception:
#We may throw exceptions due to already closed connections
pass
conn.release()
except Exception:
pass
self.connections = {}
finally:
self.lock.release() | Close all open connections
Loop though all the connections and commit all queries and close all the connections.
This should be called at the end of your application.
@author: Nick Verbeck
@since: 5/12/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/pool.py#L42-L69 | null | class Pool(object):
"""
MySQL Connection Pool Manager
This is the heart of the PySQLPool Library. The borg pattern is used here to store connections and manage the connections.
@author: Nick Verbeck
@since: 5/18/2008
@version: 0.2
"""
#Dictionary used for storing all Connection information
__Pool = {}
#Max Connections that can be opened among all connections
maxActiveConnections = 10
def __init__(self):
"""
Constructor for PySQLPool
@author: Nick Verbeck
@since: 5/12/2008
"""
self.__dict__ = self.__Pool
#For 1st instantiation lets setup all our variables
if not self.__dict__.has_key('lock'):
self.lock = Condition()
if not self.__dict__.has_key('connections'):
self.connections = {}
def Terminate(self):
"""
Close all open connections
Loop though all the connections and commit all queries and close all the connections.
This should be called at the end of your application.
@author: Nick Verbeck
@since: 5/12/2008
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.Close()
except Exception:
#We may throw exceptions due to already closed connections
pass
conn.release()
except Exception:
pass
self.connections = {}
finally:
self.lock.release()
def Cleanup(self):
"""
Cleanup Timed out connections
Loop though all the connections and test if still active. If inactive close socket.
@author: Nick Verbeck
@since: 2/20/2009
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
open = conn.TestConnection(forceCheck=True)
if open is True:
conn.commit()
else:
#Remove the connection from the pool. Its dead better of recreating it.
index = bucket.index(conn)
del bucket[index]
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release()
def Commit(self):
"""
Commits all currently open connections
@author: Nick Verbeck
@since: 9/12/2008
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.commit()
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release()
def GetConnection(self, ConnectionObj):
"""
Get a Open and active connection
Returns a PySQLConnectionManager if one is open else it will create a new one if the max active connections hasn't been hit.
If all possible connections are used. Then None is returned.
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
key = ConnectionObj.getKey()
connection = None
if self.connections.has_key(key):
connection = self._getConnectionFromPoolSet(key)
if connection is None:
self.lock.acquire()
if len(self.connections[key]) < self.maxActiveConnections:
#Create a new connection
connection = self._createConnection(ConnectionObj)
self.connections[key].append(connection)
self.lock.release()
else:
#Wait for a free connection. We maintain the lock on the pool so we are the 1st to get a connection.
while connection is None:
connection = self._getConnectionFromPoolSet(key)
self.lock.release()
#Create new Connection Pool Set
else:
self.lock.acquire()
#We do a double check now that its locked to be sure some other thread didn't create this while we may have been waiting.
if not self.connections.has_key(key):
self.connections[key] = []
if len(self.connections[key]) < self.maxActiveConnections:
#Create a new connection
connection = self._createConnection(ConnectionObj)
self.connections[key].append(connection)
else:
#A rare thing happened. So many threads created connections so fast we need to wait for a free one.
while connection is None:
connection = self._getConnectionFromPoolSet(key)
self.lock.release()
return connection
def _getConnectionFromPoolSet(self, key):
connection = None
for conn in self.connections[key]:
#Grab an active connection if maxActivePerConnection is not meet
#TODO: Implement a max usage per connection object
if not conn.is_locked():
conn.lock()
try:
if conn.TestConnection() is False:
conn.ReConnect()
connection = conn
conn.release()
except Exception:
conn.release()
raise
return connection
def _createConnection(self, info):
connection = ConnectionManager(info)
connection.Connect()
return connection
|
nerdynick/PySQLPool | src/PySQLPool/pool.py | Pool.Cleanup | python | def Cleanup(self):
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
open = conn.TestConnection(forceCheck=True)
if open is True:
conn.commit()
else:
#Remove the connection from the pool. Its dead better of recreating it.
index = bucket.index(conn)
del bucket[index]
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release() | Cleanup Timed out connections
Loop though all the connections and test if still active. If inactive close socket.
@author: Nick Verbeck
@since: 2/20/2009 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/pool.py#L71-L100 | null | class Pool(object):
"""
MySQL Connection Pool Manager
This is the heart of the PySQLPool Library. The borg pattern is used here to store connections and manage the connections.
@author: Nick Verbeck
@since: 5/18/2008
@version: 0.2
"""
#Dictionary used for storing all Connection information
__Pool = {}
#Max Connections that can be opened among all connections
maxActiveConnections = 10
def __init__(self):
    """
    Constructor for PySQLPool.

    Borg pattern: each instance rebinds its __dict__ to the shared
    class-level __Pool dict, so all Pool() objects see the same lock
    and connection buckets.

    @author: Nick Verbeck
    @since: 5/12/2008
    """
    self.__dict__ = self.__Pool
    # For the 1st instantiation, set up the shared state exactly once.
    # (has_key is Python 2 only — this module targets Python 2.)
    if not self.__dict__.has_key('lock'):
        self.lock = Condition()
    if not self.__dict__.has_key('connections'):
        self.connections = {}
def Terminate(self):
    """
    Close all open connections.

    Loop through all the connections and close every one. This should
    be called at the end of your application.

    @author: Nick Verbeck
    @since: 5/12/2008
    """
    self.lock.acquire()
    try:
        for bucket in self.connections.values():
            try:
                for conn in bucket:
                    conn.lock()
                    try:
                        conn.Close()
                    except Exception:
                        # We may throw exceptions due to already closed connections
                        pass
                    conn.release()
            except Exception:
                pass
        # Drop every bucket; later GetConnection calls rebuild them.
        self.connections = {}
    finally:
        self.lock.release()
def Cleanup(self):
"""
Cleanup Timed out connections
Loop though all the connections and test if still active. If inactive close socket.
@author: Nick Verbeck
@since: 2/20/2009
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
open = conn.TestConnection(forceCheck=True)
if open is True:
conn.commit()
else:
#Remove the connection from the pool. Its dead better of recreating it.
index = bucket.index(conn)
del bucket[index]
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release()
def Commit(self):
"""
Commits all currently open connections
@author: Nick Verbeck
@since: 9/12/2008
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.commit()
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release()
def GetConnection(self, ConnectionObj):
    """
    Get an open and active connection.

    Returns a ConnectionManager if one is free; otherwise creates a new
    one as long as the bucket for this key holds fewer than
    maxActiveConnections entries. When the bucket is full, this loops
    until some connection becomes free.

    NOTE(review): the wait loops spin without sleeping and the pool
    lock is held while spinning — confirm this busy-wait is acceptable
    under contention.

    @param ConnectionObj: PySQLConnection object representing your
        connection string; getKey() identifies the bucket.
    @author: Nick Verbeck
    @since: 5/12/2008
    """
    key = ConnectionObj.getKey()
    connection = None
    if self.connections.has_key(key):
        # Bucket already exists: try to grab an idle connection first.
        connection = self._getConnectionFromPoolSet(key)
        if connection is None:
            self.lock.acquire()
            if len(self.connections[key]) < self.maxActiveConnections:
                # Create a new connection
                connection = self._createConnection(ConnectionObj)
                self.connections[key].append(connection)
                self.lock.release()
            else:
                # Wait for a free connection. We maintain the lock on the
                # pool so we are the 1st to get a connection.
                while connection is None:
                    connection = self._getConnectionFromPoolSet(key)
                self.lock.release()
    # Create new Connection Pool Set
    else:
        self.lock.acquire()
        # Double check now that it's locked, to be sure some other thread
        # didn't create this bucket while we may have been waiting.
        if not self.connections.has_key(key):
            self.connections[key] = []
        if len(self.connections[key]) < self.maxActiveConnections:
            # Create a new connection
            connection = self._createConnection(ConnectionObj)
            self.connections[key].append(connection)
        else:
            # A rare thing happened: so many threads created connections so
            # fast that we need to wait for a free one.
            while connection is None:
                connection = self._getConnectionFromPoolSet(key)
        self.lock.release()
    return connection
def _getConnectionFromPoolSet(self, key):
    """
    Return a usable connection from the pool bucket for *key*, or None.

    Scans the bucket for a connection that is not currently locked,
    locks it, verifies it is still alive (reconnecting if the server
    dropped it), and remembers it as the result.

    NOTE(review): the chosen connection is release()d before being
    returned, so the caller receives it unlocked — confirm this is the
    intended hand-off protocol. Also, the loop has no early break, so a
    later idle connection can overwrite an earlier one — verify.
    """
    connection = None
    for conn in self.connections[key]:
        # Grab an active connection if maxActivePerConnection is not met
        # TODO: Implement a max usage per connection object
        if not conn.is_locked():
            conn.lock()
            try:
                # Revive the connection if the server side dropped it.
                if conn.TestConnection() is False:
                    conn.ReConnect()
                connection = conn
                conn.release()
            except Exception:
                # Never leave the connection locked on failure.
                conn.release()
                raise
    return connection
def _createConnection(self, info):
    """Build a brand-new ConnectionManager for *info*, open it, and return it."""
    manager = ConnectionManager(info)
    manager.Connect()
    return manager
|
nerdynick/PySQLPool | src/PySQLPool/pool.py | Pool.Commit | python | def Commit(self):
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.commit()
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release() | Commits all currently open connections
@author: Nick Verbeck
@since: 9/12/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/pool.py#L102-L123 | null | class Pool(object):
"""
MySQL Connection Pool Manager
This is the heart of the PySQLPool Library. The borg pattern is used here to store connections and manage the connections.
@author: Nick Verbeck
@since: 5/18/2008
@version: 0.2
"""
#Dictionary used for storing all Connection information
__Pool = {}
#Max Connections that can be opened among all connections
maxActiveConnections = 10
def __init__(self):
"""
Constructor for PySQLPool
@author: Nick Verbeck
@since: 5/12/2008
"""
self.__dict__ = self.__Pool
#For 1st instantiation lets setup all our variables
if not self.__dict__.has_key('lock'):
self.lock = Condition()
if not self.__dict__.has_key('connections'):
self.connections = {}
def Terminate(self):
"""
Close all open connections
Loop though all the connections and commit all queries and close all the connections.
This should be called at the end of your application.
@author: Nick Verbeck
@since: 5/12/2008
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.Close()
except Exception:
#We may throw exceptions due to already closed connections
pass
conn.release()
except Exception:
pass
self.connections = {}
finally:
self.lock.release()
def Cleanup(self):
"""
Cleanup Timed out connections
Loop though all the connections and test if still active. If inactive close socket.
@author: Nick Verbeck
@since: 2/20/2009
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
open = conn.TestConnection(forceCheck=True)
if open is True:
conn.commit()
else:
#Remove the connection from the pool. Its dead better of recreating it.
index = bucket.index(conn)
del bucket[index]
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release()
def Commit(self):
"""
Commits all currently open connections
@author: Nick Verbeck
@since: 9/12/2008
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.commit()
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release()
def GetConnection(self, ConnectionObj):
"""
Get a Open and active connection
Returns a PySQLConnectionManager if one is open else it will create a new one if the max active connections hasn't been hit.
If all possible connections are used. Then None is returned.
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
key = ConnectionObj.getKey()
connection = None
if self.connections.has_key(key):
connection = self._getConnectionFromPoolSet(key)
if connection is None:
self.lock.acquire()
if len(self.connections[key]) < self.maxActiveConnections:
#Create a new connection
connection = self._createConnection(ConnectionObj)
self.connections[key].append(connection)
self.lock.release()
else:
#Wait for a free connection. We maintain the lock on the pool so we are the 1st to get a connection.
while connection is None:
connection = self._getConnectionFromPoolSet(key)
self.lock.release()
#Create new Connection Pool Set
else:
self.lock.acquire()
#We do a double check now that its locked to be sure some other thread didn't create this while we may have been waiting.
if not self.connections.has_key(key):
self.connections[key] = []
if len(self.connections[key]) < self.maxActiveConnections:
#Create a new connection
connection = self._createConnection(ConnectionObj)
self.connections[key].append(connection)
else:
#A rare thing happened. So many threads created connections so fast we need to wait for a free one.
while connection is None:
connection = self._getConnectionFromPoolSet(key)
self.lock.release()
return connection
def _getConnectionFromPoolSet(self, key):
connection = None
for conn in self.connections[key]:
#Grab an active connection if maxActivePerConnection is not meet
#TODO: Implement a max usage per connection object
if not conn.is_locked():
conn.lock()
try:
if conn.TestConnection() is False:
conn.ReConnect()
connection = conn
conn.release()
except Exception:
conn.release()
raise
return connection
def _createConnection(self, info):
connection = ConnectionManager(info)
connection.Connect()
return connection
|
nerdynick/PySQLPool | src/PySQLPool/pool.py | Pool.GetConnection | python | def GetConnection(self, ConnectionObj):
key = ConnectionObj.getKey()
connection = None
if self.connections.has_key(key):
connection = self._getConnectionFromPoolSet(key)
if connection is None:
self.lock.acquire()
if len(self.connections[key]) < self.maxActiveConnections:
#Create a new connection
connection = self._createConnection(ConnectionObj)
self.connections[key].append(connection)
self.lock.release()
else:
#Wait for a free connection. We maintain the lock on the pool so we are the 1st to get a connection.
while connection is None:
connection = self._getConnectionFromPoolSet(key)
self.lock.release()
#Create new Connection Pool Set
else:
self.lock.acquire()
#We do a double check now that its locked to be sure some other thread didn't create this while we may have been waiting.
if not self.connections.has_key(key):
self.connections[key] = []
if len(self.connections[key]) < self.maxActiveConnections:
#Create a new connection
connection = self._createConnection(ConnectionObj)
self.connections[key].append(connection)
else:
#A rare thing happened. So many threads created connections so fast we need to wait for a free one.
while connection is None:
connection = self._getConnectionFromPoolSet(key)
self.lock.release()
return connection | Get a Open and active connection
Returns a PySQLConnectionManager if one is open else it will create a new one if the max active connections hasn't been hit.
If all possible connections are in use, then None is returned.
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008 | train | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/pool.py#L125-L174 | null | class Pool(object):
"""
MySQL Connection Pool Manager
This is the heart of the PySQLPool Library. The borg pattern is used here to store connections and manage the connections.
@author: Nick Verbeck
@since: 5/18/2008
@version: 0.2
"""
#Dictionary used for storing all Connection information
__Pool = {}
#Max Connections that can be opened among all connections
maxActiveConnections = 10
def __init__(self):
"""
Constructor for PySQLPool
@author: Nick Verbeck
@since: 5/12/2008
"""
self.__dict__ = self.__Pool
#For 1st instantiation lets setup all our variables
if not self.__dict__.has_key('lock'):
self.lock = Condition()
if not self.__dict__.has_key('connections'):
self.connections = {}
def Terminate(self):
"""
Close all open connections
Loop though all the connections and commit all queries and close all the connections.
This should be called at the end of your application.
@author: Nick Verbeck
@since: 5/12/2008
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.Close()
except Exception:
#We may throw exceptions due to already closed connections
pass
conn.release()
except Exception:
pass
self.connections = {}
finally:
self.lock.release()
def Cleanup(self):
"""
Cleanup Timed out connections
Loop though all the connections and test if still active. If inactive close socket.
@author: Nick Verbeck
@since: 2/20/2009
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
open = conn.TestConnection(forceCheck=True)
if open is True:
conn.commit()
else:
#Remove the connection from the pool. Its dead better of recreating it.
index = bucket.index(conn)
del bucket[index]
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release()
def Commit(self):
"""
Commits all currently open connections
@author: Nick Verbeck
@since: 9/12/2008
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.commit()
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release()
def GetConnection(self, ConnectionObj):
"""
Get a Open and active connection
Returns a PySQLConnectionManager if one is open else it will create a new one if the max active connections hasn't been hit.
If all possible connections are used. Then None is returned.
@param PySQLConnectionObj: PySQLConnection Object representing your connection string
@author: Nick Verbeck
@since: 5/12/2008
"""
key = ConnectionObj.getKey()
connection = None
if self.connections.has_key(key):
connection = self._getConnectionFromPoolSet(key)
if connection is None:
self.lock.acquire()
if len(self.connections[key]) < self.maxActiveConnections:
#Create a new connection
connection = self._createConnection(ConnectionObj)
self.connections[key].append(connection)
self.lock.release()
else:
#Wait for a free connection. We maintain the lock on the pool so we are the 1st to get a connection.
while connection is None:
connection = self._getConnectionFromPoolSet(key)
self.lock.release()
#Create new Connection Pool Set
else:
self.lock.acquire()
#We do a double check now that its locked to be sure some other thread didn't create this while we may have been waiting.
if not self.connections.has_key(key):
self.connections[key] = []
if len(self.connections[key]) < self.maxActiveConnections:
#Create a new connection
connection = self._createConnection(ConnectionObj)
self.connections[key].append(connection)
else:
#A rare thing happened. So many threads created connections so fast we need to wait for a free one.
while connection is None:
connection = self._getConnectionFromPoolSet(key)
self.lock.release()
return connection
def _getConnectionFromPoolSet(self, key):
connection = None
for conn in self.connections[key]:
#Grab an active connection if maxActivePerConnection is not meet
#TODO: Implement a max usage per connection object
if not conn.is_locked():
conn.lock()
try:
if conn.TestConnection() is False:
conn.ReConnect()
connection = conn
conn.release()
except Exception:
conn.release()
raise
return connection
def _createConnection(self, info):
connection = ConnectionManager(info)
connection.Connect()
return connection
|
bachya/regenmaschine | regenmaschine/stats.py | Stats.on_date | python | async def on_date(self, date: datetime.date) -> dict:
return await self._request(
'get', 'dailystats/{0}'.format(date.strftime('%Y-%m-%d'))) | Get statistics for a certain date. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/stats.py#L13-L16 | null | class Stats:
"""Define a statistics object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
    """Initialize.

    :param request: awaitable callable performing the authenticated
        API call; every Stats endpoint method funnels through it.
    """
    self._request = request
async def upcoming(self, details: bool = False) -> list:
"""Return watering statistics for the next 6 days."""
endpoint = 'dailystats'
key = 'DailyStats'
if details:
endpoint += '/details'
key = 'DailyStatsDetails'
data = await self._request('get', endpoint)
return data[key]
|
bachya/regenmaschine | regenmaschine/stats.py | Stats.upcoming | python | async def upcoming(self, details: bool = False) -> list:
endpoint = 'dailystats'
key = 'DailyStats'
if details:
endpoint += '/details'
key = 'DailyStatsDetails'
data = await self._request('get', endpoint)
return data[key] | Return watering statistics for the next 6 days. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/stats.py#L18-L26 | null | class Stats:
"""Define a statistics object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
    """Initialize.

    :param request: awaitable callable performing the authenticated
        API call; every Stats endpoint method funnels through it.
    """
    self._request = request
async def on_date(self, date: datetime.date) -> dict:
"""Get statistics for a certain date."""
return await self._request(
'get', 'dailystats/{0}'.format(date.strftime('%Y-%m-%d')))
|
bachya/regenmaschine | example.py | main | python | async def main():
async with ClientSession() as websession:
try:
client = Client(websession)
await client.load_local('<IP ADDRESS>', '<PASSWORD>', websession)
for controller in client.controllers.values():
print('CLIENT INFORMATION')
print('Name: {0}'.format(controller.name))
print('MAC Address: {0}'.format(controller.mac))
print('API Version: {0}'.format(controller.api_version))
print(
'Software Version: {0}'.format(
controller.software_version))
print(
'Hardware Version: {0}'.format(
controller.hardware_version))
# Work with diagnostics:
print()
print('RAINMACHINE DIAGNOSTICS')
data = await controller.diagnostics.current()
print('Uptime: {0}'.format(data['uptime']))
print('Software Version: {0}'.format(data['softwareVersion']))
# Work with parsers:
print()
print('RAINMACHINE PARSERS')
for parser in await controller.parsers.current():
print(parser['name'])
# Work with programs:
print()
print('ALL PROGRAMS')
for program in await controller.programs.all(
include_inactive=True):
print(
'Program #{0}: {1}'.format(
program['uid'], program['name']))
print()
print('PROGRAM BY ID')
program_1 = await controller.programs.get(1)
print(
"Program 1's Start Time: {0}".format(
program_1['startTime']))
print()
print('NEXT RUN TIMES')
for program in await controller.programs.next():
print(
'Program #{0}: {1}'.format(
program['pid'], program['startTime']))
print()
print('RUNNING PROGRAMS')
for program in await controller.programs.running():
print('Program #{0}'.format(program['uid']))
print()
print('STARTING PROGRAM #1')
print(await controller.programs.start(1))
await asyncio.sleep(3)
print()
print('STOPPING PROGRAM #1')
print(await controller.programs.stop(1))
# Work with provisioning:
print()
print('PROVISIONING INFO')
name = await controller.provisioning.device_name
print('Device Name: {0}'.format(name))
settings = await controller.provisioning.settings()
print(
'Database Path: {0}'.format(
settings['system']['databasePath']))
print(
'Station Name: {0}'.format(
settings['location']['stationName']))
wifi = await controller.provisioning.wifi()
print('IP Address: {0}'.format(wifi['ipAddress']))
# Work with restrictions:
print()
print('RESTRICTIONS')
current = await controller.restrictions.current()
print(
'Rain Delay Restrictions: {0}'.format(
current['rainDelay']))
universal = await controller.restrictions.universal()
print(
'Freeze Protect: {0}'.format(
universal['freezeProtectEnabled']))
print('Hourly Restrictions:')
for restriction in await controller.restrictions.hourly():
print(restriction['name'])
raindelay = await controller.restrictions.raindelay()
print(
'Rain Delay Counter: {0}'.format(
raindelay['delayCounter']))
# Work with restrictions:
print()
print('STATS')
today = await controller.stats.on_date(
date=datetime.date.today())
print('Min for Today: {0}'.format(today['mint']))
for day in await controller.stats.upcoming(details=True):
print('{0} Min: {1}'.format(day['day'], day['mint']))
# Work with watering:
print()
print('WATERING')
for day in await controller.watering.log(
date=datetime.date.today()):
print(
'{0} duration: {1}'.format(
day['date'], day['realDuration']))
queue = await controller.watering.queue()
print('Current Queue: {0}'.format(queue))
print('Runs:')
for watering_run in await controller.watering.runs(
date=datetime.date.today()):
print(
'{0} ({1})'.format(
watering_run['dateTime'], watering_run['et0']))
print()
print('PAUSING ALL WATERING FOR 30 SECONDS')
print(await controller.watering.pause_all(30))
await asyncio.sleep(3)
print()
print('UNPAUSING WATERING')
print(await controller.watering.unpause_all())
print()
print('STOPPING ALL WATERING')
print(await controller.watering.stop_all())
# Work with zones:
print()
print('ALL ACTIVE ZONES')
for zone in await controller.zones.all(details=True):
print(
'Zone #{0}: {1} (soil: {2})'.format(
zone['uid'], zone['name'], zone['soil']))
print()
print('ZONE BY ID')
zone_1 = await controller.zones.get(1, details=True)
print(
"Zone 1's Name: {0} (soil: {1})".format(
zone_1['name'], zone_1['soil']))
print()
print('STARTING ZONE #1 FOR 3 SECONDS')
print(await controller.zones.start(1, 3))
await asyncio.sleep(3)
print()
print('STOPPING ZONE #1')
print(await controller.zones.stop(1))
except RainMachineError as err:
print(err) | Run. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/example.py#L12-L182 | [
"async def load_local( # pylint: disable=too-many-arguments\n self,\n host: str,\n password: str,\n port: int = DEFAULT_LOCAL_PORT,\n ssl: bool = True,\n skip_existing: bool = True) -> None:\n \"\"\"Create a local client.\"\"\"\n controller = LocalController(\n self._request, host, port, ssl, self._websession)\n await controller.login(password)\n\n wifi_data = await controller.provisioning.wifi()\n if skip_existing and wifi_data['macAddress'] in self.controllers:\n return\n\n version_data = await controller.api.versions()\n controller.api_version = version_data['apiVer']\n controller.hardware_version = version_data['hwVer']\n controller.mac = wifi_data['macAddress']\n controller.name = await controller.provisioning.device_name\n controller.software_version = version_data['swVer']\n\n self.controllers[controller.mac] = controller # type: ignore\n"
] | """Run an example script to quickly test."""
# pylint: disable=too-many-locals,too-many-statements
import asyncio
import datetime
from aiohttp import ClientSession
from regenmaschine import Client
from regenmaschine.errors import RainMachineError
asyncio.get_event_loop().run_until_complete(main())
|
bachya/regenmaschine | regenmaschine/client.py | _raise_for_remote_status | python | def _raise_for_remote_status(url: str, data: dict) -> None:
if data.get('errorType') and data['errorType'] > 0:
raise_remote_error(data['errorType'])
if data.get('statusCode') and data['statusCode'] != 200:
raise RequestError(
'Error requesting data from {0}: {1} {2}'.format(
url, data['statusCode'], data['message'])) | Raise an error from the remote API if necessary. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/client.py#L140-L148 | null | """Define a client to interact with a RainMachine unit."""
import asyncio
from datetime import datetime
from typing import Dict
import async_timeout
from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientError
from regenmaschine.controller import (
Controller, LocalController, RemoteController)
from regenmaschine.errors import (
RequestError, TokenExpiredError, raise_remote_error)
DEFAULT_LOCAL_PORT = 8080
DEFAULT_TIMEOUT = 10
class Client:  # pylint: disable=too-few-public-methods
    """Define the client.

    Discovers RainMachine controllers either on the LAN (load_local) or
    via the RainMachine cloud (load_remote) and stores them, keyed by
    MAC address, in ``self.controllers``.
    """

    def __init__(
            self,
            websession: ClientSession,
            request_timeout: int = DEFAULT_TIMEOUT) -> None:
        """Initialize.

        :param websession: shared aiohttp session used for all requests.
        :param request_timeout: per-request timeout, in seconds.
        """
        self._websession = websession
        self.controllers = {}  # type: Dict[str, Controller]
        self.request_timeout = request_timeout

    async def load_local(  # pylint: disable=too-many-arguments
            self,
            host: str,
            password: str,
            port: int = DEFAULT_LOCAL_PORT,
            ssl: bool = True,
            skip_existing: bool = True) -> None:
        """Create a local client.

        Authenticates against the controller at host:port, fetches
        identifying information, and registers the controller under its
        MAC address. When ``skip_existing`` is True, a MAC that is
        already registered is left untouched.
        """
        controller = LocalController(
            self._request, host, port, ssl, self._websession)
        await controller.login(password)

        wifi_data = await controller.provisioning.wifi()
        if skip_existing and wifi_data['macAddress'] in self.controllers:
            return

        version_data = await controller.api.versions()
        controller.api_version = version_data['apiVer']
        controller.hardware_version = version_data['hwVer']
        controller.mac = wifi_data['macAddress']
        controller.name = await controller.provisioning.device_name
        controller.software_version = version_data['swVer']

        self.controllers[controller.mac] = controller  # type: ignore

    async def load_remote(
            self, email: str, password: str,
            skip_existing: bool = True) -> None:
        """Create remote clients for every sprinkler tied to the account.

        Logs into my.rainmachine.com and registers one RemoteController
        per sprinkler returned by the cloud API, keyed by MAC.
        """
        auth_resp = await self._request(
            'post',
            'https://my.rainmachine.com/login/auth',
            json={'user': {
                'email': email,
                'pwd': password,
                'remember': 1
            }})
        access_token = auth_resp['access_token']

        sprinklers_resp = await self._request(
            'post',
            'https://my.rainmachine.com/devices/get-sprinklers',
            access_token=access_token,
            json={'user': {
                'email': email,
                'pwd': password,
                'remember': 1
            }})

        for sprinkler in sprinklers_resp['sprinklers']:
            if skip_existing and sprinkler['mac'] in self.controllers:
                continue
            controller = RemoteController(self._request, self._websession)
            await controller.login(
                access_token, sprinkler['sprinklerId'], password)

            version_data = await controller.api.versions()
            controller.api_version = version_data['apiVer']
            controller.hardware_version = version_data['hwVer']
            controller.mac = sprinkler['mac']
            controller.name = sprinkler['name']
            controller.software_version = version_data['swVer']

            self.controllers[sprinkler['mac']] = controller

    async def _request(
            self,
            method: str,
            url: str,
            *,
            access_token: str = None,
            access_token_expiration: datetime = None,
            headers: dict = None,
            params: dict = None,
            json: dict = None,
            ssl: bool = True) -> dict:
        """Make a request against the RainMachine device.

        Raises TokenExpiredError for stale long-lived tokens and wraps
        transport errors and timeouts in RequestError.
        """
        # Refuse to use a long-lived token past its expiration.
        if (access_token_expiration
                and datetime.now() >= access_token_expiration):
            raise TokenExpiredError('Long-lived access token has expired')

        if not headers:
            headers = {}
        headers.update({'Content-Type': 'application/json'})

        if not params:
            params = {}
        if access_token:
            params.update({'access_token': access_token})

        try:
            async with async_timeout.timeout(self.request_timeout):
                async with self._websession.request(
                        method, url, headers=headers, params=params, json=json,
                        ssl=ssl) as resp:
                    resp.raise_for_status()
                    # content_type=None: the device may answer without a
                    # JSON content-type header.
                    data = await resp.json(content_type=None)
                    _raise_for_remote_status(url, data)
        except ClientError as err:
            raise RequestError(
                'Error requesting data from {0}: {1}'.format(url, err))
        except asyncio.TimeoutError:
            raise RequestError('Timeout during request: {0}'.format(url))

        return data
async def login(
        host: str,
        password: str,
        websession: ClientSession,
        *,
        port: int = 8080,
        ssl: bool = True,
        request_timeout: int = DEFAULT_TIMEOUT) -> Controller:
    """Authenticate against a RainMachine device.

    Deprecated shim kept for backward compatibility: builds a Client,
    loads the local controller, and returns the first one registered.
    """
    print('regenmaschine.client.login() is deprecated; see documentation!')
    client = Client(websession, request_timeout)
    await client.load_local(host, password, port, ssl)
    registered = client.controllers.values()
    return next(iter(registered))
|
bachya/regenmaschine | regenmaschine/client.py | login | python | async def login(
host: str,
password: str,
websession: ClientSession,
*,
port: int = 8080,
ssl: bool = True,
request_timeout: int = DEFAULT_TIMEOUT) -> Controller:
print('regenmaschine.client.login() is deprecated; see documentation!')
client = Client(websession, request_timeout)
await client.load_local(host, password, port, ssl)
return next(iter(client.controllers.values())) | Authenticate against a RainMachine device. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/client.py#L151-L163 | [
"async def load_local( # pylint: disable=too-many-arguments\n self,\n host: str,\n password: str,\n port: int = DEFAULT_LOCAL_PORT,\n ssl: bool = True,\n skip_existing: bool = True) -> None:\n \"\"\"Create a local client.\"\"\"\n controller = LocalController(\n self._request, host, port, ssl, self._websession)\n await controller.login(password)\n\n wifi_data = await controller.provisioning.wifi()\n if skip_existing and wifi_data['macAddress'] in self.controllers:\n return\n\n version_data = await controller.api.versions()\n controller.api_version = version_data['apiVer']\n controller.hardware_version = version_data['hwVer']\n controller.mac = wifi_data['macAddress']\n controller.name = await controller.provisioning.device_name\n controller.software_version = version_data['swVer']\n\n self.controllers[controller.mac] = controller # type: ignore\n"
] | """Define a client to interact with a RainMachine unit."""
import asyncio
from datetime import datetime
from typing import Dict
import async_timeout
from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientError
from regenmaschine.controller import (
Controller, LocalController, RemoteController)
from regenmaschine.errors import (
RequestError, TokenExpiredError, raise_remote_error)
DEFAULT_LOCAL_PORT = 8080
DEFAULT_TIMEOUT = 10
class Client: # pylint: disable=too-few-public-methods
"""Define the client."""
def __init__(
self,
websession: ClientSession,
request_timeout: int = DEFAULT_TIMEOUT) -> None:
"""Initialize."""
self._websession = websession
self.controllers = {} # type: Dict[str, Controller]
self.request_timeout = request_timeout
async def load_local( # pylint: disable=too-many-arguments
self,
host: str,
password: str,
port: int = DEFAULT_LOCAL_PORT,
ssl: bool = True,
skip_existing: bool = True) -> None:
"""Create a local client."""
controller = LocalController(
self._request, host, port, ssl, self._websession)
await controller.login(password)
wifi_data = await controller.provisioning.wifi()
if skip_existing and wifi_data['macAddress'] in self.controllers:
return
version_data = await controller.api.versions()
controller.api_version = version_data['apiVer']
controller.hardware_version = version_data['hwVer']
controller.mac = wifi_data['macAddress']
controller.name = await controller.provisioning.device_name
controller.software_version = version_data['swVer']
self.controllers[controller.mac] = controller # type: ignore
async def load_remote(
self, email: str, password: str,
skip_existing: bool = True) -> None:
"""Create a local client."""
auth_resp = await self._request(
'post',
'https://my.rainmachine.com/login/auth',
json={'user': {
'email': email,
'pwd': password,
'remember': 1
}})
access_token = auth_resp['access_token']
sprinklers_resp = await self._request(
'post',
'https://my.rainmachine.com/devices/get-sprinklers',
access_token=access_token,
json={'user': {
'email': email,
'pwd': password,
'remember': 1
}})
for sprinkler in sprinklers_resp['sprinklers']:
if skip_existing and sprinkler['mac'] in self.controllers:
continue
controller = RemoteController(self._request, self._websession)
await controller.login(
access_token, sprinkler['sprinklerId'], password)
version_data = await controller.api.versions()
controller.api_version = version_data['apiVer']
controller.hardware_version = version_data['hwVer']
controller.mac = sprinkler['mac']
controller.name = sprinkler['name']
controller.software_version = version_data['swVer']
self.controllers[sprinkler['mac']] = controller
async def _request(
self,
method: str,
url: str,
*,
access_token: str = None,
access_token_expiration: datetime = None,
headers: dict = None,
params: dict = None,
json: dict = None,
ssl: bool = True) -> dict:
"""Make a request against the RainMachine device."""
if (access_token_expiration
and datetime.now() >= access_token_expiration):
raise TokenExpiredError('Long-lived access token has expired')
if not headers:
headers = {}
headers.update({'Content-Type': 'application/json'})
if not params:
params = {}
if access_token:
params.update({'access_token': access_token})
try:
async with async_timeout.timeout(self.request_timeout):
async with self._websession.request(
method, url, headers=headers, params=params, json=json,
ssl=ssl) as resp:
resp.raise_for_status()
data = await resp.json(content_type=None)
_raise_for_remote_status(url, data)
except ClientError as err:
raise RequestError(
'Error requesting data from {0}: {1}'.format(url, err))
except asyncio.TimeoutError:
raise RequestError('Timeout during request: {0}'.format(url))
return data
def _raise_for_remote_status(url: str, data: dict) -> None:
"""Raise an error from the remote API if necessary."""
if data.get('errorType') and data['errorType'] > 0:
raise_remote_error(data['errorType'])
if data.get('statusCode') and data['statusCode'] != 200:
raise RequestError(
'Error requesting data from {0}: {1} {2}'.format(
url, data['statusCode'], data['message']))
|
bachya/regenmaschine | regenmaschine/client.py | Client.load_local | python | async def load_local( # pylint: disable=too-many-arguments
self,
host: str,
password: str,
port: int = DEFAULT_LOCAL_PORT,
ssl: bool = True,
skip_existing: bool = True) -> None:
controller = LocalController(
self._request, host, port, ssl, self._websession)
await controller.login(password)
wifi_data = await controller.provisioning.wifi()
if skip_existing and wifi_data['macAddress'] in self.controllers:
return
version_data = await controller.api.versions()
controller.api_version = version_data['apiVer']
controller.hardware_version = version_data['hwVer']
controller.mac = wifi_data['macAddress']
controller.name = await controller.provisioning.device_name
controller.software_version = version_data['swVer']
self.controllers[controller.mac] = controller | Create a local client. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/client.py#L31-L54 | [
"async def login(self, password):\n \"\"\"Authenticate against the device (locally).\"\"\"\n auth_resp = await self._client_request(\n 'post',\n '{0}/auth/login'.format(self._host),\n json={\n 'pwd': password,\n 'remember': 1\n })\n\n self._access_token = auth_resp['access_token']\n self._access_token_expiration = datetime.now() + timedelta(\n seconds=int(auth_resp['expires_in']) - 10)\n"
] | class Client: # pylint: disable=too-few-public-methods
"""Define the client."""
def __init__(
self,
websession: ClientSession,
request_timeout: int = DEFAULT_TIMEOUT) -> None:
"""Initialize."""
self._websession = websession
self.controllers = {} # type: Dict[str, Controller]
self.request_timeout = request_timeout
# type: ignore
async def load_remote(
self, email: str, password: str,
skip_existing: bool = True) -> None:
"""Create a local client."""
auth_resp = await self._request(
'post',
'https://my.rainmachine.com/login/auth',
json={'user': {
'email': email,
'pwd': password,
'remember': 1
}})
access_token = auth_resp['access_token']
sprinklers_resp = await self._request(
'post',
'https://my.rainmachine.com/devices/get-sprinklers',
access_token=access_token,
json={'user': {
'email': email,
'pwd': password,
'remember': 1
}})
for sprinkler in sprinklers_resp['sprinklers']:
if skip_existing and sprinkler['mac'] in self.controllers:
continue
controller = RemoteController(self._request, self._websession)
await controller.login(
access_token, sprinkler['sprinklerId'], password)
version_data = await controller.api.versions()
controller.api_version = version_data['apiVer']
controller.hardware_version = version_data['hwVer']
controller.mac = sprinkler['mac']
controller.name = sprinkler['name']
controller.software_version = version_data['swVer']
self.controllers[sprinkler['mac']] = controller
async def _request(
self,
method: str,
url: str,
*,
access_token: str = None,
access_token_expiration: datetime = None,
headers: dict = None,
params: dict = None,
json: dict = None,
ssl: bool = True) -> dict:
"""Make a request against the RainMachine device."""
if (access_token_expiration
and datetime.now() >= access_token_expiration):
raise TokenExpiredError('Long-lived access token has expired')
if not headers:
headers = {}
headers.update({'Content-Type': 'application/json'})
if not params:
params = {}
if access_token:
params.update({'access_token': access_token})
try:
async with async_timeout.timeout(self.request_timeout):
async with self._websession.request(
method, url, headers=headers, params=params, json=json,
ssl=ssl) as resp:
resp.raise_for_status()
data = await resp.json(content_type=None)
_raise_for_remote_status(url, data)
except ClientError as err:
raise RequestError(
'Error requesting data from {0}: {1}'.format(url, err))
except asyncio.TimeoutError:
raise RequestError('Timeout during request: {0}'.format(url))
return data
|
bachya/regenmaschine | regenmaschine/client.py | Client.load_remote | python | async def load_remote(
self, email: str, password: str,
skip_existing: bool = True) -> None:
auth_resp = await self._request(
'post',
'https://my.rainmachine.com/login/auth',
json={'user': {
'email': email,
'pwd': password,
'remember': 1
}})
access_token = auth_resp['access_token']
sprinklers_resp = await self._request(
'post',
'https://my.rainmachine.com/devices/get-sprinklers',
access_token=access_token,
json={'user': {
'email': email,
'pwd': password,
'remember': 1
}})
for sprinkler in sprinklers_resp['sprinklers']:
if skip_existing and sprinkler['mac'] in self.controllers:
continue
controller = RemoteController(self._request, self._websession)
await controller.login(
access_token, sprinkler['sprinklerId'], password)
version_data = await controller.api.versions()
controller.api_version = version_data['apiVer']
controller.hardware_version = version_data['hwVer']
controller.mac = sprinkler['mac']
controller.name = sprinkler['name']
controller.software_version = version_data['swVer']
self.controllers[sprinkler['mac']] = controller | Create a local client. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/client.py#L56-L96 | [
"async def versions(self) -> dict:\n \"\"\"Get software, hardware, and API versions.\"\"\"\n return await self._request('get', 'apiVer')\n",
"async def _request(\n self,\n method: str,\n url: str,\n *,\n access_token: str = None,\n access_token_expiration: datetime = None,\n headers: dict = None,\n params: dict = None,\n json: dict = None,\n ssl: bool = True) -> dict:\n \"\"\"Make a request against the RainMachine device.\"\"\"\n if (access_token_expiration\n and datetime.now() >= access_token_expiration):\n raise TokenExpiredError('Long-lived access token has expired')\n\n if not headers:\n headers = {}\n headers.update({'Content-Type': 'application/json'})\n\n if not params:\n params = {}\n if access_token:\n params.update({'access_token': access_token})\n\n try:\n async with async_timeout.timeout(self.request_timeout):\n async with self._websession.request(\n method, url, headers=headers, params=params, json=json,\n ssl=ssl) as resp:\n resp.raise_for_status()\n data = await resp.json(content_type=None)\n _raise_for_remote_status(url, data)\n except ClientError as err:\n raise RequestError(\n 'Error requesting data from {0}: {1}'.format(url, err))\n except asyncio.TimeoutError:\n raise RequestError('Timeout during request: {0}'.format(url))\n\n return data\n",
"async def login(\n self, stage_1_access_token: str, sprinkler_id: str,\n password: str) -> None:\n \"\"\"Authenticate against the device (remotely).\"\"\"\n auth_resp = await self._client_request(\n 'post',\n 'https://my.rainmachine.com/devices/login-sprinkler',\n access_token=stage_1_access_token,\n json={\n 'sprinklerId': sprinkler_id,\n 'pwd': password,\n })\n\n self._access_token = auth_resp['access_token']\n self._host = URL_BASE_REMOTE.format(sprinkler_id)\n"
] | class Client: # pylint: disable=too-few-public-methods
"""Define the client."""
def __init__(
self,
websession: ClientSession,
request_timeout: int = DEFAULT_TIMEOUT) -> None:
"""Initialize."""
self._websession = websession
self.controllers = {} # type: Dict[str, Controller]
self.request_timeout = request_timeout
async def load_local( # pylint: disable=too-many-arguments
self,
host: str,
password: str,
port: int = DEFAULT_LOCAL_PORT,
ssl: bool = True,
skip_existing: bool = True) -> None:
"""Create a local client."""
controller = LocalController(
self._request, host, port, ssl, self._websession)
await controller.login(password)
wifi_data = await controller.provisioning.wifi()
if skip_existing and wifi_data['macAddress'] in self.controllers:
return
version_data = await controller.api.versions()
controller.api_version = version_data['apiVer']
controller.hardware_version = version_data['hwVer']
controller.mac = wifi_data['macAddress']
controller.name = await controller.provisioning.device_name
controller.software_version = version_data['swVer']
self.controllers[controller.mac] = controller # type: ignore
async def _request(
self,
method: str,
url: str,
*,
access_token: str = None,
access_token_expiration: datetime = None,
headers: dict = None,
params: dict = None,
json: dict = None,
ssl: bool = True) -> dict:
"""Make a request against the RainMachine device."""
if (access_token_expiration
and datetime.now() >= access_token_expiration):
raise TokenExpiredError('Long-lived access token has expired')
if not headers:
headers = {}
headers.update({'Content-Type': 'application/json'})
if not params:
params = {}
if access_token:
params.update({'access_token': access_token})
try:
async with async_timeout.timeout(self.request_timeout):
async with self._websession.request(
method, url, headers=headers, params=params, json=json,
ssl=ssl) as resp:
resp.raise_for_status()
data = await resp.json(content_type=None)
_raise_for_remote_status(url, data)
except ClientError as err:
raise RequestError(
'Error requesting data from {0}: {1}'.format(url, err))
except asyncio.TimeoutError:
raise RequestError('Timeout during request: {0}'.format(url))
return data
|
bachya/regenmaschine | regenmaschine/client.py | Client._request | python | async def _request(
self,
method: str,
url: str,
*,
access_token: str = None,
access_token_expiration: datetime = None,
headers: dict = None,
params: dict = None,
json: dict = None,
ssl: bool = True) -> dict:
if (access_token_expiration
and datetime.now() >= access_token_expiration):
raise TokenExpiredError('Long-lived access token has expired')
if not headers:
headers = {}
headers.update({'Content-Type': 'application/json'})
if not params:
params = {}
if access_token:
params.update({'access_token': access_token})
try:
async with async_timeout.timeout(self.request_timeout):
async with self._websession.request(
method, url, headers=headers, params=params, json=json,
ssl=ssl) as resp:
resp.raise_for_status()
data = await resp.json(content_type=None)
_raise_for_remote_status(url, data)
except ClientError as err:
raise RequestError(
'Error requesting data from {0}: {1}'.format(url, err))
except asyncio.TimeoutError:
raise RequestError('Timeout during request: {0}'.format(url))
return data | Make a request against the RainMachine device. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/client.py#L98-L137 | null | class Client: # pylint: disable=too-few-public-methods
"""Define the client."""
def __init__(
self,
websession: ClientSession,
request_timeout: int = DEFAULT_TIMEOUT) -> None:
"""Initialize."""
self._websession = websession
self.controllers = {} # type: Dict[str, Controller]
self.request_timeout = request_timeout
async def load_local( # pylint: disable=too-many-arguments
self,
host: str,
password: str,
port: int = DEFAULT_LOCAL_PORT,
ssl: bool = True,
skip_existing: bool = True) -> None:
"""Create a local client."""
controller = LocalController(
self._request, host, port, ssl, self._websession)
await controller.login(password)
wifi_data = await controller.provisioning.wifi()
if skip_existing and wifi_data['macAddress'] in self.controllers:
return
version_data = await controller.api.versions()
controller.api_version = version_data['apiVer']
controller.hardware_version = version_data['hwVer']
controller.mac = wifi_data['macAddress']
controller.name = await controller.provisioning.device_name
controller.software_version = version_data['swVer']
self.controllers[controller.mac] = controller # type: ignore
async def load_remote(
self, email: str, password: str,
skip_existing: bool = True) -> None:
"""Create a local client."""
auth_resp = await self._request(
'post',
'https://my.rainmachine.com/login/auth',
json={'user': {
'email': email,
'pwd': password,
'remember': 1
}})
access_token = auth_resp['access_token']
sprinklers_resp = await self._request(
'post',
'https://my.rainmachine.com/devices/get-sprinklers',
access_token=access_token,
json={'user': {
'email': email,
'pwd': password,
'remember': 1
}})
for sprinkler in sprinklers_resp['sprinklers']:
if skip_existing and sprinkler['mac'] in self.controllers:
continue
controller = RemoteController(self._request, self._websession)
await controller.login(
access_token, sprinkler['sprinklerId'], password)
version_data = await controller.api.versions()
controller.api_version = version_data['apiVer']
controller.hardware_version = version_data['hwVer']
controller.mac = sprinkler['mac']
controller.name = sprinkler['name']
controller.software_version = version_data['swVer']
self.controllers[sprinkler['mac']] = controller
|
bachya/regenmaschine | regenmaschine/errors.py | raise_remote_error | python | def raise_remote_error(error_code: int) -> None:
try:
error = next((v for k, v in ERROR_CODES.items() if k == error_code))
raise RequestError(error)
except StopIteration:
raise RequestError(
'Unknown remote error code returned: {0}'.format(error_code)) | Raise the appropriate error with a remote error code. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/errors.py#L27-L34 | null | """Define package errors."""
class RainMachineError(Exception):
"""Define a base error."""
pass
class RequestError(RainMachineError):
"""Define an error related to invalid requests."""
pass
class TokenExpiredError(RainMachineError):
"""Define an error for expired access tokens that can't be refreshed."""
pass
ERROR_CODES = {
1: 'The email has not been validated',
}
|
bachya/regenmaschine | regenmaschine/controller.py | Controller._request | python | async def _request(
self,
method: str,
endpoint: str,
*,
headers: dict = None,
params: dict = None,
json: dict = None,
ssl: bool = True) -> dict:
return await self._client_request(
method,
'{0}/{1}'.format(self._host, endpoint),
access_token=self._access_token,
access_token_expiration=self._access_token_expiration,
headers=headers,
params=params,
json=json,
ssl=ssl) | Wrap the generic request method to add access token, etc. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/controller.py#L52-L70 | null | class Controller: # pylint: disable=too-many-instance-attributes
"""Define the controller."""
def __init__(
self, request: Callable[..., Awaitable[dict]],
websession: ClientSession) -> None:
"""Initialize."""
self._access_token = None # type: Optional[str]
self._access_token_expiration = None # type: Optional[datetime]
self._client_request = request
self._host = None # type: Optional[str]
self._ssl = True
self._websession = websession
self.api_version = None # type: Optional[str]
self.hardware_version = None # type: Optional[int]
self.mac = None
self.name = None # type: Optional[str]
self.software_version = None # type: Optional[str]
# API endpoints:
self.api = API(self._request)
self.diagnostics = Diagnostics(self._request)
self.parsers = Parser(self._request)
self.programs = Program(self._request)
self.provisioning = Provision(self._request)
self.restrictions = Restriction(self._request)
self.stats = Stats(self._request)
self.watering = Watering(self._request)
self.zones = Zone(self._request)
|
bachya/regenmaschine | regenmaschine/controller.py | LocalController.login | python | async def login(self, password):
auth_resp = await self._client_request(
'post',
'{0}/auth/login'.format(self._host),
json={
'pwd': password,
'remember': 1
})
self._access_token = auth_resp['access_token']
self._access_token_expiration = datetime.now() + timedelta(
seconds=int(auth_resp['expires_in']) - 10) | Authenticate against the device (locally). | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/controller.py#L85-L97 | null | class LocalController(Controller):
"""Define a controller accessed over the LAN."""
def __init__( # pylint: disable=too-many-arguments
self, request: Callable[..., Awaitable[dict]], host: str,
port: int, ssl: bool, websession: ClientSession) -> None:
"""Initialize."""
super().__init__(request, websession)
self._host = URL_BASE_LOCAL.format(host, port)
self._ssl = ssl
|
bachya/regenmaschine | regenmaschine/controller.py | RemoteController.login | python | async def login(
self, stage_1_access_token: str, sprinkler_id: str,
password: str) -> None:
auth_resp = await self._client_request(
'post',
'https://my.rainmachine.com/devices/login-sprinkler',
access_token=stage_1_access_token,
json={
'sprinklerId': sprinkler_id,
'pwd': password,
})
self._access_token = auth_resp['access_token']
self._host = URL_BASE_REMOTE.format(sprinkler_id) | Authenticate against the device (remotely). | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/controller.py#L103-L117 | [
"async def _request(\n self,\n method: str,\n url: str,\n *,\n access_token: str = None,\n access_token_expiration: datetime = None,\n headers: dict = None,\n params: dict = None,\n json: dict = None,\n ssl: bool = True) -> dict:\n \"\"\"Make a request against the RainMachine device.\"\"\"\n if (access_token_expiration\n and datetime.now() >= access_token_expiration):\n raise TokenExpiredError('Long-lived access token has expired')\n\n if not headers:\n headers = {}\n headers.update({'Content-Type': 'application/json'})\n\n if not params:\n params = {}\n if access_token:\n params.update({'access_token': access_token})\n\n try:\n async with async_timeout.timeout(self.request_timeout):\n async with self._websession.request(\n method, url, headers=headers, params=params, json=json,\n ssl=ssl) as resp:\n resp.raise_for_status()\n data = await resp.json(content_type=None)\n _raise_for_remote_status(url, data)\n except ClientError as err:\n raise RequestError(\n 'Error requesting data from {0}: {1}'.format(url, err))\n except asyncio.TimeoutError:\n raise RequestError('Timeout during request: {0}'.format(url))\n\n return data\n"
] | class RemoteController(Controller):
"""Define a controller accessed over RainMachine's cloud."""
|
bachya/regenmaschine | regenmaschine/zone.py | Zone._post | python | async def _post(self, zone_id: int = None, json: dict = None) -> dict:
return await self._request(
'post', 'zone/{0}/properties'.format(zone_id), json=json) | Post data to a (non)existing zone. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/zone.py#L12-L15 | null | class Zone:
"""Define a zone object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._request = request
async def all(
self, *, details: bool = False,
include_inactive: bool = False) -> list:
"""Return all zones (with optional advanced properties)."""
endpoint = 'zone'
if details:
endpoint += '/properties'
data = await self._request('get', endpoint)
return [z for z in data['zones'] if include_inactive or z['active']]
async def disable(self, zone_id: int) -> dict:
"""Disable a zone."""
return await self._post(zone_id, {'active': False})
async def enable(self, zone_id: int) -> dict:
"""Enable a zone."""
return await self._post(zone_id, {'active': True})
async def get(self, zone_id: int, *, details: bool = False) -> dict:
"""Return a specific zone."""
endpoint = 'zone/{0}'.format(zone_id)
if details:
endpoint += '/properties'
return await self._request('get', endpoint)
async def start(self, zone_id: int, time: int) -> dict:
"""Start a program."""
return await self._request(
'post', 'zone/{0}/start'.format(zone_id), json={'time': time})
async def stop(self, zone_id: int) -> dict:
"""Stop a program."""
return await self._request('post', 'zone/{0}/stop'.format(zone_id))
|
bachya/regenmaschine | regenmaschine/zone.py | Zone.all | python | async def all(
self, *, details: bool = False,
include_inactive: bool = False) -> list:
endpoint = 'zone'
if details:
endpoint += '/properties'
data = await self._request('get', endpoint)
return [z for z in data['zones'] if include_inactive or z['active']] | Return all zones (with optional advanced properties). | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/zone.py#L17-L25 | null | class Zone:
"""Define a zone object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._request = request
async def _post(self, zone_id: int = None, json: dict = None) -> dict:
"""Post data to a (non)existing zone."""
return await self._request(
'post', 'zone/{0}/properties'.format(zone_id), json=json)
async def disable(self, zone_id: int) -> dict:
"""Disable a zone."""
return await self._post(zone_id, {'active': False})
async def enable(self, zone_id: int) -> dict:
"""Enable a zone."""
return await self._post(zone_id, {'active': True})
async def get(self, zone_id: int, *, details: bool = False) -> dict:
"""Return a specific zone."""
endpoint = 'zone/{0}'.format(zone_id)
if details:
endpoint += '/properties'
return await self._request('get', endpoint)
async def start(self, zone_id: int, time: int) -> dict:
"""Start a program."""
return await self._request(
'post', 'zone/{0}/start'.format(zone_id), json={'time': time})
async def stop(self, zone_id: int) -> dict:
"""Stop a program."""
return await self._request('post', 'zone/{0}/stop'.format(zone_id))
|
bachya/regenmaschine | regenmaschine/zone.py | Zone.get | python | async def get(self, zone_id: int, *, details: bool = False) -> dict:
endpoint = 'zone/{0}'.format(zone_id)
if details:
endpoint += '/properties'
return await self._request('get', endpoint) | Return a specific zone. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/zone.py#L35-L40 | null | class Zone:
"""Define a zone object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._request = request
async def _post(self, zone_id: int = None, json: dict = None) -> dict:
"""Post data to a (non)existing zone."""
return await self._request(
'post', 'zone/{0}/properties'.format(zone_id), json=json)
async def all(
self, *, details: bool = False,
include_inactive: bool = False) -> list:
"""Return all zones (with optional advanced properties)."""
endpoint = 'zone'
if details:
endpoint += '/properties'
data = await self._request('get', endpoint)
return [z for z in data['zones'] if include_inactive or z['active']]
async def disable(self, zone_id: int) -> dict:
"""Disable a zone."""
return await self._post(zone_id, {'active': False})
async def enable(self, zone_id: int) -> dict:
"""Enable a zone."""
return await self._post(zone_id, {'active': True})
async def start(self, zone_id: int, time: int) -> dict:
"""Start a program."""
return await self._request(
'post', 'zone/{0}/start'.format(zone_id), json={'time': time})
async def stop(self, zone_id: int) -> dict:
"""Stop a program."""
return await self._request('post', 'zone/{0}/stop'.format(zone_id))
|
bachya/regenmaschine | regenmaschine/zone.py | Zone.start | python | async def start(self, zone_id: int, time: int) -> dict:
return await self._request(
'post', 'zone/{0}/start'.format(zone_id), json={'time': time}) | Start a program. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/zone.py#L42-L45 | null | class Zone:
"""Define a zone object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._request = request
async def _post(self, zone_id: int = None, json: dict = None) -> dict:
"""Post data to a (non)existing zone."""
return await self._request(
'post', 'zone/{0}/properties'.format(zone_id), json=json)
async def all(
self, *, details: bool = False,
include_inactive: bool = False) -> list:
"""Return all zones (with optional advanced properties)."""
endpoint = 'zone'
if details:
endpoint += '/properties'
data = await self._request('get', endpoint)
return [z for z in data['zones'] if include_inactive or z['active']]
async def disable(self, zone_id: int) -> dict:
"""Disable a zone."""
return await self._post(zone_id, {'active': False})
async def enable(self, zone_id: int) -> dict:
"""Enable a zone."""
return await self._post(zone_id, {'active': True})
async def get(self, zone_id: int, *, details: bool = False) -> dict:
"""Return a specific zone."""
endpoint = 'zone/{0}'.format(zone_id)
if details:
endpoint += '/properties'
return await self._request('get', endpoint)
async def stop(self, zone_id: int) -> dict:
"""Stop a program."""
return await self._request('post', 'zone/{0}/stop'.format(zone_id))
|
bachya/regenmaschine | regenmaschine/watering.py | Watering.log | python | async def log(
self,
date: datetime.date = None,
days: int = None,
details: bool = False) -> list:
endpoint = 'watering/log'
if details:
endpoint += '/details'
if date and days:
endpoint = '{0}/{1}/{2}'.format(
endpoint, date.strftime('%Y-%m-%d'), days)
data = await self._request('get', endpoint)
return data['waterLog']['days'] | Get watering information for X days from Y date. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/watering.py#L13-L28 | null | class Watering:
"""Define a watering object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._request = request
async def pause_all(self, seconds: int) -> dict:
"""Pause all watering for a specified number of seconds."""
return await self._request(
'post', 'watering/pauseall', json={'duration': seconds})
async def queue(self) -> list:
"""Return the queue of active watering activities."""
data = await self._request('get', 'watering/queue')
return data['queue']
async def runs(self, date: datetime.date = None, days: int = None) -> list:
"""Return all program runs for X days from Y date."""
endpoint = 'watering/past'
if date and days:
endpoint = '{0}/{1}/{2}'.format(
endpoint, date.strftime('%Y-%m-%d'), days)
data = await self._request('get', endpoint)
return data['pastValues']
async def stop_all(self) -> dict:
"""Stop all programs and zones from running."""
return await self._request('post', 'watering/stopall')
async def unpause_all(self) -> dict:
"""Unpause all paused watering."""
return await self.pause_all(0)
|
bachya/regenmaschine | regenmaschine/watering.py | Watering.runs | python | async def runs(self, date: datetime.date = None, days: int = None) -> list:
endpoint = 'watering/past'
if date and days:
endpoint = '{0}/{1}/{2}'.format(
endpoint, date.strftime('%Y-%m-%d'), days)
data = await self._request('get', endpoint)
return data['pastValues'] | Return all program runs for X days from Y date. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/watering.py#L40-L49 | null | class Watering:
"""Define a watering object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._request = request
async def log(
self,
date: datetime.date = None,
days: int = None,
details: bool = False) -> list:
"""Get watering information for X days from Y date."""
endpoint = 'watering/log'
if details:
endpoint += '/details'
if date and days:
endpoint = '{0}/{1}/{2}'.format(
endpoint, date.strftime('%Y-%m-%d'), days)
data = await self._request('get', endpoint)
return data['waterLog']['days']
async def pause_all(self, seconds: int) -> dict:
"""Pause all watering for a specified number of seconds."""
return await self._request(
'post', 'watering/pauseall', json={'duration': seconds})
async def queue(self) -> list:
"""Return the queue of active watering activities."""
data = await self._request('get', 'watering/queue')
return data['queue']
async def stop_all(self) -> dict:
"""Stop all programs and zones from running."""
return await self._request('post', 'watering/stopall')
async def unpause_all(self) -> dict:
"""Unpause all paused watering."""
return await self.pause_all(0)
|
bachya/regenmaschine | regenmaschine/program.py | Program._post | python | async def _post(self, program_id: int = None, json: dict = None) -> dict:
return await self._request(
'post', 'program/{0}'.format(program_id), json=json) | Post data to a (non)existing program. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/program.py#L12-L15 | null | class Program:
"""Define a program object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._request = request
async def all(self, include_inactive: bool = False) -> list:
"""Return all programs."""
data = await self._request('get', 'program')
return [p for p in data['programs'] if include_inactive or p['active']]
async def disable(self, program_id: int) -> dict:
"""Disable a program."""
return await self._post(program_id, {'active': False})
async def enable(self, program_id: int) -> dict:
"""Enable a program."""
return await self._post(program_id, {'active': True})
async def get(self, program_id: int) -> dict:
"""Return a specific program."""
return await self._request('get', 'program/{0}'.format(program_id))
async def next(self) -> list:
"""Return the next run date/time for all programs."""
data = await self._request('get', 'program/nextrun')
return data['nextRuns']
async def running(self) -> list:
"""Return all running programs."""
data = await self._request('get', 'watering/program')
return data['programs']
async def start(self, program_id: int) -> dict:
"""Start a program."""
return await self._request(
'post', 'program/{0}/start'.format(program_id))
async def stop(self, program_id: int) -> dict:
"""Stop a program."""
return await self._request(
'post', 'program/{0}/stop'.format(program_id))
|
bachya/regenmaschine | regenmaschine/program.py | Program.all | python | async def all(self, include_inactive: bool = False) -> list:
data = await self._request('get', 'program')
return [p for p in data['programs'] if include_inactive or p['active']] | Return all programs. | train | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/program.py#L17-L20 | null | class Program:
"""Define a program object."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._request = request
async def _post(self, program_id: int = None, json: dict = None) -> dict:
"""Post data to a (non)existing program."""
return await self._request(
'post', 'program/{0}'.format(program_id), json=json)
async def disable(self, program_id: int) -> dict:
"""Disable a program."""
return await self._post(program_id, {'active': False})
async def enable(self, program_id: int) -> dict:
"""Enable a program."""
return await self._post(program_id, {'active': True})
async def get(self, program_id: int) -> dict:
"""Return a specific program."""
return await self._request('get', 'program/{0}'.format(program_id))
async def next(self) -> list:
"""Return the next run date/time for all programs."""
data = await self._request('get', 'program/nextrun')
return data['nextRuns']
async def running(self) -> list:
"""Return all running programs."""
data = await self._request('get', 'watering/program')
return data['programs']
async def start(self, program_id: int) -> dict:
"""Start a program."""
return await self._request(
'post', 'program/{0}/start'.format(program_id))
async def stop(self, program_id: int) -> dict:
"""Stop a program."""
return await self._request(
'post', 'program/{0}/stop'.format(program_id))
|
devopshq/crosspm | crosspm/helpers/usedby.py | Usedby.usedby_packages | python | def usedby_packages(self, deps_file_path=None, depslock_file_path=None, packages=None):
if deps_file_path is None:
deps_file_path = self._deps_path
if depslock_file_path is None:
depslock_file_path = self._depslock_path
if deps_file_path == depslock_file_path:
depslock_file_path += '.lock'
if packages is None:
self.search_dependencies(deps_file_path)
else:
self._root_package.packages = packages
self._log.info('Done!') | Lock packages. Downloader search packages | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/usedby.py#L11-L26 | [
"def search_dependencies(self, depslock_file_path):\n self._log.info('Check dependencies ...')\n self._root_package.find_usedby(depslock_file_path, property_validate=True)\n self._log.info('')\n self._log.info('Dependency tree:')\n self._root_package.print(0, self._config.output('tree', [{self._config.name_column: 0}]))\n"
] | class Usedby(Locker):
def __init__(self, config, do_load, recursive):
# Ignore do_load flag
super(Usedby, self).__init__(config, False, recursive)
def search_dependencies(self, depslock_file_path):
self._log.info('Check dependencies ...')
self._root_package.find_usedby(depslock_file_path, property_validate=True)
self._log.info('')
self._log.info('Dependency tree:')
self._root_package.print(0, self._config.output('tree', [{self._config.name_column: 0}]))
def entrypoint(self, *args, **kwargs):
self.usedby_packages(*args, **kwargs)
|
devopshq/crosspm | crosspm/helpers/downloader.py | Downloader.set_duplicated_flag | python | def set_duplicated_flag(self):
package_by_name = defaultdict(list)
for package1 in self._root_package.all_packages:
if package1 is None:
continue
pkg_name = package1.package_name
param_list = self._config.get_fails('unique', {})
params1 = package1.get_params(param_list)
for package2 in package_by_name[pkg_name]:
params2 = package2.get_params(param_list)
for x in param_list:
# START HACK for cached archive
param1 = params1[x]
param2 = params2[x]
if isinstance(param1, list):
param1 = [str(x) for x in param1]
if isinstance(param2, list):
param2 = [str(x) for x in param2]
# END
if str(param1) != str(param2):
package1.duplicated = True
package2.duplicated = True
package_by_name[pkg_name].append(package1) | For all package set flag duplicated, if it's not unique package
:return: | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/downloader.py#L189-L217 | null | class Downloader(Command):
def __init__(self, config, do_load, recursive):
self._log = logging.getLogger('crosspm')
self._config = config # type: Config
self.cache = config.cache
self.solid = config.solid
self.common_parser = Parser('common', {}, config)
self._root_package = Package('<root>', 0, {self._config.name_column: '<root>'}, self, None,
self.common_parser)
self.recursive = recursive
if not config.deps_path:
config.deps_path = \
config.deps_file_name if config.deps_file_name else CROSSPM_DEPENDENCY_FILENAME
deps_path = config.deps_path
if deps_path.__class__ is DependenciesContent:
# HACK
pass
self._deps_path = deps_path
else:
deps_path = config.deps_path.strip().strip('"').strip("'")
self._deps_path = os.path.realpath(os.path.expanduser(deps_path))
if not config.depslock_path:
config.depslock_path = \
config.deps_lock_file_name if config.deps_lock_file_name else CROSSPM_DEPENDENCY_LOCK_FILENAME
depslock_path = config.depslock_path
if depslock_path.__class__ is DependenciesContent:
# HACK
self._depslock_path = depslock_path
else:
depslock_path = depslock_path.strip().strip('"').strip("'")
self._depslock_path = os.path.realpath(os.path.expanduser(depslock_path))
self.do_load = do_load
def update_progress(self, msg, progress):
self._log.info('\r{0} [{1:10}] {2}%'.format(msg, '#' * int(float(progress) / 10.0), int(progress)))
# Get list of all packages needed to resolve all the dependencies.
# List of Package class instances.
def get_dependency_packages(self, list_or_file_path=None, property_validate=True):
"""
:param list_or_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
we can skip validate part
"""
if list_or_file_path is None:
list_or_file_path = self._depslock_path
if not os.path.isfile(list_or_file_path):
list_or_file_path = self._deps_path
_packages = OrderedDict()
if isinstance(list_or_file_path, str):
self._log.info('Reading dependencies ... [%s]', list_or_file_path)
for i, _src in enumerate(self._config.sources()):
if i > 0:
self._log.info('')
self._log.info('Next source ...')
_found_packages = _src.get_packages(self, list_or_file_path, property_validate)
_packages.update(
OrderedDict([(k, v) for k, v in _found_packages.items() if _packages.get(k, None) is None]))
if not self._config.no_fails:
if isinstance(list_or_file_path, (list, tuple)):
list_or_file_path = [x for x in list_or_file_path if
_packages.get(x[self._config.name_column], None) is None]
elif isinstance(list_or_file_path, dict) and isinstance(list_or_file_path.get('raw', None), list):
list_or_file_path['raw'] = [x for x in list_or_file_path['raw'] if
_packages.get(x[self._config.name_column], None) is None]
return _packages
def get_usedby_packages(self, list_or_file_path=None, property_validate=True):
"""
:param list_or_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
we can skip validate part
:return:
"""
if list_or_file_path is None:
list_or_file_path = self._depslock_path
if not os.path.isfile(list_or_file_path):
list_or_file_path = self._deps_path
_packages = OrderedDict()
if isinstance(list_or_file_path, str):
self._log.info('Reading dependencies ... [%s]', list_or_file_path)
for i, _src in enumerate(self._config.sources()):
if i > 0:
self._log.info('')
self._log.info('Next source ...')
_found_packages = _src.get_usedby(self, list_or_file_path, property_validate)
_packages.update(
OrderedDict([(k, v) for k, v in _found_packages.items() if _packages.get(k, None) is None]))
if not self._config.no_fails:
if isinstance(list_or_file_path, (list, tuple)):
list_or_file_path = [x for x in list_or_file_path if
_packages.get(x[self._config.name_column], None) is None]
elif isinstance(list_or_file_path, dict) and isinstance(list_or_file_path.get('raw', None), list):
list_or_file_path['raw'] = [x for x in list_or_file_path['raw'] if
_packages.get(x[self._config.name_column], None) is None]
return _packages
# Download packages or just unpack already loaded (it's up to adapter to decide)
def download_packages(self, depslock_file_path=None):
if depslock_file_path is None:
depslock_file_path = self._depslock_path
if depslock_file_path.__class__ is DependenciesContent:
# HACK для возможности проставления контента файла, а не пути
pass
elif isinstance(depslock_file_path, str):
if not os.path.isfile(depslock_file_path):
depslock_file_path = self._deps_path
deps_content = self._deps_path if isinstance(self._deps_path, DependenciesContent) else None
self.search_dependencies(depslock_file_path, deps_content=deps_content)
if self.do_load:
self._log.info('Unpack ...')
total = len(self._root_package.all_packages)
for i, _pkg in enumerate(self._root_package.all_packages):
self.update_progress('Download/Unpack:', float(i) / float(total) * 100.0)
if _pkg.download(): # self.packed_path):
_pkg.unpack() # self.unpacked_path)
self.update_progress('Download/Unpack:', 100)
self._log.info('')
self._log.info('Done!')
if self._config.lock_on_success:
from crosspm.helpers.locker import Locker
depslock_path = os.path.realpath(
os.path.join(os.path.dirname(depslock_file_path), self._config.deps_lock_file_name))
Locker(self._config, do_load=self.do_load, recursive=self.recursive).lock_packages(
depslock_file_path, depslock_path, packages=self._root_package.packages)
return self._root_package.all_packages
def entrypoint(self, *args, **kwargs):
self.download_packages(*args, **kwargs)
def search_dependencies(self, depslock_file_path, deps_content=None):
self._log.info('Check dependencies ...')
self._root_package.find_dependencies(depslock_file_path, property_validate=True, deps_content=deps_content, )
self._log.info('')
self.set_duplicated_flag()
self._log.info('Dependency tree:')
self._root_package.print(0, self._config.output('tree', [{self._config.name_column: 0}]))
self.check_unique(self._config.no_fails)
self.check_not_found()
def check_not_found(self):
_not_found = self.get_not_found_packages()
if _not_found:
raise CrosspmException(
CROSSPM_ERRORCODE_PACKAGE_NOT_FOUND,
'Some package(s) not found: {}'.format(', '.join(_not_found))
)
def get_not_found_packages(self):
return self._root_package.get_none_packages()
def add_package(self, pkg_name, package):
_added = False
if package is not None:
_added = True
return _added, package
def check_unique(self, no_fails):
if no_fails:
return
not_unique = set(x.package_name for x in self._root_package.all_packages if x and x.duplicated)
if not_unique:
raise CrosspmException(
CROSSPM_ERRORCODE_MULTIPLE_DEPS,
'Multiple versions of package "{}" found in dependencies.\n'
'See dependency tree in log (package with exclamation mark "!")'.format(
', '.join(not_unique)),
)
def get_raw_packages(self):
"""
Get all packages
:return: list of all packages
"""
return self._root_package.all_packages
def get_tree_packages(self):
"""
Get all packages, with hierarchy
:return: list of first level packages, with child
"""
return self._root_package.packages
|
devopshq/crosspm | crosspm/helpers/python.py | get_object_from_string | python | def get_object_from_string(object_path):
# split like crosspm.template.GUS => crosspm.template, GUS
try:
module_name, object_name = object_path.rsplit('.', maxsplit=1)
module_ = __import__(module_name, globals(), locals(), ['App'], 0)
variable_ = getattr(module_, object_name)
except Exception:
variable_ = None
return variable_ | Return python object from string
:param object_path: e.g os.path.join
:return: python object | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/python.py#L1-L14 | null | |
devopshq/crosspm | crosspm/helpers/locker.py | Locker.lock_packages | python | def lock_packages(self, deps_file_path=None, depslock_file_path=None, packages=None):
if deps_file_path is None:
deps_file_path = self._deps_path
if depslock_file_path is None:
depslock_file_path = self._depslock_path
if deps_file_path == depslock_file_path:
depslock_file_path += '.lock'
# raise CrosspmException(
# CROSSPM_ERRORCODE_WRONG_ARGS,
# 'Dependencies and Lock files are same: "{}".'.format(deps_file_path),
# )
if packages is None:
self.search_dependencies(deps_file_path)
else:
self._root_package.packages = packages
self._log.info('Writing lock file [{}]'.format(depslock_file_path))
output_params = {
'out_format': 'lock',
'output': depslock_file_path,
}
Output(config=self._config).write_output(output_params, self._root_package.packages)
self._log.info('Done!') | Lock packages. Downloader search packages | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/locker.py#L27-L54 | [
"def search_dependencies(self, depslock_file_path, deps_content=None):\n self._log.info('Check dependencies ...')\n self._root_package.find_dependencies(depslock_file_path, property_validate=True, deps_content=deps_content, )\n self._log.info('')\n self.set_duplicated_flag()\n self._log.info('Dependency tree:')\n self._root_package.print(0, self._config.output('tree', [{self._config.name_column: 0}]))\n self.check_unique(self._config.no_fails)\n self.check_not_found()\n",
"def write_output(self, params, packages):\n \"\"\"\n Функция вызывает определенную функцию для фиксированного out-format\n :param params:\n :param packages:\n :return:\n \"\"\"\n if params['out_format'] not in _output_format_map:\n raise CrosspmException(\n CROSSPM_ERRORCODE_UNKNOWN_OUT_TYPE,\n 'Unknown out_format: [{}]'.format(params['out_format']),\n )\n\n f = _output_format_map[params['out_format']]\n result = f(self, packages, **params)\n\n if result:\n out_file_path = os.path.realpath(os.path.expanduser(params['output']))\n self.write_to_file(result, out_file_path)\n self._log.info(\n 'Write packages info to file [%s]\\ncontent:\\n\\n%s',\n out_file_path,\n result,\n )\n"
] | class Locker(Downloader):
def __init__(self, config, do_load, recursive):
# TODO: revise logic to allow recursive search without downloading
super(Locker, self).__init__(config, do_load, recursive)
if not getattr(config, 'deps_path', ''):
config.deps_path = config.deps_file_name or CROSSPM_DEPENDENCY_FILENAME
deps_path = config.deps_path
if deps_path.__class__ is DependenciesContent:
# HACK
pass
self._deps_path = deps_path
else:
deps_path = config.deps_path.strip().strip('"').strip("'")
self._deps_path = os.path.realpath(os.path.expanduser(deps_path))
def entrypoint(self, *args, **kwargs):
self.lock_packages(*args, **kwargs)
|
devopshq/crosspm | crosspm/cpm.py | CrossPM.stdout | python | def stdout(self):
# --stdout
stdout = self._args['--stdout']
if stdout:
return True
# CROSSPM_STDOUT
stdout_env = os.getenv('CROSSPM_STDOUT', None)
if stdout_env is not None:
return True
return False | Флаг --stdout может быть взят из переменной окружения CROSSPM_STDOUT.
Если есть любое значение в CROSSPM_STDOUT - оно понимается как True
:return: | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/cpm.py#L144-L160 | null | class CrossPM:
_ready = False
def __init__(self, args=None, throw_exceptions=None, return_result=False):
self._config = None
self._output = None
self._return_result = return_result
if throw_exceptions is None:
# legacy behavior
if self._return_result:
self._throw_exceptions = False
else:
self._throw_exceptions = True
else:
self._throw_exceptions = throw_exceptions
self._log = logging.getLogger('crosspm')
args = self.prepare_args(args)
docopt_str = __doc__.format(app_name=app_name,
verb_level=Config.get_verbosity_level(),
log_default=Config.get_verbosity_level(0, True),
deps_default=CROSSPM_DEPENDENCY_FILENAME,
deps_lock_default=CROSSPM_DEPENDENCY_LOCK_FILENAME,
out_format=Output.get_output_types(),
out_format_default='stdout',
)
self._args = docopt(docopt_str,
argv=args,
version=version)
if self._args['--recursive']:
recursive_str = self._args['--recursive']
if recursive_str.lower() == 'true':
self._args['--recursive'] = True
elif recursive_str.lower() == 'false':
self._args['--recursive'] = False
else:
raise Exception("Unknown value to --recursive: {}".format(recursive_str))
if isinstance(self._args, str):
if self._throw_exceptions:
print(app_name)
print(self._args)
exit()
self._ready = True
if self._args['download']:
self.command_ = Downloader
elif self._args['lock']:
self.command_ = Locker
elif self._args['usedby']:
self.command_ = Usedby
else:
self.command_ = None
@property
@staticmethod
def prepare_args(args, windows=None):
"""
Prepare args - add support for old interface, e.g:
- --recursive was "flag" and for now it support True or False value
:param args:
:return:
"""
if windows is None:
windows = "win" in sys.platform
if isinstance(args, str):
args = shlex.split(args, posix=not windows)
elif isinstance(args, list):
pass
elif args is None:
args = sys.argv[1:]
else:
raise Exception("Unknown args type: {}".format(type(args)))
# --recursive => --recursive=True|False convert
for position, argument in enumerate(args):
# Normal way, skip change
if argument.lower() in ('--recursive=true', '--recursive=false'):
return args
elif argument.lower() == '--recursive':
if len(args) > position + 1 and args[position + 1].lower() in ["true", "false"]:
# --recursive true | false
return args
else:
# legacy way, convert --recursive to --recursive=true
args[position] = "--recursive=True"
return args
return args
@do_run
def read_config(self):
_deps_path = self._args['--deps-path']
# Передаём содержимое напрямую
if _deps_path is None and self._args['--dependencies-content'] is not None:
_deps_path = DependenciesContent(self._args['--dependencies-content'])
_depslock_path = self._args['--depslock-path']
if _depslock_path is None and self._args['--dependencies-lock-content'] is not None:
_depslock_path = DependenciesContent(self._args['--dependencies-lock-content'])
if self._args['lock']:
if self._args['DEPS']:
_deps_path = self._args['DEPS']
if self._args['DEPSLOCK']:
_depslock_path = self._args['DEPSLOCK']
self._config = Config(self._args['--config'], self._args['--options'], self._args['--no-fails'], _depslock_path,
_deps_path, self._args['--lock-on-success'],
self._args['--prefer-local'])
self._output = Output(self._config.output('result', None), self._config.name_column, self._config)
def exit(self, code, msg):
self._log.critical(msg)
if self._throw_exceptions:
sys.exit(code)
else:
return code, msg
@property
def recursive(self):
if self.command_ is Downloader:
if self._args['--recursive'] is None:
recursive = True
else:
recursive = self._args['--recursive']
else:
if self._args['--recursive'] is None:
recursive = False
else:
recursive = self._args['--recursive']
return recursive
@do_run
def check_common_args(self):
if self._args['--output']:
output = self._args['--output'].strip().strip("'").strip('"')
output_abs = os.path.abspath(output)
if os.path.isdir(output_abs):
raise CrosspmExceptionWrongArgs(
'"%s" is a directory - can\'t write to it'
)
self._args['--output'] = output
@do_run
def set_logging_level(self):
level_str = self._args['--verbose'].strip().lower()
log = self._args['--log']
if log:
log = log.strip().strip("'").strip('"')
log_abs = os.path.abspath(log)
if os.path.isdir(log_abs):
raise CrosspmExceptionWrongArgs(
'"%s" is a directory - can\'t write log to it'
)
else:
log_dir = os.path.dirname(log_abs)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
else:
log_abs = None
level = Config.get_verbosity_level(level_str or 'console')
self._log.handlers = []
if level or log_abs:
self._log.setLevel(level)
format_str = '%(asctime)-19s [%(levelname)-9s] %(message)s'
if level_str == 'debug':
format_str = '%(asctime)-19s [%(levelname)-9s] %(name)-12s: %(message)s'
formatter = logging.Formatter(format_str, datefmt="%Y-%m-%d %H:%M:%S")
if level:
# legacy way - Cmake catch message from stdout and parse PACKAGE_ROOT
# So, crosspm print debug and info message to stderr for debug purpose
if not self.stdout:
sh = logging.StreamHandler(stream=sys.stderr)
sh.setLevel(level)
self._log.addHandler(sh)
# If --stdout flag enabled
else:
sh = logging.StreamHandler(stream=sys.stderr)
sh.setLevel(logging.WARNING)
self._log.addHandler(sh)
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(level)
self._log.addHandler(sh)
if log_abs:
if not level_str:
level = Config.get_verbosity_level(0)
fh = logging.FileHandler(filename=log_abs)
fh.setLevel(level)
fh.setFormatter(formatter)
self._log.addHandler(fh)
def run(self):
time_start = time.time()
if self._ready:
errorcode, msg = self.set_logging_level()
self._log.info(app_name)
errorcode, msg = self.check_common_args()
if errorcode == 0:
errorcode, msg = self.read_config()
if errorcode == 0:
if self._args['download']:
errorcode, msg = self.command(self.command_)
elif self._args['lock']:
errorcode, msg = self.command(self.command_)
elif self._args['usedby']:
errorcode, msg = self.command(self.command_)
elif self._args['pack']:
errorcode, msg = self.pack()
elif self._args['cache']:
errorcode, msg = self.cache()
else:
errorcode, msg = CROSSPM_ERRORCODE_WRONG_ARGS, self._args
time_end = time.time()
self._log.info('Done in %2.2f sec' % (time_end - time_start))
return errorcode, msg
@do_run
def command(self, command_):
if self._return_result:
params = {}
else:
if self._args['--out-format'] == 'stdout':
if self._args['--output']:
raise CrosspmExceptionWrongArgs(
"unwanted argument '--output' while argument '--out-format={}'".format(
self._args['--out-format'],
))
elif not self._args['--output']:
raise CrosspmExceptionWrongArgs(
"argument '--output' required when argument '--out-format={}'".format(
self._args['--out-format'],
))
params = {
'out_format': ['--out-format', ''],
'output': ['--output', ''],
'output_template': ['--output-template', ''],
# 'out_prefix': ['--out-prefix', ''],
# 'depslock_path': ['--depslock-path', ''],
}
for k, v in params.items():
params[k] = self._args[v[0]] if v[0] in self._args else v[1]
if isinstance(params[k], str):
params[k] = params[k].strip('"').strip("'")
# try to dynamic load --output-template from python module
output_template = params['output_template']
if output_template:
# Try to load from python module
module_template = get_object_from_string(output_template)
if module_template is not None:
self._log.debug(
"Found output template path '{}' from '{}'".format(module_template, output_template))
params['output_template'] = module_template
else:
self._log.debug("Output template '{}' use like file path".format(output_template))
# check template exist
output_template = params['output_template']
if output_template and not os.path.exists(output_template):
raise CrosspmException(CROSSPM_ERRORCODE_CONFIG_NOT_FOUND,
"Can not find template '{}'".format(output_template))
do_load = not self._args['--list']
# hack for Locker
if command_ is Locker:
do_load = self.recursive
cpm_ = command_(self._config, do_load, self.recursive)
cpm_.entrypoint()
if self._return_result:
return self._return(cpm_)
else:
# self._output.write(params, packages)
self._output.write_output(params, cpm_.get_tree_packages())
return ''
def _return(self, cpm_downloader):
if str(self._return_result).lower() == 'raw':
return cpm_downloader.get_raw_packages()
if str(self._return_result).lower() == 'tree':
return cpm_downloader.get_tree_packages()
else:
return self._output.output_type_module(cpm_downloader.get_tree_packages())
@do_run
def pack(self):
Archive.create(self._args['<OUT>'], self._args['<SOURCE>'])
@do_run
def cache(self):
if self._args['clear']:
self._config.cache.clear(self._args['hard'])
elif self._args['size']:
self._config.cache.size()
elif self._args['age']:
self._config.cache.age()
else:
self._config.cache.info()
|
devopshq/crosspm | crosspm/cpm.py | CrossPM.prepare_args | python | def prepare_args(args, windows=None):
if windows is None:
windows = "win" in sys.platform
if isinstance(args, str):
args = shlex.split(args, posix=not windows)
elif isinstance(args, list):
pass
elif args is None:
args = sys.argv[1:]
else:
raise Exception("Unknown args type: {}".format(type(args)))
# --recursive => --recursive=True|False convert
for position, argument in enumerate(args):
# Normal way, skip change
if argument.lower() in ('--recursive=true', '--recursive=false'):
return args
elif argument.lower() == '--recursive':
if len(args) > position + 1 and args[position + 1].lower() in ["true", "false"]:
# --recursive true | false
return args
else:
# legacy way, convert --recursive to --recursive=true
args[position] = "--recursive=True"
return args
return args | Prepare args - add support for old interface, e.g:
- --recursive was "flag" and for now it support True or False value
:param args:
:return: | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/cpm.py#L163-L196 | null | class CrossPM:
_ready = False
def __init__(self, args=None, throw_exceptions=None, return_result=False):
self._config = None
self._output = None
self._return_result = return_result
if throw_exceptions is None:
# legacy behavior
if self._return_result:
self._throw_exceptions = False
else:
self._throw_exceptions = True
else:
self._throw_exceptions = throw_exceptions
self._log = logging.getLogger('crosspm')
args = self.prepare_args(args)
docopt_str = __doc__.format(app_name=app_name,
verb_level=Config.get_verbosity_level(),
log_default=Config.get_verbosity_level(0, True),
deps_default=CROSSPM_DEPENDENCY_FILENAME,
deps_lock_default=CROSSPM_DEPENDENCY_LOCK_FILENAME,
out_format=Output.get_output_types(),
out_format_default='stdout',
)
self._args = docopt(docopt_str,
argv=args,
version=version)
if self._args['--recursive']:
recursive_str = self._args['--recursive']
if recursive_str.lower() == 'true':
self._args['--recursive'] = True
elif recursive_str.lower() == 'false':
self._args['--recursive'] = False
else:
raise Exception("Unknown value to --recursive: {}".format(recursive_str))
if isinstance(self._args, str):
if self._throw_exceptions:
print(app_name)
print(self._args)
exit()
self._ready = True
if self._args['download']:
self.command_ = Downloader
elif self._args['lock']:
self.command_ = Locker
elif self._args['usedby']:
self.command_ = Usedby
else:
self.command_ = None
@property
def stdout(self):
"""
Флаг --stdout может быть взят из переменной окружения CROSSPM_STDOUT.
Если есть любое значение в CROSSPM_STDOUT - оно понимается как True
:return:
"""
# --stdout
stdout = self._args['--stdout']
if stdout:
return True
# CROSSPM_STDOUT
stdout_env = os.getenv('CROSSPM_STDOUT', None)
if stdout_env is not None:
return True
return False
@staticmethod
@do_run
def read_config(self):
_deps_path = self._args['--deps-path']
# Передаём содержимое напрямую
if _deps_path is None and self._args['--dependencies-content'] is not None:
_deps_path = DependenciesContent(self._args['--dependencies-content'])
_depslock_path = self._args['--depslock-path']
if _depslock_path is None and self._args['--dependencies-lock-content'] is not None:
_depslock_path = DependenciesContent(self._args['--dependencies-lock-content'])
if self._args['lock']:
if self._args['DEPS']:
_deps_path = self._args['DEPS']
if self._args['DEPSLOCK']:
_depslock_path = self._args['DEPSLOCK']
self._config = Config(self._args['--config'], self._args['--options'], self._args['--no-fails'], _depslock_path,
_deps_path, self._args['--lock-on-success'],
self._args['--prefer-local'])
self._output = Output(self._config.output('result', None), self._config.name_column, self._config)
def exit(self, code, msg):
self._log.critical(msg)
if self._throw_exceptions:
sys.exit(code)
else:
return code, msg
@property
def recursive(self):
if self.command_ is Downloader:
if self._args['--recursive'] is None:
recursive = True
else:
recursive = self._args['--recursive']
else:
if self._args['--recursive'] is None:
recursive = False
else:
recursive = self._args['--recursive']
return recursive
@do_run
def check_common_args(self):
if self._args['--output']:
output = self._args['--output'].strip().strip("'").strip('"')
output_abs = os.path.abspath(output)
if os.path.isdir(output_abs):
raise CrosspmExceptionWrongArgs(
'"%s" is a directory - can\'t write to it'
)
self._args['--output'] = output
@do_run
def set_logging_level(self):
level_str = self._args['--verbose'].strip().lower()
log = self._args['--log']
if log:
log = log.strip().strip("'").strip('"')
log_abs = os.path.abspath(log)
if os.path.isdir(log_abs):
raise CrosspmExceptionWrongArgs(
'"%s" is a directory - can\'t write log to it'
)
else:
log_dir = os.path.dirname(log_abs)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
else:
log_abs = None
level = Config.get_verbosity_level(level_str or 'console')
self._log.handlers = []
if level or log_abs:
self._log.setLevel(level)
format_str = '%(asctime)-19s [%(levelname)-9s] %(message)s'
if level_str == 'debug':
format_str = '%(asctime)-19s [%(levelname)-9s] %(name)-12s: %(message)s'
formatter = logging.Formatter(format_str, datefmt="%Y-%m-%d %H:%M:%S")
if level:
# legacy way - Cmake catch message from stdout and parse PACKAGE_ROOT
# So, crosspm print debug and info message to stderr for debug purpose
if not self.stdout:
sh = logging.StreamHandler(stream=sys.stderr)
sh.setLevel(level)
self._log.addHandler(sh)
# If --stdout flag enabled
else:
sh = logging.StreamHandler(stream=sys.stderr)
sh.setLevel(logging.WARNING)
self._log.addHandler(sh)
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(level)
self._log.addHandler(sh)
if log_abs:
if not level_str:
level = Config.get_verbosity_level(0)
fh = logging.FileHandler(filename=log_abs)
fh.setLevel(level)
fh.setFormatter(formatter)
self._log.addHandler(fh)
def run(self):
time_start = time.time()
if self._ready:
errorcode, msg = self.set_logging_level()
self._log.info(app_name)
errorcode, msg = self.check_common_args()
if errorcode == 0:
errorcode, msg = self.read_config()
if errorcode == 0:
if self._args['download']:
errorcode, msg = self.command(self.command_)
elif self._args['lock']:
errorcode, msg = self.command(self.command_)
elif self._args['usedby']:
errorcode, msg = self.command(self.command_)
elif self._args['pack']:
errorcode, msg = self.pack()
elif self._args['cache']:
errorcode, msg = self.cache()
else:
errorcode, msg = CROSSPM_ERRORCODE_WRONG_ARGS, self._args
time_end = time.time()
self._log.info('Done in %2.2f sec' % (time_end - time_start))
return errorcode, msg
@do_run
def command(self, command_):
if self._return_result:
params = {}
else:
if self._args['--out-format'] == 'stdout':
if self._args['--output']:
raise CrosspmExceptionWrongArgs(
"unwanted argument '--output' while argument '--out-format={}'".format(
self._args['--out-format'],
))
elif not self._args['--output']:
raise CrosspmExceptionWrongArgs(
"argument '--output' required when argument '--out-format={}'".format(
self._args['--out-format'],
))
params = {
'out_format': ['--out-format', ''],
'output': ['--output', ''],
'output_template': ['--output-template', ''],
# 'out_prefix': ['--out-prefix', ''],
# 'depslock_path': ['--depslock-path', ''],
}
for k, v in params.items():
params[k] = self._args[v[0]] if v[0] in self._args else v[1]
if isinstance(params[k], str):
params[k] = params[k].strip('"').strip("'")
# try to dynamic load --output-template from python module
output_template = params['output_template']
if output_template:
# Try to load from python module
module_template = get_object_from_string(output_template)
if module_template is not None:
self._log.debug(
"Found output template path '{}' from '{}'".format(module_template, output_template))
params['output_template'] = module_template
else:
self._log.debug("Output template '{}' use like file path".format(output_template))
# check template exist
output_template = params['output_template']
if output_template and not os.path.exists(output_template):
raise CrosspmException(CROSSPM_ERRORCODE_CONFIG_NOT_FOUND,
"Can not find template '{}'".format(output_template))
do_load = not self._args['--list']
# hack for Locker
if command_ is Locker:
do_load = self.recursive
cpm_ = command_(self._config, do_load, self.recursive)
cpm_.entrypoint()
if self._return_result:
return self._return(cpm_)
else:
# self._output.write(params, packages)
self._output.write_output(params, cpm_.get_tree_packages())
return ''
def _return(self, cpm_downloader):
if str(self._return_result).lower() == 'raw':
return cpm_downloader.get_raw_packages()
if str(self._return_result).lower() == 'tree':
return cpm_downloader.get_tree_packages()
else:
return self._output.output_type_module(cpm_downloader.get_tree_packages())
@do_run
def pack(self):
Archive.create(self._args['<OUT>'], self._args['<SOURCE>'])
@do_run
def cache(self):
if self._args['clear']:
self._config.cache.clear(self._args['hard'])
elif self._args['size']:
self._config.cache.size()
elif self._args['age']:
self._config.cache.age()
else:
self._config.cache.info()
|
devopshq/crosspm | crosspm/adapters/artifactoryaql.py | Adapter.search_auth | python | def search_auth(self, list_or_file_path, source):
_auth = source.args['auth']
if isinstance(_auth, str):
if ':' in _auth:
_auth = _auth.split(':')
elif _auth.endswith('}') and (
_auth.startswith('{') or ':' in _auth): # {auth}, {user}:{password}, user:{password}
_auth = self.get_auth(list_or_file_path, _auth)
_auth = self.split_auth(_auth)
if isinstance(_auth, list):
for i in range(len(_auth)):
if _auth[i].endswith('}') and (
_auth[i].startswith('{') or ':' in _auth[i]): # {auth}, {user}:{password}, user:{password}
_auth[i] = self.get_auth(list_or_file_path, _auth[i])
if ':' in _auth[i]:
_auth = self.split_auth(_auth[i])
source.args['auth'] = _auth | Looking for auth in env, cmdline, str
:param list_or_file_path:
:param source: | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/adapters/artifactoryaql.py#L283-L306 | null | class Adapter(BaseAdapter):
def get_packages(self, source, parser, downloader, list_or_file_path, property_validate=True):
"""
:param source:
:param parser:
:param downloader:
:param list_or_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
we can skip validate part
:return:
"""
_auth_type = source.args['auth_type'].lower() if 'auth_type' in source.args else 'simple'
_art_auth_etc = {}
if 'auth' in source.args:
self.search_auth(list_or_file_path, source)
if _auth_type == 'simple':
_art_auth_etc['auth'] = HTTPBasicAuth(*tuple(source.args['auth']))
session.auth = _art_auth_etc['auth']
# elif _auth_type == 'cert':
# _art_auth_etc['cert'] = os.path.realpath(os.path.expanduser(source.args['auth']))
if 'auth' not in _art_auth_etc:
msg = 'You have to set auth parameter for sources with artifactory-aql adapter'
# self._log.error(msg)
raise CrosspmException(
CROSSPM_ERRORCODE_ADAPTER_ERROR,
msg
)
if 'verify' in source.args:
_art_auth_etc['verify'] = source.args['verify'].lower in ['true', 'yes', '1']
else:
_art_auth_etc['verify'] = False
_pkg_name_column = self._config.name_column
_secret_variables = self._config.secret_variables
_packages_found = OrderedDict()
_pkg_name_old = ""
_packed_exist = False
_packed_cache_params = None
self._log.info('parser: {}'.format(parser._name))
for _paths in parser.get_paths(list_or_file_path, source):
# If "parser"-column specified - find only in this parser
parser_names = _paths['params'].get('parser')
if parser_names and parser_names != "*":
self._log.info("Specified parsers: {}".format(parser_names))
parsers = parser_names.split(',')
if parser._name not in parsers:
self._log.info("Skip parser: {}".format(parser._name))
continue
_packages = []
_params_found = {}
_params_found_raw = {}
last_error = ''
_pkg_name = _paths['params'][_pkg_name_column]
if _pkg_name != _pkg_name_old:
_pkg_name_old = _pkg_name
self._log.info(
'{}: {}'.format(_pkg_name,
{k: v for k, v in _paths['params'].items() if
(k not in (_pkg_name_column, 'repo') and k not in _secret_variables)
}
)
)
for _sub_paths in _paths['paths']:
_tmp_params = dict(_paths['params'])
self._log.info('repo: {}'.format(_sub_paths['repo']))
for _path in _sub_paths['paths']:
_tmp_params['repo'] = _sub_paths['repo']
# ------ START ----
# HACK for prefer-local
if self._config.prefer_local and not parser.has_rule('properties'):
params = parser.get_params_with_extra('path', _paths['params'])
for param in params:
param['repo'] = _tmp_params['repo']
_path_packed = downloader.cache.path_packed(None, param)
_packed_exist = os.path.isfile(_path_packed)
if _packed_exist:
self._log.info("Skip searching, use package cache in path {}".format(_path_packed))
_packed_cache_params = param
break # break check local cache
if _packed_exist:
break # break connect to artifactory
# ------ END ----
_path_fixed, _path_pattern, _file_name_pattern = parser.split_fixed_pattern_with_file_name(_path)
try:
_artifactory_server = _tmp_params['server']
_search_repo = _tmp_params['repo']
# Get AQL path pattern, with fixed part path, without artifactory url and repository name
_aql_path_pattern = _path_fixed[len(_artifactory_server) + 1 + len(_search_repo) + 1:]
if _path_pattern:
_aql_path_pattern = _aql_path_pattern + "/" + _path_pattern
_aql_query_url = '{}/api/search/aql'.format(_artifactory_server)
_aql_query_dict = {
"repo": {
"$eq": _search_repo,
},
"path": {
"$match": _aql_path_pattern,
},
"name": {
"$match": _file_name_pattern,
},
}
# Remove path if is empty string
if not _aql_path_pattern:
_aql_query_dict.pop('path')
query = 'items.find({query_dict}).include("*", "property")'.format(
query_dict=json.dumps(_aql_query_dict))
session.auth = _art_auth_etc['auth']
r = session.post(_aql_query_url, data=query, verify=_art_auth_etc['verify'])
r.raise_for_status()
_found_paths = r.json()
for _found in _found_paths['results']:
_repo_path = "{artifactory}/{repo}/{path}/{file_name}".format(
artifactory=_artifactory_server,
repo=_found['repo'],
path=_found['path'],
file_name=_found['name'])
_repo_path = ArtifactoryPath(_repo_path, **_art_auth_etc)
_mark = 'found'
_matched, _params, _params_raw = parser.validate_path(str(_repo_path), _tmp_params)
if _matched:
_params_found[_repo_path] = {k: v for k, v in _params.items()}
_params_found_raw[_repo_path] = {k: v for k, v in _params_raw.items()}
_mark = 'match'
# Check if it's `root` packages or from `lock` file
# ALSO, if from `lock` and have * in name - validate with property
property_validate_tmp = property_validate or '*' in _file_name_pattern
# If have not rule in config, skip this part
if parser.has_rule('properties') and property_validate_tmp:
_found_properties = {x['key']: x.get('value', '') for x in _found['properties']}
_valid, _params = parser.validate(_found_properties, 'properties', _tmp_params,
return_params=True)
else:
_valid, _params = True, {}
if _valid:
_mark = 'valid'
_packages += [_repo_path]
_params_found[_repo_path].update({k: v for k, v in _params.items()})
_params_found[_repo_path]['filename'] = str(_repo_path.name)
_params_found[_repo_path]['parser'] = parser._name
self._log.debug(' {}: {}'.format(_mark, str(_repo_path)))
except RuntimeError as e:
try:
err = json.loads(e.args[0])
except Exception:
err = {}
if isinstance(err, dict):
# Check errors
# :e.args[0]: {
# "errors" : [ {
# "status" : 404,
# "message" : "Not Found"
# } ]
# }
for error in err.get('errors', []):
err_status = error.get('status', -1)
err_msg = error.get('message', '')
if err_status == 401:
msg = 'Authentication error[{}]{}'.format(err_status,
(': {}'.format(
err_msg)) if err_msg else '')
elif err_status == 404:
msg = last_error
else:
msg = 'Error[{}]{}'.format(err_status,
(': {}'.format(err_msg)) if err_msg else '')
if last_error != msg:
self._log.error(msg)
last_error = msg
_package = None
# HACK for prefer-local
if _packed_exist:
# HACK - Normalize params for cached archive
for key, value in _packed_cache_params.items():
if isinstance(value, list):
value = ['' if x is None else x for x in value]
_packed_cache_params[key] = value
_package = Package(_pkg_name, None, _paths['params'], downloader, self, parser,
_packed_cache_params, list_or_file_path['raw'], {}, in_cache=True)
# END HACK
if _packages:
_tmp = copy.deepcopy(_params_found)
_packages = parser.filter_one(_packages, _paths['params'], _tmp)
if isinstance(_packages, dict):
_packages = [_packages]
if len(_packages) == 1:
_stat_pkg = self.pkg_stat(_packages[0]['path'])
_params_raw = _params_found_raw.get(_packages[0]['path'], {})
_params_tmp = _params_found.get(_packages[0]['path'], {})
_params_tmp.update({k: v for k, v in _packages[0]['params'].items() if k not in _params_tmp})
_package = Package(_pkg_name, _packages[0]['path'], _paths['params'], downloader, self, parser,
_params_tmp, _params_raw, _stat_pkg)
_mark = 'chosen'
self._log.info(' {}: {}'.format(_mark, str(_packages[0]['path'])))
elif len(_packages) > 1:
raise CrosspmException(
CROSSPM_ERRORCODE_MULTIPLE_DEPS,
'Multiple instances found for package [{}] not found.'.format(_pkg_name)
)
else:
# Package not found: may be error, but it could be in other source.
pass
else:
# Package not found: may be error, but it could be in other source.
pass
if (_package is not None) or (not self._config.no_fails):
_added, _package = downloader.add_package(_pkg_name, _package)
else:
_added = False
if _package is not None:
_pkg_name = _package.name
if _added or (_package is not None):
if (_package is not None) or (not self._config.no_fails):
if (_package is not None) or (_packages_found.get(_pkg_name, None) is None):
_packages_found[_pkg_name] = _package
if _added and (_package is not None):
if downloader.do_load:
_package.download()
_deps_file = _package.get_file(self._config.deps_lock_file_name)
if downloader.recursive:
if _deps_file:
_package.find_dependencies(_deps_file, property_validate=False)
elif self._config.deps_file_name:
_deps_file = _package.get_file(self._config.deps_file_name)
if _deps_file and os.path.isfile(_deps_file):
_package.find_dependencies(_deps_file, property_validate=False)
# HACK for not found packages
_package_names = [x[self._config.name_column] for x in list_or_file_path['raw']]
_packages_found_names = [x.name for x in _packages_found.values()]
for package in _package_names:
if package not in _packages_found_names:
_packages_found[package] = None
return _packages_found
def get_auth(self, list_or_file_path, _auth):
try:
return list_or_file_path['raw'][0][_auth[1:-1]]
except Exception:
msg = 'Cred {_auth} not found in options'.format(**locals())
raise CrosspmException(CROSSPM_ERRORCODE_ADAPTER_ERROR, msg)
def split_auth(self, _auth):
if ':' in _auth:
return _auth.split(':')
else:
msg = 'Wrong format of oneline credentials. Use user:password'
raise CrosspmException(CROSSPM_ERRORCODE_ADAPTER_ERROR, msg)
@staticmethod
def pkg_stat(package):
_stat_attr = {'ctime': 'st_atime',
'mtime': 'st_mtime',
'size': 'st_size'}
_stat_pkg = package.stat()
_stat_pkg = {k: getattr(_stat_pkg, k, None) for k in _stat_attr.keys()}
_stat_pkg = {
k: time.mktime(v.timetuple()) + float(v.microsecond) / 1000000.0 if isinstance(v, datetime) else v
for k, v in _stat_pkg.items()
}
return _stat_pkg
def get_usedby(self, source, parser, downloader, list_or_file_path, property_validate=True):
"""
:param source:
:param parser:
:param downloader:
:param list_or_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
we can skip validate part
:return:
"""
_auth_type = source.args['auth_type'].lower() if 'auth_type' in source.args else 'simple'
_art_auth_etc = {}
if 'auth' in source.args:
self.search_auth(list_or_file_path, source)
if _auth_type == 'simple':
_art_auth_etc['auth'] = HTTPBasicAuth(*tuple(source.args['auth']))
session.auth = _art_auth_etc['auth']
# elif _auth_type == 'cert':
# _art_auth_etc['cert'] = os.path.realpath(os.path.expanduser(source.args['auth']))
if 'auth' not in _art_auth_etc:
msg = 'You have to set auth parameter for sources with artifactory-aql adapter'
# self._log.error(msg)
raise CrosspmException(
CROSSPM_ERRORCODE_ADAPTER_ERROR,
msg
)
if 'verify' in source.args:
_art_auth_etc['verify'] = source.args['verify'].lower in ['true', 'yes', '1']
else:
_art_auth_etc['verify'] = False
_secret_variables = self._config.secret_variables
_pkg_name_col = self._config.name_column
_packages_found = OrderedDict()
_pkg_name_old = ""
for _paths in parser.get_paths(list_or_file_path, source):
_packages = []
_params_found = {}
_params_found_raw = {}
last_error = ''
_pkg_name = _paths['params'][_pkg_name_col]
if _pkg_name != _pkg_name_old:
_pkg_name_old = _pkg_name
self._log.info(
'{}: {}'.format(_pkg_name,
{k: v for k, v in _paths['params'].items() if
(k not in (_pkg_name_col, 'repo') and k not in _secret_variables)
}
)
)
for _sub_paths in _paths['paths']:
_tmp_params = dict(_paths['params'])
self._log.info('repo: {}'.format(_sub_paths['repo']))
_tmp_params['repo'] = _sub_paths['repo']
try:
_artifactory_server = _tmp_params['server']
_search_repo = _tmp_params['repo']
# TODO: Попробовать использовать lru_cache для кеширования кучи запросов
_aql_query_url = '{}/api/search/aql'.format(_artifactory_server)
_aql_query_dict = {
"repo": {
"$eq": _search_repo,
},
}
_usedby_aql = parser.get_usedby_aql(_tmp_params)
if _usedby_aql is None:
continue
_aql_query_dict.update(_usedby_aql)
query = 'items.find({query_dict}).include("*", "property")'.format(
query_dict=json.dumps(_aql_query_dict))
session.auth = _art_auth_etc['auth']
r = session.post(_aql_query_url, data=query, verify=_art_auth_etc['verify'])
r.raise_for_status()
_found_paths = r.json()
for _found in _found_paths['results']:
_repo_path = "{artifactory}/{repo}/{path}/{file_name}".format(
artifactory=_artifactory_server,
repo=_found['repo'],
path=_found['path'],
file_name=_found['name'])
_repo_path = ArtifactoryPath(_repo_path, **_art_auth_etc)
_found_properties = {x['key']: x.get('value', '') for x in _found['properties']}
_matched, _params, _params_raw = parser.validate_path(str(_repo_path), _tmp_params)
_params_found[_repo_path] = {k: v for k, v in _params.items()}
_params_found_raw[_repo_path] = {k: v for k, v in _params_raw.items()}
_params = _tmp_params
_packages += [_repo_path]
_params_found[_repo_path].update({k: v for k, v in _params.items()})
_params_found[_repo_path]['filename'] = str(_repo_path.name)
_params_raw = _params_found_raw.get(_repo_path, {})
params_found = {}
# TODO: Проставление params брать из config.yaml usedby
params = parser.get_params_from_properties(_found_properties)
params.update(parser.get_params_from_path(str(_repo_path)))
_package = Package(params[_pkg_name_col], _repo_path, params, downloader, self,
parser,
params_found, _params_raw)
_package.find_usedby(None, property_validate=False)
_packages_found[str(_repo_path)] = _package
# _package.find_dependencies(_deps_file, property_validate=False)
_mark = 'chosen'
self._log.info(' {}: {}'.format(_mark, str(_repo_path)))
except RuntimeError as e:
try:
err = json.loads(e.args[0])
except Exception:
err = {}
if isinstance(err, dict):
# Check errors
# :e.args[0]: {
# "errors" : [ {
# "status" : 404,
# "message" : "Not Found"
# } ]
# }
for error in err.get('errors', []):
err_status = error.get('status', -1)
err_msg = error.get('message', '')
if err_status == 401:
msg = 'Authentication error[{}]{}'.format(err_status,
(': {}'.format(
err_msg)) if err_msg else '')
elif err_status == 404:
msg = last_error
else:
msg = 'Error[{}]{}'.format(err_status,
(': {}'.format(err_msg)) if err_msg else '')
if last_error != msg:
self._log.error(msg)
last_error = msg
return _packages_found
    def download_package(self, package, dest_path):
        """Stream *package* (an ArtifactoryPath) down to *dest_path*.

        Remote ctime/mtime are copied onto the local file so cache freshness
        checks can compare timestamps later.
        :raises CrosspmException: CROSSPM_ERRORCODE_SERVER_CONNECT_ERROR on any failure.
        :return: dest_path
        """
        self.prepare_dirs(dest_path)
        try:
            _stat_pkg = self.pkg_stat(package)
            # with package.open() as _src:
            session.auth = package.auth
            _src = session.get(str(package), verify=package.verify, stream=True)
            _src.raise_for_status()
            with open(dest_path, 'wb+') as _dest:
                for chunk in _src.iter_content(CHUNK_SIZE):
                    if chunk:  # filter out keep-alive new chunks
                        _dest.write(chunk)
                _dest.flush()
            _src.close()
            # Propagate remote timestamps onto the downloaded file.
            os.utime(dest_path, (_stat_pkg['ctime'], _stat_pkg['mtime']))
        except Exception as e:
            code = CROSSPM_ERRORCODE_SERVER_CONNECT_ERROR
            msg = 'FAILED to download package {} at url: [{}]'.format(
                package.name,
                str(package),
            )
            raise CrosspmException(code, msg) from e
        return dest_path
def prepare_dirs(self, dest_path):
dest_dir = os.path.dirname(dest_path)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
elif os.path.exists(dest_path):
os.remove(dest_path)
@staticmethod
def get_package_filename(package):
if isinstance(package, ArtifactoryPath):
return package.name
return ''
@staticmethod
def get_package_path(package):
if isinstance(package, ArtifactoryPath):
return str(package)
return ''
|
devopshq/crosspm | crosspm/helpers/package.py | Package.download | python | def download(self, force=False):
exists, dest_path = self._downloader.cache.exists_packed(package=self, pkg_path=self.packed_path,
check_stat=not self._in_cache)
unp_exists, unp_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
# Если архива нет, то и кешу доверять не стоит
if not exists:
unp_exists = False
if exists and not self.packed_path:
self.packed_path = dest_path
if force or not exists:
# _packed_path = self._packed_path
dest_path_tmp = dest_path + ".tmp"
if os.path.exists(dest_path_tmp):
os.remove(dest_path_tmp)
self._adapter.download_package(self._pkg, dest_path_tmp)
os.rename(dest_path_tmp, dest_path)
self.packed_path = dest_path
# if not _packed_path:
self._not_cached = True
else:
if unp_exists and not self.unpacked_path:
self.unpacked_path = unp_path
self._not_cached = False
if self._not_cached and unp_exists:
shutil.rmtree(unp_path, ignore_errors=True)
return self.packed_path | Download file containing this package.
:param force: Force download even if it seems file already exists
:return: Full path with filename of downloaded package file. | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/package.py#L57-L92 | null | class Package:
def __init__(self, name, pkg, params, downloader, adapter, parser, params_found=None, params_found_raw=None,
stat=None, in_cache=False):
self.name = name
self.package_name = name
self.packed_path = ''
self.unpacked_path = ''
self.duplicated = False
self.packages = OrderedDict()
self.pkg = pkg # type: ArtifactoryPath
# Someone use this internal object, do not remove them :)
self._pkg = self.pkg
if isinstance(pkg, int):
if pkg == 0:
self._root = True
self._raw = []
self._root = False
self._params_found = {}
self._params_found_raw = {}
self._not_cached = True
self._log = logging.getLogger('crosspm')
self._params = params
self._adapter = adapter
self._parser = parser
self._downloader = downloader # type: Downloader
self._in_cache = in_cache
if params_found:
self._params_found = params_found
if params_found_raw:
self._params_found_raw = params_found_raw
self.stat = stat
def get_file(self, file_name, unpack_force=True):
if unpack_force:
self.unpack()
_dest_file = os.path.normpath(self.get_file_path(file_name))
_dest_file = _dest_file if os.path.isfile(_dest_file) else None
return _dest_file
def get_file_path(self, file_name):
_dest_file = os.path.join(self.unpacked_path, file_name)
return _dest_file
    def find_dependencies(self, depslock_file_path, property_validate=True, deps_content=None):
        """
        Resolve this package's dependencies and store them in self.packages.

        :param depslock_file_path: dependencies lock file to parse
        :param property_validate: for `root` packages we need check property; can be
            skipped for packages coming from a `lock` file
        :param deps_content: HACK for use --dependencies-content together with an
            existing dependencies.txt.lock file
        :return: None (result stored in self.packages)
        """
        self._raw = [x for x in
                     self._downloader.common_parser.iter_packages_params(depslock_file_path, deps_content=deps_content)]
        self.packages = self._downloader.get_dependency_packages({'raw': self._raw},
                                                                 property_validate=property_validate)
    def find_usedby(self, depslock_file_path, property_validate=True):
        """
        Find packages that use (depend on) this package and store them in self.packages.

        NOTE(review): the original docstring said "Find all dependencies", but this
        delegates to get_usedby_packages - it resolves *reverse* dependencies.
        :param depslock_file_path: lock file to read; None means seed the search
            from this package's own params
        :param property_validate: for `root` packages we need check property; can be
            skipped for packages coming from a `lock` file
        :return: None (result stored in self.packages)
        """
        if depslock_file_path is None:
            # No lock file: search from this package's own parameters,
            # with repo/server wildcards.
            self._raw = [self._params]
            self._raw[0]['repo'] = None
            self._raw[0]['server'] = None
        else:
            self._raw = [x for x in self._downloader.common_parser.iter_packages_params(depslock_file_path)]
        self.packages = self._downloader.get_usedby_packages({'raw': self._raw},
                                                             property_validate=property_validate)
    def unpack(self, force=False):
        """Extract the downloaded archive into the unpacked cache directory.

        "Solid" packages (per downloader.solid) are used as-is without extraction.
        :param force: re-extract even when an unpacked copy already exists
        """
        if self._downloader.solid(self):
            self.unpacked_path = self.packed_path
        else:
            exists, dest_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
            if exists and not self.unpacked_path:
                self.unpacked_path = dest_path
            if not self._not_cached:
                # Archive came from cache: trust the cached unpacked copy (or mark absent).
                self.unpacked_path = dest_path if exists else ''
            if force or self._not_cached or (not exists):
                Archive.extract(self.packed_path, dest_path)
                self.unpacked_path = dest_path
                self._not_cached = False
    def pack(self, src_path):
        # Archive the contents of src_path into this package's packed archive location.
        Archive.create(self.packed_path, src_path)
    def print(self, level=0, output=None):
        """Pretty-print this package subtree to the log.

        :param level: indentation depth (4 spaces per level)
        :param output: list of {column_name: width} dicts describing columns;
            width <= 0 means unconstrained width
        """
        def do_print(left):
            # Build one log line: the first column comes from self._params, later
            # columns from the merged found params.
            # NOTE(review): the `break` keeps only the first key of each out_item
            # dict - confirm each dict is expected to hold a single column.
            res_str = ''
            for out_item in output:
                for k, v in out_item.items():
                    cur_str = self.get_params(merged=True).get(k, '')
                    if not res_str:
                        cur_str = self._params.get(k, '')
                    if not res_str:
                        res_str = '{}{}'.format(left, cur_str)
                        continue
                    cur_format = ' {}'
                    if v > 0:
                        cur_format = '{:%s}' % (v if len(cur_str) <= v else v + len(left))
                    res_str += cur_format.format(cur_str)
                    break
            self._log.info(res_str)

        # Status marker: '!' duplicated, '+' unpacked, '>' downloaded only, '-' missing.
        _sign = ' '
        if not self._root:
            if self.duplicated:
                _sign = '!'
            elif self.unpacked_path:
                _sign = '+'
            elif self.packed_path:
                _sign = '>'
            else:
                _sign = '-'
        _left = '{}{}'.format(' ' * 4 * level, _sign)
        do_print(_left)
        for _pkg_name in self.packages:
            _pkg = self.packages[_pkg_name]
            if not _pkg:
                # Unresolved dependency: print just its name with a '-' marker.
                _left = '{}-'.format(' ' * 4 * (level + 1))
                self._log.info('{}{}'.format(_left, _pkg_name))
            else:
                _pkg.print(level + 1, output)
        if self._root:
            self._log.info('')
    def get_params(self, param_list=None, get_path=False, merged=False, raw=False):
        """
        Get Package params (found-params take precedence over the raw option row).

        :param param_list: name or list of parameter names to restrict the result to
        :param get_path: add 'path' = unpacked_path to the result
        :param merged: if version splited, True return version in string
        :param raw: overlay the unprocessed found params on top of the result
        :return: dict of parameter values
        """
        # Convert parameter name to list
        if param_list and isinstance(param_list, str):
            param_list = [param_list]
        if param_list and isinstance(param_list, (list, tuple)):
            result = {k: v for k, v in self._params_found.items() if k in param_list}
            result.update({k: v for k, v in self._params.items() if (k in param_list and k not in result)})
        else:
            result = {k: v for k, v in self._params_found.items()}
            result.update({k: v for k, v in self._params.items() if k not in result})
        if get_path:
            result['path'] = self.unpacked_path
        if merged:
            result.update(self._parser.merge_valued(result))
        if raw:
            result.update({k: v for k, v in self._params_found_raw.items()})
        return result
    def set_full_unique_name(self):
        # Replace self.name with the parser's fully-qualified unique package name
        # and return it.
        self.name = self._parser.get_full_package_name(self)
        return self.name
def get_name_and_path(self, name_only=False):
if name_only:
return self.name
return self.name, self.unpacked_path
def get_none_packages(self):
"""
Get packages with None (not founded), recursively
"""
not_found = set()
for package_name, package in self.packages.items():
if package is None:
not_found.add(package_name)
else:
if package.packages:
not_found = not_found | package.get_none_packages()
return not_found
    @property
    def all_packages(self):
        """Flattened list of this package's whole dependency subtree.

        Depth-first: each child's subtree first, then the direct children.
        NOTE(review): the list can contain None entries (unresolved packages) and
        duplicates for shared dependencies - confirm callers expect that.
        """
        packages = []
        for package in self.packages.values():
            if package:
                packages.extend(package.all_packages)
        packages.extend(self.packages.values())
        return packages
def ext(self, check_ext):
if self._pkg:
if not isinstance(check_ext, (list, tuple)):
check_ext = [check_ext]
name = self._adapter.get_package_filename(self._pkg)
if any((fnmatch.fnmatch(name, x) or fnmatch.fnmatch(name, '*%s' % x)) for x in check_ext):
return True
return False
    @property
    def md5(self):
        """MD5 checksum: taken from Artifactory stat metadata when available,
        otherwise computed locally from the packed archive."""
        try:
            return ArtifactoryPath.stat(self.pkg).md5
        except AttributeError:
            return md5sum(self.packed_path)
|
devopshq/crosspm | crosspm/helpers/package.py | Package.find_dependencies | python | def find_dependencies(self, depslock_file_path, property_validate=True, deps_content=None):
self._raw = [x for x in
self._downloader.common_parser.iter_packages_params(depslock_file_path, deps_content=deps_content)]
self.packages = self._downloader.get_dependency_packages({'raw': self._raw},
property_validate=property_validate) | Find all dependencies by package
:param depslock_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
:param deps_content: HACK for use --dependencies-content and existed dependencies.txt.lock file
we can skip validate part
:return: | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/package.py#L105-L117 | null | class Package:
def __init__(self, name, pkg, params, downloader, adapter, parser, params_found=None, params_found_raw=None,
stat=None, in_cache=False):
self.name = name
self.package_name = name
self.packed_path = ''
self.unpacked_path = ''
self.duplicated = False
self.packages = OrderedDict()
self.pkg = pkg # type: ArtifactoryPath
# Someone use this internal object, do not remove them :)
self._pkg = self.pkg
if isinstance(pkg, int):
if pkg == 0:
self._root = True
self._raw = []
self._root = False
self._params_found = {}
self._params_found_raw = {}
self._not_cached = True
self._log = logging.getLogger('crosspm')
self._params = params
self._adapter = adapter
self._parser = parser
self._downloader = downloader # type: Downloader
self._in_cache = in_cache
if params_found:
self._params_found = params_found
if params_found_raw:
self._params_found_raw = params_found_raw
self.stat = stat
    def download(self, force=False):
        """
        Download file containing this package.
        :param force: Force download even if it seems file already exists
        :return: Full path with filename of downloaded package file.
        """
        exists, dest_path = self._downloader.cache.exists_packed(package=self, pkg_path=self.packed_path,
                                                                 check_stat=not self._in_cache)
        unp_exists, unp_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
        # If the packed archive is missing, the unpacked cache cannot be trusted either.
        if not exists:
            unp_exists = False
        if exists and not self.packed_path:
            self.packed_path = dest_path
        if force or not exists:
            # Download to a temporary name first, then rename: avoids leaving
            # half-written archives in the cache on interruption.
            dest_path_tmp = dest_path + ".tmp"
            if os.path.exists(dest_path_tmp):
                os.remove(dest_path_tmp)
            self._adapter.download_package(self._pkg, dest_path_tmp)
            os.rename(dest_path_tmp, dest_path)
            self.packed_path = dest_path
            self._not_cached = True
        else:
            if unp_exists and not self.unpacked_path:
                self.unpacked_path = unp_path
            self._not_cached = False
        if self._not_cached and unp_exists:
            # A freshly downloaded archive invalidates any previously unpacked copy.
            shutil.rmtree(unp_path, ignore_errors=True)
        return self.packed_path
def get_file(self, file_name, unpack_force=True):
if unpack_force:
self.unpack()
_dest_file = os.path.normpath(self.get_file_path(file_name))
_dest_file = _dest_file if os.path.isfile(_dest_file) else None
return _dest_file
def get_file_path(self, file_name):
_dest_file = os.path.join(self.unpacked_path, file_name)
return _dest_file
def find_usedby(self, depslock_file_path, property_validate=True):
"""
Find all dependencies by package
:param depslock_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
we can skip validate part
:return:
"""
if depslock_file_path is None:
self._raw = [self._params]
self._raw[0]['repo'] = None
self._raw[0]['server'] = None
else:
self._raw = [x for x in self._downloader.common_parser.iter_packages_params(depslock_file_path)]
self.packages = self._downloader.get_usedby_packages({'raw': self._raw},
property_validate=property_validate)
def unpack(self, force=False):
if self._downloader.solid(self):
self.unpacked_path = self.packed_path
else:
exists, dest_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
if exists and not self.unpacked_path:
self.unpacked_path = dest_path
# if force or not exists:
# if not dest_path:
# dest_path = self._downloader.unpacked_path
# temp_path = os.path.realpath(os.path.join(dest_path, self._name))
# _exists = os.path.exists(temp_path)
if not self._not_cached:
self.unpacked_path = dest_path if exists else '' # temp_path if exists else ''
if force or self._not_cached or (not exists):
Archive.extract(self.packed_path, dest_path) # temp_path)
self.unpacked_path = dest_path # temp_path
self._not_cached = False
def pack(self, src_path):
Archive.create(self.packed_path, src_path)
def print(self, level=0, output=None):
def do_print(left):
res_str = ''
for out_item in output:
for k, v in out_item.items():
cur_str = self.get_params(merged=True).get(k, '')
if not res_str:
cur_str = self._params.get(k, '')
if not res_str:
res_str = '{}{}'.format(left, cur_str)
continue
cur_format = ' {}'
if v > 0:
cur_format = '{:%s}' % (v if len(cur_str) <= v else v + len(left))
res_str += cur_format.format(cur_str)
break
self._log.info(res_str)
_sign = ' '
if not self._root:
if self.duplicated:
_sign = '!'
elif self.unpacked_path:
_sign = '+'
elif self.packed_path:
_sign = '>'
else:
_sign = '-'
_left = '{}{}'.format(' ' * 4 * level, _sign)
do_print(_left)
for _pkg_name in self.packages:
_pkg = self.packages[_pkg_name]
if not _pkg:
_left = '{}-'.format(' ' * 4 * (level + 1))
self._log.info('{}{}'.format(_left, _pkg_name))
else:
_pkg.print(level + 1, output)
if self._root:
self._log.info('')
def get_params(self, param_list=None, get_path=False, merged=False, raw=False):
"""
Get Package params
:param param_list: name or list of parameters
:param get_path:
:param merged: if version splited, True return version in string
:param raw:
:return:
"""
# Convert parameter name to list
if param_list and isinstance(param_list, str):
param_list = [param_list]
if param_list and isinstance(param_list, (list, tuple)):
result = {k: v for k, v in self._params_found.items() if k in param_list}
result.update({k: v for k, v in self._params.items() if (k in param_list and k not in result)})
else:
result = {k: v for k, v in self._params_found.items()}
result.update({k: v for k, v in self._params.items() if k not in result})
if get_path:
result['path'] = self.unpacked_path
if merged:
result.update(self._parser.merge_valued(result))
if raw:
result.update({k: v for k, v in self._params_found_raw.items()})
return result
def set_full_unique_name(self):
self.name = self._parser.get_full_package_name(self)
return self.name
def get_name_and_path(self, name_only=False):
if name_only:
return self.name
return self.name, self.unpacked_path
def get_none_packages(self):
"""
Get packages with None (not founded), recursively
"""
not_found = set()
for package_name, package in self.packages.items():
if package is None:
not_found.add(package_name)
else:
if package.packages:
not_found = not_found | package.get_none_packages()
return not_found
@property
def all_packages(self):
packages = []
for package in self.packages.values():
if package:
packages.extend(package.all_packages)
packages.extend(self.packages.values())
return packages
def ext(self, check_ext):
if self._pkg:
if not isinstance(check_ext, (list, tuple)):
check_ext = [check_ext]
name = self._adapter.get_package_filename(self._pkg)
if any((fnmatch.fnmatch(name, x) or fnmatch.fnmatch(name, '*%s' % x)) for x in check_ext):
return True
return False
@property
def md5(self):
try:
return ArtifactoryPath.stat(self.pkg).md5
except AttributeError:
return md5sum(self.packed_path)
|
devopshq/crosspm | crosspm/helpers/package.py | Package.find_usedby | python | def find_usedby(self, depslock_file_path, property_validate=True):
if depslock_file_path is None:
self._raw = [self._params]
self._raw[0]['repo'] = None
self._raw[0]['server'] = None
else:
self._raw = [x for x in self._downloader.common_parser.iter_packages_params(depslock_file_path)]
self.packages = self._downloader.get_usedby_packages({'raw': self._raw},
property_validate=property_validate) | Find all dependencies by package
:param depslock_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
we can skip validate part
:return: | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/package.py#L119-L134 | null | class Package:
def __init__(self, name, pkg, params, downloader, adapter, parser, params_found=None, params_found_raw=None,
stat=None, in_cache=False):
self.name = name
self.package_name = name
self.packed_path = ''
self.unpacked_path = ''
self.duplicated = False
self.packages = OrderedDict()
self.pkg = pkg # type: ArtifactoryPath
# Someone use this internal object, do not remove them :)
self._pkg = self.pkg
if isinstance(pkg, int):
if pkg == 0:
self._root = True
self._raw = []
self._root = False
self._params_found = {}
self._params_found_raw = {}
self._not_cached = True
self._log = logging.getLogger('crosspm')
self._params = params
self._adapter = adapter
self._parser = parser
self._downloader = downloader # type: Downloader
self._in_cache = in_cache
if params_found:
self._params_found = params_found
if params_found_raw:
self._params_found_raw = params_found_raw
self.stat = stat
def download(self, force=False):
"""
Download file containing this package.
:param force: Force download even if it seems file already exists
:return: Full path with filename of downloaded package file.
"""
exists, dest_path = self._downloader.cache.exists_packed(package=self, pkg_path=self.packed_path,
check_stat=not self._in_cache)
unp_exists, unp_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
# Если архива нет, то и кешу доверять не стоит
if not exists:
unp_exists = False
if exists and not self.packed_path:
self.packed_path = dest_path
if force or not exists:
# _packed_path = self._packed_path
dest_path_tmp = dest_path + ".tmp"
if os.path.exists(dest_path_tmp):
os.remove(dest_path_tmp)
self._adapter.download_package(self._pkg, dest_path_tmp)
os.rename(dest_path_tmp, dest_path)
self.packed_path = dest_path
# if not _packed_path:
self._not_cached = True
else:
if unp_exists and not self.unpacked_path:
self.unpacked_path = unp_path
self._not_cached = False
if self._not_cached and unp_exists:
shutil.rmtree(unp_path, ignore_errors=True)
return self.packed_path
def get_file(self, file_name, unpack_force=True):
if unpack_force:
self.unpack()
_dest_file = os.path.normpath(self.get_file_path(file_name))
_dest_file = _dest_file if os.path.isfile(_dest_file) else None
return _dest_file
def get_file_path(self, file_name):
_dest_file = os.path.join(self.unpacked_path, file_name)
return _dest_file
def find_dependencies(self, depslock_file_path, property_validate=True, deps_content=None):
"""
Find all dependencies by package
:param depslock_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
:param deps_content: HACK for use --dependencies-content and existed dependencies.txt.lock file
we can skip validate part
:return:
"""
self._raw = [x for x in
self._downloader.common_parser.iter_packages_params(depslock_file_path, deps_content=deps_content)]
self.packages = self._downloader.get_dependency_packages({'raw': self._raw},
property_validate=property_validate)
def unpack(self, force=False):
if self._downloader.solid(self):
self.unpacked_path = self.packed_path
else:
exists, dest_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
if exists and not self.unpacked_path:
self.unpacked_path = dest_path
# if force or not exists:
# if not dest_path:
# dest_path = self._downloader.unpacked_path
# temp_path = os.path.realpath(os.path.join(dest_path, self._name))
# _exists = os.path.exists(temp_path)
if not self._not_cached:
self.unpacked_path = dest_path if exists else '' # temp_path if exists else ''
if force or self._not_cached or (not exists):
Archive.extract(self.packed_path, dest_path) # temp_path)
self.unpacked_path = dest_path # temp_path
self._not_cached = False
def pack(self, src_path):
Archive.create(self.packed_path, src_path)
def print(self, level=0, output=None):
def do_print(left):
res_str = ''
for out_item in output:
for k, v in out_item.items():
cur_str = self.get_params(merged=True).get(k, '')
if not res_str:
cur_str = self._params.get(k, '')
if not res_str:
res_str = '{}{}'.format(left, cur_str)
continue
cur_format = ' {}'
if v > 0:
cur_format = '{:%s}' % (v if len(cur_str) <= v else v + len(left))
res_str += cur_format.format(cur_str)
break
self._log.info(res_str)
_sign = ' '
if not self._root:
if self.duplicated:
_sign = '!'
elif self.unpacked_path:
_sign = '+'
elif self.packed_path:
_sign = '>'
else:
_sign = '-'
_left = '{}{}'.format(' ' * 4 * level, _sign)
do_print(_left)
for _pkg_name in self.packages:
_pkg = self.packages[_pkg_name]
if not _pkg:
_left = '{}-'.format(' ' * 4 * (level + 1))
self._log.info('{}{}'.format(_left, _pkg_name))
else:
_pkg.print(level + 1, output)
if self._root:
self._log.info('')
def get_params(self, param_list=None, get_path=False, merged=False, raw=False):
"""
Get Package params
:param param_list: name or list of parameters
:param get_path:
:param merged: if version splited, True return version in string
:param raw:
:return:
"""
# Convert parameter name to list
if param_list and isinstance(param_list, str):
param_list = [param_list]
if param_list and isinstance(param_list, (list, tuple)):
result = {k: v for k, v in self._params_found.items() if k in param_list}
result.update({k: v for k, v in self._params.items() if (k in param_list and k not in result)})
else:
result = {k: v for k, v in self._params_found.items()}
result.update({k: v for k, v in self._params.items() if k not in result})
if get_path:
result['path'] = self.unpacked_path
if merged:
result.update(self._parser.merge_valued(result))
if raw:
result.update({k: v for k, v in self._params_found_raw.items()})
return result
def set_full_unique_name(self):
self.name = self._parser.get_full_package_name(self)
return self.name
def get_name_and_path(self, name_only=False):
if name_only:
return self.name
return self.name, self.unpacked_path
def get_none_packages(self):
"""
Get packages with None (not founded), recursively
"""
not_found = set()
for package_name, package in self.packages.items():
if package is None:
not_found.add(package_name)
else:
if package.packages:
not_found = not_found | package.get_none_packages()
return not_found
@property
def all_packages(self):
packages = []
for package in self.packages.values():
if package:
packages.extend(package.all_packages)
packages.extend(self.packages.values())
return packages
def ext(self, check_ext):
if self._pkg:
if not isinstance(check_ext, (list, tuple)):
check_ext = [check_ext]
name = self._adapter.get_package_filename(self._pkg)
if any((fnmatch.fnmatch(name, x) or fnmatch.fnmatch(name, '*%s' % x)) for x in check_ext):
return True
return False
@property
def md5(self):
try:
return ArtifactoryPath.stat(self.pkg).md5
except AttributeError:
return md5sum(self.packed_path)
|
devopshq/crosspm | crosspm/helpers/package.py | Package.get_params | python | def get_params(self, param_list=None, get_path=False, merged=False, raw=False):
# Convert parameter name to list
if param_list and isinstance(param_list, str):
param_list = [param_list]
if param_list and isinstance(param_list, (list, tuple)):
result = {k: v for k, v in self._params_found.items() if k in param_list}
result.update({k: v for k, v in self._params.items() if (k in param_list and k not in result)})
else:
result = {k: v for k, v in self._params_found.items()}
result.update({k: v for k, v in self._params.items() if k not in result})
if get_path:
result['path'] = self.unpacked_path
if merged:
result.update(self._parser.merge_valued(result))
if raw:
result.update({k: v for k, v in self._params_found_raw.items()})
return result | Get Package params
:param param_list: name or list of parameters
:param get_path:
:param merged: if version splited, True return version in string
:param raw:
:return: | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/package.py#L200-L224 | null | class Package:
def __init__(self, name, pkg, params, downloader, adapter, parser, params_found=None, params_found_raw=None,
stat=None, in_cache=False):
self.name = name
self.package_name = name
self.packed_path = ''
self.unpacked_path = ''
self.duplicated = False
self.packages = OrderedDict()
self.pkg = pkg # type: ArtifactoryPath
# Someone use this internal object, do not remove them :)
self._pkg = self.pkg
if isinstance(pkg, int):
if pkg == 0:
self._root = True
self._raw = []
self._root = False
self._params_found = {}
self._params_found_raw = {}
self._not_cached = True
self._log = logging.getLogger('crosspm')
self._params = params
self._adapter = adapter
self._parser = parser
self._downloader = downloader # type: Downloader
self._in_cache = in_cache
if params_found:
self._params_found = params_found
if params_found_raw:
self._params_found_raw = params_found_raw
self.stat = stat
def download(self, force=False):
"""
Download file containing this package.
:param force: Force download even if it seems file already exists
:return: Full path with filename of downloaded package file.
"""
exists, dest_path = self._downloader.cache.exists_packed(package=self, pkg_path=self.packed_path,
check_stat=not self._in_cache)
unp_exists, unp_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
# Если архива нет, то и кешу доверять не стоит
if not exists:
unp_exists = False
if exists and not self.packed_path:
self.packed_path = dest_path
if force or not exists:
# _packed_path = self._packed_path
dest_path_tmp = dest_path + ".tmp"
if os.path.exists(dest_path_tmp):
os.remove(dest_path_tmp)
self._adapter.download_package(self._pkg, dest_path_tmp)
os.rename(dest_path_tmp, dest_path)
self.packed_path = dest_path
# if not _packed_path:
self._not_cached = True
else:
if unp_exists and not self.unpacked_path:
self.unpacked_path = unp_path
self._not_cached = False
if self._not_cached and unp_exists:
shutil.rmtree(unp_path, ignore_errors=True)
return self.packed_path
def get_file(self, file_name, unpack_force=True):
if unpack_force:
self.unpack()
_dest_file = os.path.normpath(self.get_file_path(file_name))
_dest_file = _dest_file if os.path.isfile(_dest_file) else None
return _dest_file
def get_file_path(self, file_name):
_dest_file = os.path.join(self.unpacked_path, file_name)
return _dest_file
def find_dependencies(self, depslock_file_path, property_validate=True, deps_content=None):
"""
Find all dependencies by package
:param depslock_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
:param deps_content: HACK for use --dependencies-content and existed dependencies.txt.lock file
we can skip validate part
:return:
"""
self._raw = [x for x in
self._downloader.common_parser.iter_packages_params(depslock_file_path, deps_content=deps_content)]
self.packages = self._downloader.get_dependency_packages({'raw': self._raw},
property_validate=property_validate)
def find_usedby(self, depslock_file_path, property_validate=True):
"""
Find all dependencies by package
:param depslock_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
we can skip validate part
:return:
"""
if depslock_file_path is None:
self._raw = [self._params]
self._raw[0]['repo'] = None
self._raw[0]['server'] = None
else:
self._raw = [x for x in self._downloader.common_parser.iter_packages_params(depslock_file_path)]
self.packages = self._downloader.get_usedby_packages({'raw': self._raw},
property_validate=property_validate)
def unpack(self, force=False):
if self._downloader.solid(self):
self.unpacked_path = self.packed_path
else:
exists, dest_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
if exists and not self.unpacked_path:
self.unpacked_path = dest_path
# if force or not exists:
# if not dest_path:
# dest_path = self._downloader.unpacked_path
# temp_path = os.path.realpath(os.path.join(dest_path, self._name))
# _exists = os.path.exists(temp_path)
if not self._not_cached:
self.unpacked_path = dest_path if exists else '' # temp_path if exists else ''
if force or self._not_cached or (not exists):
Archive.extract(self.packed_path, dest_path) # temp_path)
self.unpacked_path = dest_path # temp_path
self._not_cached = False
def pack(self, src_path):
Archive.create(self.packed_path, src_path)
def print(self, level=0, output=None):
def do_print(left):
res_str = ''
for out_item in output:
for k, v in out_item.items():
cur_str = self.get_params(merged=True).get(k, '')
if not res_str:
cur_str = self._params.get(k, '')
if not res_str:
res_str = '{}{}'.format(left, cur_str)
continue
cur_format = ' {}'
if v > 0:
cur_format = '{:%s}' % (v if len(cur_str) <= v else v + len(left))
res_str += cur_format.format(cur_str)
break
self._log.info(res_str)
_sign = ' '
if not self._root:
if self.duplicated:
_sign = '!'
elif self.unpacked_path:
_sign = '+'
elif self.packed_path:
_sign = '>'
else:
_sign = '-'
_left = '{}{}'.format(' ' * 4 * level, _sign)
do_print(_left)
for _pkg_name in self.packages:
_pkg = self.packages[_pkg_name]
if not _pkg:
_left = '{}-'.format(' ' * 4 * (level + 1))
self._log.info('{}{}'.format(_left, _pkg_name))
else:
_pkg.print(level + 1, output)
if self._root:
self._log.info('')
def set_full_unique_name(self):
self.name = self._parser.get_full_package_name(self)
return self.name
def get_name_and_path(self, name_only=False):
if name_only:
return self.name
return self.name, self.unpacked_path
def get_none_packages(self):
"""
Get packages with None (not founded), recursively
"""
not_found = set()
for package_name, package in self.packages.items():
if package is None:
not_found.add(package_name)
else:
if package.packages:
not_found = not_found | package.get_none_packages()
return not_found
@property
def all_packages(self):
packages = []
for package in self.packages.values():
if package:
packages.extend(package.all_packages)
packages.extend(self.packages.values())
return packages
def ext(self, check_ext):
if self._pkg:
if not isinstance(check_ext, (list, tuple)):
check_ext = [check_ext]
name = self._adapter.get_package_filename(self._pkg)
if any((fnmatch.fnmatch(name, x) or fnmatch.fnmatch(name, '*%s' % x)) for x in check_ext):
return True
return False
@property
def md5(self):
try:
return ArtifactoryPath.stat(self.pkg).md5
except AttributeError:
return md5sum(self.packed_path)
|
devopshq/crosspm | crosspm/helpers/package.py | Package.get_none_packages | python | def get_none_packages(self):
not_found = set()
for package_name, package in self.packages.items():
if package is None:
not_found.add(package_name)
else:
if package.packages:
not_found = not_found | package.get_none_packages()
return not_found | Get packages with None (not founded), recursively | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/package.py#L235-L246 | null | class Package:
def __init__(self, name, pkg, params, downloader, adapter, parser, params_found=None, params_found_raw=None,
stat=None, in_cache=False):
self.name = name
self.package_name = name
self.packed_path = ''
self.unpacked_path = ''
self.duplicated = False
self.packages = OrderedDict()
self.pkg = pkg # type: ArtifactoryPath
# Someone use this internal object, do not remove them :)
self._pkg = self.pkg
if isinstance(pkg, int):
if pkg == 0:
self._root = True
self._raw = []
self._root = False
self._params_found = {}
self._params_found_raw = {}
self._not_cached = True
self._log = logging.getLogger('crosspm')
self._params = params
self._adapter = adapter
self._parser = parser
self._downloader = downloader # type: Downloader
self._in_cache = in_cache
if params_found:
self._params_found = params_found
if params_found_raw:
self._params_found_raw = params_found_raw
self.stat = stat
def download(self, force=False):
"""
Download file containing this package.
:param force: Force download even if it seems file already exists
:return: Full path with filename of downloaded package file.
"""
exists, dest_path = self._downloader.cache.exists_packed(package=self, pkg_path=self.packed_path,
check_stat=not self._in_cache)
unp_exists, unp_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
# Если архива нет, то и кешу доверять не стоит
if not exists:
unp_exists = False
if exists and not self.packed_path:
self.packed_path = dest_path
if force or not exists:
# _packed_path = self._packed_path
dest_path_tmp = dest_path + ".tmp"
if os.path.exists(dest_path_tmp):
os.remove(dest_path_tmp)
self._adapter.download_package(self._pkg, dest_path_tmp)
os.rename(dest_path_tmp, dest_path)
self.packed_path = dest_path
# if not _packed_path:
self._not_cached = True
else:
if unp_exists and not self.unpacked_path:
self.unpacked_path = unp_path
self._not_cached = False
if self._not_cached and unp_exists:
shutil.rmtree(unp_path, ignore_errors=True)
return self.packed_path
def get_file(self, file_name, unpack_force=True):
if unpack_force:
self.unpack()
_dest_file = os.path.normpath(self.get_file_path(file_name))
_dest_file = _dest_file if os.path.isfile(_dest_file) else None
return _dest_file
def get_file_path(self, file_name):
_dest_file = os.path.join(self.unpacked_path, file_name)
return _dest_file
def find_dependencies(self, depslock_file_path, property_validate=True, deps_content=None):
"""
Find all dependencies by package
:param depslock_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
:param deps_content: HACK for use --dependencies-content and existed dependencies.txt.lock file
we can skip validate part
:return:
"""
self._raw = [x for x in
self._downloader.common_parser.iter_packages_params(depslock_file_path, deps_content=deps_content)]
self.packages = self._downloader.get_dependency_packages({'raw': self._raw},
property_validate=property_validate)
def find_usedby(self, depslock_file_path, property_validate=True):
"""
Find all dependencies by package
:param depslock_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
we can skip validate part
:return:
"""
if depslock_file_path is None:
self._raw = [self._params]
self._raw[0]['repo'] = None
self._raw[0]['server'] = None
else:
self._raw = [x for x in self._downloader.common_parser.iter_packages_params(depslock_file_path)]
self.packages = self._downloader.get_usedby_packages({'raw': self._raw},
property_validate=property_validate)
def unpack(self, force=False):
if self._downloader.solid(self):
self.unpacked_path = self.packed_path
else:
exists, dest_path = self._downloader.cache.exists_unpacked(package=self, pkg_path=self.unpacked_path)
if exists and not self.unpacked_path:
self.unpacked_path = dest_path
# if force or not exists:
# if not dest_path:
# dest_path = self._downloader.unpacked_path
# temp_path = os.path.realpath(os.path.join(dest_path, self._name))
# _exists = os.path.exists(temp_path)
if not self._not_cached:
self.unpacked_path = dest_path if exists else '' # temp_path if exists else ''
if force or self._not_cached or (not exists):
Archive.extract(self.packed_path, dest_path) # temp_path)
self.unpacked_path = dest_path # temp_path
self._not_cached = False
def pack(self, src_path):
Archive.create(self.packed_path, src_path)
def print(self, level=0, output=None):
def do_print(left):
res_str = ''
for out_item in output:
for k, v in out_item.items():
cur_str = self.get_params(merged=True).get(k, '')
if not res_str:
cur_str = self._params.get(k, '')
if not res_str:
res_str = '{}{}'.format(left, cur_str)
continue
cur_format = ' {}'
if v > 0:
cur_format = '{:%s}' % (v if len(cur_str) <= v else v + len(left))
res_str += cur_format.format(cur_str)
break
self._log.info(res_str)
_sign = ' '
if not self._root:
if self.duplicated:
_sign = '!'
elif self.unpacked_path:
_sign = '+'
elif self.packed_path:
_sign = '>'
else:
_sign = '-'
_left = '{}{}'.format(' ' * 4 * level, _sign)
do_print(_left)
for _pkg_name in self.packages:
_pkg = self.packages[_pkg_name]
if not _pkg:
_left = '{}-'.format(' ' * 4 * (level + 1))
self._log.info('{}{}'.format(_left, _pkg_name))
else:
_pkg.print(level + 1, output)
if self._root:
self._log.info('')
def get_params(self, param_list=None, get_path=False, merged=False, raw=False):
"""
Get Package params
:param param_list: name or list of parameters
:param get_path:
:param merged: if version splited, True return version in string
:param raw:
:return:
"""
# Convert parameter name to list
if param_list and isinstance(param_list, str):
param_list = [param_list]
if param_list and isinstance(param_list, (list, tuple)):
result = {k: v for k, v in self._params_found.items() if k in param_list}
result.update({k: v for k, v in self._params.items() if (k in param_list and k not in result)})
else:
result = {k: v for k, v in self._params_found.items()}
result.update({k: v for k, v in self._params.items() if k not in result})
if get_path:
result['path'] = self.unpacked_path
if merged:
result.update(self._parser.merge_valued(result))
if raw:
result.update({k: v for k, v in self._params_found_raw.items()})
return result
def set_full_unique_name(self):
self.name = self._parser.get_full_package_name(self)
return self.name
def get_name_and_path(self, name_only=False):
if name_only:
return self.name
return self.name, self.unpacked_path
@property
def all_packages(self):
packages = []
for package in self.packages.values():
if package:
packages.extend(package.all_packages)
packages.extend(self.packages.values())
return packages
def ext(self, check_ext):
if self._pkg:
if not isinstance(check_ext, (list, tuple)):
check_ext = [check_ext]
name = self._adapter.get_package_filename(self._pkg)
if any((fnmatch.fnmatch(name, x) or fnmatch.fnmatch(name, '*%s' % x)) for x in check_ext):
return True
return False
@property
def md5(self):
try:
return ArtifactoryPath.stat(self.pkg).md5
except AttributeError:
return md5sum(self.packed_path)
|
devopshq/crosspm | crosspm/helpers/parser.py | Parser.get_usedby_aql | python | def get_usedby_aql(self, params):
if self._usedby is None:
return None
_result = {}
params = self.merge_valued(params)
for k, v in self._usedby['AQL'].items():
if isinstance(v, str):
k = k.format(**params)
v = v.format(**params)
_result[k] = v
return _result | Возвращает запрос AQL (без репозитория), из файла конфигурации
:param params:
:return: | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/parser.py#L36-L52 | [
"def merge_valued(self, params):\n result = {}\n for k, v in self._config.iter_valued_columns2(params.keys()):\n if not v:\n result[k] = self.merge_with_mask(k, params[k])\n return result\n"
] | class Parser:
def __init__(self, name, data, config):
self._rules_vars = {}
self._rules_vars_extra = {}
self._columns = {}
self._defaults = {}
self._defaults_masked = {}
self._col_types = []
self._name = name
self._sort = data.get('sort', [])
self._index = data.get('index', -1)
# Должно быть вида key: str_value
self._rules = {k: v for k, v in data.items() if k not in ['columns', 'index', 'sort', 'defaults', 'usedby']}
if 'columns' in data:
self._columns = {k: self.parse_value_template(v) for k, v in data['columns'].items() if v != ''}
self._config = config
self.init_rules_vars()
if 'defaults' in data:
self.init_defaults(data['defaults'])
self._usedby = data.get('usedby', None)
def get_vars(self):
_vars = []
for _rule_vars in self._rules_vars.values():
_vars += [item for sublist in _rule_vars for item in sublist if item not in _vars]
# _vars += [x for x in _rule_vars if x not in _vars]
return _vars
def init_defaults(self, defaults):
if isinstance(defaults, dict):
self._defaults = {k: v for k, v in defaults.items()}
self._defaults_masked = {}
for _rules_name, _rules in self._rules.items():
if _rules_name == 'path':
continue
self._defaults_masked[_rules_name] = []
for _rule in _rules:
_rule_tmp = _rule
for _name in [x for x in re.findall('{.*?}', _rule_tmp)]:
_name_replace = _name[1:-1].split('|')[0]
_rule_tmp.replace(_name, _name_replace)
if _name_replace not in self._defaults:
self._defaults[_name_replace] = ''
_tmp = [x.strip() for x in _rule_tmp.split('=')]
_mask = re.sub('{.*?}', '*', _tmp[0])
_val = ''
_key = _tmp[0].format(**self._defaults)
if len(_tmp) > 1:
_val = _tmp[1].format(**self._defaults)
self._defaults_masked[_rules_name].append({'mask': _mask, 'key': _key, 'value': _val})
def init_rules_vars(self):
self._rules_vars = {}
for _name in self._rules:
if not isinstance(self._rules[_name], (list, tuple)):
self._rules[_name] = [self._rules[_name]]
to_del = []
for _rule in self._rules[_name]:
if not _rule:
to_del.append(_rule)
for _rule in to_del:
self._rules[_name].remove(_rule)
for z, _rule in enumerate(self._rules[_name]):
if _name not in self._rules_vars:
self._rules_vars[_name] = []
for i in range(len(self._rules_vars[_name]), z + 1):
self._rules_vars[_name].append([])
if _name not in self._rules_vars_extra:
self._rules_vars_extra[_name] = []
for i in range(len(self._rules_vars_extra[_name]), z + 1):
self._rules_vars_extra[_name].append({})
self._rules_vars[_name][z] = list({x[1:-1].strip(): 0 for x in re.findall('{.*?}', _rule)}.keys())
# self._rules_vars_extra[_name][z] = {}
for i, _item in enumerate(self._rules_vars[_name][z]):
_tmp = [x for x in _item.split('|') if x]
if len(_tmp) > 1:
self._rules[_name][z] = self._rules[_name][z].replace(('{%s}' % _item), ('{%s}' % _tmp[0]))
self._rules_vars[_name][z][i] = _tmp[0]
self._rules_vars_extra[_name][z][_tmp[0]] = _tmp[1:]
# if len(_rules) == 1:
# _rules = _rules[0]
def parse_by_mask(self, column, value, types=False, ext_mask=False):
# см https://habrahabr.ru/post/269759/
if column not in self._columns:
return value # nothing to parse
if isinstance(value, list):
return value[:]
_res = []
# extMask = extMask and value == '*'
orig_value = value
rule_parsed = self._columns[column]
prev_om_sep = False
om_sep = ''
for _part in rule_parsed:
if _part[1]:
for _subpart in _part[0]:
if not _subpart[1]:
om_sep = _subpart[0]
break
# TODO: make parsing smarter
for x, _part in enumerate(rule_parsed):
for y, _subpart in enumerate(_part[0]):
if _subpart[1]:
_pos = -1
_sym = ''
if y < len(_part[0]) - 1:
_sym = _part[0][y + 1][0]
_pos = value.find(_sym)
cur_om_sep = _pos >= 0 and _part[1]
else:
cur_om_sep = False
z = x
while True:
if z < len(rule_parsed) - 1:
z += 1
_sym = rule_parsed[z][0][0][0]
_pos = value.find(_sym)
if _pos >= 0:
cur_om_sep = rule_parsed[z][1]
break
elif not rule_parsed[z][1]:
break
else:
break
if _pos >= 0:
_atom = value[:_pos]
value = value[_pos + len(_sym):]
else:
if ext_mask:
if orig_value == '*':
_atom = '*' if not _part[1] else None
elif _part[1]:
if prev_om_sep:
_atom = value # '2.3.*-*' - Do not include versions without last part
# _atom = '' # '2.3.*-' - Do not include versions with last part
else:
_atom = None # '2.3.*' - Include versions both with or without last part
else:
if om_sep:
_pos = value.find(om_sep)
if _pos >= 0:
_atom = value[:_pos]
if _atom != '*':
value = value[_pos:]
else:
_atom = value
if _atom != '*':
value = ''
else:
_atom = value
if _atom != '*':
value = ''
else:
_atom = value
value = ''
if types:
_res += [(_atom, _subpart[0])]
else:
_res += [_atom]
prev_om_sep = cur_om_sep
return _res
def merge_with_mask(self, column, value):
if column not in self._columns:
if isinstance(value, (list, tuple)):
# TODO: Check for value is not None - if it is, raise "column value not set"
# if None in value:
# value = ['' if x is None else x for x in value]
value = ''.join(value)
return value # nothing to parse
if not isinstance(value, (list, tuple)):
return value # nothing to parse
_res = ''
_res_tmp = ''
rule_parsed = self._columns[column]
_value = value
for _part in rule_parsed:
for _subpart in _part[0]:
if _subpart[1]:
_exist = False
if len(_value) > 0:
if not _part[1]:
_exist = True
elif _value[0] not in ('', None):
_exist = True
if _exist:
_res_atom = str(_value[0])
if _part[1]:
if _res_atom in [None, '*', '']:
_res_tmp = ''
if _res and _res[-1] == '*':
_res_atom = ''
_res += _res_tmp
_res_tmp = ''
_res += _res_atom
_value = _value[1:]
else:
_res_tmp = ''
if _part[1]:
_value = _value[1:]
# break
else:
# TODO: Error handling?
# break
pass
else:
if _part[1]:
_res_tmp += _subpart[0]
else:
_res_tmp = ''
_res += _subpart[0]
return _res + _res_tmp
def validate_by_mask(self, column, value, param):
_res_value = []
if column not in self._columns:
_res = True # nothing to validate
_res_value = value
elif not isinstance(param, (list, tuple)):
_res = False
else:
_res = True
for i, (_tmp, tp) in enumerate(self.parse_by_mask(column, value, True)):
if tp == 'int':
try:
_tmp = int(_tmp)
except Exception:
_tmp = str(_tmp)
if not self.validate_atom(_tmp, param[i]):
_res = False
_res_value = []
break
else:
_res_value.append(_tmp)
return _res, _res_value
@staticmethod
def validate_atom(value, text):
_sign = ''
if text:
if text.startswith(('>=', '<=', '==',)):
_sign = text[:2]
text = text[2:]
elif text.startswith(('>', '<', '=',)):
_sign = text[:1]
text = text[1:]
if _sign == '=':
_sign = '=='
var1 = value
var2 = text if text else '*'
if isinstance(var1, int):
try:
var2 = int(var2)
if not _sign:
_sign = '=='
except Exception:
var1 = str(var1)
if _sign:
_match = eval('var1 {} var2'.format(_sign))
else:
_match = fnmatch.fnmatch(var1, var2)
return _match
def validate_path(self, path, params):
_rule_name = 'path'
def do_check(rule_number, _path):
def iter_with_extras(_col_name, _value0):
_res0 = [_value0]
if _col_name in self._rules_vars_extra[_rule_name][rule_number]:
_res0 += self._rules_vars_extra[_rule_name][rule_number][_col_name]
for _res1 in sorted(_res0, key=lambda x: 0 - len(x)):
yield _res1
def get_symbol_in_mask(_sym1, _val_mask):
_count = 0
_count_ext = 0
if _val_mask and isinstance(_val_mask, (list, tuple)):
for _xx in _val_mask:
for _yy in _xx[0]:
if not _yy[1]:
if _xx[1]:
_count_ext += _yy[0].count(_sym1)
else:
_count += _yy[0].count(_sym1)
return _count, _count_ext
def get_atom(_x, _y, _path0, _val_mask=None):
_pos = -1
if _y < len(_part[0]) - 1:
_sym0 = _part[0][_y + 1][0]
_sym_count, _sym_ext = get_symbol_in_mask(_sym0, _val_mask)
_pos0 = -1
for _xx in range(_sym_count + 1):
_pos0 = _path0.find(_sym0, _pos0 + 1)
if _pos0 < 0:
break
if _pos0 < 0 or _sym_ext == 0:
_pos = _pos0
else:
_pos = [_pos0]
for _xx in range(_sym_ext):
_pos0 = _path0.find(_sym0, _pos0 + 1)
if _pos0 < 0:
_pos += [len(_path0)]
break
else:
_pos += [_pos0]
elif _x < len(rule_parsed) - 1:
if rule_parsed[_x + 1][1]:
_tmp0 = [xx.strip() for xx in rule_parsed[_x + 1][0][0][0].split('|')]
else:
_tmp0 = [rule_parsed[_x + 1][0][0][0]]
for _sym0 in _tmp0:
_pos = _path0.find(_sym0)
if _pos >= 0:
break
if isinstance(_pos, int):
if _pos >= 0:
_atom0 = [{'atom': _path0[:_pos],
'path': _path0[_pos:],
}]
else:
_atom0 = [{'atom': _path0,
'path0': '',
}]
else:
_atom0 = [{'atom': _path0[:_pos[_xx]], 'path': _path0[_pos[_xx]:]} for _xx in range(len(_pos))]
return _atom0
_res = True
_new_path = ''
rule = self._rules[_rule_name][rule_number]
rule_parsed = self.parse_value_template(rule)
_res_params = {}
_res_params_raw = {}
for x, _part in enumerate(rule_parsed):
for y, _subpart in enumerate(_part[0]):
if _subpart[1]:
_value = params[_subpart[0]]
if _subpart[0] in self._columns:
# we have a mask to process
_atoms = get_atom(x, y, _path, self._columns[_subpart[0]])
_match = False
for _value_item in iter_with_extras(_subpart[0], _value):
for _atom_item in _atoms:
_atom = _atom_item['atom']
_valid, _valid_value = self.validate_by_mask(_subpart[0], _atom, _value_item)
if _valid:
_res_params[_subpart[0]] = _valid_value
_res_params_raw[_subpart[0]] = _atom
_new_path += _atom
_path = _atom_item['path']
_match = True
break
if _match:
break
if not _match:
return False, {}, {}
else:
if _value is None:
_match = False
for _value_item in sorted(self._config.get_values(_subpart[0]),
key=lambda x: 0 - len(x)):
_atom = _path[:len(_value_item)]
if fnmatch.fnmatch(_atom, _value_item): # may be just comparing would be better
_res_params[_subpart[0]] = _atom
_new_path += _atom
_path = _path[len(_value_item):]
_match = True
break
if not _match:
return False, {}, {}
else:
# it's a plain value
_plain = not any(x in _value for x in ('>=', '<=', '==', '>', '<', '=', '*'))
_mask = '*' in _value
if _plain or (_subpart[0] not in self._columns):
_match = False
if _mask:
# process masked values (ex. branch = release*)
_atoms = get_atom(x, y, _path)
for _value_item in iter_with_extras(_subpart[0], _value):
for _atom_item in _atoms:
_atom = _atom_item['atom']
if self.validate_atom(_atom, _value_item):
_res_params[_subpart[0]] = _atom
_new_path += _atom
_path = _atom_item['path']
_match = True
break
if _match:
break
else:
for _value_item in iter_with_extras(_subpart[0], _value):
_atom = _path[:len(_value_item)]
if fnmatch.fnmatch(_atom,
_value_item): # may be just comparing would be better
_res_params[_subpart[0]] = _atom
_new_path += _atom
_path = _path[len(_value_item):]
_match = True
break
if not _match:
return False, {}, {}
else:
_atoms = get_atom(x, y, _path, self._columns[_subpart[0]])
_match = False
for _value_item in iter_with_extras(_subpart[0], _value):
for _atom_item in _atoms:
_atom = _atom_item['atom']
if self.validate_atom(_atom, _value_item):
_res_params[_subpart[0]] = _atom
_new_path += _atom
_path = _atom_item['path']
_match = True
break
if _match:
break
if not _match:
return False, {}, {}
else:
# just part of template
_res = False
if _part[1]:
# square brackets means this part can be one of values
_tmp = [xx.strip() for xx in _subpart[0].split('|')]
else:
_tmp = [_subpart[0]]
for _sym in _tmp:
_atom = _path[:len(_sym)]
if fnmatch.fnmatch(_atom, _sym): # may be just comparing would be better
_path = _path[len(_sym):]
_new_path += _atom
_res = True
break
else:
# HACK for * in path when more than one folder use
# e.g.:
# _sym = /pool/*/
# _path = /pool/detects/e/filename.deb
try:
if '*' in _sym:
re_str = fnmatch.translate(_sym)
# \/pool\/.*\/\Z(?ms) => \/pool\/.*\/
if re_str.endswith('\\Z(?ms)'):
re_str = re_str[:-7]
found_str = re.match(re_str, _path).group()
_path = _path[len(found_str):]
_new_path += found_str
_res = True
break
except Exception as e:
logging.error("Something wrong when parse '{}' in '{}'".format(_sym, _path))
logging.exception(e)
if not _res:
return False, {}, {}
return _res, _res_params, _res_params_raw
_result = False
_result_params = {}
_result_params_raw = {}
# rule = self._rules[_rule_name]
# for _rule in self._rules[_rule_name]:
for i in range(len(self._rules[_rule_name])):
_ok, _params, _params_raw = do_check(i, str(path))
if _ok:
_result = True
_result_params.update({k: v for k, v in _params.items() if k not in _result_params})
_result_params_raw.update({k: v for k, v in _params_raw.items() if k not in _result_params_raw})
break
return _result, _result_params, _result_params_raw
    def validate(self, value, rule_name, params, return_params=False):
        """Check *value* against every filled variant of rule *rule_name*.

        *value* may be a str (membership in the expanded variants), a
        list/tuple (any element matches a variant), or a dict (glob match of
        keys/values, e.g. artifact properties). Any other type raises
        CrosspmException. Returns a bool, or (bool, matched_params) when
        *return_params* is True.
        """
        # If no validation rules are configured - accept the package as-is.
        if rule_name not in self._rules:
            return (True, {}) if return_params else True
        if len(self._rules[rule_name]) == 0:
            return (True, {}) if return_params else True
        # NOTE(review): unreachable for None - len(None) above would already
        # raise TypeError; confirm whether this check should come first.
        if self._rules[rule_name] is None:
            return (True, {}) if return_params else True
        _valid = True
        _result_params = {}
        # All possible rule variants (for 'properties': bad, snapshot, etc.);
        # a list of lists - outer per rule index, inner per combination.
        _all_dirties = self.fill_rule(rule_name, params, return_params=True, return_defaults=True)
        for _dirties in _all_dirties:
            # _dirties - one group of concrete candidate rules; the group
            # passes when ANY candidate matches, and all groups must pass.
            _res_sub = False
            _res_sub_params = {}
            for _dirt in _dirties:
                # _dirt - a single candidate rule variant.
                _res_var = False
                # TODO: Use split_with_regexp() instead
                _dirty = [x.split(']') for x in _dirt['var'].split('[')]
                _dirty = self.list_flatter(_dirty)
                _variants = self.get_variants(_dirty, [])
                if isinstance(value, str):
                    _res_var = value in _variants
                elif isinstance(value, (list, tuple)):
                    _res_var = False
                    for _variant in _variants:
                        if _variant in value:
                            _res_var = True
                            break
                elif isinstance(value, dict):
                    _key = ''
                    # Inject the configured default key/value when no existing
                    # key matches the default's mask (mutates *value* in place).
                    if 'mask' in _dirt.get('default', {}):
                        _mask = _dirt['default'].get('mask', '')
                        if len(fnmatch.filter(value.keys(), _mask)) == 0:
                            _key = _dirt['default'].get('key', '')
                            value[_key] = [_dirt['default'].get('value', '')]
                    for _variant in _variants:
                        # 'key = val' variants: glob-match key, then value.
                        _tmp = [x.strip() for x in _variant.split('=')]
                        _tmp = [x if len(x) > 0 else '*' for x in _tmp]
                        _key_list = fnmatch.filter(value.keys(), _tmp[0])
                        if len(_key_list) == 0 and '*' in _key:
                            _key_list = [_key]
                        for _key in _key_list:
                            if len(_tmp) > 1:
                                _tmp_val = value[_key]
                                if isinstance(_tmp_val, str):
                                    _tmp_val = [_tmp_val]
                                elif not isinstance(_tmp_val, (list, tuple, dict)):
                                    raise CrosspmException(
                                        CROSSPM_ERRORCODE_CONFIG_FORMAT_ERROR,
                                        'Parser rule for [{}] not able to process [{}] data type.'.format(
                                            rule_name, type(_tmp_val))
                                    )
                                if len(fnmatch.filter(_tmp_val, _tmp[1])) > 0:
                                    _res_var = True
                                    break
                            else:
                                # Key-only variant: key presence is enough.
                                _res_var = True
                                break
                else:
                    raise CrosspmException(
                        CROSSPM_ERRORCODE_CONFIG_FORMAT_ERROR,
                        'Parser rule for [{}] not able to process [{}] data type.'.format(rule_name, type(value))
                    )
                _res_sub = _res_sub or _res_var
                if _res_sub:
                    _res_sub_params = _dirt['params']
                    break
            _valid = _valid and _res_sub
            if _valid:
                _result_params.update(_res_sub_params)
        return (_valid, _result_params) if return_params else _valid
def iter_matched_values(self, column_name, value):
_values = self._config.get_values(column_name)
for _value in _values:
if (value is None) or (self.values_match(_value, value, _values)):
if isinstance(_values, dict):
_value = _values[_value]
yield _value
@staticmethod
def values_match(_value, value, _values=None):
if value is None:
return _value is None
_sign = ''
if value.startswith(('>=', '<=', '==',)):
_sign = value[:2]
value = value[2:]
elif value.startswith(('>', '<', '=',)):
_sign = value[:1]
value = value[1:]
if _sign == '=':
_sign = '=='
var1, var2 = _value, value
if isinstance(_values, dict):
var2 = 0 if isinstance(var1, int) else ''
for k, v in _values.items():
if value == v:
var2 = k
break
if int in [type(var1), type(var2)]:
try:
var1a = int(var1)
var2a = int(var2)
if not _sign:
_sign = '=='
except Exception:
var1a = str(var1)
var2a = str(var2)
var1, var2 = var1a, var2a
if _sign:
_match = eval('var1 {} var2'.format(_sign))
else:
_match = fnmatch.fnmatch(var1, var2)
return _match
    def fill_rule(self, rule_name, params, return_params=False, return_defaults=False):
        """Substitute *params* into every template of rule *rule_name*.

        "Valued" columns are expanded to every matching configured value
        (cartesian product via fill_rule_inner); plain columns are merged
        through their masks. Returns a list (one entry per rule index) of
        lists of either formatted strings or, when *return_params* /
        *return_defaults* is set, dicts with 'var'/'params'/'default'.
        """
        # Recursively build the cartesian product of the valued columns'
        # candidate values; each combination becomes one params dict.
        def fill_rule_inner(_cols, _params_inner, _pars=None):
            if _pars is None:
                _pars = {}
            for _cl in _cols:
                for _val in _cl[1]:
                    _pars[_cl[0]] = _val
                    if len(_cols) > 1:
                        _params_inner = fill_rule_inner(_cols[1:], _params_inner, _pars)
                    else:
                        _params_inner.append({k: v for k, v in _pars.items()})
                # Only the first column is iterated here; the rest are
                # handled by the recursive call above.
                break
            return _params_inner
        _res = []
        for z in range(len(self._rules_vars[rule_name])):
            _res_part = []
            _params = {k: v for k, v in params.items()}
            _columns = []
            for _col, _valued in self._config.iter_valued_columns2(self._rules_vars[rule_name][z]):
                if _valued:
                    _columns += [[_col, [x for x in self.iter_matched_values(_col, params[_col])]]]
                else:
                    if not isinstance(params[_col], (list, tuple)):
                        _tmp = [params[_col]]
                    else:
                        _tmp = [x for x in params[_col]]
                    # Comparison expressions cannot appear in a template -
                    # replace them with a wildcard before merging.
                    for i, _tmp_item in enumerate(_tmp):
                        if _tmp_item and _tmp_item.startswith(('>=', '<=', '==', '>', '<', '=',)):
                            _tmp[i] = '*'
                    _params[_col] = self.merge_with_mask(_col, _tmp)
                    # Append extra variants (e.g. 'any') as '[v|extra...]'.
                    if _col in self._rules_vars_extra[rule_name][z]:
                        if len(self._rules_vars_extra[rule_name][z][_col]) > 0:
                            _params[_col] = '[%s%s]' % (
                                _params[_col], ''.join('|{}'.format(x) for x in self._rules_vars_extra[rule_name][z][_col]))
            for _par in fill_rule_inner(_columns, []):
                _params.update(_par)
                _var = self._rules[rule_name][z].format(**_params)
                if return_params or return_defaults:
                    _tmp_res_part = {'var': _var}
                    if return_params:
                        _tmp_res_part['params'] = {k: v for k, v in _par.items()}
                    if return_defaults and rule_name in self._defaults_masked:
                        _tmp_res_part['default'] = self._defaults_masked[rule_name][z]
                    else:
                        _tmp_res_part['default'] = {}
                    _res_part += [_tmp_res_part]
                else:
                    _res_part += [_var]
            # No valued columns at all - emit the single merged template.
            if len(_res_part) == 0:
                _var = self._rules[rule_name][z].format(**_params)
                if return_params or return_defaults:
                    _tmp_res_part = {'var': _var}
                    if return_params:
                        _tmp_res_part['params'] = {}
                    if return_defaults and rule_name in self._defaults_masked:
                        _tmp_res_part['default'] = self._defaults_masked[rule_name][z]
                    else:
                        _tmp_res_part['default'] = {}
                    _res_part += [_tmp_res_part]
                else:
                    _res_part += [_var]
            _res += [_res_part]
        return _res
def merge_valued(self, params):
result = {}
for k, v in self._config.iter_valued_columns2(params.keys()):
if not v:
result[k] = self.merge_with_mask(k, params[k])
return result
def get_params_with_extra(self, rule_name, params):
"""
Get params with extra, like 'any'
:param rule_name: 'path'
:param params: default params
:return: list of combination params
"""
# HACK for prefer-local
result = []
extra_params = self._rules_vars_extra.get(rule_name, {})[0]
_tmp_params = copy.deepcopy(params)
_fixed_params = {}
# Save params with list type - this type not changed
for key, value in _tmp_params.items():
if isinstance(value, list):
_fixed_params[key] = value
_tmp_params = {k: v for k, v in _tmp_params.items() if k not in _fixed_params}
# extend with extra_vars - like 'any'
for key, value in _tmp_params.items():
if not isinstance(value, list) and key:
_tmp_params[key] = list([value])
if key in extra_params:
_tmp_params[key].extend(extra_params[key])
# get combinations
keys = sorted(_tmp_params)
combinations = itertools.product(*(_tmp_params[x] for x in keys))
for comb in combinations:
_dict = dict(zip(keys, comb))
_dict.update(_fixed_params)
result.append(_dict)
return result
    def get_paths(self, list_or_file_path, source):
        """Build every candidate artifact path for the given dependencies.

        For each package params dict, resolves the repo list (explicit repo,
        or all of source.args['repo'] for None/'*'), fills the 'path' rule
        and expands '[...]' alternatives. Returns a list of
        {'params': ..., 'paths': [{'paths': [...], 'repo': ...}, ...]}
        dicts, or None when no 'path' rule is configured.
        """
        if 'path' not in self._rules:
            return None
        _paths = []
        for _params in self.iter_packages_params(list_or_file_path):
            if _params['repo'] is None or _params['repo'] == '*':
                repo_list = source.args['repo']
            elif _params['repo'] not in source.args['repo']:
                # Package pinned to a repo this source does not serve.
                continue
            else:
                repo_list = [_params['repo']]
            _params['server'] = source.args['server']
            _sub_paths = {
                'params': {k: v for k, v in _params.items() if k != 'repo'},
                'paths': [],
            }
            for _repo in repo_list:
                _params['repo'] = _repo
                # _dirty = self._rules['path'].format(**_params)
                _all_dirties = self.fill_rule('path', _params)
                # _params.pop('server')
                # _params.pop('repo')
                for _dirties in _all_dirties:
                    for _dirty in _dirties:
                        # TODO: Use split_with_regexp() instead
                        _dirty = [x.split(']') for x in _dirty.split('[')]
                        _dirty = self.list_flatter(_dirty)
                        _sub_paths['paths'] += [{'paths': self.get_variants(_dirty, []),
                                                 'repo': _repo,
                                                 }]
            _paths += [_sub_paths]
        return _paths
def get_variants(self, dirty, paths):
if len(dirty) == 1:
if dirty[0] not in paths:
paths.append(dirty[0])
else:
for i, stub in enumerate(dirty):
if i % 2 != 0:
for _variant in stub.split("|"):
_res = ''.join(dirty[:i]) + _variant
_res += dirty[i + 1] if len(dirty) > i else ''
_res = [_res]
if len(dirty) > i + 1:
_res += dirty[i + 2:]
# print(_res)
paths = self.get_variants(_res, paths)
break
return paths
    def iter_packages_params(self, list_or_file_path, deps_content=None):
        """Yield one params dict per dependency entry.

        Accepts raw dependency text (DependenciesContent), a path to a
        dependencies file, a {'raw': [...]} dict of pre-split items, or any
        iterable of ready params dicts.

        :raises CrosspmException: when a file path does not exist
        """
        if deps_content is not None:
            # HACK for download with --dependencies-content and existed file dependencies.txt.lock
            list_or_file_path = deps_content
        if list_or_file_path.__class__ is DependenciesContent:
            # Allows passing the file content directly instead of a file path.
            for i, line in enumerate(list_or_file_path.splitlines()):
                yield self.get_package_params(i, line)
        elif isinstance(list_or_file_path, str):
            if not os.path.exists(list_or_file_path):
                raise CrosspmException(
                    CROSSPM_ERRORCODE_FILE_DEPS_NOT_FOUND,
                    'File not found: [{}]'.format(list_or_file_path),
                )
            with open(list_or_file_path, 'r') as f:
                for i, line in enumerate(f):
                    line = line.strip()
                    # if i == 0 and line.startswith(''.join(map(chr,(1087,187,1111)))):
                    # presumably a UTF-8 BOM mis-decoded as cp1251 ('п»ї') -
                    # stripped from the first line; TODO confirm
                    if i == 0 and line.startswith(chr(1087) + chr(187) + chr(1111)):  # TODO: why?
                        line = line[3:]
                    # Skip blanks, comments and section headers.
                    if not line or line.startswith(('#', '[',)):
                        continue
                    yield self.get_package_params(i, line)
        elif (isinstance(list_or_file_path, dict)) and ('raw' in list_or_file_path):
            for _item in list_or_file_path['raw']:
                _tmp_item = {k: self.parse_by_mask(k, v, False, True) for k, v in _item.items()}
                yield _tmp_item
        else:
            # Already a sequence of params dicts - pass through.
            for _item in list_or_file_path:
                yield _item
def get_package_params(self, line_no, line):
_vars = {}
for i, v in enumerate(line.split()):
v = v.strip()
if v == '-':
v = None # get default value on next line
k, v = self._config.check_column_value(i, v, True)
if k:
_vars[k] = self.parse_by_mask(k, v, False, True)
if len(_vars) == 0:
raise CrosspmException(
CROSSPM_ERRORCODE_WRONG_SYNTAX,
'Nothing parsed at line {}: [{}]'.format(line_no, line.strip())
)
update_items = self._config.complete_params(_vars, False)
update_vars = {k: self.parse_by_mask(k, v, False, True) for k, v in update_items.items()}
# Expend default params to passed params
try:
update_vars = {k: v.format(**_vars) if isinstance(v, str) else v for k, v in update_vars.items()}
except Exception as e:
pass
self._config._log.info(
"We catch exception when try update defaults Params, don't use this functional. Message:\n {}".format(
repr(e)))
_vars.update(update_vars)
return _vars
def list_flatter(self, _src):
_res = []
for x in _src:
_res += self.list_flatter(x) if isinstance(x, (list, tuple)) else [x]
return _res
@staticmethod
def split_with_regexp(regexp, text):
prev_pos = 0
_res = []
for x in ([x.group()[1:-1].strip(), x.span()] for x in re.finditer(regexp, text)):
if x[1][0] > prev_pos:
_res += [[text[prev_pos:x[1][0]], False]]
_res += [[x[0], True]]
prev_pos = x[1][1]
if prev_pos < len(text):
_res += [[text[prev_pos:], False]]
return _res
def parse_value_template(self, value):
# _regexp = ''
must_not = self.split_with_regexp(r'\[.*?\]', value)
for i, x in enumerate(must_not):
must_not[i] = [self.split_with_regexp('{.*?}', x[0]), x[1]]
# _atom = '(?P<_1_int>[\\w*><=]+)'
return must_not
@staticmethod
def split_fixed_pattern(path):
"""
Split path into fixed and masked parts
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc110/x86/win/boost.*.*.*.tar.gz
"""
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos) + 1
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos:]
return _path_fixed, _path_pattern
@staticmethod
def split_fixed_pattern_with_file_name(path):
"""
Split path into fixed, masked parts and filename
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc100/x86/win
_file_name_pattern: boost.*.*.*.tar.gz
"""
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos)
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos + 1:]
_file_name_pattern_separator_pos = _path_pattern.rfind('/', 0)
_file_name_pattern = _path_pattern[_file_name_pattern_separator_pos + 1:]
if _path_pattern.find('*') == -1 or _file_name_pattern_separator_pos == -1:
_path_pattern = ""
else:
_path_pattern = _path_pattern[:_file_name_pattern_separator_pos]
return _path_fixed, _path_pattern, _file_name_pattern
    def filter_one(self, packages, params, params_found):
        """Select one package path: filter by parsed column atoms, sort by
        self._sort, then pick the self._index-th entry ([] when out of range).

        :param packages: iterable of candidate paths
        :param params: requested params (rules per column)
        :param params_found: per-path dict of atoms parsed from the path
        """
        # Merge per-path parsed atoms with the requested params (path wins).
        def merge_params(path):
            _res_params = {k: v for k, v in params_found.get(path, {}).items()}
            _res_params.update({k: v for k, v in params.items() if k not in _res_params})
            return _res_params
        # Keep a candidate only if every optional template atom agrees with
        # the corresponding rule atom (see the inline '2.3.*' examples).
        def filter_fn(item):
            _result = True
            _atoms_found = item['params']
            for _atom_name in item['columns']:
                if _atom_name in _atoms_found:
                    _rules = params[_atom_name]
                    if not isinstance(_rules, (list, tuple)):
                        _rules = [_rules]
                    _vars = _atoms_found[_atom_name]
                    if not isinstance(_vars, (list, tuple)):
                        _vars = [_vars]
                    # i walks placeholder sub-parts in template order, in
                    # step with the parsed atom / rule lists.
                    i = -1
                    for _column in item['columns'][_atom_name]:
                        for _sub_col in _column[0]:
                            if _sub_col[1]:
                                i += 1
                                if _column[1]:
                                    _var = _vars[i] if len(_vars) > i else ''
                                    _rule = _rules[i] if len(_rules) > i else ''
                                    _is_var = (_var is not None) and (len(str(_var)) > 0)
                                    if _rule is None:  # '2.3.*' - Include versions both with or without last part
                                        pass
                                    elif _rule == '' and _is_var and len(
                                            str(_var)) > 0:  # '2.3.*-' - Do not include versions with last part
                                        _result = False
                                        break
                                    elif _rule and not _is_var:  # '2.3.*-*' - Do not include versions without last part
                                        _result = False
                                        break
                        if not _result:
                            break
                    if not _result:
                        break
            return _result
        # Sort key: atoms of the self._sort columns, with int/str coercion
        # per the column template ('*' pulls in all unlisted columns).
        def sorted_fn(item):
            _result = []
            _atoms_found = item['params']
            for _atom_name in self._sort:
                if _atom_name == '*':
                    _result += [_atoms_found[x] for x in _atoms_found if x not in self._sort]
                else:
                    _atom_item = _atoms_found.get(_atom_name, [])
                    if isinstance(_atom_item, (list, tuple)):
                        if _atom_name in self._columns:
                            i = -1
                            for _column in item['columns'][_atom_name]:
                                for _sub_col in _column[0]:
                                    if _sub_col[1]:
                                        i += 1
                                        # Coerce each atom to the declared
                                        # type so sorting compares sanely.
                                        if _sub_col[0] == 'int':
                                            try:
                                                _atom_item[i] = int(_atom_item[i])
                                            except ValueError:
                                                _atom_item[i] = 0
                                        elif _sub_col[0] == 'str':
                                            try:
                                                _atom_item[i] = str(_atom_item[i])
                                            except ValueError:
                                                _atom_item[i] = ''
                        _result += [_atoms_found.get(_atom_name, [])]
            _result = [item for sublist in _result for item in sublist]
            return _result
        ext_packages = [{'params': merge_params(x), 'columns': self._columns, 'path': x} for x in packages]
        # Filter by columns with parsing template (i.e. version)
        filtered_packages = list(filter(
            filter_fn,
            ext_packages,
        ))
        sorted_packages = sorted(
            filtered_packages,
            key=sorted_fn,
        )
        try:
            result = sorted_packages[self._index]
        except Exception:
            result = []
        return result
def get_full_package_name(self, package):
param_list = [x for x in self._config.get_fails('unique', {})]
if self._config.name_column not in param_list:
param_list.insert(0, self._config.name_column)
params = package.get_params(param_list)
pkg_name = '/'.join(self.merge_with_mask(x, params[x]) for x in param_list)
return pkg_name
def has_rule(self, rule_name):
res = False
if self._rules.get(rule_name, False):
res = True
return res
def get_params_from_properties(self, properties):
# Парсит свойства артефакта и выдаёт параметры
result = {y: properties.get(x, '') for x, y in self._usedby.get('property-parser', {}).items()}
return result
def get_params_from_path(self, path):
pattern = self._usedby.get('path-parser', None)
if pattern is None:
return {}
match = re.match(pattern, path)
if match is None:
return {}
return match.groupdict()
|
devopshq/crosspm | crosspm/helpers/parser.py | Parser.get_params_with_extra | python | def get_params_with_extra(self, rule_name, params):
# HACK for prefer-local
result = []
extra_params = self._rules_vars_extra.get(rule_name, {})[0]
_tmp_params = copy.deepcopy(params)
_fixed_params = {}
# Save params with list type - this type not changed
for key, value in _tmp_params.items():
if isinstance(value, list):
_fixed_params[key] = value
_tmp_params = {k: v for k, v in _tmp_params.items() if k not in _fixed_params}
# extend with extra_vars - like 'any'
for key, value in _tmp_params.items():
if not isinstance(value, list) and key:
_tmp_params[key] = list([value])
if key in extra_params:
_tmp_params[key].extend(extra_params[key])
# get combinations
keys = sorted(_tmp_params)
combinations = itertools.product(*(_tmp_params[x] for x in keys))
for comb in combinations:
_dict = dict(zip(keys, comb))
_dict.update(_fixed_params)
result.append(_dict)
return result | Get params with extra, like 'any'
:param rule_name: 'path'
:param params: default params
:return: list of combination params | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/parser.py#L726-L760 | null | class Parser:
def __init__(self, name, data, config):
self._rules_vars = {}
self._rules_vars_extra = {}
self._columns = {}
self._defaults = {}
self._defaults_masked = {}
self._col_types = []
self._name = name
self._sort = data.get('sort', [])
self._index = data.get('index', -1)
# Должно быть вида key: str_value
self._rules = {k: v for k, v in data.items() if k not in ['columns', 'index', 'sort', 'defaults', 'usedby']}
if 'columns' in data:
self._columns = {k: self.parse_value_template(v) for k, v in data['columns'].items() if v != ''}
self._config = config
self.init_rules_vars()
if 'defaults' in data:
self.init_defaults(data['defaults'])
self._usedby = data.get('usedby', None)
def get_usedby_aql(self, params):
"""
Возвращает запрос AQL (без репозитория), из файла конфигурации
:param params:
:return:
"""
if self._usedby is None:
return None
_result = {}
params = self.merge_valued(params)
for k, v in self._usedby['AQL'].items():
if isinstance(v, str):
k = k.format(**params)
v = v.format(**params)
_result[k] = v
return _result
def get_vars(self):
_vars = []
for _rule_vars in self._rules_vars.values():
_vars += [item for sublist in _rule_vars for item in sublist if item not in _vars]
# _vars += [x for x in _rule_vars if x not in _vars]
return _vars
def init_defaults(self, defaults):
if isinstance(defaults, dict):
self._defaults = {k: v for k, v in defaults.items()}
self._defaults_masked = {}
for _rules_name, _rules in self._rules.items():
if _rules_name == 'path':
continue
self._defaults_masked[_rules_name] = []
for _rule in _rules:
_rule_tmp = _rule
for _name in [x for x in re.findall('{.*?}', _rule_tmp)]:
_name_replace = _name[1:-1].split('|')[0]
_rule_tmp.replace(_name, _name_replace)
if _name_replace not in self._defaults:
self._defaults[_name_replace] = ''
_tmp = [x.strip() for x in _rule_tmp.split('=')]
_mask = re.sub('{.*?}', '*', _tmp[0])
_val = ''
_key = _tmp[0].format(**self._defaults)
if len(_tmp) > 1:
_val = _tmp[1].format(**self._defaults)
self._defaults_masked[_rules_name].append({'mask': _mask, 'key': _key, 'value': _val})
    def init_rules_vars(self):
        """Normalize self._rules and extract their variables.

        Every rule value is coerced to a list with empty entries dropped.
        For each rule index z, self._rules_vars[name][z] gets the list of
        '{...}' variable names, and '{a|b|c}' placeholders are rewritten in
        place to '{a}' with the extras recorded in
        self._rules_vars_extra[name][z]['a'] = ['b', 'c'].
        """
        self._rules_vars = {}
        for _name in self._rules:
            if not isinstance(self._rules[_name], (list, tuple)):
                self._rules[_name] = [self._rules[_name]]
            # Drop falsy (empty) rule entries in place.
            to_del = []
            for _rule in self._rules[_name]:
                if not _rule:
                    to_del.append(_rule)
            for _rule in to_del:
                self._rules[_name].remove(_rule)
            for z, _rule in enumerate(self._rules[_name]):
                if _name not in self._rules_vars:
                    self._rules_vars[_name] = []
                for i in range(len(self._rules_vars[_name]), z + 1):
                    self._rules_vars[_name].append([])
                if _name not in self._rules_vars_extra:
                    self._rules_vars_extra[_name] = []
                for i in range(len(self._rules_vars_extra[_name]), z + 1):
                    self._rules_vars_extra[_name].append({})
                # Dict-comprehension keys give unique names in first-seen order.
                self._rules_vars[_name][z] = list({x[1:-1].strip(): 0 for x in re.findall('{.*?}', _rule)}.keys())
                # self._rules_vars_extra[_name][z] = {}
                for i, _item in enumerate(self._rules_vars[_name][z]):
                    _tmp = [x for x in _item.split('|') if x]
                    if len(_tmp) > 1:
                        # Rewrite '{a|b}' -> '{a}' and remember the extras.
                        self._rules[_name][z] = self._rules[_name][z].replace(('{%s}' % _item), ('{%s}' % _tmp[0]))
                        self._rules_vars[_name][z][i] = _tmp[0]
                        self._rules_vars_extra[_name][z][_tmp[0]] = _tmp[1:]
        # if len(_rules) == 1:
        #     _rules = _rules[0]
    def parse_by_mask(self, column, value, types=False, ext_mask=False):
        """Split *value* into atoms according to the *column* template.

        Returns *value* unchanged when the column has no template, a copy
        when it is already a list. Otherwise returns the list of atoms (or
        (atom, type) tuples when *types* is True). With *ext_mask*, atoms
        of optional template parts may be None/'' to express the
        '2.3.*' / '2.3.*-' / '2.3.*-*' trailing-part semantics.
        """
        # см https://habrahabr.ru/post/269759/
        if column not in self._columns:
            return value  # nothing to parse
        if isinstance(value, list):
            return value[:]
        _res = []
        # extMask = extMask and value == '*'
        orig_value = value
        rule_parsed = self._columns[column]
        prev_om_sep = False
        # om_sep: the literal separator of the (last) optional '[...]' part.
        om_sep = ''
        for _part in rule_parsed:
            if _part[1]:
                for _subpart in _part[0]:
                    if not _subpart[1]:
                        om_sep = _subpart[0]
                        break
        # TODO: make parsing smarter
        for x, _part in enumerate(rule_parsed):
            for y, _subpart in enumerate(_part[0]):
                if _subpart[1]:
                    _pos = -1
                    _sym = ''
                    if y < len(_part[0]) - 1:
                        # Next literal within the same part bounds this atom.
                        _sym = _part[0][y + 1][0]
                        _pos = value.find(_sym)
                        cur_om_sep = _pos >= 0 and _part[1]
                    else:
                        # Look ahead into following parts for the next literal.
                        cur_om_sep = False
                        z = x
                        while True:
                            if z < len(rule_parsed) - 1:
                                z += 1
                                _sym = rule_parsed[z][0][0][0]
                                _pos = value.find(_sym)
                                if _pos >= 0:
                                    cur_om_sep = rule_parsed[z][1]
                                    break
                                elif not rule_parsed[z][1]:
                                    break
                            else:
                                break
                    if _pos >= 0:
                        _atom = value[:_pos]
                        value = value[_pos + len(_sym):]
                    else:
                        # No separator found - this is the trailing atom.
                        if ext_mask:
                            if orig_value == '*':
                                _atom = '*' if not _part[1] else None
                            elif _part[1]:
                                if prev_om_sep:
                                    _atom = value  # '2.3.*-*' - Do not include versions without last part
                                    # _atom = '' # '2.3.*-' - Do not include versions with last part
                                else:
                                    _atom = None  # '2.3.*' - Include versions both with or without last part
                            else:
                                if om_sep:
                                    _pos = value.find(om_sep)
                                    if _pos >= 0:
                                        _atom = value[:_pos]
                                        if _atom != '*':
                                            value = value[_pos:]
                                    else:
                                        _atom = value
                                        if _atom != '*':
                                            value = ''
                                else:
                                    _atom = value
                                    if _atom != '*':
                                        value = ''
                        else:
                            _atom = value
                            value = ''
                    if types:
                        _res += [(_atom, _subpart[0])]
                    else:
                        _res += [_atom]
                    prev_om_sep = cur_om_sep
        return _res
    def merge_with_mask(self, column, value):
        """Inverse of parse_by_mask: join atom list *value* back into a string.

        Without a template for *column*, a list is simply concatenated and
        any other value returned unchanged. With a template, atoms are
        interleaved with the template's literal separators; separators of
        optional parts (_res_tmp) are emitted only when a following atom is
        actually present and not a wildcard.
        """
        if column not in self._columns:
            if isinstance(value, (list, tuple)):
                # TODO: Check for value is not None - if it is, raise "column value not set"
                # if None in value:
                #     value = ['' if x is None else x for x in value]
                value = ''.join(value)
            return value  # nothing to parse
        if not isinstance(value, (list, tuple)):
            return value  # nothing to parse
        _res = ''
        _res_tmp = ''  # pending separator of an optional part
        rule_parsed = self._columns[column]
        _value = value
        for _part in rule_parsed:
            for _subpart in _part[0]:
                if _subpart[1]:
                    # Placeholder: consume the next atom if it "exists"
                    # (optional parts skip ''/None atoms).
                    _exist = False
                    if len(_value) > 0:
                        if not _part[1]:
                            _exist = True
                        elif _value[0] not in ('', None):
                            _exist = True
                    if _exist:
                        _res_atom = str(_value[0])
                        if _part[1]:
                            if _res_atom in [None, '*', '']:
                                # Wildcard in an optional part: drop its
                                # separator and collapse repeated '*'.
                                _res_tmp = ''
                                if _res and _res[-1] == '*':
                                    _res_atom = ''
                        _res += _res_tmp
                        _res_tmp = ''
                        _res += _res_atom
                        _value = _value[1:]
                    else:
                        _res_tmp = ''
                        if _part[1]:
                            _value = _value[1:]
                            # break
                        else:
                            # TODO: Error handling?
                            # break
                            pass
                else:
                    # Literal: optional-part separators are buffered; plain
                    # separators are emitted immediately.
                    if _part[1]:
                        _res_tmp += _subpart[0]
                    else:
                        _res_tmp = ''
                        _res += _subpart[0]
        return _res + _res_tmp
def validate_by_mask(self, column, value, param):
_res_value = []
if column not in self._columns:
_res = True # nothing to validate
_res_value = value
elif not isinstance(param, (list, tuple)):
_res = False
else:
_res = True
for i, (_tmp, tp) in enumerate(self.parse_by_mask(column, value, True)):
if tp == 'int':
try:
_tmp = int(_tmp)
except Exception:
_tmp = str(_tmp)
if not self.validate_atom(_tmp, param[i]):
_res = False
_res_value = []
break
else:
_res_value.append(_tmp)
return _res, _res_value
@staticmethod
def validate_atom(value, text):
_sign = ''
if text:
if text.startswith(('>=', '<=', '==',)):
_sign = text[:2]
text = text[2:]
elif text.startswith(('>', '<', '=',)):
_sign = text[:1]
text = text[1:]
if _sign == '=':
_sign = '=='
var1 = value
var2 = text if text else '*'
if isinstance(var1, int):
try:
var2 = int(var2)
if not _sign:
_sign = '=='
except Exception:
var1 = str(var1)
if _sign:
_match = eval('var1 {} var2'.format(_sign))
else:
_match = fnmatch.fnmatch(var1, var2)
return _match
    def validate_path(self, path, params):
        """Match *path* against the 'path' rule templates.

        Tries each configured 'path' rule in order, consuming the path left
        to right: template literals must match (globs allowed), '{column}'
        placeholders are validated against *params* (with 'extra' variants
        such as 'any' also tried). Returns
        (matched, parsed_params, raw_parsed_params).
        """
        _rule_name = 'path'
        def do_check(rule_number, _path):
            # Yield the requested value plus its configured extra variants,
            # longest first so the most specific candidate is tried first.
            def iter_with_extras(_col_name, _value0):
                _res0 = [_value0]
                if _col_name in self._rules_vars_extra[_rule_name][rule_number]:
                    _res0 += self._rules_vars_extra[_rule_name][rule_number][_col_name]
                for _res1 in sorted(_res0, key=lambda x: 0 - len(x)):
                    yield _res1
            # Count occurrences of separator _sym1 inside the column's own
            # template literals (plain vs optional parts) so get_atom knows
            # how many separators belong to the atom itself.
            def get_symbol_in_mask(_sym1, _val_mask):
                _count = 0
                _count_ext = 0
                if _val_mask and isinstance(_val_mask, (list, tuple)):
                    for _xx in _val_mask:
                        for _yy in _xx[0]:
                            if not _yy[1]:
                                if _xx[1]:
                                    _count_ext += _yy[0].count(_sym1)
                                else:
                                    _count += _yy[0].count(_sym1)
                return _count, _count_ext
            # Produce candidate (atom, remaining-path) splits for the
            # placeholder at template position (_x, _y).
            def get_atom(_x, _y, _path0, _val_mask=None):
                _pos = -1
                if _y < len(_part[0]) - 1:
                    _sym0 = _part[0][_y + 1][0]
                    _sym_count, _sym_ext = get_symbol_in_mask(_sym0, _val_mask)
                    _pos0 = -1
                    for _xx in range(_sym_count + 1):
                        _pos0 = _path0.find(_sym0, _pos0 + 1)
                        if _pos0 < 0:
                            break
                    if _pos0 < 0 or _sym_ext == 0:
                        _pos = _pos0
                    else:
                        # Optional separators yield several candidate cuts.
                        _pos = [_pos0]
                        for _xx in range(_sym_ext):
                            _pos0 = _path0.find(_sym0, _pos0 + 1)
                            if _pos0 < 0:
                                _pos += [len(_path0)]
                                break
                            else:
                                _pos += [_pos0]
                elif _x < len(rule_parsed) - 1:
                    if rule_parsed[_x + 1][1]:
                        _tmp0 = [xx.strip() for xx in rule_parsed[_x + 1][0][0][0].split('|')]
                    else:
                        _tmp0 = [rule_parsed[_x + 1][0][0][0]]
                    for _sym0 in _tmp0:
                        _pos = _path0.find(_sym0)
                        if _pos >= 0:
                            break
                if isinstance(_pos, int):
                    if _pos >= 0:
                        _atom0 = [{'atom': _path0[:_pos],
                                   'path': _path0[_pos:],
                                   }]
                    else:
                        # NOTE(review): key 'path0' looks like a typo for
                        # 'path' - callers read _atom_item['path']; confirm.
                        _atom0 = [{'atom': _path0,
                                   'path0': '',
                                   }]
                else:
                    _atom0 = [{'atom': _path0[:_pos[_xx]], 'path': _path0[_pos[_xx]:]} for _xx in range(len(_pos))]
                return _atom0
            _res = True
            _new_path = ''
            rule = self._rules[_rule_name][rule_number]
            rule_parsed = self.parse_value_template(rule)
            _res_params = {}
            _res_params_raw = {}
            for x, _part in enumerate(rule_parsed):
                for y, _subpart in enumerate(_part[0]):
                    if _subpart[1]:
                        _value = params[_subpart[0]]
                        if _subpart[0] in self._columns:
                            # we have a mask to process
                            _atoms = get_atom(x, y, _path, self._columns[_subpart[0]])
                            _match = False
                            for _value_item in iter_with_extras(_subpart[0], _value):
                                for _atom_item in _atoms:
                                    _atom = _atom_item['atom']
                                    _valid, _valid_value = self.validate_by_mask(_subpart[0], _atom, _value_item)
                                    if _valid:
                                        _res_params[_subpart[0]] = _valid_value
                                        _res_params_raw[_subpart[0]] = _atom
                                        _new_path += _atom
                                        _path = _atom_item['path']
                                        _match = True
                                        break
                                if _match:
                                    break
                            if not _match:
                                return False, {}, {}
                        else:
                            if _value is None:
                                # Unconstrained column: accept any configured value.
                                _match = False
                                for _value_item in sorted(self._config.get_values(_subpart[0]),
                                                          key=lambda x: 0 - len(x)):
                                    _atom = _path[:len(_value_item)]
                                    if fnmatch.fnmatch(_atom, _value_item):  # may be just comparing would be better
                                        _res_params[_subpart[0]] = _atom
                                        _new_path += _atom
                                        _path = _path[len(_value_item):]
                                        _match = True
                                        break
                                if not _match:
                                    return False, {}, {}
                            else:
                                # it's a plain value
                                _plain = not any(x in _value for x in ('>=', '<=', '==', '>', '<', '=', '*'))
                                _mask = '*' in _value
                                if _plain or (_subpart[0] not in self._columns):
                                    _match = False
                                    if _mask:
                                        # process masked values (ex. branch = release*)
                                        _atoms = get_atom(x, y, _path)
                                        for _value_item in iter_with_extras(_subpart[0], _value):
                                            for _atom_item in _atoms:
                                                _atom = _atom_item['atom']
                                                if self.validate_atom(_atom, _value_item):
                                                    _res_params[_subpart[0]] = _atom
                                                    _new_path += _atom
                                                    _path = _atom_item['path']
                                                    _match = True
                                                    break
                                            if _match:
                                                break
                                    else:
                                        for _value_item in iter_with_extras(_subpart[0], _value):
                                            _atom = _path[:len(_value_item)]
                                            if fnmatch.fnmatch(_atom,
                                                               _value_item):  # may be just comparing would be better
                                                _res_params[_subpart[0]] = _atom
                                                _new_path += _atom
                                                _path = _path[len(_value_item):]
                                                _match = True
                                                break
                                    if not _match:
                                        return False, {}, {}
                                else:
                                    _atoms = get_atom(x, y, _path, self._columns[_subpart[0]])
                                    _match = False
                                    for _value_item in iter_with_extras(_subpart[0], _value):
                                        for _atom_item in _atoms:
                                            _atom = _atom_item['atom']
                                            if self.validate_atom(_atom, _value_item):
                                                _res_params[_subpart[0]] = _atom
                                                _new_path += _atom
                                                _path = _atom_item['path']
                                                _match = True
                                                break
                                        if _match:
                                            break
                                    if not _match:
                                        return False, {}, {}
                    else:
                        # just part of template
                        _res = False
                        if _part[1]:
                            # square brackets means this part can be one of values
                            _tmp = [xx.strip() for xx in _subpart[0].split('|')]
                        else:
                            _tmp = [_subpart[0]]
                        for _sym in _tmp:
                            _atom = _path[:len(_sym)]
                            if fnmatch.fnmatch(_atom, _sym):  # may be just comparing would be better
                                _path = _path[len(_sym):]
                                _new_path += _atom
                                _res = True
                                break
                            else:
                                # HACK for * in path when more than one folder use
                                # e.g.:
                                # _sym = /pool/*/
                                # _path = /pool/detects/e/filename.deb
                                try:
                                    if '*' in _sym:
                                        re_str = fnmatch.translate(_sym)
                                        # \/pool\/.*\/\Z(?ms) => \/pool\/.*\/
                                        if re_str.endswith('\\Z(?ms)'):
                                            re_str = re_str[:-7]
                                        found_str = re.match(re_str, _path).group()
                                        _path = _path[len(found_str):]
                                        _new_path += found_str
                                        _res = True
                                        break
                                except Exception as e:
                                    logging.error("Something wrong when parse '{}' in '{}'".format(_sym, _path))
                                    logging.exception(e)
                        if not _res:
                            return False, {}, {}
            return _res, _res_params, _res_params_raw
        _result = False
        _result_params = {}
        _result_params_raw = {}
        # rule = self._rules[_rule_name]
        # for _rule in self._rules[_rule_name]:
        for i in range(len(self._rules[_rule_name])):
            _ok, _params, _params_raw = do_check(i, str(path))
            if _ok:
                _result = True
                _result_params.update({k: v for k, v in _params.items() if k not in _result_params})
                _result_params_raw.update({k: v for k, v in _params_raw.items() if k not in _result_params_raw})
                break
        return _result, _result_params, _result_params_raw
    def validate(self, value, rule_name, params, return_params=False):
        """Check *value* against every filled variant of rule *rule_name*.

        *value* may be a str (membership in the expanded variants), a
        list/tuple (any element matches a variant), or a dict (glob match of
        keys/values, e.g. artifact properties). Any other type raises
        CrosspmException. Returns a bool, or (bool, matched_params) when
        *return_params* is True.
        """
        # If no validation rules are configured - accept the package as-is.
        if rule_name not in self._rules:
            return (True, {}) if return_params else True
        if len(self._rules[rule_name]) == 0:
            return (True, {}) if return_params else True
        # NOTE(review): unreachable for None - len(None) above would already
        # raise TypeError; confirm whether this check should come first.
        if self._rules[rule_name] is None:
            return (True, {}) if return_params else True
        _valid = True
        _result_params = {}
        # All possible rule variants (for 'properties': bad, snapshot, etc.);
        # a list of lists - outer per rule index, inner per combination.
        _all_dirties = self.fill_rule(rule_name, params, return_params=True, return_defaults=True)
        for _dirties in _all_dirties:
            # _dirties - one group of concrete candidate rules; the group
            # passes when ANY candidate matches, and all groups must pass.
            _res_sub = False
            _res_sub_params = {}
            for _dirt in _dirties:
                # _dirt - a single candidate rule variant.
                _res_var = False
                # TODO: Use split_with_regexp() instead
                _dirty = [x.split(']') for x in _dirt['var'].split('[')]
                _dirty = self.list_flatter(_dirty)
                _variants = self.get_variants(_dirty, [])
                if isinstance(value, str):
                    _res_var = value in _variants
                elif isinstance(value, (list, tuple)):
                    _res_var = False
                    for _variant in _variants:
                        if _variant in value:
                            _res_var = True
                            break
                elif isinstance(value, dict):
                    _key = ''
                    # Inject the configured default key/value when no existing
                    # key matches the default's mask (mutates *value* in place).
                    if 'mask' in _dirt.get('default', {}):
                        _mask = _dirt['default'].get('mask', '')
                        if len(fnmatch.filter(value.keys(), _mask)) == 0:
                            _key = _dirt['default'].get('key', '')
                            value[_key] = [_dirt['default'].get('value', '')]
                    for _variant in _variants:
                        # 'key = val' variants: glob-match key, then value.
                        _tmp = [x.strip() for x in _variant.split('=')]
                        _tmp = [x if len(x) > 0 else '*' for x in _tmp]
                        _key_list = fnmatch.filter(value.keys(), _tmp[0])
                        if len(_key_list) == 0 and '*' in _key:
                            _key_list = [_key]
                        for _key in _key_list:
                            if len(_tmp) > 1:
                                _tmp_val = value[_key]
                                if isinstance(_tmp_val, str):
                                    _tmp_val = [_tmp_val]
                                elif not isinstance(_tmp_val, (list, tuple, dict)):
                                    raise CrosspmException(
                                        CROSSPM_ERRORCODE_CONFIG_FORMAT_ERROR,
                                        'Parser rule for [{}] not able to process [{}] data type.'.format(
                                            rule_name, type(_tmp_val))
                                    )
                                if len(fnmatch.filter(_tmp_val, _tmp[1])) > 0:
                                    _res_var = True
                                    break
                            else:
                                # Key-only variant: key presence is enough.
                                _res_var = True
                                break
                else:
                    raise CrosspmException(
                        CROSSPM_ERRORCODE_CONFIG_FORMAT_ERROR,
                        'Parser rule for [{}] not able to process [{}] data type.'.format(rule_name, type(value))
                    )
                _res_sub = _res_sub or _res_var
                if _res_sub:
                    _res_sub_params = _dirt['params']
                    break
            _valid = _valid and _res_sub
            if _valid:
                _result_params.update(_res_sub_params)
        return (_valid, _result_params) if return_params else _valid
def iter_matched_values(self, column_name, value):
_values = self._config.get_values(column_name)
for _value in _values:
if (value is None) or (self.values_match(_value, value, _values)):
if isinstance(_values, dict):
_value = _values[_value]
yield _value
@staticmethod
def values_match(_value, value, _values=None):
if value is None:
return _value is None
_sign = ''
if value.startswith(('>=', '<=', '==',)):
_sign = value[:2]
value = value[2:]
elif value.startswith(('>', '<', '=',)):
_sign = value[:1]
value = value[1:]
if _sign == '=':
_sign = '=='
var1, var2 = _value, value
if isinstance(_values, dict):
var2 = 0 if isinstance(var1, int) else ''
for k, v in _values.items():
if value == v:
var2 = k
break
if int in [type(var1), type(var2)]:
try:
var1a = int(var1)
var2a = int(var2)
if not _sign:
_sign = '=='
except Exception:
var1a = str(var1)
var2a = str(var2)
var1, var2 = var1a, var2a
if _sign:
_match = eval('var1 {} var2'.format(_sign))
else:
_match = fnmatch.fnmatch(var1, var2)
return _match
    def fill_rule(self, rule_name, params, return_params=False, return_defaults=False):
        """Substitute *params* into every template variant of *rule_name*.

        Valued columns are expanded into every matching configured value
        (cartesian product); plain columns are merged back to strings with
        comparison prefixes replaced by ``*``.

        :return: list with one entry per rule variant; each entry is a list
            of filled template strings, or of ``{'var', 'params', 'default'}``
            dicts when ``return_params``/``return_defaults`` is set.
        """
        def fill_rule_inner(_cols, _params_inner, _pars=None):
            # Recursively build the cartesian product of candidate values for
            # the valued columns (_cols: list of [name, values] pairs).
            if _pars is None:
                _pars = {}
            for _cl in _cols:
                for _val in _cl[1]:
                    _pars[_cl[0]] = _val
                    if len(_cols) > 1:
                        _params_inner = fill_rule_inner(_cols[1:], _params_inner, _pars)
                    else:
                        _params_inner.append({k: v for k, v in _pars.items()})
                break
            return _params_inner
        _res = []
        for z in range(len(self._rules_vars[rule_name])):
            _res_part = []
            _params = {k: v for k, v in params.items()}
            _columns = []
            for _col, _valued in self._config.iter_valued_columns2(self._rules_vars[rule_name][z]):
                if _valued:
                    _columns += [[_col, [x for x in self.iter_matched_values(_col, params[_col])]]]
                else:
                    if not isinstance(params[_col], (list, tuple)):
                        _tmp = [params[_col]]
                    else:
                        _tmp = [x for x in params[_col]]
                    # Comparison rules ('>=1.2' etc.) cannot appear literally
                    # in a template - degrade them to a '*' wildcard.
                    for i, _tmp_item in enumerate(_tmp):
                        if _tmp_item and _tmp_item.startswith(('>=', '<=', '==', '>', '<', '=',)):
                            _tmp[i] = '*'
                    _params[_col] = self.merge_with_mask(_col, _tmp)
                    if _col in self._rules_vars_extra[rule_name][z]:
                        if len(self._rules_vars_extra[rule_name][z][_col]) > 0:
                            # Append extra alternatives as a [a|b|c] group.
                            _params[_col] = '[%s%s]' % (
                                _params[_col], ''.join('|{}'.format(x) for x in self._rules_vars_extra[rule_name][z][_col]))
            for _par in fill_rule_inner(_columns, []):
                _params.update(_par)
                _var = self._rules[rule_name][z].format(**_params)
                if return_params or return_defaults:
                    _tmp_res_part = {'var': _var}
                    if return_params:
                        _tmp_res_part['params'] = {k: v for k, v in _par.items()}
                    if return_defaults and rule_name in self._defaults_masked:
                        _tmp_res_part['default'] = self._defaults_masked[rule_name][z]
                    else:
                        _tmp_res_part['default'] = {}
                    _res_part += [_tmp_res_part]
                else:
                    _res_part += [_var]
            if len(_res_part) == 0:
                # No valued-column combinations - fill the template once as-is.
                _var = self._rules[rule_name][z].format(**_params)
                if return_params or return_defaults:
                    _tmp_res_part = {'var': _var}
                    if return_params:
                        _tmp_res_part['params'] = {}
                    if return_defaults and rule_name in self._defaults_masked:
                        _tmp_res_part['default'] = self._defaults_masked[rule_name][z]
                    else:
                        _tmp_res_part['default'] = {}
                    _res_part += [_tmp_res_part]
                else:
                    _res_part += [_var]
            _res += [_res_part]
        return _res
def merge_valued(self, params):
result = {}
for k, v in self._config.iter_valued_columns2(params.keys()):
if not v:
result[k] = self.merge_with_mask(k, params[k])
return result
    def get_paths(self, list_or_file_path, source):
        """Expand the 'path' rule into concrete search paths per package.

        :param list_or_file_path: anything iter_packages_params() accepts.
        :param source: source adapter; its args supply 'server' and 'repo'.
        :return: list of ``{'params': ..., 'paths': [{'paths': [...],
            'repo': ...}, ...]}`` entries, or None when no 'path' rule exists.
        """
        if 'path' not in self._rules:
            return None
        _paths = []
        for _params in self.iter_packages_params(list_or_file_path):
            # Restrict to the repos this source actually serves.
            if _params['repo'] is None or _params['repo'] == '*':
                repo_list = source.args['repo']
            elif _params['repo'] not in source.args['repo']:
                continue
            else:
                repo_list = [_params['repo']]
            _params['server'] = source.args['server']
            _sub_paths = {
                'params': {k: v for k, v in _params.items() if k != 'repo'},
                'paths': [],
            }
            for _repo in repo_list:
                _params['repo'] = _repo
                # _dirty = self._rules['path'].format(**_params)
                _all_dirties = self.fill_rule('path', _params)
                # _params.pop('server')
                # _params.pop('repo')
                for _dirties in _all_dirties:
                    for _dirty in _dirties:
                        # TODO: Use split_with_regexp() instead
                        _dirty = [x.split(']') for x in _dirty.split('[')]
                        _dirty = self.list_flatter(_dirty)
                        _sub_paths['paths'] += [{'paths': self.get_variants(_dirty, []),
                                                 'repo': _repo,
                                                 }]
            _paths += [_sub_paths]
        return _paths
def get_variants(self, dirty, paths):
if len(dirty) == 1:
if dirty[0] not in paths:
paths.append(dirty[0])
else:
for i, stub in enumerate(dirty):
if i % 2 != 0:
for _variant in stub.split("|"):
_res = ''.join(dirty[:i]) + _variant
_res += dirty[i + 1] if len(dirty) > i else ''
_res = [_res]
if len(dirty) > i + 1:
_res += dirty[i + 2:]
# print(_res)
paths = self.get_variants(_res, paths)
break
return paths
    def iter_packages_params(self, list_or_file_path, deps_content=None):
        """Yield one params dict per package from a deps file/content/list.

        :param list_or_file_path: DependenciesContent, a file path, a
            ``{'raw': [...]}`` dict, or an iterable of ready param dicts.
        :param deps_content: when given, overrides *list_or_file_path*.
        """
        if deps_content is not None:
            # HACK for download with --dependencies-content and existed file dependencies.txt.lock
            list_or_file_path = deps_content
        if list_or_file_path.__class__ is DependenciesContent:
            # Allows passing the file content directly instead of a file path.
            for i, line in enumerate(list_or_file_path.splitlines()):
                yield self.get_package_params(i, line)
        elif isinstance(list_or_file_path, str):
            if not os.path.exists(list_or_file_path):
                raise CrosspmException(
                    CROSSPM_ERRORCODE_FILE_DEPS_NOT_FOUND,
                    'File not found: [{}]'.format(list_or_file_path),
                )
            with open(list_or_file_path, 'r') as f:
                for i, line in enumerate(f):
                    line = line.strip()
                    # if i == 0 and line.startswith(''.join(map(chr,(1087,187,1111)))):
                    # "п»ї" is the UTF-8 BOM (EF BB BF) mis-decoded as cp1251;
                    # strip it from the first line.
                    if i == 0 and line.startswith(chr(1087) + chr(187) + chr(1111)):  # TODO: why?
                        line = line[3:]
                    # Skip blanks, comments and section headers.
                    if not line or line.startswith(('#', '[',)):
                        continue
                    yield self.get_package_params(i, line)
        elif (isinstance(list_or_file_path, dict)) and ('raw' in list_or_file_path):
            for _item in list_or_file_path['raw']:
                _tmp_item = {k: self.parse_by_mask(k, v, False, True) for k, v in _item.items()}
                yield _tmp_item
        else:
            for _item in list_or_file_path:
                yield _item
def get_package_params(self, line_no, line):
_vars = {}
for i, v in enumerate(line.split()):
v = v.strip()
if v == '-':
v = None # get default value on next line
k, v = self._config.check_column_value(i, v, True)
if k:
_vars[k] = self.parse_by_mask(k, v, False, True)
if len(_vars) == 0:
raise CrosspmException(
CROSSPM_ERRORCODE_WRONG_SYNTAX,
'Nothing parsed at line {}: [{}]'.format(line_no, line.strip())
)
update_items = self._config.complete_params(_vars, False)
update_vars = {k: self.parse_by_mask(k, v, False, True) for k, v in update_items.items()}
# Expend default params to passed params
try:
update_vars = {k: v.format(**_vars) if isinstance(v, str) else v for k, v in update_vars.items()}
except Exception as e:
pass
self._config._log.info(
"We catch exception when try update defaults Params, don't use this functional. Message:\n {}".format(
repr(e)))
_vars.update(update_vars)
return _vars
def list_flatter(self, _src):
_res = []
for x in _src:
_res += self.list_flatter(x) if isinstance(x, (list, tuple)) else [x]
return _res
@staticmethod
def split_with_regexp(regexp, text):
prev_pos = 0
_res = []
for x in ([x.group()[1:-1].strip(), x.span()] for x in re.finditer(regexp, text)):
if x[1][0] > prev_pos:
_res += [[text[prev_pos:x[1][0]], False]]
_res += [[x[0], True]]
prev_pos = x[1][1]
if prev_pos < len(text):
_res += [[text[prev_pos:], False]]
return _res
def parse_value_template(self, value):
# _regexp = ''
must_not = self.split_with_regexp(r'\[.*?\]', value)
for i, x in enumerate(must_not):
must_not[i] = [self.split_with_regexp('{.*?}', x[0]), x[1]]
# _atom = '(?P<_1_int>[\\w*><=]+)'
return must_not
@staticmethod
def split_fixed_pattern(path):
"""
Split path into fixed and masked parts
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc110/x86/win/boost.*.*.*.tar.gz
"""
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos) + 1
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos:]
return _path_fixed, _path_pattern
@staticmethod
def split_fixed_pattern_with_file_name(path):
"""
Split path into fixed, masked parts and filename
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc100/x86/win
_file_name_pattern: boost.*.*.*.tar.gz
"""
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos)
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos + 1:]
_file_name_pattern_separator_pos = _path_pattern.rfind('/', 0)
_file_name_pattern = _path_pattern[_file_name_pattern_separator_pos + 1:]
if _path_pattern.find('*') == -1 or _file_name_pattern_separator_pos == -1:
_path_pattern = ""
else:
_path_pattern = _path_pattern[:_file_name_pattern_separator_pos]
return _path_fixed, _path_pattern, _file_name_pattern
    def filter_one(self, packages, params, params_found):
        """Pick the single best matching package from *packages*.

        Filters each package's parsed atoms against the requested rules,
        sorts survivors by the parser's 'sort' columns (int-coercing typed
        atoms) and returns the element at ``self._index`` ([] if empty).
        """
        def merge_params(path):
            # Params discovered for this path win over the requested params.
            _res_params = {k: v for k, v in params_found.get(path, {}).items()}
            _res_params.update({k: v for k, v in params.items() if k not in _res_params})
            return _res_params
        def filter_fn(item):
            # Reject the item if any optional atom violates its rule:
            #   rule None  -> optional part may or may not be present
            #   rule ''    -> optional part must be absent
            #   rule set   -> optional part must be present
            _result = True
            _atoms_found = item['params']
            for _atom_name in item['columns']:
                if _atom_name in _atoms_found:
                    _rules = params[_atom_name]
                    if not isinstance(_rules, (list, tuple)):
                        _rules = [_rules]
                    _vars = _atoms_found[_atom_name]
                    if not isinstance(_vars, (list, tuple)):
                        _vars = [_vars]
                    i = -1
                    for _column in item['columns'][_atom_name]:
                        for _sub_col in _column[0]:
                            if _sub_col[1]:
                                i += 1
                                if _column[1]:
                                    _var = _vars[i] if len(_vars) > i else ''
                                    _rule = _rules[i] if len(_rules) > i else ''
                                    _is_var = (_var is not None) and (len(str(_var)) > 0)
                                    if _rule is None:  # '2.3.*' - Include versions both with or without last part
                                        pass
                                    elif _rule == '' and _is_var and len(
                                            str(_var)) > 0:  # '2.3.*-' - Do not include versions with last part
                                        _result = False
                                        break
                                    elif _rule and not _is_var:  # '2.3.*-*' - Do not include versions without last part
                                        _result = False
                                        break
                        if not _result:
                            break
                if not _result:
                    break
            return _result
        def sorted_fn(item):
            # Build a flat sort key from the 'sort' columns; '*' pulls in all
            # remaining atoms.  Typed atoms are coerced in place (int/str).
            _result = []
            _atoms_found = item['params']
            for _atom_name in self._sort:
                if _atom_name == '*':
                    _result += [_atoms_found[x] for x in _atoms_found if x not in self._sort]
                else:
                    _atom_item = _atoms_found.get(_atom_name, [])
                    if isinstance(_atom_item, (list, tuple)):
                        if _atom_name in self._columns:
                            i = -1
                            for _column in item['columns'][_atom_name]:
                                for _sub_col in _column[0]:
                                    if _sub_col[1]:
                                        i += 1
                                        if _sub_col[0] == 'int':
                                            try:
                                                _atom_item[i] = int(_atom_item[i])
                                            except ValueError:
                                                _atom_item[i] = 0
                                        elif _sub_col[0] == 'str':
                                            try:
                                                _atom_item[i] = str(_atom_item[i])
                                            except ValueError:
                                                _atom_item[i] = ''
                    _result += [_atoms_found.get(_atom_name, [])]
            _result = [item for sublist in _result for item in sublist]
            return _result
        ext_packages = [{'params': merge_params(x), 'columns': self._columns, 'path': x} for x in packages]
        # Filter by columns with parsing template (i.e. version)
        filtered_packages = list(filter(
            filter_fn,
            ext_packages,
        ))
        sorted_packages = sorted(
            filtered_packages,
            key=sorted_fn,
        )
        try:
            result = sorted_packages[self._index]
        except Exception:
            result = []
        return result
def get_full_package_name(self, package):
param_list = [x for x in self._config.get_fails('unique', {})]
if self._config.name_column not in param_list:
param_list.insert(0, self._config.name_column)
params = package.get_params(param_list)
pkg_name = '/'.join(self.merge_with_mask(x, params[x]) for x in param_list)
return pkg_name
def has_rule(self, rule_name):
res = False
if self._rules.get(rule_name, False):
res = True
return res
def get_params_from_properties(self, properties):
# Парсит свойства артефакта и выдаёт параметры
result = {y: properties.get(x, '') for x, y in self._usedby.get('property-parser', {}).items()}
return result
def get_params_from_path(self, path):
pattern = self._usedby.get('path-parser', None)
if pattern is None:
return {}
match = re.match(pattern, path)
if match is None:
return {}
return match.groupdict()
|
devopshq/crosspm | crosspm/helpers/parser.py | Parser.split_fixed_pattern | python | def split_fixed_pattern(path):
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos) + 1
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos:]
return _path_fixed, _path_pattern | Split path into fixed and masked parts
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc110/x86/win/boost.*.*.*.tar.gz | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/parser.py#L908-L921 | null | class Parser:
    def __init__(self, name, data, config):
        """Build a Parser from one parser section of the config.

        :param name: parser name (section key in the config file).
        :param data: the section dict (rules, columns, sort, defaults, usedby).
        :param config: the owning Config object.
        """
        self._rules_vars = {}
        self._rules_vars_extra = {}
        self._columns = {}
        self._defaults = {}
        self._defaults_masked = {}
        self._col_types = []
        self._name = name
        self._sort = data.get('sort', [])
        self._index = data.get('index', -1)
        # Everything else in the section is a template rule: key -> str value.
        self._rules = {k: v for k, v in data.items() if k not in ['columns', 'index', 'sort', 'defaults', 'usedby']}
        if 'columns' in data:
            self._columns = {k: self.parse_value_template(v) for k, v in data['columns'].items() if v != ''}
        self._config = config
        self.init_rules_vars()
        if 'defaults' in data:
            self.init_defaults(data['defaults'])
        self._usedby = data.get('usedby', None)
def get_usedby_aql(self, params):
"""
Возвращает запрос AQL (без репозитория), из файла конфигурации
:param params:
:return:
"""
if self._usedby is None:
return None
_result = {}
params = self.merge_valued(params)
for k, v in self._usedby['AQL'].items():
if isinstance(v, str):
k = k.format(**params)
v = v.format(**params)
_result[k] = v
return _result
def get_vars(self):
_vars = []
for _rule_vars in self._rules_vars.values():
_vars += [item for sublist in _rule_vars for item in sublist if item not in _vars]
# _vars += [x for x in _rule_vars if x not in _vars]
return _vars
    def init_defaults(self, defaults):
        """Precompute masked defaults for every rule except 'path'.

        Ensures ``self._defaults`` holds a value for each placeholder name and
        fills ``self._defaults_masked[rule]`` with ``{'mask','key','value'}``
        entries used later to inject default properties.
        """
        if isinstance(defaults, dict):
            self._defaults = {k: v for k, v in defaults.items()}
        self._defaults_masked = {}
        for _rules_name, _rules in self._rules.items():
            if _rules_name == 'path':
                continue
            self._defaults_masked[_rules_name] = []
            for _rule in _rules:
                _rule_tmp = _rule
                for _name in [x for x in re.findall('{.*?}', _rule_tmp)]:
                    # Placeholder may carry alternatives: '{quality|any}' -> 'quality'.
                    _name_replace = _name[1:-1].split('|')[0]
                    # NOTE(review): str.replace() returns a new string but the
                    # result is discarded here, so _rule_tmp is never rewritten -
                    # looks like a bug ("_rule_tmp = _rule_tmp.replace(...)" was
                    # probably intended); confirm before changing.
                    _rule_tmp.replace(_name, _name_replace)
                    if _name_replace not in self._defaults:
                        self._defaults[_name_replace] = ''
                _tmp = [x.strip() for x in _rule_tmp.split('=')]
                _mask = re.sub('{.*?}', '*', _tmp[0])
                _val = ''
                _key = _tmp[0].format(**self._defaults)
                if len(_tmp) > 1:
                    _val = _tmp[1].format(**self._defaults)
                self._defaults_masked[_rules_name].append({'mask': _mask, 'key': _key, 'value': _val})
    def init_rules_vars(self):
        """Normalize rules and extract their placeholder names.

        Coerces every rule to a list, drops empty rules, records placeholder
        names per rule variant in ``self._rules_vars`` and splits
        '{name|extra...}' alternatives into ``self._rules_vars_extra``
        (rewriting the rule to use the bare '{name}').
        """
        self._rules_vars = {}
        for _name in self._rules:
            if not isinstance(self._rules[_name], (list, tuple)):
                self._rules[_name] = [self._rules[_name]]
            # Drop empty rule entries.
            to_del = []
            for _rule in self._rules[_name]:
                if not _rule:
                    to_del.append(_rule)
            for _rule in to_del:
                self._rules[_name].remove(_rule)
            for z, _rule in enumerate(self._rules[_name]):
                if _name not in self._rules_vars:
                    self._rules_vars[_name] = []
                for i in range(len(self._rules_vars[_name]), z + 1):
                    self._rules_vars[_name].append([])
                if _name not in self._rules_vars_extra:
                    self._rules_vars_extra[_name] = []
                for i in range(len(self._rules_vars_extra[_name]), z + 1):
                    self._rules_vars_extra[_name].append({})
                # Dict-comprehension keys give unique names in first-seen order.
                self._rules_vars[_name][z] = list({x[1:-1].strip(): 0 for x in re.findall('{.*?}', _rule)}.keys())
                # self._rules_vars_extra[_name][z] = {}
                for i, _item in enumerate(self._rules_vars[_name][z]):
                    _tmp = [x for x in _item.split('|') if x]
                    if len(_tmp) > 1:
                        self._rules[_name][z] = self._rules[_name][z].replace(('{%s}' % _item), ('{%s}' % _tmp[0]))
                        self._rules_vars[_name][z][i] = _tmp[0]
                        self._rules_vars_extra[_name][z][_tmp[0]] = _tmp[1:]
        # if len(_rules) == 1:
        #     _rules = _rules[0]
    def parse_by_mask(self, column, value, types=False, ext_mask=False):
        """Split *value* into atoms according to the column's parsed template.

        :param column: column name; returned untouched when no mask exists.
        :param value: string to parse (lists are returned as a copy).
        :param types: when True, return ``(atom, type_name)`` tuples.
        :param ext_mask: enables the extended '*'-mask semantics where a
            missing optional atom yields None / '' markers for filtering.
        :return: list of atoms (or the original value when nothing to parse).
        """
        # см https://habrahabr.ru/post/269759/
        if column not in self._columns:
            return value  # nothing to parse
        if isinstance(value, list):
            return value[:]
        _res = []
        # extMask = extMask and value == '*'
        orig_value = value
        rule_parsed = self._columns[column]
        prev_om_sep = False
        om_sep = ''
        # Remember the separator of the (last) optional group, if any.
        for _part in rule_parsed:
            if _part[1]:
                for _subpart in _part[0]:
                    if not _subpart[1]:
                        om_sep = _subpart[0]
                        break
        # TODO: make parsing smarter
        for x, _part in enumerate(rule_parsed):
            for y, _subpart in enumerate(_part[0]):
                if _subpart[1]:
                    _pos = -1
                    _sym = ''
                    if y < len(_part[0]) - 1:
                        # Next literal inside this group delimits the atom.
                        _sym = _part[0][y + 1][0]
                        _pos = value.find(_sym)
                        cur_om_sep = _pos >= 0 and _part[1]
                    else:
                        # Look ahead into the following groups for a delimiter.
                        cur_om_sep = False
                        z = x
                        while True:
                            if z < len(rule_parsed) - 1:
                                z += 1
                                _sym = rule_parsed[z][0][0][0]
                                _pos = value.find(_sym)
                                if _pos >= 0:
                                    cur_om_sep = rule_parsed[z][1]
                                    break
                                elif not rule_parsed[z][1]:
                                    break
                            else:
                                break
                    if _pos >= 0:
                        _atom = value[:_pos]
                        value = value[_pos + len(_sym):]
                    else:
                        if ext_mask:
                            if orig_value == '*':
                                _atom = '*' if not _part[1] else None
                            elif _part[1]:
                                if prev_om_sep:
                                    _atom = value  # '2.3.*-*' - Do not include versions without last part
                                    # _atom = ''  # '2.3.*-' - Do not include versions with last part
                                else:
                                    _atom = None  # '2.3.*' - Include versions both with or without last part
                            else:
                                if om_sep:
                                    _pos = value.find(om_sep)
                                    if _pos >= 0:
                                        _atom = value[:_pos]
                                        if _atom != '*':
                                            value = value[_pos:]
                                    else:
                                        _atom = value
                                        if _atom != '*':
                                            value = ''
                                else:
                                    _atom = value
                                    if _atom != '*':
                                        value = ''
                        else:
                            _atom = value
                            value = ''
                    if types:
                        _res += [(_atom, _subpart[0])]
                    else:
                        _res += [_atom]
                    prev_om_sep = cur_om_sep
        return _res
    def merge_with_mask(self, column, value):
        """Reassemble a parsed atom list back into a string via the column mask.

        Non-masked columns: lists are simply joined, scalars returned as-is.
        Optional-group separators are buffered in ``_res_tmp`` and only
        emitted when the optional atom is actually present.
        """
        if column not in self._columns:
            if isinstance(value, (list, tuple)):
                # TODO: Check for value is not None - if it is, raise "column value not set"
                # if None in value:
                #     value = ['' if x is None else x for x in value]
                value = ''.join(value)
            return value  # nothing to parse
        if not isinstance(value, (list, tuple)):
            return value  # nothing to parse
        _res = ''
        _res_tmp = ''
        rule_parsed = self._columns[column]
        _value = value
        for _part in rule_parsed:
            for _subpart in _part[0]:
                if _subpart[1]:
                    # Placeholder: decide whether the next atom exists.
                    _exist = False
                    if len(_value) > 0:
                        if not _part[1]:
                            _exist = True
                        elif _value[0] not in ('', None):
                            _exist = True
                    if _exist:
                        _res_atom = str(_value[0])
                        if _part[1]:
                            # Optional wildcard atom collapses into preceding '*'.
                            if _res_atom in [None, '*', '']:
                                _res_tmp = ''
                                if _res and _res[-1] == '*':
                                    _res_atom = ''
                        _res += _res_tmp
                        _res_tmp = ''
                        _res += _res_atom
                        _value = _value[1:]
                    else:
                        _res_tmp = ''
                        if _part[1]:
                            _value = _value[1:]
                            # break
                        else:
                            # TODO: Error handling?
                            # break
                            pass
                else:
                    # Literal separator: buffer it inside optional groups.
                    if _part[1]:
                        _res_tmp += _subpart[0]
                    else:
                        _res_tmp = ''
                        _res += _subpart[0]
        return _res + _res_tmp
def validate_by_mask(self, column, value, param):
_res_value = []
if column not in self._columns:
_res = True # nothing to validate
_res_value = value
elif not isinstance(param, (list, tuple)):
_res = False
else:
_res = True
for i, (_tmp, tp) in enumerate(self.parse_by_mask(column, value, True)):
if tp == 'int':
try:
_tmp = int(_tmp)
except Exception:
_tmp = str(_tmp)
if not self.validate_atom(_tmp, param[i]):
_res = False
_res_value = []
break
else:
_res_value.append(_tmp)
return _res, _res_value
@staticmethod
def validate_atom(value, text):
_sign = ''
if text:
if text.startswith(('>=', '<=', '==',)):
_sign = text[:2]
text = text[2:]
elif text.startswith(('>', '<', '=',)):
_sign = text[:1]
text = text[1:]
if _sign == '=':
_sign = '=='
var1 = value
var2 = text if text else '*'
if isinstance(var1, int):
try:
var2 = int(var2)
if not _sign:
_sign = '=='
except Exception:
var1 = str(var1)
if _sign:
_match = eval('var1 {} var2'.format(_sign))
else:
_match = fnmatch.fnmatch(var1, var2)
return _match
    def validate_path(self, path, params):
        """Match *path* against every 'path' rule variant and extract params.

        :param path: artifact path/URL to check.
        :param params: requested params (values, masks or comparison rules).
        :return: ``(matched, params, params_raw)`` - *params* holds parsed
            atom lists per column, *params_raw* the raw matched substrings.
        """
        _rule_name = 'path'
        def do_check(rule_number, _path):
            # Try to consume _path left-to-right following rule variant
            # `rule_number`; returns (ok, params, params_raw).
            def iter_with_extras(_col_name, _value0):
                # Yield the requested value plus its extra alternatives,
                # longest first so the most specific candidate wins.
                _res0 = [_value0]
                if _col_name in self._rules_vars_extra[_rule_name][rule_number]:
                    _res0 += self._rules_vars_extra[_rule_name][rule_number][_col_name]
                for _res1 in sorted(_res0, key=lambda x: 0 - len(x)):
                    yield _res1
            def get_symbol_in_mask(_sym1, _val_mask):
                # Count occurrences of _sym1 among the mask's literal parts,
                # separately for mandatory (_count) and optional (_count_ext).
                _count = 0
                _count_ext = 0
                if _val_mask and isinstance(_val_mask, (list, tuple)):
                    for _xx in _val_mask:
                        for _yy in _xx[0]:
                            if not _yy[1]:
                                if _xx[1]:
                                    _count_ext += _yy[0].count(_sym1)
                                else:
                                    _count += _yy[0].count(_sym1)
                return _count, _count_ext
            def get_atom(_x, _y, _path0, _val_mask=None):
                # Cut candidate atom(s) from the head of _path0, delimited by
                # the next template literal; optional separators in the mask
                # produce several split candidates.
                _pos = -1
                if _y < len(_part[0]) - 1:
                    _sym0 = _part[0][_y + 1][0]
                    _sym_count, _sym_ext = get_symbol_in_mask(_sym0, _val_mask)
                    _pos0 = -1
                    for _xx in range(_sym_count + 1):
                        _pos0 = _path0.find(_sym0, _pos0 + 1)
                        if _pos0 < 0:
                            break
                    if _pos0 < 0 or _sym_ext == 0:
                        _pos = _pos0
                    else:
                        _pos = [_pos0]
                        for _xx in range(_sym_ext):
                            _pos0 = _path0.find(_sym0, _pos0 + 1)
                            if _pos0 < 0:
                                _pos += [len(_path0)]
                                break
                            else:
                                _pos += [_pos0]
                elif _x < len(rule_parsed) - 1:
                    if rule_parsed[_x + 1][1]:
                        _tmp0 = [xx.strip() for xx in rule_parsed[_x + 1][0][0][0].split('|')]
                    else:
                        _tmp0 = [rule_parsed[_x + 1][0][0][0]]
                    for _sym0 in _tmp0:
                        _pos = _path0.find(_sym0)
                        if _pos >= 0:
                            break
                if isinstance(_pos, int):
                    if _pos >= 0:
                        _atom0 = [{'atom': _path0[:_pos],
                                   'path': _path0[_pos:],
                                   }]
                    else:
                        # NOTE(review): key 'path0' looks like a typo for
                        # 'path' - callers read _atom_item['path'] and would
                        # KeyError on this branch; confirm before fixing.
                        _atom0 = [{'atom': _path0,
                                   'path0': '',
                                   }]
                else:
                    _atom0 = [{'atom': _path0[:_pos[_xx]], 'path': _path0[_pos[_xx]:]} for _xx in range(len(_pos))]
                return _atom0
            _res = True
            _new_path = ''
            rule = self._rules[_rule_name][rule_number]
            rule_parsed = self.parse_value_template(rule)
            _res_params = {}
            _res_params_raw = {}
            for x, _part in enumerate(rule_parsed):
                for y, _subpart in enumerate(_part[0]):
                    if _subpart[1]:
                        _value = params[_subpart[0]]
                        if _subpart[0] in self._columns:
                            # we have a mask to process
                            _atoms = get_atom(x, y, _path, self._columns[_subpart[0]])
                            _match = False
                            for _value_item in iter_with_extras(_subpart[0], _value):
                                for _atom_item in _atoms:
                                    _atom = _atom_item['atom']
                                    _valid, _valid_value = self.validate_by_mask(_subpart[0], _atom, _value_item)
                                    if _valid:
                                        _res_params[_subpart[0]] = _valid_value
                                        _res_params_raw[_subpart[0]] = _atom
                                        _new_path += _atom
                                        _path = _atom_item['path']
                                        _match = True
                                        break
                                if _match:
                                    break
                            if not _match:
                                return False, {}, {}
                        else:
                            if _value is None:
                                # No requested value - try every configured
                                # value, longest first.
                                _match = False
                                for _value_item in sorted(self._config.get_values(_subpart[0]),
                                                          key=lambda x: 0 - len(x)):
                                    _atom = _path[:len(_value_item)]
                                    if fnmatch.fnmatch(_atom, _value_item):  # may be just comparing would be better
                                        _res_params[_subpart[0]] = _atom
                                        _new_path += _atom
                                        _path = _path[len(_value_item):]
                                        _match = True
                                        break
                                if not _match:
                                    return False, {}, {}
                            else:
                                # it's a plain value
                                _plain = not any(x in _value for x in ('>=', '<=', '==', '>', '<', '=', '*'))
                                _mask = '*' in _value
                                if _plain or (_subpart[0] not in self._columns):
                                    _match = False
                                    if _mask:
                                        # process masked values (ex. branch = release*)
                                        _atoms = get_atom(x, y, _path)
                                        for _value_item in iter_with_extras(_subpart[0], _value):
                                            for _atom_item in _atoms:
                                                _atom = _atom_item['atom']
                                                if self.validate_atom(_atom, _value_item):
                                                    _res_params[_subpart[0]] = _atom
                                                    _new_path += _atom
                                                    _path = _atom_item['path']
                                                    _match = True
                                                    break
                                            if _match:
                                                break
                                    else:
                                        for _value_item in iter_with_extras(_subpart[0], _value):
                                            _atom = _path[:len(_value_item)]
                                            if fnmatch.fnmatch(_atom,
                                                               _value_item):  # may be just comparing would be better
                                                _res_params[_subpart[0]] = _atom
                                                _new_path += _atom
                                                _path = _path[len(_value_item):]
                                                _match = True
                                                break
                                    if not _match:
                                        return False, {}, {}
                                else:
                                    _atoms = get_atom(x, y, _path, self._columns[_subpart[0]])
                                    _match = False
                                    for _value_item in iter_with_extras(_subpart[0], _value):
                                        for _atom_item in _atoms:
                                            _atom = _atom_item['atom']
                                            if self.validate_atom(_atom, _value_item):
                                                _res_params[_subpart[0]] = _atom
                                                _new_path += _atom
                                                _path = _atom_item['path']
                                                _match = True
                                                break
                                        if _match:
                                            break
                                    if not _match:
                                        return False, {}, {}
                    else:
                        # just part of template
                        _res = False
                        if _part[1]:
                            # square brackets means this part can be one of values
                            _tmp = [xx.strip() for xx in _subpart[0].split('|')]
                        else:
                            _tmp = [_subpart[0]]
                        for _sym in _tmp:
                            _atom = _path[:len(_sym)]
                            if fnmatch.fnmatch(_atom, _sym):  # may be just comparing would be better
                                _path = _path[len(_sym):]
                                _new_path += _atom
                                _res = True
                                break
                            else:
                                # HACK for * in path when more than one folder use
                                # e.g.:
                                # _sym = /pool/*/
                                # _path = /pool/detects/e/filename.deb
                                try:
                                    if '*' in _sym:
                                        re_str = fnmatch.translate(_sym)
                                        # \/pool\/.*\/\Z(?ms) => \/pool\/.*\/
                                        if re_str.endswith('\\Z(?ms)'):
                                            re_str = re_str[:-7]
                                        found_str = re.match(re_str, _path).group()
                                        _path = _path[len(found_str):]
                                        _new_path += found_str
                                        _res = True
                                        break
                                except Exception as e:
                                    logging.error("Something wrong when parse '{}' in '{}'".format(_sym, _path))
                                    logging.exception(e)
                        if not _res:
                            return False, {}, {}
            return _res, _res_params, _res_params_raw
        _result = False
        _result_params = {}
        _result_params_raw = {}
        # rule = self._rules[_rule_name]
        # for _rule in self._rules[_rule_name]:
        for i in range(len(self._rules[_rule_name])):
            _ok, _params, _params_raw = do_check(i, str(path))
            if _ok:
                _result = True
                _result_params.update({k: v for k, v in _params.items() if k not in _result_params})
                _result_params_raw.update({k: v for k, v in _params_raw.items() if k not in _result_params_raw})
                break
        return _result, _result_params, _result_params_raw
    def validate(self, value, rule_name, params, return_params=False):
        """Validate *value* against all filled variants of rule *rule_name*.

        *value* may be a str, list/tuple or dict of properties; every rule
        group must be satisfied by at least one of its variants.

        :return: bool, or ``(bool, matched_params)`` when *return_params*.
        :raises CrosspmException: on unsupported *value* data types.
        """
        # If no validation rules are configured - accept the package.
        if rule_name not in self._rules:
            return (True, {}) if return_params else True
        # NOTE(review): this len() runs before the None check below, so a None
        # rule would raise TypeError here - presumably rules are never None in
        # practice; confirm before reordering.
        if len(self._rules[rule_name]) == 0:
            return (True, {}) if return_params else True
        if self._rules[rule_name] is None:
            return (True, {}) if return_params else True
        _valid = True
        _result_params = {}
        # All possible rule variants (for properties: bad, snapshot, etc...),
        # as a list of lists.
        _all_dirties = self.fill_rule(rule_name, params, return_params=True, return_defaults=True)
        for _dirties in _all_dirties:
            # _dirties - one group of concrete rules to validate against.
            _res_sub = False
            _res_sub_params = {}
            for _dirt in _dirties:
                # _dirt - one candidate rule within the group.
                _res_var = False
                # TODO: Use split_with_regexp() instead
                _dirty = [x.split(']') for x in _dirt['var'].split('[')]
                _dirty = self.list_flatter(_dirty)
                _variants = self.get_variants(_dirty, [])
                if isinstance(value, str):
                    _res_var = value in _variants
                elif isinstance(value, (list, tuple)):
                    _res_var = False
                    for _variant in _variants:
                        if _variant in value:
                            _res_var = True
                            break
                elif isinstance(value, dict):
                    _key = ''
                    if 'mask' in _dirt.get('default', {}):
                        # Inject the default property when no key matches its mask.
                        _mask = _dirt['default'].get('mask', '')
                        if len(fnmatch.filter(value.keys(), _mask)) == 0:
                            _key = _dirt['default'].get('key', '')
                            value[_key] = [_dirt['default'].get('value', '')]
                    for _variant in _variants:
                        # Each variant is a 'key = value' pair; empty side means '*'.
                        _tmp = [x.strip() for x in _variant.split('=')]
                        _tmp = [x if len(x) > 0 else '*' for x in _tmp]
                        _key_list = fnmatch.filter(value.keys(), _tmp[0])
                        if len(_key_list) == 0 and '*' in _key:
                            _key_list = [_key]
                        for _key in _key_list:
                            if len(_tmp) > 1:
                                _tmp_val = value[_key]
                                if isinstance(_tmp_val, str):
                                    _tmp_val = [_tmp_val]
                                elif not isinstance(_tmp_val, (list, tuple, dict)):
                                    raise CrosspmException(
                                        CROSSPM_ERRORCODE_CONFIG_FORMAT_ERROR,
                                        'Parser rule for [{}] not able to process [{}] data type.'.format(
                                            rule_name, type(_tmp_val))
                                    )
                                if len(fnmatch.filter(_tmp_val, _tmp[1])) > 0:
                                    _res_var = True
                                    break
                            else:
                                _res_var = True
                                break
                else:
                    raise CrosspmException(
                        CROSSPM_ERRORCODE_CONFIG_FORMAT_ERROR,
                        'Parser rule for [{}] not able to process [{}] data type.'.format(rule_name, type(value))
                    )
                _res_sub = _res_sub or _res_var
                if _res_sub:
                    _res_sub_params = _dirt['params']
                    break
            _valid = _valid and _res_sub
            if _valid:
                _result_params.update(_res_sub_params)
        return (_valid, _result_params) if return_params else _valid
def iter_matched_values(self, column_name, value):
_values = self._config.get_values(column_name)
for _value in _values:
if (value is None) or (self.values_match(_value, value, _values)):
if isinstance(_values, dict):
_value = _values[_value]
yield _value
@staticmethod
def values_match(_value, value, _values=None):
if value is None:
return _value is None
_sign = ''
if value.startswith(('>=', '<=', '==',)):
_sign = value[:2]
value = value[2:]
elif value.startswith(('>', '<', '=',)):
_sign = value[:1]
value = value[1:]
if _sign == '=':
_sign = '=='
var1, var2 = _value, value
if isinstance(_values, dict):
var2 = 0 if isinstance(var1, int) else ''
for k, v in _values.items():
if value == v:
var2 = k
break
if int in [type(var1), type(var2)]:
try:
var1a = int(var1)
var2a = int(var2)
if not _sign:
_sign = '=='
except Exception:
var1a = str(var1)
var2a = str(var2)
var1, var2 = var1a, var2a
if _sign:
_match = eval('var1 {} var2'.format(_sign))
else:
_match = fnmatch.fnmatch(var1, var2)
return _match
    def fill_rule(self, rule_name, params, return_params=False, return_defaults=False):
        """Substitute *params* into every template variant of *rule_name*.

        Valued columns are expanded into every matching configured value
        (cartesian product); plain columns are merged back to strings with
        comparison prefixes replaced by ``*``.

        :return: list with one entry per rule variant; each entry is a list
            of filled template strings, or of ``{'var', 'params', 'default'}``
            dicts when ``return_params``/``return_defaults`` is set.
        """
        def fill_rule_inner(_cols, _params_inner, _pars=None):
            # Recursively build the cartesian product of candidate values for
            # the valued columns (_cols: list of [name, values] pairs).
            if _pars is None:
                _pars = {}
            for _cl in _cols:
                for _val in _cl[1]:
                    _pars[_cl[0]] = _val
                    if len(_cols) > 1:
                        _params_inner = fill_rule_inner(_cols[1:], _params_inner, _pars)
                    else:
                        _params_inner.append({k: v for k, v in _pars.items()})
                break
            return _params_inner
        _res = []
        for z in range(len(self._rules_vars[rule_name])):
            _res_part = []
            _params = {k: v for k, v in params.items()}
            _columns = []
            for _col, _valued in self._config.iter_valued_columns2(self._rules_vars[rule_name][z]):
                if _valued:
                    _columns += [[_col, [x for x in self.iter_matched_values(_col, params[_col])]]]
                else:
                    if not isinstance(params[_col], (list, tuple)):
                        _tmp = [params[_col]]
                    else:
                        _tmp = [x for x in params[_col]]
                    # Comparison rules ('>=1.2' etc.) cannot appear literally
                    # in a template - degrade them to a '*' wildcard.
                    for i, _tmp_item in enumerate(_tmp):
                        if _tmp_item and _tmp_item.startswith(('>=', '<=', '==', '>', '<', '=',)):
                            _tmp[i] = '*'
                    _params[_col] = self.merge_with_mask(_col, _tmp)
                    if _col in self._rules_vars_extra[rule_name][z]:
                        if len(self._rules_vars_extra[rule_name][z][_col]) > 0:
                            # Append extra alternatives as a [a|b|c] group.
                            _params[_col] = '[%s%s]' % (
                                _params[_col], ''.join('|{}'.format(x) for x in self._rules_vars_extra[rule_name][z][_col]))
            for _par in fill_rule_inner(_columns, []):
                _params.update(_par)
                _var = self._rules[rule_name][z].format(**_params)
                if return_params or return_defaults:
                    _tmp_res_part = {'var': _var}
                    if return_params:
                        _tmp_res_part['params'] = {k: v for k, v in _par.items()}
                    if return_defaults and rule_name in self._defaults_masked:
                        _tmp_res_part['default'] = self._defaults_masked[rule_name][z]
                    else:
                        _tmp_res_part['default'] = {}
                    _res_part += [_tmp_res_part]
                else:
                    _res_part += [_var]
            if len(_res_part) == 0:
                # No valued-column combinations - fill the template once as-is.
                _var = self._rules[rule_name][z].format(**_params)
                if return_params or return_defaults:
                    _tmp_res_part = {'var': _var}
                    if return_params:
                        _tmp_res_part['params'] = {}
                    if return_defaults and rule_name in self._defaults_masked:
                        _tmp_res_part['default'] = self._defaults_masked[rule_name][z]
                    else:
                        _tmp_res_part['default'] = {}
                    _res_part += [_tmp_res_part]
                else:
                    _res_part += [_var]
            _res += [_res_part]
        return _res
def merge_valued(self, params):
result = {}
for k, v in self._config.iter_valued_columns2(params.keys()):
if not v:
result[k] = self.merge_with_mask(k, params[k])
return result
def get_params_with_extra(self, rule_name, params):
"""
Get params with extra, like 'any'
:param rule_name: 'path'
:param params: default params
:return: list of combination params
"""
# HACK for prefer-local
result = []
extra_params = self._rules_vars_extra.get(rule_name, {})[0]
_tmp_params = copy.deepcopy(params)
_fixed_params = {}
# Save params with list type - this type not changed
for key, value in _tmp_params.items():
if isinstance(value, list):
_fixed_params[key] = value
_tmp_params = {k: v for k, v in _tmp_params.items() if k not in _fixed_params}
# extend with extra_vars - like 'any'
for key, value in _tmp_params.items():
if not isinstance(value, list) and key:
_tmp_params[key] = list([value])
if key in extra_params:
_tmp_params[key].extend(extra_params[key])
# get combinations
keys = sorted(_tmp_params)
combinations = itertools.product(*(_tmp_params[x] for x in keys))
for comb in combinations:
_dict = dict(zip(keys, comb))
_dict.update(_fixed_params)
result.append(_dict)
return result
def get_paths(self, list_or_file_path, source):
    """Build every concrete repository path for each dependency record.

    Returns None when no 'path' rule is configured; otherwise a list of
    dicts with the per-package params and the expanded path variants
    grouped per repository.
    """
    if 'path' not in self._rules:
        return None
    all_paths = []
    for pkg_params in self.iter_packages_params(list_or_file_path):
        requested_repo = pkg_params['repo']
        if requested_repo is None or requested_repo == '*':
            repos = source.args['repo']
        elif requested_repo not in source.args['repo']:
            # Package is pinned to a repo this source does not serve.
            continue
        else:
            repos = [requested_repo]
        pkg_params['server'] = source.args['server']
        entry = {
            'params': {k: v for k, v in pkg_params.items() if k != 'repo'},
            'paths': [],
        }
        for repo in repos:
            pkg_params['repo'] = repo
            for rule_variants in self.fill_rule('path', pkg_params):
                for raw_path in rule_variants:
                    # TODO: Use split_with_regexp() instead
                    pieces = self.list_flatter([x.split(']') for x in raw_path.split('[')])
                    entry['paths'].append({
                        'paths': self.get_variants(pieces, []),
                        'repo': repo,
                    })
        all_paths.append(entry)
    return all_paths
def get_variants(self, dirty, paths):
if len(dirty) == 1:
if dirty[0] not in paths:
paths.append(dirty[0])
else:
for i, stub in enumerate(dirty):
if i % 2 != 0:
for _variant in stub.split("|"):
_res = ''.join(dirty[:i]) + _variant
_res += dirty[i + 1] if len(dirty) > i else ''
_res = [_res]
if len(dirty) > i + 1:
_res += dirty[i + 2:]
# print(_res)
paths = self.get_variants(_res, paths)
break
return paths
def iter_packages_params(self, list_or_file_path, deps_content=None):
    """Yield per-package parameter dicts from any supported dependency source.

    Accepts a DependenciesContent blob, a dependencies-file path, a
    ``{'raw': [...]}`` dict, or an already-parsed iterable of dicts.
    """
    if deps_content is not None:
        # HACK for download with --dependencies-content and existed file dependencies.txt.lock
        list_or_file_path = deps_content
    if list_or_file_path.__class__ is DependenciesContent:
        # The dependencies-file content was passed directly instead of a path.
        for line_no, text_line in enumerate(list_or_file_path.splitlines()):
            yield self.get_package_params(line_no, text_line)
    elif isinstance(list_or_file_path, str):
        if not os.path.exists(list_or_file_path):
            raise CrosspmException(
                CROSSPM_ERRORCODE_FILE_DEPS_NOT_FOUND,
                'File not found: [{}]'.format(list_or_file_path),
            )
        with open(list_or_file_path, 'r') as deps_file:
            for line_no, text_line in enumerate(deps_file):
                text_line = text_line.strip()
                # Strip what looks like a UTF-8 BOM mis-decoded via cp1251 — TODO: why?
                if line_no == 0 and text_line.startswith(chr(1087) + chr(187) + chr(1111)):
                    text_line = text_line[3:]
                if not text_line or text_line.startswith(('#', '[',)):
                    continue
                yield self.get_package_params(line_no, text_line)
    elif isinstance(list_or_file_path, dict) and 'raw' in list_or_file_path:
        for raw_item in list_or_file_path['raw']:
            yield {k: self.parse_by_mask(k, v, False, True) for k, v in raw_item.items()}
    else:
        for item in list_or_file_path:
            yield item
def get_package_params(self, line_no, line):
_vars = {}
for i, v in enumerate(line.split()):
v = v.strip()
if v == '-':
v = None # get default value on next line
k, v = self._config.check_column_value(i, v, True)
if k:
_vars[k] = self.parse_by_mask(k, v, False, True)
if len(_vars) == 0:
raise CrosspmException(
CROSSPM_ERRORCODE_WRONG_SYNTAX,
'Nothing parsed at line {}: [{}]'.format(line_no, line.strip())
)
update_items = self._config.complete_params(_vars, False)
update_vars = {k: self.parse_by_mask(k, v, False, True) for k, v in update_items.items()}
# Expend default params to passed params
try:
update_vars = {k: v.format(**_vars) if isinstance(v, str) else v for k, v in update_vars.items()}
except Exception as e:
pass
self._config._log.info(
"We catch exception when try update defaults Params, don't use this functional. Message:\n {}".format(
repr(e)))
_vars.update(update_vars)
return _vars
def list_flatter(self, _src):
_res = []
for x in _src:
_res += self.list_flatter(x) if isinstance(x, (list, tuple)) else [x]
return _res
@staticmethod
def split_with_regexp(regexp, text):
prev_pos = 0
_res = []
for x in ([x.group()[1:-1].strip(), x.span()] for x in re.finditer(regexp, text)):
if x[1][0] > prev_pos:
_res += [[text[prev_pos:x[1][0]], False]]
_res += [[x[0], True]]
prev_pos = x[1][1]
if prev_pos < len(text):
_res += [[text[prev_pos:], False]]
return _res
def parse_value_template(self, value):
# _regexp = ''
must_not = self.split_with_regexp(r'\[.*?\]', value)
for i, x in enumerate(must_not):
must_not[i] = [self.split_with_regexp('{.*?}', x[0]), x[1]]
# _atom = '(?P<_1_int>[\\w*><=]+)'
return must_not
@staticmethod
@staticmethod
def split_fixed_pattern_with_file_name(path):
"""
Split path into fixed, masked parts and filename
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc100/x86/win
_file_name_pattern: boost.*.*.*.tar.gz
"""
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos)
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos + 1:]
_file_name_pattern_separator_pos = _path_pattern.rfind('/', 0)
_file_name_pattern = _path_pattern[_file_name_pattern_separator_pos + 1:]
if _path_pattern.find('*') == -1 or _file_name_pattern_separator_pos == -1:
_path_pattern = ""
else:
_path_pattern = _path_pattern[:_file_name_pattern_separator_pos]
return _path_fixed, _path_pattern, _file_name_pattern
def filter_one(self, packages, params, params_found):
# Pick the single best package out of *packages*: filter by the column
# templates (e.g. version masks), sort by self._sort, return element self._index.
# params: requested column values; params_found: per-path parsed values.
# Combine per-path discovered params with the requested ones (request wins nothing;
# found values take precedence).
def merge_params(path):
_res_params = {k: v for k, v in params_found.get(path, {}).items()}
_res_params.update({k: v for k, v in params.items() if k not in _res_params})
return _res_params
# Predicate: does one extended package satisfy every optional-part rule
# of every templated column?
def filter_fn(item):
_result = True
_atoms_found = item['params']
for _atom_name in item['columns']:
if _atom_name in _atoms_found:
_rules = params[_atom_name]
if not isinstance(_rules, (list, tuple)):
_rules = [_rules]
_vars = _atoms_found[_atom_name]
if not isinstance(_vars, (list, tuple)):
_vars = [_vars]
# i indexes the parsed atoms; it advances once per placeholder sub-part.
i = -1
for _column in item['columns'][_atom_name]:
for _sub_col in _column[0]:
if _sub_col[1]:
i += 1
if _column[1]:
_var = _vars[i] if len(_vars) > i else ''
_rule = _rules[i] if len(_rules) > i else ''
_is_var = (_var is not None) and (len(str(_var)) > 0)
if _rule is None:  # '2.3.*' - Include versions both with or without last part
pass
elif _rule == '' and _is_var and len(
str(_var)) > 0:  # '2.3.*-' - Do not include versions with last part
_result = False
break
elif _rule and not _is_var:  # '2.3.*-*' - Do not include versions without last part
_result = False
break
if not _result:
break
if not _result:
break
return _result
# Sort key: concatenation of the atoms of every column named in self._sort,
# with int-typed atoms coerced so numeric ordering wins over lexicographic.
def sorted_fn(item):
_result = []
_atoms_found = item['params']
for _atom_name in self._sort:
if _atom_name == '*':
# '*' pulls in every column not explicitly listed in the sort order.
_result += [_atoms_found[x] for x in _atoms_found if x not in self._sort]
else:
_atom_item = _atoms_found.get(_atom_name, [])
if isinstance(_atom_item, (list, tuple)):
if _atom_name in self._columns:
i = -1
for _column in item['columns'][_atom_name]:
for _sub_col in _column[0]:
if _sub_col[1]:
i += 1
# Coerce each atom to the type declared in the column template.
if _sub_col[0] == 'int':
try:
_atom_item[i] = int(_atom_item[i])
except ValueError:
_atom_item[i] = 0
elif _sub_col[0] == 'str':
try:
_atom_item[i] = str(_atom_item[i])
except ValueError:
_atom_item[i] = ''
_result += [_atoms_found.get(_atom_name, [])]
_result = [item for sublist in _result for item in sublist]
return _result
ext_packages = [{'params': merge_params(x), 'columns': self._columns, 'path': x} for x in packages]
# Filter by columns with parsing template (i.e. version)
filtered_packages = list(filter(
filter_fn,
ext_packages,
))
sorted_packages = sorted(
filtered_packages,
key=sorted_fn,
)
# self._index (default -1) picks the "best" entry; [] when nothing survived.
try:
result = sorted_packages[self._index]
except Exception:
result = []
return result
def get_full_package_name(self, package):
param_list = [x for x in self._config.get_fails('unique', {})]
if self._config.name_column not in param_list:
param_list.insert(0, self._config.name_column)
params = package.get_params(param_list)
pkg_name = '/'.join(self.merge_with_mask(x, params[x]) for x in param_list)
return pkg_name
def has_rule(self, rule_name):
res = False
if self._rules.get(rule_name, False):
res = True
return res
def get_params_from_properties(self, properties):
# Парсит свойства артефакта и выдаёт параметры
result = {y: properties.get(x, '') for x, y in self._usedby.get('property-parser', {}).items()}
return result
def get_params_from_path(self, path):
pattern = self._usedby.get('path-parser', None)
if pattern is None:
return {}
match = re.match(pattern, path)
if match is None:
return {}
return match.groupdict()
|
devopshq/crosspm | crosspm/helpers/parser.py | Parser.split_fixed_pattern_with_file_name | python | def split_fixed_pattern_with_file_name(path):
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos)
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos + 1:]
_file_name_pattern_separator_pos = _path_pattern.rfind('/', 0)
_file_name_pattern = _path_pattern[_file_name_pattern_separator_pos + 1:]
if _path_pattern.find('*') == -1 or _file_name_pattern_separator_pos == -1:
_path_pattern = ""
else:
_path_pattern = _path_pattern[:_file_name_pattern_separator_pos]
return _path_fixed, _path_pattern, _file_name_pattern | Split path into fixed, masked parts and filename
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc100/x86/win
_file_name_pattern: boost.*.*.*.tar.gz | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/parser.py#L924-L946 | null | class Parser:
def __init__(self, name, data, config):
self._rules_vars = {}
self._rules_vars_extra = {}
self._columns = {}
self._defaults = {}
self._defaults_masked = {}
self._col_types = []
self._name = name
self._sort = data.get('sort', [])
self._index = data.get('index', -1)
# Должно быть вида key: str_value
self._rules = {k: v for k, v in data.items() if k not in ['columns', 'index', 'sort', 'defaults', 'usedby']}
if 'columns' in data:
self._columns = {k: self.parse_value_template(v) for k, v in data['columns'].items() if v != ''}
self._config = config
self.init_rules_vars()
if 'defaults' in data:
self.init_defaults(data['defaults'])
self._usedby = data.get('usedby', None)
def get_usedby_aql(self, params):
"""
Возвращает запрос AQL (без репозитория), из файла конфигурации
:param params:
:return:
"""
if self._usedby is None:
return None
_result = {}
params = self.merge_valued(params)
for k, v in self._usedby['AQL'].items():
if isinstance(v, str):
k = k.format(**params)
v = v.format(**params)
_result[k] = v
return _result
def get_vars(self):
_vars = []
for _rule_vars in self._rules_vars.values():
_vars += [item for sublist in _rule_vars for item in sublist if item not in _vars]
# _vars += [x for x in _rule_vars if x not in _vars]
return _vars
def init_defaults(self, defaults):
if isinstance(defaults, dict):
self._defaults = {k: v for k, v in defaults.items()}
self._defaults_masked = {}
for _rules_name, _rules in self._rules.items():
if _rules_name == 'path':
continue
self._defaults_masked[_rules_name] = []
for _rule in _rules:
_rule_tmp = _rule
for _name in [x for x in re.findall('{.*?}', _rule_tmp)]:
_name_replace = _name[1:-1].split('|')[0]
_rule_tmp.replace(_name, _name_replace)
if _name_replace not in self._defaults:
self._defaults[_name_replace] = ''
_tmp = [x.strip() for x in _rule_tmp.split('=')]
_mask = re.sub('{.*?}', '*', _tmp[0])
_val = ''
_key = _tmp[0].format(**self._defaults)
if len(_tmp) > 1:
_val = _tmp[1].format(**self._defaults)
self._defaults_masked[_rules_name].append({'mask': _mask, 'key': _key, 'value': _val})
def init_rules_vars(self):
# Normalize every rule to a list of templates, drop empty templates, and
# extract the '{var}' placeholder names per rule into self._rules_vars.
# '{var|extra1|extra2}' placeholders are rewritten to '{var}' in the rule,
# with the extras recorded in self._rules_vars_extra[rule][z][var].
self._rules_vars = {}
for _name in self._rules:
if not isinstance(self._rules[_name], (list, tuple)):
self._rules[_name] = [self._rules[_name]]
# Remove falsy (empty) rule templates in a second pass to avoid
# mutating the list while iterating it.
to_del = []
for _rule in self._rules[_name]:
if not _rule:
to_del.append(_rule)
for _rule in to_del:
self._rules[_name].remove(_rule)
for z, _rule in enumerate(self._rules[_name]):
if _name not in self._rules_vars:
self._rules_vars[_name] = []
# Pad the per-rule lists so index z is always valid.
for i in range(len(self._rules_vars[_name]), z + 1):
self._rules_vars[_name].append([])
if _name not in self._rules_vars_extra:
self._rules_vars_extra[_name] = []
for i in range(len(self._rules_vars_extra[_name]), z + 1):
self._rules_vars_extra[_name].append({})
# Dict comprehension dedupes placeholder names while keeping order.
self._rules_vars[_name][z] = list({x[1:-1].strip(): 0 for x in re.findall('{.*?}', _rule)}.keys())
# self._rules_vars_extra[_name][z] = {}
for i, _item in enumerate(self._rules_vars[_name][z]):
_tmp = [x for x in _item.split('|') if x]
if len(_tmp) > 1:
# Rewrite '{var|extras}' -> '{var}' and remember the extras.
self._rules[_name][z] = self._rules[_name][z].replace(('{%s}' % _item), ('{%s}' % _tmp[0]))
self._rules_vars[_name][z][i] = _tmp[0]
self._rules_vars_extra[_name][z][_tmp[0]] = _tmp[1:]
# if len(_rules) == 1:
# _rules = _rules[0]
def parse_by_mask(self, column, value, types=False, ext_mask=False):
# см https://habrahabr.ru/post/269759/
if column not in self._columns:
return value # nothing to parse
if isinstance(value, list):
return value[:]
_res = []
# extMask = extMask and value == '*'
orig_value = value
rule_parsed = self._columns[column]
prev_om_sep = False
om_sep = ''
for _part in rule_parsed:
if _part[1]:
for _subpart in _part[0]:
if not _subpart[1]:
om_sep = _subpart[0]
break
# TODO: make parsing smarter
for x, _part in enumerate(rule_parsed):
for y, _subpart in enumerate(_part[0]):
if _subpart[1]:
_pos = -1
_sym = ''
if y < len(_part[0]) - 1:
_sym = _part[0][y + 1][0]
_pos = value.find(_sym)
cur_om_sep = _pos >= 0 and _part[1]
else:
cur_om_sep = False
z = x
while True:
if z < len(rule_parsed) - 1:
z += 1
_sym = rule_parsed[z][0][0][0]
_pos = value.find(_sym)
if _pos >= 0:
cur_om_sep = rule_parsed[z][1]
break
elif not rule_parsed[z][1]:
break
else:
break
if _pos >= 0:
_atom = value[:_pos]
value = value[_pos + len(_sym):]
else:
if ext_mask:
if orig_value == '*':
_atom = '*' if not _part[1] else None
elif _part[1]:
if prev_om_sep:
_atom = value # '2.3.*-*' - Do not include versions without last part
# _atom = '' # '2.3.*-' - Do not include versions with last part
else:
_atom = None # '2.3.*' - Include versions both with or without last part
else:
if om_sep:
_pos = value.find(om_sep)
if _pos >= 0:
_atom = value[:_pos]
if _atom != '*':
value = value[_pos:]
else:
_atom = value
if _atom != '*':
value = ''
else:
_atom = value
if _atom != '*':
value = ''
else:
_atom = value
value = ''
if types:
_res += [(_atom, _subpart[0])]
else:
_res += [_atom]
prev_om_sep = cur_om_sep
return _res
def merge_with_mask(self, column, value):
# Inverse of parse_by_mask: rebuild the masked string form of *value*
# (a list of atoms) using the column template; plain columns and scalar
# values pass through unchanged (lists are just joined).
if column not in self._columns:
if isinstance(value, (list, tuple)):
# TODO: Check for value is not None - if it is, raise "column value not set"
# if None in value:
# value = ['' if x is None else x for x in value]
value = ''.join(value)
return value  # nothing to parse
if not isinstance(value, (list, tuple)):
return value  # nothing to parse
_res = ''
# _res_tmp buffers separators of an optional group until we know the
# group's atom is actually present.
_res_tmp = ''
rule_parsed = self._columns[column]
_value = value
for _part in rule_parsed:
for _subpart in _part[0]:
if _subpart[1]:
# Placeholder sub-part: consume the next atom if available.
_exist = False
if len(_value) > 0:
if not _part[1]:
_exist = True
elif _value[0] not in ('', None):
# Optional group: only emit when the atom is non-empty.
_exist = True
if _exist:
_res_atom = str(_value[0])
if _part[1]:
if _res_atom in [None, '*', '']:
_res_tmp = ''
if _res and _res[-1] == '*':
# Collapse consecutive wildcards.
_res_atom = ''
_res += _res_tmp
_res_tmp = ''
_res += _res_atom
_value = _value[1:]
else:
_res_tmp = ''
if _part[1]:
_value = _value[1:]
# break
else:
# TODO: Error handling?
# break
pass
else:
# Literal sub-part: buffer it for optional groups, emit directly otherwise.
if _part[1]:
_res_tmp += _subpart[0]
else:
_res_tmp = ''
_res += _subpart[0]
return _res + _res_tmp
def validate_by_mask(self, column, value, param):
_res_value = []
if column not in self._columns:
_res = True # nothing to validate
_res_value = value
elif not isinstance(param, (list, tuple)):
_res = False
else:
_res = True
for i, (_tmp, tp) in enumerate(self.parse_by_mask(column, value, True)):
if tp == 'int':
try:
_tmp = int(_tmp)
except Exception:
_tmp = str(_tmp)
if not self.validate_atom(_tmp, param[i]):
_res = False
_res_value = []
break
else:
_res_value.append(_tmp)
return _res, _res_value
@staticmethod
def validate_atom(value, text):
_sign = ''
if text:
if text.startswith(('>=', '<=', '==',)):
_sign = text[:2]
text = text[2:]
elif text.startswith(('>', '<', '=',)):
_sign = text[:1]
text = text[1:]
if _sign == '=':
_sign = '=='
var1 = value
var2 = text if text else '*'
if isinstance(var1, int):
try:
var2 = int(var2)
if not _sign:
_sign = '=='
except Exception:
var1 = str(var1)
if _sign:
_match = eval('var1 {} var2'.format(_sign))
else:
_match = fnmatch.fnmatch(var1, var2)
return _match
def validate_path(self, path, params):
_rule_name = 'path'
def do_check(rule_number, _path):
def iter_with_extras(_col_name, _value0):
_res0 = [_value0]
if _col_name in self._rules_vars_extra[_rule_name][rule_number]:
_res0 += self._rules_vars_extra[_rule_name][rule_number][_col_name]
for _res1 in sorted(_res0, key=lambda x: 0 - len(x)):
yield _res1
def get_symbol_in_mask(_sym1, _val_mask):
_count = 0
_count_ext = 0
if _val_mask and isinstance(_val_mask, (list, tuple)):
for _xx in _val_mask:
for _yy in _xx[0]:
if not _yy[1]:
if _xx[1]:
_count_ext += _yy[0].count(_sym1)
else:
_count += _yy[0].count(_sym1)
return _count, _count_ext
def get_atom(_x, _y, _path0, _val_mask=None):
_pos = -1
if _y < len(_part[0]) - 1:
_sym0 = _part[0][_y + 1][0]
_sym_count, _sym_ext = get_symbol_in_mask(_sym0, _val_mask)
_pos0 = -1
for _xx in range(_sym_count + 1):
_pos0 = _path0.find(_sym0, _pos0 + 1)
if _pos0 < 0:
break
if _pos0 < 0 or _sym_ext == 0:
_pos = _pos0
else:
_pos = [_pos0]
for _xx in range(_sym_ext):
_pos0 = _path0.find(_sym0, _pos0 + 1)
if _pos0 < 0:
_pos += [len(_path0)]
break
else:
_pos += [_pos0]
elif _x < len(rule_parsed) - 1:
if rule_parsed[_x + 1][1]:
_tmp0 = [xx.strip() for xx in rule_parsed[_x + 1][0][0][0].split('|')]
else:
_tmp0 = [rule_parsed[_x + 1][0][0][0]]
for _sym0 in _tmp0:
_pos = _path0.find(_sym0)
if _pos >= 0:
break
if isinstance(_pos, int):
if _pos >= 0:
_atom0 = [{'atom': _path0[:_pos],
'path': _path0[_pos:],
}]
else:
_atom0 = [{'atom': _path0,
'path0': '',
}]
else:
_atom0 = [{'atom': _path0[:_pos[_xx]], 'path': _path0[_pos[_xx]:]} for _xx in range(len(_pos))]
return _atom0
_res = True
_new_path = ''
rule = self._rules[_rule_name][rule_number]
rule_parsed = self.parse_value_template(rule)
_res_params = {}
_res_params_raw = {}
for x, _part in enumerate(rule_parsed):
for y, _subpart in enumerate(_part[0]):
if _subpart[1]:
_value = params[_subpart[0]]
if _subpart[0] in self._columns:
# we have a mask to process
_atoms = get_atom(x, y, _path, self._columns[_subpart[0]])
_match = False
for _value_item in iter_with_extras(_subpart[0], _value):
for _atom_item in _atoms:
_atom = _atom_item['atom']
_valid, _valid_value = self.validate_by_mask(_subpart[0], _atom, _value_item)
if _valid:
_res_params[_subpart[0]] = _valid_value
_res_params_raw[_subpart[0]] = _atom
_new_path += _atom
_path = _atom_item['path']
_match = True
break
if _match:
break
if not _match:
return False, {}, {}
else:
if _value is None:
_match = False
for _value_item in sorted(self._config.get_values(_subpart[0]),
key=lambda x: 0 - len(x)):
_atom = _path[:len(_value_item)]
if fnmatch.fnmatch(_atom, _value_item): # may be just comparing would be better
_res_params[_subpart[0]] = _atom
_new_path += _atom
_path = _path[len(_value_item):]
_match = True
break
if not _match:
return False, {}, {}
else:
# it's a plain value
_plain = not any(x in _value for x in ('>=', '<=', '==', '>', '<', '=', '*'))
_mask = '*' in _value
if _plain or (_subpart[0] not in self._columns):
_match = False
if _mask:
# process masked values (ex. branch = release*)
_atoms = get_atom(x, y, _path)
for _value_item in iter_with_extras(_subpart[0], _value):
for _atom_item in _atoms:
_atom = _atom_item['atom']
if self.validate_atom(_atom, _value_item):
_res_params[_subpart[0]] = _atom
_new_path += _atom
_path = _atom_item['path']
_match = True
break
if _match:
break
else:
for _value_item in iter_with_extras(_subpart[0], _value):
_atom = _path[:len(_value_item)]
if fnmatch.fnmatch(_atom,
_value_item): # may be just comparing would be better
_res_params[_subpart[0]] = _atom
_new_path += _atom
_path = _path[len(_value_item):]
_match = True
break
if not _match:
return False, {}, {}
else:
_atoms = get_atom(x, y, _path, self._columns[_subpart[0]])
_match = False
for _value_item in iter_with_extras(_subpart[0], _value):
for _atom_item in _atoms:
_atom = _atom_item['atom']
if self.validate_atom(_atom, _value_item):
_res_params[_subpart[0]] = _atom
_new_path += _atom
_path = _atom_item['path']
_match = True
break
if _match:
break
if not _match:
return False, {}, {}
else:
# just part of template
_res = False
if _part[1]:
# square brackets means this part can be one of values
_tmp = [xx.strip() for xx in _subpart[0].split('|')]
else:
_tmp = [_subpart[0]]
for _sym in _tmp:
_atom = _path[:len(_sym)]
if fnmatch.fnmatch(_atom, _sym): # may be just comparing would be better
_path = _path[len(_sym):]
_new_path += _atom
_res = True
break
else:
# HACK for * in path when more than one folder use
# e.g.:
# _sym = /pool/*/
# _path = /pool/detects/e/filename.deb
try:
if '*' in _sym:
re_str = fnmatch.translate(_sym)
# \/pool\/.*\/\Z(?ms) => \/pool\/.*\/
if re_str.endswith('\\Z(?ms)'):
re_str = re_str[:-7]
found_str = re.match(re_str, _path).group()
_path = _path[len(found_str):]
_new_path += found_str
_res = True
break
except Exception as e:
logging.error("Something wrong when parse '{}' in '{}'".format(_sym, _path))
logging.exception(e)
if not _res:
return False, {}, {}
return _res, _res_params, _res_params_raw
_result = False
_result_params = {}
_result_params_raw = {}
# rule = self._rules[_rule_name]
# for _rule in self._rules[_rule_name]:
for i in range(len(self._rules[_rule_name])):
_ok, _params, _params_raw = do_check(i, str(path))
if _ok:
_result = True
_result_params.update({k: v for k, v in _params.items() if k not in _result_params})
_result_params_raw.update({k: v for k, v in _params_raw.items() if k not in _result_params_raw})
break
return _result, _result_params, _result_params_raw
def validate(self, value, rule_name, params, return_params=False):
# Если правила для валидации не заданы - говорим что пакет нам подходит
if rule_name not in self._rules:
return (True, {}) if return_params else True
if len(self._rules[rule_name]) == 0:
return (True, {}) if return_params else True
if self._rules[rule_name] is None:
return (True, {}) if return_params else True
_valid = True
_result_params = {}
# Все возможные вариант rule. Для properties - bad, snapshot, etc...
# Но содержим массив массивов
_all_dirties = self.fill_rule(rule_name, params, return_params=True, return_defaults=True)
for _dirties in _all_dirties:
# _dirties - набор конкретных правил для валидации
_res_sub = False
_res_sub_params = {}
for _dirt in _dirties:
# _dirty - Одно "возможное" правило?
_res_var = False
# TODO: Use split_with_regexp() instead
_dirty = [x.split(']') for x in _dirt['var'].split('[')]
_dirty = self.list_flatter(_dirty)
_variants = self.get_variants(_dirty, [])
if isinstance(value, str):
_res_var = value in _variants
elif isinstance(value, (list, tuple)):
_res_var = False
for _variant in _variants:
if _variant in value:
_res_var = True
break
elif isinstance(value, dict):
_key = ''
if 'mask' in _dirt.get('default', {}):
_mask = _dirt['default'].get('mask', '')
if len(fnmatch.filter(value.keys(), _mask)) == 0:
_key = _dirt['default'].get('key', '')
value[_key] = [_dirt['default'].get('value', '')]
for _variant in _variants:
_tmp = [x.strip() for x in _variant.split('=')]
_tmp = [x if len(x) > 0 else '*' for x in _tmp]
_key_list = fnmatch.filter(value.keys(), _tmp[0])
if len(_key_list) == 0 and '*' in _key:
_key_list = [_key]
for _key in _key_list:
if len(_tmp) > 1:
_tmp_val = value[_key]
if isinstance(_tmp_val, str):
_tmp_val = [_tmp_val]
elif not isinstance(_tmp_val, (list, tuple, dict)):
raise CrosspmException(
CROSSPM_ERRORCODE_CONFIG_FORMAT_ERROR,
'Parser rule for [{}] not able to process [{}] data type.'.format(
rule_name, type(_tmp_val))
)
if len(fnmatch.filter(_tmp_val, _tmp[1])) > 0:
_res_var = True
break
else:
_res_var = True
break
else:
raise CrosspmException(
CROSSPM_ERRORCODE_CONFIG_FORMAT_ERROR,
'Parser rule for [{}] not able to process [{}] data type.'.format(rule_name, type(value))
)
_res_sub = _res_sub or _res_var
if _res_sub:
_res_sub_params = _dirt['params']
break
_valid = _valid and _res_sub
if _valid:
_result_params.update(_res_sub_params)
return (_valid, _result_params) if return_params else _valid
def iter_matched_values(self, column_name, value):
_values = self._config.get_values(column_name)
for _value in _values:
if (value is None) or (self.values_match(_value, value, _values)):
if isinstance(_values, dict):
_value = _values[_value]
yield _value
@staticmethod
def values_match(_value, value, _values=None):
if value is None:
return _value is None
_sign = ''
if value.startswith(('>=', '<=', '==',)):
_sign = value[:2]
value = value[2:]
elif value.startswith(('>', '<', '=',)):
_sign = value[:1]
value = value[1:]
if _sign == '=':
_sign = '=='
var1, var2 = _value, value
if isinstance(_values, dict):
var2 = 0 if isinstance(var1, int) else ''
for k, v in _values.items():
if value == v:
var2 = k
break
if int in [type(var1), type(var2)]:
try:
var1a = int(var1)
var2a = int(var2)
if not _sign:
_sign = '=='
except Exception:
var1a = str(var1)
var2a = str(var2)
var1, var2 = var1a, var2a
if _sign:
_match = eval('var1 {} var2'.format(_sign))
else:
_match = fnmatch.fnmatch(var1, var2)
return _match
def fill_rule(self, rule_name, params, return_params=False, return_defaults=False):
def fill_rule_inner(_cols, _params_inner, _pars=None):
if _pars is None:
_pars = {}
for _cl in _cols:
for _val in _cl[1]:
_pars[_cl[0]] = _val
if len(_cols) > 1:
_params_inner = fill_rule_inner(_cols[1:], _params_inner, _pars)
else:
_params_inner.append({k: v for k, v in _pars.items()})
break
return _params_inner
_res = []
for z in range(len(self._rules_vars[rule_name])):
_res_part = []
_params = {k: v for k, v in params.items()}
_columns = []
for _col, _valued in self._config.iter_valued_columns2(self._rules_vars[rule_name][z]):
if _valued:
_columns += [[_col, [x for x in self.iter_matched_values(_col, params[_col])]]]
else:
if not isinstance(params[_col], (list, tuple)):
_tmp = [params[_col]]
else:
_tmp = [x for x in params[_col]]
for i, _tmp_item in enumerate(_tmp):
if _tmp_item and _tmp_item.startswith(('>=', '<=', '==', '>', '<', '=',)):
_tmp[i] = '*'
_params[_col] = self.merge_with_mask(_col, _tmp)
if _col in self._rules_vars_extra[rule_name][z]:
if len(self._rules_vars_extra[rule_name][z][_col]) > 0:
_params[_col] = '[%s%s]' % (
_params[_col], ''.join('|{}'.format(x) for x in self._rules_vars_extra[rule_name][z][_col]))
for _par in fill_rule_inner(_columns, []):
_params.update(_par)
_var = self._rules[rule_name][z].format(**_params)
if return_params or return_defaults:
_tmp_res_part = {'var': _var}
if return_params:
_tmp_res_part['params'] = {k: v for k, v in _par.items()}
if return_defaults and rule_name in self._defaults_masked:
_tmp_res_part['default'] = self._defaults_masked[rule_name][z]
else:
_tmp_res_part['default'] = {}
_res_part += [_tmp_res_part]
else:
_res_part += [_var]
if len(_res_part) == 0:
_var = self._rules[rule_name][z].format(**_params)
if return_params or return_defaults:
_tmp_res_part = {'var': _var}
if return_params:
_tmp_res_part['params'] = {}
if return_defaults and rule_name in self._defaults_masked:
_tmp_res_part['default'] = self._defaults_masked[rule_name][z]
else:
_tmp_res_part['default'] = {}
_res_part += [_tmp_res_part]
else:
_res_part += [_var]
_res += [_res_part]
return _res
def merge_valued(self, params):
result = {}
for k, v in self._config.iter_valued_columns2(params.keys()):
if not v:
result[k] = self.merge_with_mask(k, params[k])
return result
def get_params_with_extra(self, rule_name, params):
"""
Get params with extra, like 'any'
:param rule_name: 'path'
:param params: default params
:return: list of combination params
"""
# HACK for prefer-local
result = []
extra_params = self._rules_vars_extra.get(rule_name, {})[0]
_tmp_params = copy.deepcopy(params)
_fixed_params = {}
# Save params with list type - this type not changed
for key, value in _tmp_params.items():
if isinstance(value, list):
_fixed_params[key] = value
_tmp_params = {k: v for k, v in _tmp_params.items() if k not in _fixed_params}
# extend with extra_vars - like 'any'
for key, value in _tmp_params.items():
if not isinstance(value, list) and key:
_tmp_params[key] = list([value])
if key in extra_params:
_tmp_params[key].extend(extra_params[key])
# get combinations
keys = sorted(_tmp_params)
combinations = itertools.product(*(_tmp_params[x] for x in keys))
for comb in combinations:
_dict = dict(zip(keys, comb))
_dict.update(_fixed_params)
result.append(_dict)
return result
def get_paths(self, list_or_file_path, source):
    """Build every concrete repository path for each dependency record.

    Returns None when no 'path' rule is configured; otherwise a list of
    dicts with the per-package params and the expanded path variants
    grouped per repository.
    """
    if 'path' not in self._rules:
        return None
    all_paths = []
    for pkg_params in self.iter_packages_params(list_or_file_path):
        requested_repo = pkg_params['repo']
        if requested_repo is None or requested_repo == '*':
            repos = source.args['repo']
        elif requested_repo not in source.args['repo']:
            # Package is pinned to a repo this source does not serve.
            continue
        else:
            repos = [requested_repo]
        pkg_params['server'] = source.args['server']
        entry = {
            'params': {k: v for k, v in pkg_params.items() if k != 'repo'},
            'paths': [],
        }
        for repo in repos:
            pkg_params['repo'] = repo
            for rule_variants in self.fill_rule('path', pkg_params):
                for raw_path in rule_variants:
                    # TODO: Use split_with_regexp() instead
                    pieces = self.list_flatter([x.split(']') for x in raw_path.split('[')])
                    entry['paths'].append({
                        'paths': self.get_variants(pieces, []),
                        'repo': repo,
                    })
        all_paths.append(entry)
    return all_paths
def get_variants(self, dirty, paths):
if len(dirty) == 1:
if dirty[0] not in paths:
paths.append(dirty[0])
else:
for i, stub in enumerate(dirty):
if i % 2 != 0:
for _variant in stub.split("|"):
_res = ''.join(dirty[:i]) + _variant
_res += dirty[i + 1] if len(dirty) > i else ''
_res = [_res]
if len(dirty) > i + 1:
_res += dirty[i + 2:]
# print(_res)
paths = self.get_variants(_res, paths)
break
return paths
def iter_packages_params(self, list_or_file_path, deps_content=None):
    """Yield per-package parameter dicts from any supported dependency source.

    Accepts a DependenciesContent blob, a dependencies-file path, a
    ``{'raw': [...]}`` dict, or an already-parsed iterable of dicts.
    """
    if deps_content is not None:
        # HACK for download with --dependencies-content and existed file dependencies.txt.lock
        list_or_file_path = deps_content
    if list_or_file_path.__class__ is DependenciesContent:
        # The dependencies-file content was passed directly instead of a path.
        for line_no, text_line in enumerate(list_or_file_path.splitlines()):
            yield self.get_package_params(line_no, text_line)
    elif isinstance(list_or_file_path, str):
        if not os.path.exists(list_or_file_path):
            raise CrosspmException(
                CROSSPM_ERRORCODE_FILE_DEPS_NOT_FOUND,
                'File not found: [{}]'.format(list_or_file_path),
            )
        with open(list_or_file_path, 'r') as deps_file:
            for line_no, text_line in enumerate(deps_file):
                text_line = text_line.strip()
                # Strip what looks like a UTF-8 BOM mis-decoded via cp1251 — TODO: why?
                if line_no == 0 and text_line.startswith(chr(1087) + chr(187) + chr(1111)):
                    text_line = text_line[3:]
                if not text_line or text_line.startswith(('#', '[',)):
                    continue
                yield self.get_package_params(line_no, text_line)
    elif isinstance(list_or_file_path, dict) and 'raw' in list_or_file_path:
        for raw_item in list_or_file_path['raw']:
            yield {k: self.parse_by_mask(k, v, False, True) for k, v in raw_item.items()}
    else:
        for item in list_or_file_path:
            yield item
def get_package_params(self, line_no, line):
_vars = {}
for i, v in enumerate(line.split()):
v = v.strip()
if v == '-':
v = None # get default value on next line
k, v = self._config.check_column_value(i, v, True)
if k:
_vars[k] = self.parse_by_mask(k, v, False, True)
if len(_vars) == 0:
raise CrosspmException(
CROSSPM_ERRORCODE_WRONG_SYNTAX,
'Nothing parsed at line {}: [{}]'.format(line_no, line.strip())
)
update_items = self._config.complete_params(_vars, False)
update_vars = {k: self.parse_by_mask(k, v, False, True) for k, v in update_items.items()}
# Expend default params to passed params
try:
update_vars = {k: v.format(**_vars) if isinstance(v, str) else v for k, v in update_vars.items()}
except Exception as e:
pass
self._config._log.info(
"We catch exception when try update defaults Params, don't use this functional. Message:\n {}".format(
repr(e)))
_vars.update(update_vars)
return _vars
def list_flatter(self, _src):
    """Recursively flatten nested lists/tuples into one flat list."""
    flat = []
    for element in _src:
        if isinstance(element, (list, tuple)):
            flat.extend(self.list_flatter(element))
        else:
            flat.append(element)
    return flat
@staticmethod
def split_with_regexp(regexp, text):
    """Split *text* into [fragment, matched] pairs.

    Fragments matched by *regexp* are stripped of their single-character
    delimiters and flagged True; the literal text between matches is
    kept verbatim and flagged False.
    """
    pieces = []
    cursor = 0
    for m in re.finditer(regexp, text):
        start, end = m.span()
        if start > cursor:
            pieces.append([text[cursor:start], False])
        pieces.append([m.group()[1:-1].strip(), True])
        cursor = end
    if cursor < len(text):
        pieces.append([text[cursor:], False])
    return pieces
def parse_value_template(self, value):
    """Split *value* by ``[...]`` groups, then split each piece by ``{...}``.

    :return: list of ``[sub_fragments, was_bracketed]`` pairs where each
        ``sub_fragments`` is itself a list of ``[text, was_braced]`` pairs.
    """
    fragments = self.split_with_regexp(r'\[.*?\]', value)
    return [[self.split_with_regexp('{.*?}', text), bracketed]
            for text, bracketed in fragments]
@staticmethod
def split_fixed_pattern(path):
    """Split *path* into its fixed prefix and the masked remainder.

    :param path: e.g.
        https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
    :return: tuple ``(fixed, pattern)`` — everything up to (and including)
        the last '/' before the first '*', and the rest.
    """
    star_at = path.find('*')
    cut = path.rfind('/', 0, star_at) + 1
    return path[:cut], path[cut:]
def filter_one(self, packages, params, params_found):
    """Choose the single best candidate path out of *packages*.

    Candidate parameters are merged (per-path values from *params_found*
    win over *params*), filtered against the per-column template rules,
    sorted according to ``self._sort`` and the entry at ``self._index``
    is returned (an empty list when nothing matches or the index is out
    of range).

    Fix: removed the spurious ``@staticmethod`` decorator — the method
    takes and uses ``self`` (``self._columns``/``self._sort``/
    ``self._index``), so as a static method every instance call would
    shift its arguments by one position.
    """

    def merge_params(path):
        # Per-path parameters take precedence; generic params fill gaps.
        _res_params = {k: v for k, v in params_found.get(path, {}).items()}
        _res_params.update({k: v for k, v in params.items() if k not in _res_params})
        return _res_params

    def filter_fn(item):
        # Apply the version-template rules placeholder by placeholder.
        _result = True
        _atoms_found = item['params']
        for _atom_name in item['columns']:
            if _atom_name in _atoms_found:
                _rules = params[_atom_name]
                if not isinstance(_rules, (list, tuple)):
                    _rules = [_rules]
                _vars = _atoms_found[_atom_name]
                if not isinstance(_vars, (list, tuple)):
                    _vars = [_vars]
                i = -1
                for _column in item['columns'][_atom_name]:
                    for _sub_col in _column[0]:
                        if _sub_col[1]:
                            i += 1
                            if _column[1]:
                                _var = _vars[i] if len(_vars) > i else ''
                                _rule = _rules[i] if len(_rules) > i else ''
                                _is_var = (_var is not None) and (len(str(_var)) > 0)
                                if _rule is None:  # '2.3.*' - Include versions both with or without last part
                                    pass
                                elif _rule == '' and _is_var and len(
                                        str(_var)) > 0:  # '2.3.*-' - Do not include versions with last part
                                    _result = False
                                    break
                                elif _rule and not _is_var:  # '2.3.*-*' - Do not include versions without last part
                                    _result = False
                                    break
                    if not _result:
                        break
                if not _result:
                    break
        return _result

    def sorted_fn(item):
        # Build the sort key according to self._sort ('*' = all other atoms).
        _result = []
        _atoms_found = item['params']
        for _atom_name in self._sort:
            if _atom_name == '*':
                _result += [_atoms_found[x] for x in _atoms_found if x not in self._sort]
            else:
                _atom_item = _atoms_found.get(_atom_name, [])
                if isinstance(_atom_item, (list, tuple)):
                    if _atom_name in self._columns:
                        # Coerce placeholder values to their declared type
                        # so the sort compares ints numerically.
                        i = -1
                        for _column in item['columns'][_atom_name]:
                            for _sub_col in _column[0]:
                                if _sub_col[1]:
                                    i += 1
                                    if _sub_col[0] == 'int':
                                        try:
                                            _atom_item[i] = int(_atom_item[i])
                                        except ValueError:
                                            _atom_item[i] = 0
                                    elif _sub_col[0] == 'str':
                                        try:
                                            _atom_item[i] = str(_atom_item[i])
                                        except ValueError:
                                            _atom_item[i] = ''
                _result += [_atoms_found.get(_atom_name, [])]
        _result = [elem for sublist in _result for elem in sublist]
        return _result

    ext_packages = [{'params': merge_params(x), 'columns': self._columns, 'path': x} for x in packages]
    # Filter by columns with parsing template (i.e. version)
    filtered_packages = list(filter(
        filter_fn,
        ext_packages,
    ))
    sorted_packages = sorted(
        filtered_packages,
        key=sorted_fn,
    )
    try:
        result = sorted_packages[self._index]
    except Exception:
        result = []
    return result
def get_full_package_name(self, package):
    """Build the unique '/'-joined display name for *package*.

    Joins the masked values of every 'unique' fail-check column, always
    leading with the configured name column.
    """
    columns = [x for x in self._config.get_fails('unique', {})]
    if self._config.name_column not in columns:
        columns.insert(0, self._config.name_column)
    values = package.get_params(columns)
    return '/'.join(self.merge_with_mask(col, values[col]) for col in columns)
def has_rule(self, rule_name):
    """Return True when *rule_name* is present and truthy in self._rules."""
    return bool(self._rules.get(rule_name, False))
def get_params_from_properties(self, properties):
    """Map artifact *properties* onto package params via 'property-parser'.

    Missing properties default to '' so every configured param is present.
    """
    mapping = self._usedby.get('property-parser', {})
    return {param: properties.get(prop, '') for prop, param in mapping.items()}
def get_params_from_path(self, path):
    """Extract package params from *path* using the 'path-parser' regexp.

    Returns {} when no parser is configured or the path does not match;
    otherwise the named groups of the match.
    """
    pattern = self._usedby.get('path-parser', None)
    if pattern is None:
        return {}
    matched = re.match(pattern, path)
    return matched.groupdict() if matched else {}
|
def register_output_format(name):
    """Decorator: register the wrapped function as renderer for *name*.

    The undecorated function is stored in the module-level
    ``_output_format_map`` so ``Output.write_output`` can dispatch on the
    requested output-format name.

    Fix: the returned wrapper now carries the wrapped function's metadata
    (``__name__``/``__doc__``) via ``functools.wraps``.
    """
    import functools

    def check_decorator(fn):
        _output_format_map[name] = fn

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)

        return wrapper

    return check_decorator
import json
import logging
import os
import re
import sys
from jinja2 import Environment, FileSystemLoader
from collections import OrderedDict
from crosspm.helpers.exceptions import *
from crosspm.helpers.parser import Parser
_output_format_map = {} # Contain map for output format, load from decorator
(
PLAIN,
DICT,
LIST
) = range(3)
class OutFormat:
    """Value wrapper adding custom conversion specs to str.format.

    Specs are dot-separated and applied left to right, e.g.
    ``'{:upper.quote}'``. Supported: upper, lower, quote, unquote, safe
    (identifier-safe), path (escape backslashes and quotes). When
    *esc_path* is True, 'path' is applied first unless already requested.
    """

    def __init__(self, value, esc_path=False):
        self._value = value
        self._esc_path = esc_path

    def __format__(self, fmt):
        result = self._value
        specs = fmt.split('.')
        if self._esc_path and ('path' not in specs):
            specs.insert(0, 'path')
        for spec in specs:
            if spec == 'upper':
                result = str(result).upper()
            elif spec == 'lower':
                result = str(result).lower()
            elif spec == 'quote':
                result = '"{}"'.format(result)
            elif spec == 'unquote':
                result = str(result).strip('"')
            elif spec == 'safe':
                # Keep only identifier characters; prefix '_' when the
                # result would not start like an identifier.
                result = str(result).replace('-', '_')
                result = re.sub(r'[^A-Z_a-z_0-9]', '', result)
                if result and not (result[0].isalpha() or result[0] == '_'):
                    result = '_' + result
            elif spec == 'path':
                result = str(result).replace('\\\\', '\\').replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
        return str(result)
class Output:
_output_config = {
'root': {'PACKAGES_ROOT'},
# 'key': 'package',
'value': 'path',
# 'columns': [
# {
# 'column': 'package',
# 'value': '{:upper}_ROOT',
# },
# {
# 'column': 'path',
# 'value': '{}',
# }
# ]
}
_name_column = ''
_columns = []
def __init__(self, data=None, name_column='', config=None):
    """Build output configuration from the config section or defaults.

    :param data: 'output' section of the crosspm config (dict); when
        given it replaces the class-level default ``_output_config``
    :param name_column: column used as the package key (e.g. 'package')
    :param config: crosspm Config object, used by some format renderers
    """
    self._log = logging.getLogger('crosspm')
    self._config = config
    # NOTE(review): when `data` is not passed, the class-level
    # `_output_config` dict is mutated in place and therefore shared by
    # every Output instance in the process — confirm this is intended.
    if name_column:
        self._name_column = name_column
        self._output_config['key'] = name_column
    if data and isinstance(data, dict):
        self._output_config = data
        # self.init_config()
    if 'columns' not in self._output_config:
        self._output_config['columns'] = []
    if len(self._output_config['columns']) == 0:
        # No columns configured: fall back to a two-column layout mapping
        # "<NAME>_ROOT" onto the package path.
        # TODO: implement this:
        # if config.no_fails:
        #     param_list = [x for x in config.get_fails('unique', {})]
        #     if self._name_column not in param_list:
        #         param_list.insert(0, self._name_column)
        #     pkg_name = '/'.join('{%s:upper}' % x for x in param_list)
        # else:
        pkg_name = '{:upper}_ROOT'
        self._output_config['columns'] = [
            {
                'column': self._name_column,
                'value': pkg_name,
            },
            {
                'column': 'path',
                'value': '{}',
            },
        ]
    self.init_config()
    if 'columns' not in self._output_config:
        self._output_config['columns'] = []
def init_config(self):
    """Normalize ``_output_config``: derive type, root, key and columns.

    - The Python type of 'root' decides the output shape: str -> PLAIN,
      dict/set -> DICT, list/tuple -> LIST (first element becomes the
      root name for DICT/LIST).
    - Every column entry gets defaults for 'value'/'name'/'column'; a
      column name may also be embedded in the 'name' template, e.g.
      '{package:upper}' sets column 'package'.
    - When 'key' is not itself one of the columns, the first other
      column becomes the key.
    """
    root = self._output_config.get('root', '')
    if isinstance(root, str):
        self._output_config['type'] = PLAIN
    elif isinstance(root, (dict, set)):
        self._output_config['type'] = DICT
        root = [x for x in root]
        self._output_config['root'] = root[0] if len(root) > 0 else ''
    elif isinstance(root, (list, tuple)):
        self._output_config['type'] = LIST
        self._output_config['root'] = root[0] if len(root) > 0 else ''
    key = self._output_config.get('key', '')
    key0 = None  # first non-key column seen; fallback key
    if 'columns' in self._output_config:
        self._columns = []
        for item in self._output_config['columns']:
            if not item.get('value', ''):
                item['value'] = '{}'
            if not item.get('name', ''):
                item['name'] = '{}'
            if not item.get('column', ''):
                item['column'] = ''
            # Pull the column name out of the first '{col:...}' template
            # found in the 'name' string, if any.
            for cl in [y for y in [x[0] for x in Parser.split_with_regexp('{.*?}', item['name']) if x[1]] if y]:
                col = cl.split(':')[0]
                if col:
                    item['column'] = col
                    break
            if item['column']:
                self._columns.append(item['column'])
                if key == item['column']:
                    key0 = ''  # configured key exists among columns
                elif key0 is None:
                    key0 = item['column']
    if key0:
        key = key0
    self._output_config['key'] = key
    if self._columns:
        if self._name_column and self._name_column not in self._columns:
            self._columns.append(self._name_column)
    else:
        if not self._name_column:
            self._name_column = 'package'
    if 'value' not in self._output_config:
        self._output_config['value'] = ''
@staticmethod
def get_var_name(pkg_name):
    """Turn a package name into an UPPER_SNAKE, identifier-safe variable name."""
    return format(OutFormat(pkg_name), 'upper.safe')
def write_to_file(self, text, out_file_path):
    """Write *text* to *out_file_path*, creating parent dirs as needed.

    :raises CrosspmException: when the parent path exists but is a file.
    """
    out_dir_path = os.path.dirname(out_file_path)

    if not os.path.exists(out_dir_path):
        self._log.info('mkdirs [%s] ...', out_dir_path)
        os.makedirs(out_dir_path)

    elif not os.path.isdir(out_dir_path):
        raise CrosspmException(
            CROSSPM_ERRORCODE_FILE_IO,
            'Unable to make directory [{}]. File with the same name exists'.format(
                out_dir_path
            ))

    with open(out_file_path, 'w+') as f:
        f.write(text)
@staticmethod
def get_output_types():
    """Names of all registered output formats."""
    return [fmt_name for fmt_name in _output_format_map]
def write_output(self, params, packages):
    """Dispatch to the renderer registered for ``params['out_format']``.

    The renderer's text result (if any) is written to ``params['output']``.

    :param params: options dict; must contain 'out_format' and, when the
        renderer returns text, an 'output' file path
    :param packages: resolved packages to render
    :raises CrosspmException: when the format name is not registered
    """
    if params['out_format'] not in _output_format_map:
        raise CrosspmException(
            CROSSPM_ERRORCODE_UNKNOWN_OUT_TYPE,
            'Unknown out_format: [{}]'.format(params['out_format']),
        )
    f = _output_format_map[params['out_format']]
    result = f(self, packages, **params)
    if result:
        out_file_path = os.path.realpath(os.path.expanduser(params['output']))
        self.write_to_file(result, out_file_path)
        self._log.info(
            'Write packages info to file [%s]\ncontent:\n\n%s',
            out_file_path,
            result,
        )
def format_column(self, column, name, value):
    """Apply the configured name/value templates of *column*.

    Inputs pass through unchanged when the column is not configured.
    :return: dict with formatted 'name' and 'value'.
    """
    for rule in self._output_config['columns']:
        if rule['column'] != column:
            continue
        name = rule['name'].format(OutFormat(name))
        value = rule['value'].format(OutFormat(value))
        break
    return {'name': name, 'value': value}
@register_output_format('stdout')
def output_format_stdout(self, packages, **kwargs):
    """Print "NAME: value" lines for every package straight to stdout."""
    self._output_config['type'] = PLAIN
    flat = self.output_format_module(packages)
    for pkg_key, pkg_value in flat.items():
        sys.stdout.write('{}: {}\n'.format(self.get_var_name(pkg_key), pkg_value))
    sys.stdout.flush()
    return None
@register_output_format('shell')
def output_format_shell(self, packages, **kwargs):
    """Render packages as shell variable assignments (NAME='value')."""
    self._output_config['type'] = PLAIN
    flat = self.output_format_module(packages)
    chunks = ['']  # leading blank line, as in the historic output
    for pkg_key, pkg_value in flat.items():
        chunks.append("{}='{}'".format(self.get_var_name(pkg_key), pkg_value))
    chunks.append('')
    return '\n'.join(chunks) + '\n'
@register_output_format('jinja')
def output_format_jinja(self, packages, output_template, **kwargs):
    """Render *packages* through the Jinja2 template at *output_template*.

    The template receives a single ``packages`` variable.
    """
    output_template = os.path.realpath(output_template)
    template_dir, template_name = os.path.split(output_template)
    j2_env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)
    result = j2_env.get_template(template_name).render(packages=packages)
    return result
@register_output_format('cmd')
def output_format_cmd(self, packages, **kwargs):
    """Render packages as Windows cmd assignments (set NAME=value)."""
    self._output_config['type'] = PLAIN
    flat = self.output_format_module(packages)
    chunks = ['']  # leading blank line, as in the historic output
    for pkg_key, pkg_value in flat.items():
        chunks.append('set {}={}'.format(self.get_var_name(pkg_key), pkg_value))
    chunks.append('')
    return '\n'.join(chunks) + '\n'
@register_output_format('lock')
def output_format_lock(self, packages, **kwargs):
    """Render packages as a fixed-width lock file (text for *.lock)."""
    self._output_config['type'] = PLAIN
    text = ''
    tmp_packages = OrderedDict()
    columns = self._config.get_columns()
    # column -> widest cell seen; the first column gets +1 so the data
    # rows line up under the '#' that prefixes the header.
    widths = {}
    for _pkg in packages.values():
        _pkg_name = _pkg.package_name
        _params = _pkg.get_params(columns, merged=True, raw=False)
        if _pkg_name not in tmp_packages:
            tmp_packages[_pkg_name] = _params
            comment = 1
            for _col in columns:
                widths[_col] = max(widths.get(_col, len(_col)), len(str(_params.get(_col, '')))) + comment
                comment = 0
    # Header line: '#col1  col2 ...' padded to the column widths.
    comment = 1
    for _col in columns:
        text += '{}{} '.format(_col, ' ' * (widths[_col] - len(_col) - comment))
        comment = 0
    text = '#{}\n'.format(text.strip())
    # One padded row per package, sorted case-insensitively by name.
    for _pkg_name in sorted(tmp_packages, key=lambda x: str(x).lower()):
        _pkg = tmp_packages[_pkg_name]
        line = ''
        for _col in columns:
            line += '{}{} '.format(_pkg[_col], ' ' * (widths[_col] - len(str(_pkg[_col]))))
        text += '{}\n'.format(line.strip())
    return text
def output_format_module(self, packages, esc_path=False):
    """Flatten the package tree into template-formatted rows.

    Children are emitted before their parents (depth-first, child first).

    :param packages: mapping of name -> package; each package may carry a
        nested ``.packages`` mapping
    :param esc_path: escape backslashes/quotes in 'path' column values
    :return: a list of row dicts when the output type is LIST, otherwise
        an OrderedDict keyed by the configured 'key' column
    """

    def create_ordered_list(packages_):
        """Recurse into package.packages, children first."""
        list_ = []
        for _pkg_name in packages_:
            _pkg = packages_[_pkg_name]
            if _pkg and _pkg.packages:
                list_.extend(create_ordered_list(_pkg.packages))
            if _pkg:
                _pkg_params = _pkg.get_params(self._columns, True)
                _res_item = {}
                for item in self._output_config['columns']:
                    name = item['name'].format(OutFormat(item['column']))
                    value = _pkg_params.get(item['column'], '')
                    if not isinstance(value, (list, dict, tuple)):
                        try:
                            value = item['value'].format(
                                OutFormat(value, (item['column'] == 'path') if esc_path else False))
                        except Exception:
                            # Unformattable value -> blank cell.
                            value = ''
                            # TODO: implement this:
                            # if not value:
                            #     try:
                            #         value = item['value'].format(OutFormat(_pkg.get_params('', True)))
                            #     except Exception as e:
                            #         pass
                    _res_item[name] = value
                list_.append(_res_item)
        return list_

    result_list = create_ordered_list(packages, )
    if self._output_config['type'] == LIST:
        return result_list
    result = OrderedDict()
    for item in result_list:
        # TODO: Error handling
        name = item[self._output_config['key']]
        if self._output_config['value']:
            value = item[self._output_config['value']]
        else:
            # No single value column: keep every non-key column as a dict.
            value = OrderedDict([(k, v) for k, v in item.items() if k != self._output_config['key']])
        result[name] = value
    return result
@register_output_format('python')
def output_format_python(self, packages, **kwargs):
    """Render packages as an importable Python module source string."""

    def get_value(_v):
        # Literal form: numbers/containers verbatim, everything else quoted.
        if isinstance(_v, (int, float, bool)):
            _res = '{}'.format(str(_v))
        elif isinstance(_v, (dict, tuple, list)):
            _res = '{}'.format(str(_v))
        else:
            _res = "'{}'".format(str(_v))
        return _res

    def items_iter(_dict_or_list):
        # Dicts iterate case-insensitively sorted; lists keep their order.
        if isinstance(_dict_or_list, dict):
            for k in sorted(_dict_or_list, key=lambda x: str(x).lower()):
                yield k
        else:
            for k in _dict_or_list:
                yield k

    result = '# -*- coding: utf-8 -*-\n\n'
    res = ''
    dict_or_list = self.output_format_module(packages, True)
    for item in items_iter(dict_or_list):
        if self._output_config['type'] == LIST:
            # One dict literal per package inside the root list.
            res += ' {\n'
            for k, v in item.items():
                res += " '{}': {},\n".format(k, get_value(v))
            res += ' },\n'
        elif self._output_config['type'] == DICT:
            # One "'name': value" entry per package inside the root dict.
            res += " '{}': ".format(item)
            if isinstance(dict_or_list[item], dict):
                res += '{\n'
                for k, v in dict_or_list[item].items():
                    res += " '{}': {},\n".format(k, get_value(v))
                res += ' },\n'
            elif isinstance(dict_or_list[item], (list, tuple)):
                res += '[\n'
                for v in dict_or_list[item]:
                    res += " {},\n".format(get_value(v))
                res += ' ],\n'
            else:  # str
                res += "{},\n".format(get_value(dict_or_list[item]))
        else:
            # PLAIN: one top-level "NAME = value" assignment per package.
            # NOTE(review): the second .format() argument is unused — the
            # template has a single placeholder; the value is emitted below.
            res += '{} = '.format(self.get_var_name(item), get_value(dict_or_list[item]))
            if isinstance(dict_or_list[item], dict):
                res += '{\n'
                for k, v in dict_or_list[item].items():
                    res += " '{}': {},\n".format(k, get_value(v))
                res += '}\n'
            elif isinstance(dict_or_list[item], (list, tuple)):
                res += '[\n'
                for v in dict_or_list[item]:
                    res += " {},\n".format(get_value(v))
                res += ']\n'
            else:  # str
                res += "{}\n".format(get_value(dict_or_list[item]))
    if self._output_config['type'] == LIST:
        result += '{} = [\n'.format(self._output_config['root'] or 'PACKAGES_ROOT')
        result += res
        result += ']\n'
    elif self._output_config['type'] == DICT:
        result += '{} = {}\n'.format(self._output_config['root'] or 'PACKAGES_ROOT', '{')
        result += res
        result += '}\n'
    else:
        result += res
    return result
@register_output_format('json')
def output_format_json(self, packages, **kwargs):
    """Serialize packages to pretty-printed JSON, nested under root if set."""
    payload = self.output_format_module(packages)
    root_key = self._output_config['root']
    if root_key:
        payload = {root_key: payload}
    # TODO: Find a proper way to avoid double backslashes in path only (or not?)
    return json.dumps(payload, indent=True)
|
devopshq/crosspm | crosspm/helpers/output.py | Output.write_output | python | def write_output(self, params, packages):
if params['out_format'] not in _output_format_map:
raise CrosspmException(
CROSSPM_ERRORCODE_UNKNOWN_OUT_TYPE,
'Unknown out_format: [{}]'.format(params['out_format']),
)
f = _output_format_map[params['out_format']]
result = f(self, packages, **params)
if result:
out_file_path = os.path.realpath(os.path.expanduser(params['output']))
self.write_to_file(result, out_file_path)
self._log.info(
'Write packages info to file [%s]\ncontent:\n\n%s',
out_file_path,
result,
) | Функция вызывает определенную функцию для фиксированного out-format
:param params:
:param packages:
:return: | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/output.py#L202-L225 | [
"def write_to_file(self, text, out_file_path):\n out_dir_path = os.path.dirname(out_file_path)\n\n if not os.path.exists(out_dir_path):\n self._log.info('mkdirs [%s] ...', out_dir_path)\n os.makedirs(out_dir_path)\n\n elif not os.path.isdir(out_dir_path):\n raise CrosspmException(\n CROSSPM_ERRORCODE_FILE_IO,\n 'Unable to make directory [{}]. File with the same name exists'.format(\n out_dir_path\n ))\n\n with open(out_file_path, 'w+') as f:\n f.write(text)\n"
] | class Output:
_output_config = {
'root': {'PACKAGES_ROOT'},
# 'key': 'package',
'value': 'path',
# 'columns': [
# {
# 'column': 'package',
# 'value': '{:upper}_ROOT',
# },
# {
# 'column': 'path',
# 'value': '{}',
# }
# ]
}
_name_column = ''
_columns = []
def __init__(self, data=None, name_column='', config=None):
self._log = logging.getLogger('crosspm')
self._config = config
if name_column:
self._name_column = name_column
self._output_config['key'] = name_column
if data and isinstance(data, dict):
self._output_config = data
# self.init_config()
if 'columns' not in self._output_config:
self._output_config['columns'] = []
if len(self._output_config['columns']) == 0:
# TODO: implement this:
# if config.no_fails:
# param_list = [x for x in config.get_fails('unique', {})]
# if self._name_column not in param_list:
# param_list.insert(0, self._name_column)
# pkg_name = '/'.join('{%s:upper}' % x for x in param_list)
# else:
pkg_name = '{:upper}_ROOT'
self._output_config['columns'] = [
{
'column': self._name_column,
'value': pkg_name,
},
{
'column': 'path',
'value': '{}',
},
]
self.init_config()
if 'columns' not in self._output_config:
self._output_config['columns'] = []
def init_config(self):
root = self._output_config.get('root', '')
if isinstance(root, str):
self._output_config['type'] = PLAIN
elif isinstance(root, (dict, set)):
self._output_config['type'] = DICT
root = [x for x in root]
self._output_config['root'] = root[0] if len(root) > 0 else ''
elif isinstance(root, (list, tuple)):
self._output_config['type'] = LIST
self._output_config['root'] = root[0] if len(root) > 0 else ''
key = self._output_config.get('key', '')
key0 = None
if 'columns' in self._output_config:
self._columns = []
for item in self._output_config['columns']:
if not item.get('value', ''):
item['value'] = '{}'
if not item.get('name', ''):
item['name'] = '{}'
if not item.get('column', ''):
item['column'] = ''
for cl in [y for y in [x[0] for x in Parser.split_with_regexp('{.*?}', item['name']) if x[1]] if y]:
col = cl.split(':')[0]
if col:
item['column'] = col
break
if item['column']:
self._columns.append(item['column'])
if key == item['column']:
key0 = ''
elif key0 is None:
key0 = item['column']
if key0:
key = key0
self._output_config['key'] = key
if self._columns:
if self._name_column and self._name_column not in self._columns:
self._columns.append(self._name_column)
else:
if not self._name_column:
self._name_column = 'package'
if 'value' not in self._output_config:
self._output_config['value'] = ''
@staticmethod
def get_var_name(pkg_name):
result = '{:upper.safe}'.format(OutFormat(pkg_name))
return result
def write_to_file(self, text, out_file_path):
out_dir_path = os.path.dirname(out_file_path)
if not os.path.exists(out_dir_path):
self._log.info('mkdirs [%s] ...', out_dir_path)
os.makedirs(out_dir_path)
elif not os.path.isdir(out_dir_path):
raise CrosspmException(
CROSSPM_ERRORCODE_FILE_IO,
'Unable to make directory [{}]. File with the same name exists'.format(
out_dir_path
))
with open(out_file_path, 'w+') as f:
f.write(text)
@staticmethod
def get_output_types():
return list(_output_format_map.keys())
def format_column(self, column, name, value):
for item in self._output_config['columns']:
if item['column'] == column:
name = item['name'].format(OutFormat(name))
value = item['value'].format(OutFormat(value))
break
return {'name': name, 'value': value}
@register_output_format('stdout')
def output_format_stdout(self, packages, **kwargs):
self._output_config['type'] = PLAIN
_packages = self.output_format_module(packages)
for k in _packages:
v = _packages[k]
sys.stdout.write('{}: {}\n'.format(self.get_var_name(k), v))
sys.stdout.flush()
return None
@register_output_format('shell')
def output_format_shell(self, packages, **kwargs):
self._output_config['type'] = PLAIN
result = '\n'
_packages = self.output_format_module(packages)
for k in _packages:
v = _packages[k]
result += "{}='{}'\n".format(self.get_var_name(k), v)
result += '\n'
return result
@register_output_format('jinja')
def output_format_jinja(self, packages, output_template, **kwargs):
output_template = os.path.realpath(output_template)
template_dir, template_name = os.path.split(output_template)
j2_env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)
result = j2_env.get_template(template_name).render(packages=packages)
return result
@register_output_format('cmd')
def output_format_cmd(self, packages, **kwargs):
self._output_config['type'] = PLAIN
result = '\n'
_packages = self.output_format_module(packages)
for k in _packages:
v = _packages[k]
result += "set {}={}\n".format(self.get_var_name(k), v)
result += '\n'
return result
@register_output_format('lock')
def output_format_lock(self, packages, **kwargs):
""" Text to lock file """
self._output_config['type'] = PLAIN
text = ''
tmp_packages = OrderedDict()
columns = self._config.get_columns()
widths = {}
for _pkg in packages.values():
_pkg_name = _pkg.package_name
_params = _pkg.get_params(columns, merged=True, raw=False)
if _pkg_name not in tmp_packages:
tmp_packages[_pkg_name] = _params
comment = 1
for _col in columns:
widths[_col] = max(widths.get(_col, len(_col)), len(str(_params.get(_col, '')))) + comment
comment = 0
comment = 1
for _col in columns:
text += '{}{} '.format(_col, ' ' * (widths[_col] - len(_col) - comment))
comment = 0
text = '#{}\n'.format(text.strip())
for _pkg_name in sorted(tmp_packages, key=lambda x: str(x).lower()):
_pkg = tmp_packages[_pkg_name]
line = ''
for _col in columns:
line += '{}{} '.format(_pkg[_col], ' ' * (widths[_col] - len(str(_pkg[_col]))))
text += '{}\n'.format(line.strip())
return text
def output_format_module(self, packages, esc_path=False):
"""
Create out with child first position
"""
def create_ordered_list(packages_):
"""
Recursive for package.packages
"""
list_ = []
for _pkg_name in packages_:
_pkg = packages_[_pkg_name]
if _pkg and _pkg.packages:
list_.extend(create_ordered_list(_pkg.packages))
if _pkg:
_pkg_params = _pkg.get_params(self._columns, True)
_res_item = {}
for item in self._output_config['columns']:
name = item['name'].format(OutFormat(item['column']))
value = _pkg_params.get(item['column'], '')
if not isinstance(value, (list, dict, tuple)):
try:
value = item['value'].format(
OutFormat(value, (item['column'] == 'path') if esc_path else False))
except Exception:
value = ''
# TODO: implement this:
# if not value:
# try:
# value = item['value'].format(OutFormat(_pkg.get_params('', True)))
# except Exception as e:
# pass
_res_item[name] = value
list_.append(_res_item)
return list_
result_list = create_ordered_list(packages, )
if self._output_config['type'] == LIST:
return result_list
result = OrderedDict()
for item in result_list:
# TODO: Error handling
name = item[self._output_config['key']]
if self._output_config['value']:
value = item[self._output_config['value']]
else:
value = OrderedDict([(k, v) for k, v in item.items() if k != self._output_config['key']])
result[name] = value
return result
@register_output_format('python')
def output_format_python(self, packages, **kwargs):
def get_value(_v):
if isinstance(_v, (int, float, bool)):
_res = '{}'.format(str(_v))
elif isinstance(_v, (dict, tuple, list)):
_res = '{}'.format(str(_v))
else:
_res = "'{}'".format(str(_v))
return _res
def items_iter(_dict_or_list):
if isinstance(_dict_or_list, dict):
for k in sorted(_dict_or_list, key=lambda x: str(x).lower()):
yield k
else:
for k in _dict_or_list:
yield k
result = '# -*- coding: utf-8 -*-\n\n'
res = ''
dict_or_list = self.output_format_module(packages, True)
for item in items_iter(dict_or_list):
if self._output_config['type'] == LIST:
res += ' {\n'
for k, v in item.items():
res += " '{}': {},\n".format(k, get_value(v))
res += ' },\n'
elif self._output_config['type'] == DICT:
res += " '{}': ".format(item)
if isinstance(dict_or_list[item], dict):
res += '{\n'
for k, v in dict_or_list[item].items():
res += " '{}': {},\n".format(k, get_value(v))
res += ' },\n'
elif isinstance(dict_or_list[item], (list, tuple)):
res += '[\n'
for v in dict_or_list[item]:
res += " {},\n".format(get_value(v))
res += ' ],\n'
else: # str
res += "{},\n".format(get_value(dict_or_list[item]))
else:
res += '{} = '.format(self.get_var_name(item), get_value(dict_or_list[item]))
if isinstance(dict_or_list[item], dict):
res += '{\n'
for k, v in dict_or_list[item].items():
res += " '{}': {},\n".format(k, get_value(v))
res += '}\n'
elif isinstance(dict_or_list[item], (list, tuple)):
res += '[\n'
for v in dict_or_list[item]:
res += " {},\n".format(get_value(v))
res += ']\n'
else: # str
res += "{}\n".format(get_value(dict_or_list[item]))
if self._output_config['type'] == LIST:
result += '{} = [\n'.format(self._output_config['root'] or 'PACKAGES_ROOT')
result += res
result += ']\n'
elif self._output_config['type'] == DICT:
result += '{} = {}\n'.format(self._output_config['root'] or 'PACKAGES_ROOT', '{')
result += res
result += '}\n'
else:
result += res
return result
@register_output_format('json')
def output_format_json(self, packages, **kwargs):
dict_or_list = self.output_format_module(packages)
if self._output_config['root']:
dict_or_list = {
self._output_config['root']: dict_or_list
}
# TODO: Find a proper way to avoid double backslashes in path only (or not?)
result = json.dumps(dict_or_list, indent=True) # .replace('\\\\', '\\')
return result
|
devopshq/crosspm | crosspm/helpers/output.py | Output.output_format_lock | python | def output_format_lock(self, packages, **kwargs):
self._output_config['type'] = PLAIN
text = ''
tmp_packages = OrderedDict()
columns = self._config.get_columns()
widths = {}
for _pkg in packages.values():
_pkg_name = _pkg.package_name
_params = _pkg.get_params(columns, merged=True, raw=False)
if _pkg_name not in tmp_packages:
tmp_packages[_pkg_name] = _params
comment = 1
for _col in columns:
widths[_col] = max(widths.get(_col, len(_col)), len(str(_params.get(_col, '')))) + comment
comment = 0
comment = 1
for _col in columns:
text += '{}{} '.format(_col, ' ' * (widths[_col] - len(_col) - comment))
comment = 0
text = '#{}\n'.format(text.strip())
for _pkg_name in sorted(tmp_packages, key=lambda x: str(x).lower()):
_pkg = tmp_packages[_pkg_name]
line = ''
for _col in columns:
line += '{}{} '.format(_pkg[_col], ' ' * (widths[_col] - len(str(_pkg[_col]))))
text += '{}\n'.format(line.strip())
return text | Text to lock file | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/output.py#L276-L303 | null | class Output:
_output_config = {
'root': {'PACKAGES_ROOT'},
# 'key': 'package',
'value': 'path',
# 'columns': [
# {
# 'column': 'package',
# 'value': '{:upper}_ROOT',
# },
# {
# 'column': 'path',
# 'value': '{}',
# }
# ]
}
_name_column = ''
_columns = []
def __init__(self, data=None, name_column='', config=None):
self._log = logging.getLogger('crosspm')
self._config = config
if name_column:
self._name_column = name_column
self._output_config['key'] = name_column
if data and isinstance(data, dict):
self._output_config = data
# self.init_config()
if 'columns' not in self._output_config:
self._output_config['columns'] = []
if len(self._output_config['columns']) == 0:
# TODO: implement this:
# if config.no_fails:
# param_list = [x for x in config.get_fails('unique', {})]
# if self._name_column not in param_list:
# param_list.insert(0, self._name_column)
# pkg_name = '/'.join('{%s:upper}' % x for x in param_list)
# else:
pkg_name = '{:upper}_ROOT'
self._output_config['columns'] = [
{
'column': self._name_column,
'value': pkg_name,
},
{
'column': 'path',
'value': '{}',
},
]
self.init_config()
if 'columns' not in self._output_config:
self._output_config['columns'] = []
def init_config(self):
root = self._output_config.get('root', '')
if isinstance(root, str):
self._output_config['type'] = PLAIN
elif isinstance(root, (dict, set)):
self._output_config['type'] = DICT
root = [x for x in root]
self._output_config['root'] = root[0] if len(root) > 0 else ''
elif isinstance(root, (list, tuple)):
self._output_config['type'] = LIST
self._output_config['root'] = root[0] if len(root) > 0 else ''
key = self._output_config.get('key', '')
key0 = None
if 'columns' in self._output_config:
self._columns = []
for item in self._output_config['columns']:
if not item.get('value', ''):
item['value'] = '{}'
if not item.get('name', ''):
item['name'] = '{}'
if not item.get('column', ''):
item['column'] = ''
for cl in [y for y in [x[0] for x in Parser.split_with_regexp('{.*?}', item['name']) if x[1]] if y]:
col = cl.split(':')[0]
if col:
item['column'] = col
break
if item['column']:
self._columns.append(item['column'])
if key == item['column']:
key0 = ''
elif key0 is None:
key0 = item['column']
if key0:
key = key0
self._output_config['key'] = key
if self._columns:
if self._name_column and self._name_column not in self._columns:
self._columns.append(self._name_column)
else:
if not self._name_column:
self._name_column = 'package'
if 'value' not in self._output_config:
self._output_config['value'] = ''
@staticmethod
def get_var_name(pkg_name):
result = '{:upper.safe}'.format(OutFormat(pkg_name))
return result
def write_to_file(self, text, out_file_path):
out_dir_path = os.path.dirname(out_file_path)
if not os.path.exists(out_dir_path):
self._log.info('mkdirs [%s] ...', out_dir_path)
os.makedirs(out_dir_path)
elif not os.path.isdir(out_dir_path):
raise CrosspmException(
CROSSPM_ERRORCODE_FILE_IO,
'Unable to make directory [{}]. File with the same name exists'.format(
out_dir_path
))
with open(out_file_path, 'w+') as f:
f.write(text)
@staticmethod
def get_output_types():
return list(_output_format_map.keys())
def write_output(self, params, packages):
"""
Функция вызывает определенную функцию для фиксированного out-format
:param params:
:param packages:
:return:
"""
if params['out_format'] not in _output_format_map:
raise CrosspmException(
CROSSPM_ERRORCODE_UNKNOWN_OUT_TYPE,
'Unknown out_format: [{}]'.format(params['out_format']),
)
f = _output_format_map[params['out_format']]
result = f(self, packages, **params)
if result:
out_file_path = os.path.realpath(os.path.expanduser(params['output']))
self.write_to_file(result, out_file_path)
self._log.info(
'Write packages info to file [%s]\ncontent:\n\n%s',
out_file_path,
result,
)
def format_column(self, column, name, value):
for item in self._output_config['columns']:
if item['column'] == column:
name = item['name'].format(OutFormat(name))
value = item['value'].format(OutFormat(value))
break
return {'name': name, 'value': value}
@register_output_format('stdout')
def output_format_stdout(self, packages, **kwargs):
    """Print ``NAME: value`` lines for every package to standard output.

    Always returns ``None`` so write_output() does not create a file.
    """
    self._output_config['type'] = PLAIN
    for name, value in self.output_format_module(packages).items():
        sys.stdout.write('{}: {}\n'.format(self.get_var_name(name), value))
        sys.stdout.flush()
    return None
@register_output_format('shell')
def output_format_shell(self, packages, **kwargs):
    """Render packages as POSIX shell variable assignments (``NAME='value'``).

    Fix: a single quote embedded in a value previously broke the generated
    script; embedded quotes are now escaped with the standard ``'\\''``
    sequence, leaving quote-free values byte-identical to before.
    """
    self._output_config['type'] = PLAIN
    result = '\n'
    _packages = self.output_format_module(packages)
    for k in _packages:
        # close the quoted string, emit a literal ', reopen the quoted string
        v = str(_packages[k]).replace("'", "'\\''")
        result += "{}='{}'\n".format(self.get_var_name(k), v)
    result += '\n'
    return result
@register_output_format('jinja')
def output_format_jinja(self, packages, output_template, **kwargs):
    """Render *packages* through the Jinja2 template at *output_template*."""
    template_path = os.path.realpath(output_template)
    folder, filename = os.path.split(template_path)
    environment = Environment(loader=FileSystemLoader(folder), trim_blocks=True)
    return environment.get_template(filename).render(packages=packages)
@register_output_format('cmd')
def output_format_cmd(self, packages, **kwargs):
    """Render packages as Windows ``set NAME=value`` commands."""
    self._output_config['type'] = PLAIN
    lines = ['']
    for name, value in self.output_format_module(packages).items():
        lines.append('set {}={}'.format(self.get_var_name(name), value))
    lines.append('\n')
    return '\n'.join(lines)
# NOTE(review): registered under 'lock' here, yet other revisions of this file
# bind 'lock' to a dedicated output_format_lock formatter — confirm which
# registration is current.
@register_output_format('lock')
def output_format_module(self, packages, esc_path=False):
    """
    Build the generic output structure from *packages*, children first.

    :param packages: mapping of package name -> package object; nested
        dependencies are expected in ``package.packages``.
    :param esc_path: when True, the 'path' column value is rendered through
        OutFormat with escaping enabled.
    :return: list of row dicts when the configured output type is LIST,
        otherwise an OrderedDict keyed by ``self._output_config['key']``.
    """
    def create_ordered_list(packages_):
        """
        Recursive for package.packages: flatten depth-first so a package's
        dependencies appear before the package itself ("child first").
        """
        list_ = []
        for _pkg_name in packages_:
            _pkg = packages_[_pkg_name]
            # Emit nested packages before their parent.
            if _pkg and _pkg.packages:
                list_.extend(create_ordered_list(_pkg.packages))
            if _pkg:
                _pkg_params = _pkg.get_params(self._columns, True)
                _res_item = {}
                # Render every configured column through its name/value templates.
                for item in self._output_config['columns']:
                    name = item['name'].format(OutFormat(item['column']))
                    value = _pkg_params.get(item['column'], '')
                    # Containers are kept verbatim; only scalars go through the template.
                    if not isinstance(value, (list, dict, tuple)):
                        try:
                            value = item['value'].format(
                                OutFormat(value, (item['column'] == 'path') if esc_path else False))
                        except Exception:
                            # A formatting failure degrades to '' instead of aborting.
                            value = ''
                            # TODO: implement this:
                            # if not value:
                            #     try:
                            #         value = item['value'].format(OutFormat(_pkg.get_params('', True)))
                            #     except Exception as e:
                            #         pass
                    _res_item[name] = value
                list_.append(_res_item)
        return list_
    result_list = create_ordered_list(packages, )
    if self._output_config['type'] == LIST:
        return result_list
    # DICT/PLAIN output: re-key the flat rows by the configured key column.
    result = OrderedDict()
    for item in result_list:
        # TODO: Error handling
        name = item[self._output_config['key']]
        if self._output_config['value']:
            value = item[self._output_config['value']]
        else:
            # No single value column configured: keep all non-key columns.
            value = OrderedDict([(k, v) for k, v in item.items() if k != self._output_config['key']])
        result[name] = value
    return result
@register_output_format('python')
def output_format_python(self, packages, **kwargs):
    """
    Render packages as the text of a Python source module.

    Depending on the configured output type the module body is one top-level
    list (LIST), one top-level dict (DICT), or one module-level variable per
    package (anything else).
    """
    def get_value(_v):
        # Numbers/bools and containers are emitted via str(); everything else
        # is wrapped in single quotes as a string literal.
        if isinstance(_v, (int, float, bool)):
            _res = '{}'.format(str(_v))
        elif isinstance(_v, (dict, tuple, list)):
            _res = '{}'.format(str(_v))
        else:
            _res = "'{}'".format(str(_v))
        return _res
    def items_iter(_dict_or_list):
        # Dicts are iterated in case-insensitive key order; lists as-is.
        if isinstance(_dict_or_list, dict):
            for k in sorted(_dict_or_list, key=lambda x: str(x).lower()):
                yield k
        else:
            for k in _dict_or_list:
                yield k
    result = '# -*- coding: utf-8 -*-\n\n'
    res = ''
    # esc_path=True: path values are escaped for embedding in Python source.
    dict_or_list = self.output_format_module(packages, True)
    for item in items_iter(dict_or_list):
        if self._output_config['type'] == LIST:
            # One dict literal per row inside a top-level list.
            res += '    {\n'
            for k, v in item.items():
                res += "        '{}': {},\n".format(k, get_value(v))
            res += '    },\n'
        elif self._output_config['type'] == DICT:
            # One key inside a top-level dict; the value may itself be a
            # dict, a list/tuple, or a scalar.
            res += "    '{}': ".format(item)
            if isinstance(dict_or_list[item], dict):
                res += '{\n'
                for k, v in dict_or_list[item].items():
                    res += "        '{}': {},\n".format(k, get_value(v))
                res += '    },\n'
            elif isinstance(dict_or_list[item], (list, tuple)):
                res += '[\n'
                for v in dict_or_list[item]:
                    res += "        {},\n".format(get_value(v))
                res += '    ],\n'
            else:  # str
                res += "{},\n".format(get_value(dict_or_list[item]))
        else:
            # One module-level variable per package.
            # NOTE(review): the second format() argument is ignored (single
            # placeholder); the value is appended by the branches below.
            res += '{} = '.format(self.get_var_name(item), get_value(dict_or_list[item]))
            if isinstance(dict_or_list[item], dict):
                res += '{\n'
                for k, v in dict_or_list[item].items():
                    res += "    '{}': {},\n".format(k, get_value(v))
                res += '}\n'
            elif isinstance(dict_or_list[item], (list, tuple)):
                res += '[\n'
                for v in dict_or_list[item]:
                    res += "    {},\n".format(get_value(v))
                res += ']\n'
            else:  # str
                res += "{}\n".format(get_value(dict_or_list[item]))
    # Wrap the accumulated body in the configured root container, if any.
    if self._output_config['type'] == LIST:
        result += '{} = [\n'.format(self._output_config['root'] or 'PACKAGES_ROOT')
        result += res
        result += ']\n'
    elif self._output_config['type'] == DICT:
        result += '{} = {}\n'.format(self._output_config['root'] or 'PACKAGES_ROOT', '{')
        result += res
        result += '}\n'
    else:
        result += res
    return result
@register_output_format('json')
def output_format_json(self, packages, **kwargs):
    """Serialize packages to an indented JSON document, under the configured root key if set."""
    payload = self.output_format_module(packages)
    root_key = self._output_config['root']
    if root_key:
        payload = {root_key: payload}
    # TODO: Find a proper way to avoid double backslashes in path only (or not?)
    return json.dumps(payload, indent=True)  # .replace('\\\\', '\\')
|
devopshq/crosspm | crosspm/helpers/output.py | Output.output_format_module | python | def output_format_module(self, packages, esc_path=False):
def create_ordered_list(packages_):
"""
Recursive for package.packages
"""
list_ = []
for _pkg_name in packages_:
_pkg = packages_[_pkg_name]
if _pkg and _pkg.packages:
list_.extend(create_ordered_list(_pkg.packages))
if _pkg:
_pkg_params = _pkg.get_params(self._columns, True)
_res_item = {}
for item in self._output_config['columns']:
name = item['name'].format(OutFormat(item['column']))
value = _pkg_params.get(item['column'], '')
if not isinstance(value, (list, dict, tuple)):
try:
value = item['value'].format(
OutFormat(value, (item['column'] == 'path') if esc_path else False))
except Exception:
value = ''
# TODO: implement this:
# if not value:
# try:
# value = item['value'].format(OutFormat(_pkg.get_params('', True)))
# except Exception as e:
# pass
_res_item[name] = value
list_.append(_res_item)
return list_
result_list = create_ordered_list(packages, )
if self._output_config['type'] == LIST:
return result_list
result = OrderedDict()
for item in result_list:
# TODO: Error handling
name = item[self._output_config['key']]
if self._output_config['value']:
value = item[self._output_config['value']]
else:
value = OrderedDict([(k, v) for k, v in item.items() if k != self._output_config['key']])
result[name] = value
return result | Create out with child first position | train | https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/output.py#L305-L357 | [
"def create_ordered_list(packages_):\n \"\"\"\n Recursive for package.packages\n \"\"\"\n list_ = []\n for _pkg_name in packages_:\n _pkg = packages_[_pkg_name]\n if _pkg and _pkg.packages:\n list_.extend(create_ordered_list(_pkg.packages))\n if _pkg:\n _pkg_params = _pkg.get_params(self._columns, True)\n _res_item = {}\n for item in self._output_config['columns']:\n name = item['name'].format(OutFormat(item['column']))\n value = _pkg_params.get(item['column'], '')\n if not isinstance(value, (list, dict, tuple)):\n try:\n value = item['value'].format(\n OutFormat(value, (item['column'] == 'path') if esc_path else False))\n except Exception:\n value = ''\n # TODO: implement this:\n # if not value:\n # try:\n # value = item['value'].format(OutFormat(_pkg.get_params('', True)))\n # except Exception as e:\n # pass\n _res_item[name] = value\n list_.append(_res_item)\n return list_\n"
] | class Output:
_output_config = {
'root': {'PACKAGES_ROOT'},
# 'key': 'package',
'value': 'path',
# 'columns': [
# {
# 'column': 'package',
# 'value': '{:upper}_ROOT',
# },
# {
# 'column': 'path',
# 'value': '{}',
# }
# ]
}
_name_column = ''
_columns = []
def __init__(self, data=None, name_column='', config=None):
self._log = logging.getLogger('crosspm')
self._config = config
if name_column:
self._name_column = name_column
self._output_config['key'] = name_column
if data and isinstance(data, dict):
self._output_config = data
# self.init_config()
if 'columns' not in self._output_config:
self._output_config['columns'] = []
if len(self._output_config['columns']) == 0:
# TODO: implement this:
# if config.no_fails:
# param_list = [x for x in config.get_fails('unique', {})]
# if self._name_column not in param_list:
# param_list.insert(0, self._name_column)
# pkg_name = '/'.join('{%s:upper}' % x for x in param_list)
# else:
pkg_name = '{:upper}_ROOT'
self._output_config['columns'] = [
{
'column': self._name_column,
'value': pkg_name,
},
{
'column': 'path',
'value': '{}',
},
]
self.init_config()
if 'columns' not in self._output_config:
self._output_config['columns'] = []
def init_config(self):
root = self._output_config.get('root', '')
if isinstance(root, str):
self._output_config['type'] = PLAIN
elif isinstance(root, (dict, set)):
self._output_config['type'] = DICT
root = [x for x in root]
self._output_config['root'] = root[0] if len(root) > 0 else ''
elif isinstance(root, (list, tuple)):
self._output_config['type'] = LIST
self._output_config['root'] = root[0] if len(root) > 0 else ''
key = self._output_config.get('key', '')
key0 = None
if 'columns' in self._output_config:
self._columns = []
for item in self._output_config['columns']:
if not item.get('value', ''):
item['value'] = '{}'
if not item.get('name', ''):
item['name'] = '{}'
if not item.get('column', ''):
item['column'] = ''
for cl in [y for y in [x[0] for x in Parser.split_with_regexp('{.*?}', item['name']) if x[1]] if y]:
col = cl.split(':')[0]
if col:
item['column'] = col
break
if item['column']:
self._columns.append(item['column'])
if key == item['column']:
key0 = ''
elif key0 is None:
key0 = item['column']
if key0:
key = key0
self._output_config['key'] = key
if self._columns:
if self._name_column and self._name_column not in self._columns:
self._columns.append(self._name_column)
else:
if not self._name_column:
self._name_column = 'package'
if 'value' not in self._output_config:
self._output_config['value'] = ''
@staticmethod
def get_var_name(pkg_name):
result = '{:upper.safe}'.format(OutFormat(pkg_name))
return result
def write_to_file(self, text, out_file_path):
out_dir_path = os.path.dirname(out_file_path)
if not os.path.exists(out_dir_path):
self._log.info('mkdirs [%s] ...', out_dir_path)
os.makedirs(out_dir_path)
elif not os.path.isdir(out_dir_path):
raise CrosspmException(
CROSSPM_ERRORCODE_FILE_IO,
'Unable to make directory [{}]. File with the same name exists'.format(
out_dir_path
))
with open(out_file_path, 'w+') as f:
f.write(text)
@staticmethod
def get_output_types():
return list(_output_format_map.keys())
def write_output(self, params, packages):
"""
Функция вызывает определенную функцию для фиксированного out-format
:param params:
:param packages:
:return:
"""
if params['out_format'] not in _output_format_map:
raise CrosspmException(
CROSSPM_ERRORCODE_UNKNOWN_OUT_TYPE,
'Unknown out_format: [{}]'.format(params['out_format']),
)
f = _output_format_map[params['out_format']]
result = f(self, packages, **params)
if result:
out_file_path = os.path.realpath(os.path.expanduser(params['output']))
self.write_to_file(result, out_file_path)
self._log.info(
'Write packages info to file [%s]\ncontent:\n\n%s',
out_file_path,
result,
)
def format_column(self, column, name, value):
for item in self._output_config['columns']:
if item['column'] == column:
name = item['name'].format(OutFormat(name))
value = item['value'].format(OutFormat(value))
break
return {'name': name, 'value': value}
@register_output_format('stdout')
def output_format_stdout(self, packages, **kwargs):
self._output_config['type'] = PLAIN
_packages = self.output_format_module(packages)
for k in _packages:
v = _packages[k]
sys.stdout.write('{}: {}\n'.format(self.get_var_name(k), v))
sys.stdout.flush()
return None
@register_output_format('shell')
def output_format_shell(self, packages, **kwargs):
self._output_config['type'] = PLAIN
result = '\n'
_packages = self.output_format_module(packages)
for k in _packages:
v = _packages[k]
result += "{}='{}'\n".format(self.get_var_name(k), v)
result += '\n'
return result
@register_output_format('jinja')
def output_format_jinja(self, packages, output_template, **kwargs):
output_template = os.path.realpath(output_template)
template_dir, template_name = os.path.split(output_template)
j2_env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)
result = j2_env.get_template(template_name).render(packages=packages)
return result
@register_output_format('cmd')
def output_format_cmd(self, packages, **kwargs):
self._output_config['type'] = PLAIN
result = '\n'
_packages = self.output_format_module(packages)
for k in _packages:
v = _packages[k]
result += "set {}={}\n".format(self.get_var_name(k), v)
result += '\n'
return result
@register_output_format('lock')
def output_format_lock(self, packages, **kwargs):
""" Text to lock file """
self._output_config['type'] = PLAIN
text = ''
tmp_packages = OrderedDict()
columns = self._config.get_columns()
widths = {}
for _pkg in packages.values():
_pkg_name = _pkg.package_name
_params = _pkg.get_params(columns, merged=True, raw=False)
if _pkg_name not in tmp_packages:
tmp_packages[_pkg_name] = _params
comment = 1
for _col in columns:
widths[_col] = max(widths.get(_col, len(_col)), len(str(_params.get(_col, '')))) + comment
comment = 0
comment = 1
for _col in columns:
text += '{}{} '.format(_col, ' ' * (widths[_col] - len(_col) - comment))
comment = 0
text = '#{}\n'.format(text.strip())
for _pkg_name in sorted(tmp_packages, key=lambda x: str(x).lower()):
_pkg = tmp_packages[_pkg_name]
line = ''
for _col in columns:
line += '{}{} '.format(_pkg[_col], ' ' * (widths[_col] - len(str(_pkg[_col]))))
text += '{}\n'.format(line.strip())
return text
@register_output_format('python')
def output_format_python(self, packages, **kwargs):
def get_value(_v):
if isinstance(_v, (int, float, bool)):
_res = '{}'.format(str(_v))
elif isinstance(_v, (dict, tuple, list)):
_res = '{}'.format(str(_v))
else:
_res = "'{}'".format(str(_v))
return _res
def items_iter(_dict_or_list):
if isinstance(_dict_or_list, dict):
for k in sorted(_dict_or_list, key=lambda x: str(x).lower()):
yield k
else:
for k in _dict_or_list:
yield k
result = '# -*- coding: utf-8 -*-\n\n'
res = ''
dict_or_list = self.output_format_module(packages, True)
for item in items_iter(dict_or_list):
if self._output_config['type'] == LIST:
res += ' {\n'
for k, v in item.items():
res += " '{}': {},\n".format(k, get_value(v))
res += ' },\n'
elif self._output_config['type'] == DICT:
res += " '{}': ".format(item)
if isinstance(dict_or_list[item], dict):
res += '{\n'
for k, v in dict_or_list[item].items():
res += " '{}': {},\n".format(k, get_value(v))
res += ' },\n'
elif isinstance(dict_or_list[item], (list, tuple)):
res += '[\n'
for v in dict_or_list[item]:
res += " {},\n".format(get_value(v))
res += ' ],\n'
else: # str
res += "{},\n".format(get_value(dict_or_list[item]))
else:
res += '{} = '.format(self.get_var_name(item), get_value(dict_or_list[item]))
if isinstance(dict_or_list[item], dict):
res += '{\n'
for k, v in dict_or_list[item].items():
res += " '{}': {},\n".format(k, get_value(v))
res += '}\n'
elif isinstance(dict_or_list[item], (list, tuple)):
res += '[\n'
for v in dict_or_list[item]:
res += " {},\n".format(get_value(v))
res += ']\n'
else: # str
res += "{}\n".format(get_value(dict_or_list[item]))
if self._output_config['type'] == LIST:
result += '{} = [\n'.format(self._output_config['root'] or 'PACKAGES_ROOT')
result += res
result += ']\n'
elif self._output_config['type'] == DICT:
result += '{} = {}\n'.format(self._output_config['root'] or 'PACKAGES_ROOT', '{')
result += res
result += '}\n'
else:
result += res
return result
@register_output_format('json')
def output_format_json(self, packages, **kwargs):
dict_or_list = self.output_format_module(packages)
if self._output_config['root']:
dict_or_list = {
self._output_config['root']: dict_or_list
}
# TODO: Find a proper way to avoid double backslashes in path only (or not?)
result = json.dumps(dict_or_list, indent=True) # .replace('\\\\', '\\')
return result
|
ianclegg/winrmlib | winrmlib/api/authentication.py | HttpCredSSPAuth._get_rsa_public_key | python | def _get_rsa_public_key(cert):
openssl_pkey = cert.get_pubkey()
openssl_lib = _util.binding.lib
ffi = _util.binding.ffi
buf = ffi.new("unsigned char **")
rsa = openssl_lib.EVP_PKEY_get1_RSA(openssl_pkey._pkey)
length = openssl_lib.i2d_RSAPublicKey(rsa, buf)
public_key = ffi.buffer(buf[0], length)[:]
ffi.gc(buf[0], openssl_lib.OPENSSL_free)
return public_key | PyOpenSSL does not provide a public method to export the public key from a certificate as a properly formatted
ASN.1 RSAPublicKey structure. There are 'hacks' which use dump_privatekey(crypto.FILETYPE_ASN1, <public_key>),
but this dumps the public key within a PrivateKeyInfo structure which is not suitable for a comparison. This
approach uses the PyOpenSSL CFFI bindings to invoke the i2d_RSAPublicKey() which correctly extracts the key
material in an ASN.1 RSAPublicKey structure.
:param cert: The ASN.1 Encoded Certificate
:return: The ASN.1 Encoded RSAPublicKey structure containing the supplied certificates public Key | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/authentication.py#L564-L582 | null | class HttpCredSSPAuth(AuthBase):
"""
"""
def __init__(self, domain, username, password):
self.credssp_regex = re.compile('(?:.*,)*\s*CredSSP\s*([^,]*),?', re.I)
self.password_authenticator = PasswordAuthentication(domain, username, password)
# Windows 2008 R2 and earlier are unable to negotiate TLS 1.2 by default so use TLS 1.0
# TODO: provide an mechanism to enable TLS 1.2 which works with modern Windows servers
self.tls_context = SSL.Context(SSL.TLSv1_METHOD)
# OpenSSL introduced a fix to CBC ciphers by adding empty fragments, but this breaks Microsoft's TLS 1.0
# implementation. OpenSSL added a flag we need to use which ensures OpenSSL does not send empty fragments
# SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS (0x00000800) | SSL_OP_TLS_BLOCK_PADDING_BUG (0x00000200)
self.tls_context.set_options(0x00000800 | 0x00000200)
self.tls_context.set_cipher_list('ALL')
self.tls_connection = None
def _get_credssp_header(self, response):
authenticate_header = response.headers.get('www-authenticate', None)
if authenticate_header:
match_obj = self.credssp_regex.search(authenticate_header)
if match_obj and len(match_obj.groups()) > 0:
encoded = match_obj.group(1)
return base64.b64decode(encoded)
return None
@staticmethod
def _set_credssp_header(request, value):
request.headers['Authorization'] = "CredSSP {0}".format(base64.b64encode(value))
return request
@staticmethod
def _credssp_processor(self, context):
"""
Implements a state machine
:return:
"""
http_response = (yield)
credssp_context = self._get_credssp_header(http_response)
if credssp_context is None:
raise Exception('The remote host did not respond with a \'www-authenticate\' header containing '
'\'CredSSP\' as an available authentication mechanism')
# 1. First, secure the channel with a TLS Handshake
if not credssp_context:
self.tls_connection = SSL.Connection(self.tls_context)
self.tls_connection.set_connect_state()
while True:
try:
self.tls_connection.do_handshake()
except SSL.WantReadError:
http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
credssp_context = self._get_credssp_header(http_response)
if credssp_context is None or not credssp_context:
raise Exception('The remote host rejected the CredSSP TLS handshake')
self.tls_connection.bio_write(credssp_context)
else:
break
# add logging to display the negotiated cipher (move to a function)
openssl_lib = _util.binding.lib
ffi = _util.binding.ffi
cipher = openssl_lib.SSL_get_current_cipher(self.tls_connection._ssl)
cipher_name = ffi.string( openssl_lib.SSL_CIPHER_get_name(cipher))
log.debug("Negotiated TLS Cipher: %s", cipher_name)
# 2. Send an TSRequest containing an NTLM Negotiate Request
context_generator = context.initialize_security_context()
negotiate_token = context_generator.send(None)
log.debug("NTLM Type 1: %s", AsHex(negotiate_token))
ts_request = TSRequest()
ts_request['negoTokens'] = negotiate_token
self.tls_connection.send(ts_request.getData())
http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
# Extract and decrypt the encoded TSRequest response struct from the Negotiate header
authenticate_header = self._get_credssp_header(http_response)
if not authenticate_header or authenticate_header is None:
raise Exception("The remote host rejected the CredSSP negotiation token")
self.tls_connection.bio_write(authenticate_header)
# NTLM Challenge Response and Server Public Key Validation
ts_request = TSRequest()
ts_request.fromString(self.tls_connection.recv(8192))
challenge_token = ts_request['negoTokens']
log.debug("NTLM Type 2: %s", AsHex(challenge_token))
server_cert = self.tls_connection.get_peer_certificate()
# not using channel bindings
#certificate_digest = base64.b16decode(server_cert.digest('SHA256').replace(':', ''))
## channel_binding_structure = gss_channel_bindings_struct()
## channel_binding_structure['application_data'] = "tls-server-end-point:" + certificate_digest
public_key = HttpCredSSPAuth._get_rsa_public_key(server_cert)
# The _RSAPublicKey must be 'wrapped' using the negotiated GSSAPI mechanism and send to the server along with
# the final SPNEGO token. This step of the CredSSP protocol is designed to thwart 'man-in-the-middle' attacks
# Build and encrypt the response to the server
ts_request = TSRequest()
type3= context_generator.send(challenge_token)
log.debug("NTLM Type 3: %s", AsHex(type3))
ts_request['negoTokens'] = type3
public_key_encrypted, signature = context.wrap_message(public_key)
ts_request['pubKeyAuth'] = signature + public_key_encrypted
self.tls_connection.send(ts_request.getData())
enc_type3 = self.tls_connection.bio_read(8192)
http_response = yield self._set_credssp_header(http_response.request, enc_type3)
# TLS decrypt the response, then ASN decode and check the error code
auth_response = self._get_credssp_header(http_response)
if not auth_response or auth_response is None:
raise Exception("The remote host rejected the challenge response")
self.tls_connection.bio_write(auth_response)
ts_request = TSRequest()
ts_request.fromString(self.tls_connection.recv(8192))
# TODO: determine how to validate server certificate here
#a = ts_request['pubKeyAuth']
# print ":".join("{:02x}".format(ord(c)) for c in a)
# 4. Send the Credentials to be delegated, these are encrypted with both NTLM v2 and then by TLS
tsp = TSPasswordCreds()
tsp['domain'] = self.password_authenticator.get_domain()
tsp['username'] = self.password_authenticator.get_username()
tsp['password'] = self.password_authenticator.get_password()
tsc = TSCredentials()
tsc['type'] = 1
tsc['credentials'] = tsp.getData()
ts_request = TSRequest()
encrypted, signature = context.wrap_message(tsc.getData())
ts_request['authInfo'] = signature + encrypted
self.tls_connection.send(ts_request.getData())
token = self.tls_connection.bio_read(8192)
http_response.request.body = self.body
http_response = yield self._set_credssp_header(self._encrypt(http_response.request, self.tls_connection), token)
if http_response.status_code == 401:
raise Exception('Authentication Failed')
#does the server need to respond with a final credssp header, probably not.
#if we arrive here something probably went wrong?
#if self._get_credssp_header(http_response) is None:
# raise Exception('Authentication Failed')
@staticmethod
def _encrypt(request, context):
body = '--Encrypted Boundary\r\n'
body += '\tContent-Type: application/HTTP-CredSSP-session-encrypted\r\n'
body += '\tOriginalContent: type=application/soap+xml;charset=UTF-8;Length=' + str(len(request.body)) + '\r\n'
body += '--Encrypted Boundary\r\n'
body += 'Content-Type: application/octet-stream\r\n'
# CredSSP uses the initial TLS context for encryption
context.send(request.body)
body += struct.pack('<i', 16)
body += context.bio_read(8192)
body += '--Encrypted Boundary--\r\n'
request.body = body
request.headers['Content-Length'] = str(len(body))
request.headers['Content-Type'] = 'multipart/encrypted;'
request.headers['Content-Type'] += 'protocol="application/HTTP-CredSSP-session-encrypted";'
request.headers['Content-Type'] += 'boundary="Encrypted Boundary"'
return request
@staticmethod
def _decrypt(response, context):
# TODO: there is some extra validation to sanitise the incoming response here
mime_parts = response.content.split('--Encrypted Boundary')
length = int(mime_parts[1].split(';Length=')[1].strip())
parts = mime_parts[2].split("application/octet-stream")
payload = parts[1][2:]
# The first 4 bytes of the response indicate the length of the signature. When using CredSSP the signature
# is not computed or sent, we simply validate this is 0x10
signature_length = struct.unpack('<i', payload[:4])[0]
tls_text = payload[4:]
#if len(tls_text) != length:
# raise Exception("extracted length not equal to expected length")
context.bio_write(tls_text)
plaintext = context.recv(8192)
return plaintext
def handle_401(self, response, **kwargs):
self.context = NtlmContext(self.password_authenticator, session_security='encrypt')
credssp_processor = self._credssp_processor(self.context)
next(credssp_processor)
while response.status_code == 401:
# This is required
response.content
response.raw.release_conn()
client_request = credssp_processor.send(response)
response = response.connection.send(client_request, **kwargs)
return response
def handle_response(self, response, **kwargs):
#if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
# response.request.body.seek(self.pos)
if response.status_code == 401:
response = self.handle_401(response, **kwargs)
log.debug("handle_response(): returning {0}".format(response))
# TODO: check the response header to see if the response is encrypted first
if response.status_code == 200:
logging.debug("decrypting body")
message = self._decrypt(response, self.tls_connection)
response._content = message
return response
def __call__(self, request):
request.headers["Connection"] = "Keep-Alive"
# TODO: implement support for unencrypted requests
request.register_hook('response', self.handle_response)
# We should not send any content to the target host until we have established a security context through the
# 'handle_response' hook, so we stash the original request body and remove it from the current request
if self.context is None:
self.body = str(request.body)
request.body = ""
request.headers["Content-Type"] = "application/soap+xml;charset=UTF-8"
request.headers['Content-Length'] = 0
else:
logging.debug("encrypting body")
self._encrypt(request, self.tls_connection)
return request
|
ianclegg/winrmlib | winrmlib/api/authentication.py | HttpCredSSPAuth._credssp_processor | python | def _credssp_processor(self, context):
http_response = (yield)
credssp_context = self._get_credssp_header(http_response)
if credssp_context is None:
raise Exception('The remote host did not respond with a \'www-authenticate\' header containing '
'\'CredSSP\' as an available authentication mechanism')
# 1. First, secure the channel with a TLS Handshake
if not credssp_context:
self.tls_connection = SSL.Connection(self.tls_context)
self.tls_connection.set_connect_state()
while True:
try:
self.tls_connection.do_handshake()
except SSL.WantReadError:
http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
credssp_context = self._get_credssp_header(http_response)
if credssp_context is None or not credssp_context:
raise Exception('The remote host rejected the CredSSP TLS handshake')
self.tls_connection.bio_write(credssp_context)
else:
break
# add logging to display the negotiated cipher (move to a function)
openssl_lib = _util.binding.lib
ffi = _util.binding.ffi
cipher = openssl_lib.SSL_get_current_cipher(self.tls_connection._ssl)
cipher_name = ffi.string( openssl_lib.SSL_CIPHER_get_name(cipher))
log.debug("Negotiated TLS Cipher: %s", cipher_name)
# 2. Send an TSRequest containing an NTLM Negotiate Request
context_generator = context.initialize_security_context()
negotiate_token = context_generator.send(None)
log.debug("NTLM Type 1: %s", AsHex(negotiate_token))
ts_request = TSRequest()
ts_request['negoTokens'] = negotiate_token
self.tls_connection.send(ts_request.getData())
http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
# Extract and decrypt the encoded TSRequest response struct from the Negotiate header
authenticate_header = self._get_credssp_header(http_response)
if not authenticate_header or authenticate_header is None:
raise Exception("The remote host rejected the CredSSP negotiation token")
self.tls_connection.bio_write(authenticate_header)
# NTLM Challenge Response and Server Public Key Validation
ts_request = TSRequest()
ts_request.fromString(self.tls_connection.recv(8192))
challenge_token = ts_request['negoTokens']
log.debug("NTLM Type 2: %s", AsHex(challenge_token))
server_cert = self.tls_connection.get_peer_certificate()
# not using channel bindings
#certificate_digest = base64.b16decode(server_cert.digest('SHA256').replace(':', ''))
## channel_binding_structure = gss_channel_bindings_struct()
## channel_binding_structure['application_data'] = "tls-server-end-point:" + certificate_digest
public_key = HttpCredSSPAuth._get_rsa_public_key(server_cert)
# The _RSAPublicKey must be 'wrapped' using the negotiated GSSAPI mechanism and send to the server along with
# the final SPNEGO token. This step of the CredSSP protocol is designed to thwart 'man-in-the-middle' attacks
# Build and encrypt the response to the server
ts_request = TSRequest()
type3= context_generator.send(challenge_token)
log.debug("NTLM Type 3: %s", AsHex(type3))
ts_request['negoTokens'] = type3
public_key_encrypted, signature = context.wrap_message(public_key)
ts_request['pubKeyAuth'] = signature + public_key_encrypted
self.tls_connection.send(ts_request.getData())
enc_type3 = self.tls_connection.bio_read(8192)
http_response = yield self._set_credssp_header(http_response.request, enc_type3)
# TLS decrypt the response, then ASN decode and check the error code
auth_response = self._get_credssp_header(http_response)
if not auth_response or auth_response is None:
raise Exception("The remote host rejected the challenge response")
self.tls_connection.bio_write(auth_response)
ts_request = TSRequest()
ts_request.fromString(self.tls_connection.recv(8192))
# TODO: determine how to validate server certificate here
#a = ts_request['pubKeyAuth']
# print ":".join("{:02x}".format(ord(c)) for c in a)
# 4. Send the Credentials to be delegated, these are encrypted with both NTLM v2 and then by TLS
tsp = TSPasswordCreds()
tsp['domain'] = self.password_authenticator.get_domain()
tsp['username'] = self.password_authenticator.get_username()
tsp['password'] = self.password_authenticator.get_password()
tsc = TSCredentials()
tsc['type'] = 1
tsc['credentials'] = tsp.getData()
ts_request = TSRequest()
encrypted, signature = context.wrap_message(tsc.getData())
ts_request['authInfo'] = signature + encrypted
self.tls_connection.send(ts_request.getData())
token = self.tls_connection.bio_read(8192)
http_response.request.body = self.body
http_response = yield self._set_credssp_header(self._encrypt(http_response.request, self.tls_connection), token)
if http_response.status_code == 401:
raise Exception('Authentication Failed') | Implements a state machine
:return: | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/authentication.py#L584-L696 | null | class HttpCredSSPAuth(AuthBase):
"""
"""
def __init__(self, domain, username, password):
self.credssp_regex = re.compile('(?:.*,)*\s*CredSSP\s*([^,]*),?', re.I)
self.password_authenticator = PasswordAuthentication(domain, username, password)
# Windows 2008 R2 and earlier are unable to negotiate TLS 1.2 by default so use TLS 1.0
# TODO: provide an mechanism to enable TLS 1.2 which works with modern Windows servers
self.tls_context = SSL.Context(SSL.TLSv1_METHOD)
# OpenSSL introduced a fix to CBC ciphers by adding empty fragments, but this breaks Microsoft's TLS 1.0
# implementation. OpenSSL added a flag we need to use which ensures OpenSSL does not send empty fragments
# SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS (0x00000800) | SSL_OP_TLS_BLOCK_PADDING_BUG (0x00000200)
self.tls_context.set_options(0x00000800 | 0x00000200)
self.tls_context.set_cipher_list('ALL')
self.tls_connection = None
def _get_credssp_header(self, response):
authenticate_header = response.headers.get('www-authenticate', None)
if authenticate_header:
match_obj = self.credssp_regex.search(authenticate_header)
if match_obj and len(match_obj.groups()) > 0:
encoded = match_obj.group(1)
return base64.b64decode(encoded)
return None
@staticmethod
def _set_credssp_header(request, value):
request.headers['Authorization'] = "CredSSP {0}".format(base64.b64encode(value))
return request
@staticmethod
def _get_rsa_public_key(cert):
"""
PyOpenSSL does not provide a public method to export the public key from a certificate as a properly formatted
ASN.1 RSAPublicKey structure. There are 'hacks' which use dump_privatekey(crypto.FILETYPE_ASN1, <public_key>),
but this dumps the public key within a PrivateKeyInfo structure which is not suitable for a comparison. This
approach uses the PyOpenSSL CFFI bindings to invoke the i2d_RSAPublicKey() which correctly extracts the key
material in an ASN.1 RSAPublicKey structure.
:param cert: The ASN.1 Encoded Certificate
:return: The ASN.1 Encoded RSAPublicKey structure containing the supplied certificates public Key
"""
openssl_pkey = cert.get_pubkey()
openssl_lib = _util.binding.lib
ffi = _util.binding.ffi
buf = ffi.new("unsigned char **")
rsa = openssl_lib.EVP_PKEY_get1_RSA(openssl_pkey._pkey)
length = openssl_lib.i2d_RSAPublicKey(rsa, buf)
public_key = ffi.buffer(buf[0], length)[:]
ffi.gc(buf[0], openssl_lib.OPENSSL_free)
return public_key
#does the server need to respond with a final credssp header, probably not.
#if we arrive here something probably went wrong?
#if self._get_credssp_header(http_response) is None:
# raise Exception('Authentication Failed')
@staticmethod
def _encrypt(request, context):
body = '--Encrypted Boundary\r\n'
body += '\tContent-Type: application/HTTP-CredSSP-session-encrypted\r\n'
body += '\tOriginalContent: type=application/soap+xml;charset=UTF-8;Length=' + str(len(request.body)) + '\r\n'
body += '--Encrypted Boundary\r\n'
body += 'Content-Type: application/octet-stream\r\n'
# CredSSP uses the initial TLS context for encryption
context.send(request.body)
body += struct.pack('<i', 16)
body += context.bio_read(8192)
body += '--Encrypted Boundary--\r\n'
request.body = body
request.headers['Content-Length'] = str(len(body))
request.headers['Content-Type'] = 'multipart/encrypted;'
request.headers['Content-Type'] += 'protocol="application/HTTP-CredSSP-session-encrypted";'
request.headers['Content-Type'] += 'boundary="Encrypted Boundary"'
return request
@staticmethod
def _decrypt(response, context):
# TODO: there is some extra validation to sanitise the incoming response here
mime_parts = response.content.split('--Encrypted Boundary')
length = int(mime_parts[1].split(';Length=')[1].strip())
parts = mime_parts[2].split("application/octet-stream")
payload = parts[1][2:]
# The first 4 bytes of the response indicate the length of the signature. When using CredSSP the signature
# is not computed or sent, we simply validate this is 0x10
signature_length = struct.unpack('<i', payload[:4])[0]
tls_text = payload[4:]
#if len(tls_text) != length:
# raise Exception("extracted length not equal to expected length")
context.bio_write(tls_text)
plaintext = context.recv(8192)
return plaintext
def handle_401(self, response, **kwargs):
self.context = NtlmContext(self.password_authenticator, session_security='encrypt')
credssp_processor = self._credssp_processor(self.context)
next(credssp_processor)
while response.status_code == 401:
# This is required
response.content
response.raw.release_conn()
client_request = credssp_processor.send(response)
response = response.connection.send(client_request, **kwargs)
return response
def handle_response(self, response, **kwargs):
#if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
# response.request.body.seek(self.pos)
if response.status_code == 401:
response = self.handle_401(response, **kwargs)
log.debug("handle_response(): returning {0}".format(response))
# TODO: check the response header to see if the response is encrypted first
if response.status_code == 200:
logging.debug("decrypting body")
message = self._decrypt(response, self.tls_connection)
response._content = message
return response
def __call__(self, request):
request.headers["Connection"] = "Keep-Alive"
# TODO: implement support for unencrypted requests
request.register_hook('response', self.handle_response)
# We should not send any content to the target host until we have established a security context through the
# 'handle_response' hook, so we stash the original request body and remove it from the current request
if self.context is None:
self.body = str(request.body)
request.body = ""
request.headers["Content-Type"] = "application/soap+xml;charset=UTF-8"
request.headers['Content-Length'] = 0
else:
logging.debug("encrypting body")
self._encrypt(request, self.tls_connection)
return request
|
ianclegg/winrmlib | winrmlib/api/compression.py | Compressor.compress | python | def compress(self, data, windowLength = None):
if windowLength == None:
windowLength = self.defaultWindowLength
compressed = ""
pos = 0
lastPos = len(data) - self.minStringLength
while pos < lastPos:
searchStart = max(pos - windowLength, 0);
matchLength = self.minStringLength
foundMatch = False
bestMatchDistance = self.maxStringDistance
bestMatchLength = 0
newCompressed = None
while (searchStart + matchLength) < pos:
m1 = data[searchStart : searchStart + matchLength]
m2 = data[pos : pos + matchLength]
isValidMatch = (m1 == m2 and matchLength < self.maxStringLength)
if isValidMatch:
matchLength += 1
foundMatch = True
else:
realMatchLength = matchLength - 1
if foundMatch and realMatchLength > bestMatchLength:
bestMatchDistance = pos - searchStart - realMatchLength
bestMatchLength = realMatchLength
matchLength = self.minStringLength
searchStart += 1
foundMatch = False
if bestMatchLength:
newCompressed = (self.referencePrefix + self.__encodeReferenceInt(bestMatchDistance, 2) + self.__encodeReferenceLength(bestMatchLength))
pos += bestMatchLength
else:
if data[pos] != self.referencePrefix:
newCompressed = data[pos]
else:
newCompressed = self.referencePrefix + self.referencePrefix
pos += 1
compressed += newCompressed
return compressed + data[pos:].replace("`", "``") | Compresses text data using the LZ77 algorithm. | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/compression.py#L131-L182 | null | class Compressor:
def __init__(self):
self.referencePrefix = "`"
self.referencePrefixCode = ord(self.referencePrefix)
self.referenceIntBase = 96
self.referenceIntFloorCode = ord(" ")
self.referenceIntCeilCode = self.referenceIntFloorCode + self.referenceIntBase - 1
self.maxStringDistance = self.referenceIntBase ** 2 - 1
self.minStringLength = 5
self.maxStringLength = self.referenceIntBase ** 1 - 1 + self.minStringLength
self.maxWindowLength = self.maxStringDistance + self.minStringLength;
self.defaultWindowLength = 144
def decompress(self, data):
"""Decompresses LZ77 compressed text data"""
decompressed = ""
pos = 0
while pos < len(data):
currentChar = data[pos]
if currentChar != self.referencePrefix:
decompressed += currentChar
pos += 1
else:
nextChar = data[pos + 1]
if nextChar != self.referencePrefix:
distance = self.__decodeReferenceInt(data[pos + 1 : pos + 3], 2)
length = self.__decodeReferenceLength(data[pos + 3])
start = len(decompressed) - distance - length
end = start + length
decompressed += decompressed[start : end]
pos += self.minStringLength - 1
else:
decompressed += self.referencePrefix
pos += 2
return decompressed
def __encodeReferenceInt(self, value, width):
if value >= 0 and value < (self.referenceIntBase ** width - 1):
encoded = ""
while value > 0:
encoded = chr((value % self.referenceIntBase) + self.referenceIntFloorCode) + encoded
value = int(value / self.referenceIntBase)
missingLength = width - len(encoded)
for i in range(missingLength):
encoded = chr(self.referenceIntFloorCode) + encoded
return encoded
else:
raise Exception("Reference value out of range: %d (width = %d)" % (value, width))
def __encodeReferenceLength(self, length):
return self.__encodeReferenceInt(length - self.minStringLength, 1)
def __decodeReferenceInt(self, data, width):
value = 0
for i in range(width):
value *= self.referenceIntBase
charCode = ord(data[i])
if charCode >= self.referenceIntFloorCode and charCode <= self.referenceIntCeilCode:
value += charCode - self.referenceIntFloorCode
else:
raise Exception("Invalid char code: %d" % charCode)
return value
def __decodeReferenceLength(self, data):
return self.__decodeReferenceInt(data, 1) + self.minStringLength |
ianclegg/winrmlib | winrmlib/api/compression.py | Compressor.decompress | python | def decompress(self, data):
decompressed = ""
pos = 0
while pos < len(data):
currentChar = data[pos]
if currentChar != self.referencePrefix:
decompressed += currentChar
pos += 1
else:
nextChar = data[pos + 1]
if nextChar != self.referencePrefix:
distance = self.__decodeReferenceInt(data[pos + 1 : pos + 3], 2)
length = self.__decodeReferenceLength(data[pos + 3])
start = len(decompressed) - distance - length
end = start + length
decompressed += decompressed[start : end]
pos += self.minStringLength - 1
else:
decompressed += self.referencePrefix
pos += 2
return decompressed | Decompresses LZ77 compressed text data | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/compression.py#L184-L207 | [
"def __decodeReferenceInt(self, data, width):\n\tvalue = 0\n\tfor i in range(width):\n\t\tvalue *= self.referenceIntBase\n\t\tcharCode = ord(data[i])\n\t\tif charCode >= self.referenceIntFloorCode and charCode <= self.referenceIntCeilCode:\n\t\t\tvalue += charCode - self.referenceIntFloorCode\n\t\telse:\n\t\t\traise Exception(\"Invalid char code: %d\" % charCode)\n\n\treturn value\n",
"def __decodeReferenceLength(self, data):\n\treturn self.__decodeReferenceInt(data, 1) + self.minStringLength"
] | class Compressor:
def __init__(self):
self.referencePrefix = "`"
self.referencePrefixCode = ord(self.referencePrefix)
self.referenceIntBase = 96
self.referenceIntFloorCode = ord(" ")
self.referenceIntCeilCode = self.referenceIntFloorCode + self.referenceIntBase - 1
self.maxStringDistance = self.referenceIntBase ** 2 - 1
self.minStringLength = 5
self.maxStringLength = self.referenceIntBase ** 1 - 1 + self.minStringLength
self.maxWindowLength = self.maxStringDistance + self.minStringLength;
self.defaultWindowLength = 144
def compress(self, data, windowLength = None):
"""Compresses text data using the LZ77 algorithm."""
if windowLength == None:
windowLength = self.defaultWindowLength
compressed = ""
pos = 0
lastPos = len(data) - self.minStringLength
while pos < lastPos:
searchStart = max(pos - windowLength, 0);
matchLength = self.minStringLength
foundMatch = False
bestMatchDistance = self.maxStringDistance
bestMatchLength = 0
newCompressed = None
while (searchStart + matchLength) < pos:
m1 = data[searchStart : searchStart + matchLength]
m2 = data[pos : pos + matchLength]
isValidMatch = (m1 == m2 and matchLength < self.maxStringLength)
if isValidMatch:
matchLength += 1
foundMatch = True
else:
realMatchLength = matchLength - 1
if foundMatch and realMatchLength > bestMatchLength:
bestMatchDistance = pos - searchStart - realMatchLength
bestMatchLength = realMatchLength
matchLength = self.minStringLength
searchStart += 1
foundMatch = False
if bestMatchLength:
newCompressed = (self.referencePrefix + self.__encodeReferenceInt(bestMatchDistance, 2) + self.__encodeReferenceLength(bestMatchLength))
pos += bestMatchLength
else:
if data[pos] != self.referencePrefix:
newCompressed = data[pos]
else:
newCompressed = self.referencePrefix + self.referencePrefix
pos += 1
compressed += newCompressed
return compressed + data[pos:].replace("`", "``")
def __encodeReferenceInt(self, value, width):
if value >= 0 and value < (self.referenceIntBase ** width - 1):
encoded = ""
while value > 0:
encoded = chr((value % self.referenceIntBase) + self.referenceIntFloorCode) + encoded
value = int(value / self.referenceIntBase)
missingLength = width - len(encoded)
for i in range(missingLength):
encoded = chr(self.referenceIntFloorCode) + encoded
return encoded
else:
raise Exception("Reference value out of range: %d (width = %d)" % (value, width))
def __encodeReferenceLength(self, length):
return self.__encodeReferenceInt(length - self.minStringLength, 1)
def __decodeReferenceInt(self, data, width):
value = 0
for i in range(width):
value *= self.referenceIntBase
charCode = ord(data[i])
if charCode >= self.referenceIntFloorCode and charCode <= self.referenceIntCeilCode:
value += charCode - self.referenceIntFloorCode
else:
raise Exception("Invalid char code: %d" % charCode)
return value
def __decodeReferenceLength(self, data):
return self.__decodeReferenceInt(data, 1) + self.minStringLength |
ianclegg/winrmlib | winrmlib/shell.py | CommandShell.open | python | def open(self, input_streams=['stdin'], output_streams=['stderr', 'stdout']):
shell = dict()
shell['rsp:InputStreams'] = " ".join(input_streams)
shell['rsp:OutputStreams'] = " ".join(output_streams)
shell['rsp:IdleTimeout'] = str(self.idle_timeout)
if self.working_directory is not None:
shell['rsp:WorkingDirectory'] = str(self.working_directory)
if self.environment is not None:
variables = []
for key, value in self.environment.items():
variables.append({'#text': str(value), '@Name': key})
shell['rsp:Environment'] = {'Variable': variables}
response = self.session.create(self.resource, {'rsp:Shell': shell})
self.__shell_id = response['rsp:Shell']['rsp:ShellId'] | Opens the remote shell | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L56-L75 | [
"def create(self, resource, obj,\n operation_timeout=None, max_envelope_size=None, locale=None):\n \"\"\"\n resource can be a URL or a ResourceLocator\n \"\"\"\n if isinstance(resource, str):\n resource = ResourceLocator(resource)\n\n headers = self._build_headers(resource, Session.CreateAction,\n operation_timeout, max_envelope_size, locale)\n return self.service.invoke(headers, obj)\n"
] | class CommandShell(object):
def __init__(self, endpoint, username, password, **kwargs):
"""
timeout
codepage
noprofile
environment
"""
# Process arguments
self.environment = kwargs.get('environment', None)
self.working_directory = kwargs.get('working_directory', None)
self.idle_timeout = kwargs.get('idle_timeout', 180000)
codepage = kwargs.get('codepage', 437)
# Build the Session and the SOAP Headers
self.__shell_id = None
self.session = Session(endpoint, username, password)
self.resource = ResourceLocator(CommandShell.ShellResource)
self.resource.add_option('WINRS_CODEPAGE', codepage, True)
if bool(kwargs.get('noprofile', False)):
self.resource.add_option('WINRS_NOPROFILE', 'FALSE', True)
else:
self.resource.add_option('WINRS_NOPROFILE', 'TRUE', True)
def run(self, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False):
"""This function does something.
:param command: The command to be executed
:type name: str.
:param arguments: A list of arguments to be passed to the command
:type state: str.
:returns: int -- the return code.
:raises: AttributeError, KeyError
iclegg: blocking i/o operations are slow, doesnt Python have a moden 'async' mechanism
rather than replying on 80's style callbacks?
"""
logging.info('running command: ' + command)
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
resource.add_option('WINRS_SKIP_CMD_SHELL', ['FALSE', 'TRUE'][bool(skip_cmd_shell)], True)
resource.add_option('WINRS_CONSOLEMODE_STDIN', ['FALSE', 'TRUE'][bool(console_mode_stdin)], True)
command = OrderedDict([('rsp:Command', command)])
command['rsp:Arguments'] = list(arguments)
response = self.session.command(resource, {'rsp:CommandLine': command})
command_id = response['rsp:CommandResponse']['rsp:CommandId']
logging.info('receive command: ' + command_id)
return command_id
def receive(self, command_id, streams=('stdout', 'stderr'), command_timeout=60):
"""
Recieves data
:param command_id:
:param streams:
:param command_timeout:
:return:
"""
logging.info('receive command: ' + command_id)
response_streams = dict.fromkeys(streams, '')
(complete, exit_code) = self._receive_poll(command_id, response_streams)
while not complete:
(complete, exit_code) = self._receive_poll(command_id, response_streams)
# This retains some compatibility with pywinrm
if sorted(response_streams.keys()) == sorted(['stderr', 'stdout']):
return response_streams['stdout'], response_streams['stderr'], exit_code
else:
return response_streams, exit_code
def _receive_poll(self, command_id, response_streams):
"""
Recieves data
:param command_id:
:param streams:
:return:
"""
logging.info('receive command: ' + command_id)
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
stream_attributes = {'#text': " ".join(response_streams.keys()), '@CommandId': command_id}
receive = {'rsp:Receive': {'rsp:DesiredStream': stream_attributes}}
try:
response = self.session.recieve(resource, receive)['rsp:ReceiveResponse']
except Exception as e:
return False, None
# some responses will not include any output
session_streams = response.get('rsp:Stream', ())
if not isinstance(session_streams, list):
session_streams = [session_streams]
for stream in session_streams:
if stream['@CommandId'] == command_id and '#text' in stream:
response_streams[stream['@Name']] += base64.b64decode(stream['#text'])
# XPRESS Compression Testing
# print "\\x".join("{:02x}".format(ord(c)) for c in base64.b64decode(stream['#text']))
# data = base64.b64decode(stream['#text'])
# f = open('c:\\users\\developer\\temp\\data.bin', 'wb')
# f.write(data)
# f.close()
# decode = api.compression.xpress_decode(data[4:])
done = response['rsp:CommandState']['@State'] == CommandShell.StateDone
if done:
exit_code = int(response['rsp:CommandState']['rsp:ExitCode'])
else: exit_code = None
return done, exit_code
def close(self):
"""
Closes pipe
:return:
"""
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
self.session.delete(resource)
|
ianclegg/winrmlib | winrmlib/shell.py | CommandShell.run | python | def run(self, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False):
logging.info('running command: ' + command)
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
resource.add_option('WINRS_SKIP_CMD_SHELL', ['FALSE', 'TRUE'][bool(skip_cmd_shell)], True)
resource.add_option('WINRS_CONSOLEMODE_STDIN', ['FALSE', 'TRUE'][bool(console_mode_stdin)], True)
command = OrderedDict([('rsp:Command', command)])
command['rsp:Arguments'] = list(arguments)
response = self.session.command(resource, {'rsp:CommandLine': command})
command_id = response['rsp:CommandResponse']['rsp:CommandId']
logging.info('receive command: ' + command_id)
return command_id | This function does something.
:param command: The command to be executed
:type name: str.
:param arguments: A list of arguments to be passed to the command
:type state: str.
:returns: int -- the return code.
:raises: AttributeError, KeyError
iclegg: blocking i/o operations are slow, doesnt Python have a moden 'async' mechanism
rather than replying on 80's style callbacks? | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L77-L101 | [
"def add_option(self, name, value, must_comply):\n self.options[name] = [value, must_comply]\n",
"def add_selector(self, name, value):\n self.selectors[name] = value\n",
"def command(self, resource, obj,\n operation_timeout=None, max_envelope_size=None, locale=None):\n \"\"\"\n resource can be a URL or a ResourceLocator\n \"\"\"\n if isinstance(resource, str):\n resource = ResourceLocator(resource)\n\n headers = self._build_headers(resource, Session.CommandAction,\n operation_timeout, max_envelope_size, locale)\n return self.service.invoke(headers, obj)\n"
] | class CommandShell(object):
def __init__(self, endpoint, username, password, **kwargs):
"""
timeout
codepage
noprofile
environment
"""
# Process arguments
self.environment = kwargs.get('environment', None)
self.working_directory = kwargs.get('working_directory', None)
self.idle_timeout = kwargs.get('idle_timeout', 180000)
codepage = kwargs.get('codepage', 437)
# Build the Session and the SOAP Headers
self.__shell_id = None
self.session = Session(endpoint, username, password)
self.resource = ResourceLocator(CommandShell.ShellResource)
self.resource.add_option('WINRS_CODEPAGE', codepage, True)
if bool(kwargs.get('noprofile', False)):
self.resource.add_option('WINRS_NOPROFILE', 'FALSE', True)
else:
self.resource.add_option('WINRS_NOPROFILE', 'TRUE', True)
def open(self, input_streams=['stdin'], output_streams=['stderr', 'stdout']):
"""
Opens the remote shell
"""
shell = dict()
shell['rsp:InputStreams'] = " ".join(input_streams)
shell['rsp:OutputStreams'] = " ".join(output_streams)
shell['rsp:IdleTimeout'] = str(self.idle_timeout)
if self.working_directory is not None:
shell['rsp:WorkingDirectory'] = str(self.working_directory)
if self.environment is not None:
variables = []
for key, value in self.environment.items():
variables.append({'#text': str(value), '@Name': key})
shell['rsp:Environment'] = {'Variable': variables}
response = self.session.create(self.resource, {'rsp:Shell': shell})
self.__shell_id = response['rsp:Shell']['rsp:ShellId']
def receive(self, command_id, streams=('stdout', 'stderr'), command_timeout=60):
"""
Recieves data
:param command_id:
:param streams:
:param command_timeout:
:return:
"""
logging.info('receive command: ' + command_id)
response_streams = dict.fromkeys(streams, '')
(complete, exit_code) = self._receive_poll(command_id, response_streams)
while not complete:
(complete, exit_code) = self._receive_poll(command_id, response_streams)
# This retains some compatibility with pywinrm
if sorted(response_streams.keys()) == sorted(['stderr', 'stdout']):
return response_streams['stdout'], response_streams['stderr'], exit_code
else:
return response_streams, exit_code
def _receive_poll(self, command_id, response_streams):
"""
Recieves data
:param command_id:
:param streams:
:return:
"""
logging.info('receive command: ' + command_id)
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
stream_attributes = {'#text': " ".join(response_streams.keys()), '@CommandId': command_id}
receive = {'rsp:Receive': {'rsp:DesiredStream': stream_attributes}}
try:
response = self.session.recieve(resource, receive)['rsp:ReceiveResponse']
except Exception as e:
return False, None
# some responses will not include any output
session_streams = response.get('rsp:Stream', ())
if not isinstance(session_streams, list):
session_streams = [session_streams]
for stream in session_streams:
if stream['@CommandId'] == command_id and '#text' in stream:
response_streams[stream['@Name']] += base64.b64decode(stream['#text'])
# XPRESS Compression Testing
# print "\\x".join("{:02x}".format(ord(c)) for c in base64.b64decode(stream['#text']))
# data = base64.b64decode(stream['#text'])
# f = open('c:\\users\\developer\\temp\\data.bin', 'wb')
# f.write(data)
# f.close()
# decode = api.compression.xpress_decode(data[4:])
done = response['rsp:CommandState']['@State'] == CommandShell.StateDone
if done:
exit_code = int(response['rsp:CommandState']['rsp:ExitCode'])
else: exit_code = None
return done, exit_code
def close(self):
"""
Closes pipe
:return:
"""
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
self.session.delete(resource)
|
ianclegg/winrmlib | winrmlib/shell.py | CommandShell.receive | python | def receive(self, command_id, streams=('stdout', 'stderr'), command_timeout=60):
logging.info('receive command: ' + command_id)
response_streams = dict.fromkeys(streams, '')
(complete, exit_code) = self._receive_poll(command_id, response_streams)
while not complete:
(complete, exit_code) = self._receive_poll(command_id, response_streams)
# This retains some compatibility with pywinrm
if sorted(response_streams.keys()) == sorted(['stderr', 'stdout']):
return response_streams['stdout'], response_streams['stderr'], exit_code
else:
return response_streams, exit_code | Recieves data
:param command_id:
:param streams:
:param command_timeout:
:return: | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L103-L121 | [
"def _receive_poll(self, command_id, response_streams):\n \"\"\"\n Recieves data\n :param command_id:\n :param streams:\n :return:\n \"\"\"\n logging.info('receive command: ' + command_id)\n resource = ResourceLocator(CommandShell.ShellResource)\n resource.add_selector('ShellId', self.__shell_id)\n\n stream_attributes = {'#text': \" \".join(response_streams.keys()), '@CommandId': command_id}\n receive = {'rsp:Receive': {'rsp:DesiredStream': stream_attributes}}\n\n try:\n response = self.session.recieve(resource, receive)['rsp:ReceiveResponse']\n except Exception as e:\n return False, None\n\n # some responses will not include any output\n session_streams = response.get('rsp:Stream', ())\n if not isinstance(session_streams, list):\n session_streams = [session_streams]\n\n for stream in session_streams:\n if stream['@CommandId'] == command_id and '#text' in stream:\n response_streams[stream['@Name']] += base64.b64decode(stream['#text'])\n # XPRESS Compression Testing\n # print \"\\\\x\".join(\"{:02x}\".format(ord(c)) for c in base64.b64decode(stream['#text']))\n # data = base64.b64decode(stream['#text'])\n # f = open('c:\\\\users\\\\developer\\\\temp\\\\data.bin', 'wb')\n # f.write(data)\n # f.close()\n # decode = api.compression.xpress_decode(data[4:])\n done = response['rsp:CommandState']['@State'] == CommandShell.StateDone\n if done:\n exit_code = int(response['rsp:CommandState']['rsp:ExitCode'])\n else: exit_code = None\n return done, exit_code\n"
] | class CommandShell(object):
def __init__(self, endpoint, username, password, **kwargs):
"""
timeout
codepage
noprofile
environment
"""
# Process arguments
self.environment = kwargs.get('environment', None)
self.working_directory = kwargs.get('working_directory', None)
self.idle_timeout = kwargs.get('idle_timeout', 180000)
codepage = kwargs.get('codepage', 437)
# Build the Session and the SOAP Headers
self.__shell_id = None
self.session = Session(endpoint, username, password)
self.resource = ResourceLocator(CommandShell.ShellResource)
self.resource.add_option('WINRS_CODEPAGE', codepage, True)
if bool(kwargs.get('noprofile', False)):
self.resource.add_option('WINRS_NOPROFILE', 'FALSE', True)
else:
self.resource.add_option('WINRS_NOPROFILE', 'TRUE', True)
def open(self, input_streams=['stdin'], output_streams=['stderr', 'stdout']):
"""
Opens the remote shell
"""
shell = dict()
shell['rsp:InputStreams'] = " ".join(input_streams)
shell['rsp:OutputStreams'] = " ".join(output_streams)
shell['rsp:IdleTimeout'] = str(self.idle_timeout)
if self.working_directory is not None:
shell['rsp:WorkingDirectory'] = str(self.working_directory)
if self.environment is not None:
variables = []
for key, value in self.environment.items():
variables.append({'#text': str(value), '@Name': key})
shell['rsp:Environment'] = {'Variable': variables}
response = self.session.create(self.resource, {'rsp:Shell': shell})
self.__shell_id = response['rsp:Shell']['rsp:ShellId']
def run(self, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False):
"""This function does something.
:param command: The command to be executed
:type name: str.
:param arguments: A list of arguments to be passed to the command
:type state: str.
:returns: int -- the return code.
:raises: AttributeError, KeyError
iclegg: blocking i/o operations are slow, doesnt Python have a moden 'async' mechanism
rather than replying on 80's style callbacks?
"""
logging.info('running command: ' + command)
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
resource.add_option('WINRS_SKIP_CMD_SHELL', ['FALSE', 'TRUE'][bool(skip_cmd_shell)], True)
resource.add_option('WINRS_CONSOLEMODE_STDIN', ['FALSE', 'TRUE'][bool(console_mode_stdin)], True)
command = OrderedDict([('rsp:Command', command)])
command['rsp:Arguments'] = list(arguments)
response = self.session.command(resource, {'rsp:CommandLine': command})
command_id = response['rsp:CommandResponse']['rsp:CommandId']
logging.info('receive command: ' + command_id)
return command_id
def _receive_poll(self, command_id, response_streams):
"""
Recieves data
:param command_id:
:param streams:
:return:
"""
logging.info('receive command: ' + command_id)
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
stream_attributes = {'#text': " ".join(response_streams.keys()), '@CommandId': command_id}
receive = {'rsp:Receive': {'rsp:DesiredStream': stream_attributes}}
try:
response = self.session.recieve(resource, receive)['rsp:ReceiveResponse']
except Exception as e:
return False, None
# some responses will not include any output
session_streams = response.get('rsp:Stream', ())
if not isinstance(session_streams, list):
session_streams = [session_streams]
for stream in session_streams:
if stream['@CommandId'] == command_id and '#text' in stream:
response_streams[stream['@Name']] += base64.b64decode(stream['#text'])
# XPRESS Compression Testing
# print "\\x".join("{:02x}".format(ord(c)) for c in base64.b64decode(stream['#text']))
# data = base64.b64decode(stream['#text'])
# f = open('c:\\users\\developer\\temp\\data.bin', 'wb')
# f.write(data)
# f.close()
# decode = api.compression.xpress_decode(data[4:])
done = response['rsp:CommandState']['@State'] == CommandShell.StateDone
if done:
exit_code = int(response['rsp:CommandState']['rsp:ExitCode'])
else: exit_code = None
return done, exit_code
def close(self):
"""
Closes pipe
:return:
"""
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
self.session.delete(resource)
|
ianclegg/winrmlib | winrmlib/shell.py | CommandShell._receive_poll | python | def _receive_poll(self, command_id, response_streams):
logging.info('receive command: ' + command_id)
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
stream_attributes = {'#text': " ".join(response_streams.keys()), '@CommandId': command_id}
receive = {'rsp:Receive': {'rsp:DesiredStream': stream_attributes}}
try:
response = self.session.recieve(resource, receive)['rsp:ReceiveResponse']
except Exception as e:
return False, None
# some responses will not include any output
session_streams = response.get('rsp:Stream', ())
if not isinstance(session_streams, list):
session_streams = [session_streams]
for stream in session_streams:
if stream['@CommandId'] == command_id and '#text' in stream:
response_streams[stream['@Name']] += base64.b64decode(stream['#text'])
# XPRESS Compression Testing
# print "\\x".join("{:02x}".format(ord(c)) for c in base64.b64decode(stream['#text']))
# data = base64.b64decode(stream['#text'])
# f = open('c:\\users\\developer\\temp\\data.bin', 'wb')
# f.write(data)
# f.close()
# decode = api.compression.xpress_decode(data[4:])
done = response['rsp:CommandState']['@State'] == CommandShell.StateDone
if done:
exit_code = int(response['rsp:CommandState']['rsp:ExitCode'])
else: exit_code = None
return done, exit_code | Recieves data
:param command_id:
:param streams:
:return: | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L123-L161 | [
"def add_selector(self, name, value):\n self.selectors[name] = value\n"
] | class CommandShell(object):
def __init__(self, endpoint, username, password, **kwargs):
    """
    Create a shell resource locator for a WinRM endpoint.

    Optional keyword arguments:
        environment       -- dict of environment variables for the shell
        working_directory -- working directory for the shell
        idle_timeout      -- server-side idle timeout in ms (default 180000)
        codepage          -- console codepage (default 437)
        noprofile         -- when True, do NOT load the user's profile
    """
    # Process arguments
    self.environment = kwargs.get('environment', None)
    self.working_directory = kwargs.get('working_directory', None)
    self.idle_timeout = kwargs.get('idle_timeout', 180000)
    codepage = kwargs.get('codepage', 437)
    # Build the Session and the SOAP Headers
    self.__shell_id = None
    self.session = Session(endpoint, username, password)
    self.resource = ResourceLocator(CommandShell.ShellResource)
    self.resource.add_option('WINRS_CODEPAGE', codepage, True)
    # BUG FIX: the flag was inverted -- noprofile=True previously sent
    # WINRS_NOPROFILE=FALSE (profile WAS loaded) and the default sent TRUE.
    # Per MS-WSMV, WINRS_NOPROFILE=TRUE means "do not load the profile",
    # so it must be TRUE exactly when the caller asked for noprofile.
    if bool(kwargs.get('noprofile', False)):
        self.resource.add_option('WINRS_NOPROFILE', 'TRUE', True)
    else:
        self.resource.add_option('WINRS_NOPROFILE', 'FALSE', True)
def open(self, input_streams=['stdin'], output_streams=['stderr', 'stdout']):
"""
Opens the remote shell
"""
shell = dict()
shell['rsp:InputStreams'] = " ".join(input_streams)
shell['rsp:OutputStreams'] = " ".join(output_streams)
shell['rsp:IdleTimeout'] = str(self.idle_timeout)
if self.working_directory is not None:
shell['rsp:WorkingDirectory'] = str(self.working_directory)
if self.environment is not None:
variables = []
for key, value in self.environment.items():
variables.append({'#text': str(value), '@Name': key})
shell['rsp:Environment'] = {'Variable': variables}
response = self.session.create(self.resource, {'rsp:Shell': shell})
self.__shell_id = response['rsp:Shell']['rsp:ShellId']
def run(self, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False):
    """
    Start *command* in the open shell and return its command id.

    :param command: the command line to execute
    :param arguments: iterable of arguments appended to the command
    :param console_mode_stdin: emulate console-mode stdin on the server
    :param skip_cmd_shell: run the executable directly instead of via cmd
    :return: the server-assigned command id for use with receive()
    """
    logging.info('running command: ' + command)
    locator = ResourceLocator(CommandShell.ShellResource)
    locator.add_selector('ShellId', self.__shell_id)
    # WS-Man options take the literal strings 'TRUE'/'FALSE'.
    skip_flag = 'TRUE' if skip_cmd_shell else 'FALSE'
    stdin_flag = 'TRUE' if console_mode_stdin else 'FALSE'
    locator.add_option('WINRS_SKIP_CMD_SHELL', skip_flag, True)
    locator.add_option('WINRS_CONSOLEMODE_STDIN', stdin_flag, True)
    command_line = OrderedDict([('rsp:Command', command)])
    command_line['rsp:Arguments'] = list(arguments)
    response = self.session.command(locator, {'rsp:CommandLine': command_line})
    command_id = response['rsp:CommandResponse']['rsp:CommandId']
    logging.info('receive command: ' + command_id)
    return command_id
def receive(self, command_id, streams=('stdout', 'stderr'), command_timeout=60):
"""
Recieves data
:param command_id:
:param streams:
:param command_timeout:
:return:
"""
logging.info('receive command: ' + command_id)
response_streams = dict.fromkeys(streams, '')
(complete, exit_code) = self._receive_poll(command_id, response_streams)
while not complete:
(complete, exit_code) = self._receive_poll(command_id, response_streams)
# This retains some compatibility with pywinrm
if sorted(response_streams.keys()) == sorted(['stderr', 'stdout']):
return response_streams['stdout'], response_streams['stderr'], exit_code
else:
return response_streams, exit_code
def close(self):
    """
    Tear down the remote shell by deleting its WS-Man shell resource.
    """
    locator = ResourceLocator(CommandShell.ShellResource)
    locator.add_selector('ShellId', self.__shell_id)
    self.session.delete(locator)
|
ianclegg/winrmlib | winrmlib/shell.py | CommandShell.close | python | def close(self):
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
self.session.delete(resource) | Closes pipe
:return: | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L163-L170 | [
"def add_selector(self, name, value):\n self.selectors[name] = value\n",
"def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):\n \"\"\"\n resource can be a URL or a ResourceLocator\n \"\"\"\n if isinstance(resource, str):\n resource = ResourceLocator(resource)\n\n headers = self._build_headers(resource, Session.DeleteAction,\n operation_timeout, max_envelope_size, locale)\n return self.service.invoke(headers, None)\n"
] | class CommandShell(object):
def __init__(self, endpoint, username, password, **kwargs):
"""
timeout
codepage
noprofile
environment
"""
# Process arguments
self.environment = kwargs.get('environment', None)
self.working_directory = kwargs.get('working_directory', None)
self.idle_timeout = kwargs.get('idle_timeout', 180000)
codepage = kwargs.get('codepage', 437)
# Build the Session and the SOAP Headers
self.__shell_id = None
self.session = Session(endpoint, username, password)
self.resource = ResourceLocator(CommandShell.ShellResource)
self.resource.add_option('WINRS_CODEPAGE', codepage, True)
if bool(kwargs.get('noprofile', False)):
self.resource.add_option('WINRS_NOPROFILE', 'FALSE', True)
else:
self.resource.add_option('WINRS_NOPROFILE', 'TRUE', True)
def open(self, input_streams=['stdin'], output_streams=['stderr', 'stdout']):
"""
Opens the remote shell
"""
shell = dict()
shell['rsp:InputStreams'] = " ".join(input_streams)
shell['rsp:OutputStreams'] = " ".join(output_streams)
shell['rsp:IdleTimeout'] = str(self.idle_timeout)
if self.working_directory is not None:
shell['rsp:WorkingDirectory'] = str(self.working_directory)
if self.environment is not None:
variables = []
for key, value in self.environment.items():
variables.append({'#text': str(value), '@Name': key})
shell['rsp:Environment'] = {'Variable': variables}
response = self.session.create(self.resource, {'rsp:Shell': shell})
self.__shell_id = response['rsp:Shell']['rsp:ShellId']
def run(self, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False):
"""This function does something.
:param command: The command to be executed
:type name: str.
:param arguments: A list of arguments to be passed to the command
:type state: str.
:returns: int -- the return code.
:raises: AttributeError, KeyError
iclegg: blocking i/o operations are slow, doesnt Python have a moden 'async' mechanism
rather than replying on 80's style callbacks?
"""
logging.info('running command: ' + command)
resource = ResourceLocator(CommandShell.ShellResource)
resource.add_selector('ShellId', self.__shell_id)
resource.add_option('WINRS_SKIP_CMD_SHELL', ['FALSE', 'TRUE'][bool(skip_cmd_shell)], True)
resource.add_option('WINRS_CONSOLEMODE_STDIN', ['FALSE', 'TRUE'][bool(console_mode_stdin)], True)
command = OrderedDict([('rsp:Command', command)])
command['rsp:Arguments'] = list(arguments)
response = self.session.command(resource, {'rsp:CommandLine': command})
command_id = response['rsp:CommandResponse']['rsp:CommandId']
logging.info('receive command: ' + command_id)
return command_id
def receive(self, command_id, streams=('stdout', 'stderr'), command_timeout=60):
"""
Recieves data
:param command_id:
:param streams:
:param command_timeout:
:return:
"""
logging.info('receive command: ' + command_id)
response_streams = dict.fromkeys(streams, '')
(complete, exit_code) = self._receive_poll(command_id, response_streams)
while not complete:
(complete, exit_code) = self._receive_poll(command_id, response_streams)
# This retains some compatibility with pywinrm
if sorted(response_streams.keys()) == sorted(['stderr', 'stdout']):
return response_streams['stdout'], response_streams['stderr'], exit_code
else:
return response_streams, exit_code
def _receive_poll(self, command_id, response_streams):
    """
    Perform one WS-Man Receive round-trip for *command_id*.

    Appends any returned stream data to *response_streams* (a dict of
    stream name -> accumulated data, mutated in place).

    :param command_id: id of the command being polled
    :param response_streams: dict of stream-name -> buffer
    :return: (done, exit_code) -- exit_code is None until done is True
    """
    logging.info('receive command: ' + command_id)
    resource = ResourceLocator(CommandShell.ShellResource)
    resource.add_selector('ShellId', self.__shell_id)
    stream_attributes = {'#text': " ".join(response_streams.keys()), '@CommandId': command_id}
    receive = {'rsp:Receive': {'rsp:DesiredStream': stream_attributes}}
    try:
        response = self.session.recieve(resource, receive)['rsp:ReceiveResponse']
    except Exception as e:
        # BUG FIX: this failure used to be swallowed silently, so the
        # caller's poll loop (CommandShell.receive) would spin with no
        # diagnostic at all. Log it; the return contract is unchanged.
        logging.warning('receive failed for command %s: %s', command_id, e)
        return False, None
    # some responses will not include any output
    session_streams = response.get('rsp:Stream', ())
    if not isinstance(session_streams, list):
        session_streams = [session_streams]
    for stream in session_streams:
        if stream['@CommandId'] == command_id and '#text' in stream:
            # NOTE(review): b64decode returns bytes on Python 3 while the
            # buffers start as str -- this module targets Python 2.
            response_streams[stream['@Name']] += base64.b64decode(stream['#text'])
    done = response['rsp:CommandState']['@State'] == CommandShell.StateDone
    if done:
        exit_code = int(response['rsp:CommandState']['rsp:ExitCode'])
    else:
        exit_code = None
    return done, exit_code
|
ianclegg/winrmlib | winrmlib/api/session.py | Session.get | python | def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.GetAction, operation_timeout, max_envelope_size, locale)
self.service.invoke.set_options(tsoapheaders=headers)
return self.service.invoke | resource can be a URL or a ResourceLocator | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L51-L60 | [
"def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):\n headers = OrderedDict([\n ('a:To', self.endpoint),\n ('a:ReplyTo', Session.Address),\n ('w:ResourceURI', resource.url),\n ('a:MessageID', format(uuid.uuid4())),\n ('a:Action', action)]\n )\n # TODO: Implement support for Microsoft XPRESS compression\n # https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/\n # wsman-xpress-remote-shell-compression?forum=os_windowsprotocols\n\n # headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})\n # only include the operation timeout if the user specified one when the class was instantiated\n # or if the user explicitly set one when invoking a method.\n if operation_timeout is not None:\n headers.update(self._build_operation_timeout(operation_timeout))\n elif self.default_operation_timeout is not None:\n headers.update(self.default_operation_timeout)\n\n headers.update(self._build_selectors(resource.selectors))\n headers.update(self._build_options(resource.options))\n headers.update(self._build_max_envelope(max_envelope_size))\n headers.update(self._build_locale(locale))\n return headers\n"
] | class Session(object):
"""
Factory object for building sessions and connection options
"""
def __init__(self, endpoint, username, password, **kwargs):
    """
    Build a WS-Man session against *endpoint* with the given credentials.

    Optional keyword arguments max_envelope_size, locale and
    operation_timeout override the class-level defaults for this session.
    """
    self.endpoint = endpoint
    # False == No CredSSP
    self.service = Service(endpoint, username, password, True)
    # Session-level defaults; each request may still override them.
    envelope_size = kwargs.get('max_envelope_size', Session.MaxEnvelopeSize)
    self.max_envelope = self._build_max_envelope(envelope_size)
    self.locale = self._build_locale(kwargs.get('locale', Session.Locale))
    # The operation timeout header overrides the timeout set on the server;
    # only carry a default when the caller explicitly asked for one.
    if 'operation_timeout' in kwargs:
        timeout = kwargs['operation_timeout']
        self.default_operation_timeout = self._build_operation_timeout(timeout)
    else:
        self.default_operation_timeout = None
def put(self, resource, obj,
        operation_timeout=None, max_envelope_size=None, locale=None):
    """
    Perform a WS-Man Put of *obj* on *resource*.

    :param resource: a URL string or a ResourceLocator
    :param obj: payload to send with the Put request
    :return: the decoded response from the service
    """
    if isinstance(resource, str):
        resource = ResourceLocator(resource)
    # BUG FIX: headers were previously hard-coded to None, so the request
    # carried no addressing/action headers at all. Build them exactly like
    # the sibling verbs (delete/create/command). The WS-Transfer Put action
    # URI is used literally because no Session.PutAction constant is
    # visible in this module -- TODO: fold into a class constant alongside
    # GetAction / DeleteAction et al.
    put_action = 'http://schemas.xmlsoap.org/ws/2004/09/transfer/Put'
    headers = self._build_headers(resource, put_action,
                                  operation_timeout, max_envelope_size, locale)
    return self.service.invoke(headers, obj)
def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
    """
    Perform a WS-Man Delete on *resource*.

    :param resource: a URL string or a ResourceLocator
    :return: the decoded response from the service
    """
    # Accept a bare URL for convenience, as every verb on this class does.
    if isinstance(resource, str):
        resource = ResourceLocator(resource)
    wsman_headers = self._build_headers(resource, Session.DeleteAction,
                                        operation_timeout, max_envelope_size, locale)
    return self.service.invoke(wsman_headers, None)
def create(self, resource, obj,
           operation_timeout=None, max_envelope_size=None, locale=None):
    """
    Perform a WS-Man Create of *obj* on *resource*.

    :param resource: a URL string or a ResourceLocator
    :param obj: payload to send with the Create request
    :return: the decoded response from the service
    """
    if isinstance(resource, str):
        resource = ResourceLocator(resource)
    wsman_headers = self._build_headers(resource, Session.CreateAction,
                                        operation_timeout, max_envelope_size, locale)
    return self.service.invoke(wsman_headers, obj)
def command(self, resource, obj,
            operation_timeout=None, max_envelope_size=None, locale=None):
    """
    Issue a WS-Man Command request (remote-shell verb) against *resource*.

    :param resource: a URL string or a ResourceLocator
    :param obj: the rsp:CommandLine payload
    :return: the decoded response from the service
    """
    if isinstance(resource, str):
        resource = ResourceLocator(resource)
    wsman_headers = self._build_headers(resource, Session.CommandAction,
                                        operation_timeout, max_envelope_size, locale)
    return self.service.invoke(wsman_headers, obj)
def recieve(self, resource, obj,
            operation_timeout=None, max_envelope_size=None, locale=None):
    """
    Issue a WS-Man Receive request against *resource*.

    NOTE(review): the name is misspelled ("recieve") but callers elsewhere
    (e.g. CommandShell._receive_poll) invoke it by this name, so it is part
    of the public interface and cannot be renamed here.

    :param resource: a URL string or a ResourceLocator
    :param obj: the rsp:Receive payload
    :return: the decoded response from the service
    """
    if isinstance(resource, str):
        resource = ResourceLocator(resource)
    wsman_headers = self._build_headers(resource, Session.ReceiveAction,
                                        operation_timeout, max_envelope_size, locale)
    return self.service.invoke(wsman_headers, obj)
@staticmethod
def _build_selectors(selectors):
# Build the WSMan SelectorSet Element from the selector dictionary
selector_set = []
for selector_name in selectors.iterkeys():
selector_value = selectors[selector_name]
selector_set.append({'#text': str(selector_value), '@Name': selector_name})
return {'w:SelectorSet': {'w:Selector': selector_set}}
@staticmethod
# TODO add mustcomply attribute to element
def _build_options(options):
option_set = []
for name, (value, must_comply) in options.iteritems():
must_comply = bool(must_comply)
option_set.append({'#text': str(value), '@Name': name})
return {'w:OptionSet': {'w:Option': option_set}}
def _build_operation_timeout(self, operation_timeout):
if operation_timeout is None:
return self.default_operation_timeout
else:
return {'w:OperationTimeout': 'PT{0}S'.format(operation_timeout)}
def _build_max_envelope(self, max_envelope_size):
if max_envelope_size is None:
return self.max_envelope
else:
return {'w:MaxEnvelopeSize': '{0}'.format(max_envelope_size)}
def _build_locale(self, locale):
if locale is None:
return self.locale
else:
return {'Locale': {"@xml:lang": "en-US"}}
def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):
headers = OrderedDict([
('a:To', self.endpoint),
('a:ReplyTo', Session.Address),
('w:ResourceURI', resource.url),
('a:MessageID', format(uuid.uuid4())),
('a:Action', action)]
)
# TODO: Implement support for Microsoft XPRESS compression
# https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/
# wsman-xpress-remote-shell-compression?forum=os_windowsprotocols
# headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})
# only include the operation timeout if the user specified one when the class was instantiated
# or if the user explicitly set one when invoking a method.
if operation_timeout is not None:
headers.update(self._build_operation_timeout(operation_timeout))
elif self.default_operation_timeout is not None:
headers.update(self.default_operation_timeout)
headers.update(self._build_selectors(resource.selectors))
headers.update(self._build_options(resource.options))
headers.update(self._build_max_envelope(max_envelope_size))
headers.update(self._build_locale(locale))
return headers
|
ianclegg/winrmlib | winrmlib/api/session.py | Session.put | python | def put(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
headers = None
return self.service.invoke(headers, obj) | resource can be a URL or a ResourceLocator | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L62-L68 | null | class Session(object):
"""
Factory object for building sessions and connection options
"""
def __init__(self, endpoint, username, password, **kwargs):
# transport = Session._build_transport(endpoint, auth, username, password)
# Store the endpoint and the service we will use to invoke it
self.endpoint = endpoint
# False == No CredSSP
self.service = Service(endpoint, username, password, True)
# The user can set override some defaults for the Session, they can also be overridden on each request
self.max_envelope = self._build_max_envelope(kwargs.get('max_envelope_size', Session.MaxEnvelopeSize))
self.locale = self._build_locale(kwargs.get('locale', Session.Locale))
# The operation timeout header overrides the timeout set on the server. Some users may prefer to
# use the servers default timeout, so this header will only be included if the user explicitly sets
# an operation timeout.
if 'operation_timeout' in kwargs:
self.default_operation_timeout = self._build_operation_timeout(kwargs.get('operation_timeout'))
else:
self.default_operation_timeout = None
def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
    """
    Perform a WS-Man Get on *resource*.

    :param resource: a URL string or a ResourceLocator
    :return: the decoded response from the service
    """
    if isinstance(resource, str):
        resource = ResourceLocator(resource)
    headers = self._build_headers(resource, Session.GetAction,
                                  operation_timeout, max_envelope_size, locale)
    # BUG FIX: this previously mutated suds-style options on the bound
    # `invoke` method and returned the method object itself without ever
    # calling it -- inconsistent with every sibling verb (delete/create/
    # command), which all invoke the service and return its response.
    return self.service.invoke(headers, None)
def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.DeleteAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, None)
def create(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CreateAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
def command(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CommandAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
def recieve(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.ReceiveAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
@staticmethod
def _build_selectors(selectors):
# Build the WSMan SelectorSet Element from the selector dictionary
selector_set = []
for selector_name in selectors.iterkeys():
selector_value = selectors[selector_name]
selector_set.append({'#text': str(selector_value), '@Name': selector_name})
return {'w:SelectorSet': {'w:Selector': selector_set}}
@staticmethod
# TODO add mustcomply attribute to element
def _build_options(options):
option_set = []
for name, (value, must_comply) in options.iteritems():
must_comply = bool(must_comply)
option_set.append({'#text': str(value), '@Name': name})
return {'w:OptionSet': {'w:Option': option_set}}
def _build_operation_timeout(self, operation_timeout):
if operation_timeout is None:
return self.default_operation_timeout
else:
return {'w:OperationTimeout': 'PT{0}S'.format(operation_timeout)}
def _build_max_envelope(self, max_envelope_size):
if max_envelope_size is None:
return self.max_envelope
else:
return {'w:MaxEnvelopeSize': '{0}'.format(max_envelope_size)}
def _build_locale(self, locale):
if locale is None:
return self.locale
else:
return {'Locale': {"@xml:lang": "en-US"}}
def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):
headers = OrderedDict([
('a:To', self.endpoint),
('a:ReplyTo', Session.Address),
('w:ResourceURI', resource.url),
('a:MessageID', format(uuid.uuid4())),
('a:Action', action)]
)
# TODO: Implement support for Microsoft XPRESS compression
# https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/
# wsman-xpress-remote-shell-compression?forum=os_windowsprotocols
# headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})
# only include the operation timeout if the user specified one when the class was instantiated
# or if the user explicitly set one when invoking a method.
if operation_timeout is not None:
headers.update(self._build_operation_timeout(operation_timeout))
elif self.default_operation_timeout is not None:
headers.update(self.default_operation_timeout)
headers.update(self._build_selectors(resource.selectors))
headers.update(self._build_options(resource.options))
headers.update(self._build_max_envelope(max_envelope_size))
headers.update(self._build_locale(locale))
return headers
|
ianclegg/winrmlib | winrmlib/api/session.py | Session.delete | python | def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.DeleteAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, None) | resource can be a URL or a ResourceLocator | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L70-L79 | [
"def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):\n headers = OrderedDict([\n ('a:To', self.endpoint),\n ('a:ReplyTo', Session.Address),\n ('w:ResourceURI', resource.url),\n ('a:MessageID', format(uuid.uuid4())),\n ('a:Action', action)]\n )\n # TODO: Implement support for Microsoft XPRESS compression\n # https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/\n # wsman-xpress-remote-shell-compression?forum=os_windowsprotocols\n\n # headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})\n # only include the operation timeout if the user specified one when the class was instantiated\n # or if the user explicitly set one when invoking a method.\n if operation_timeout is not None:\n headers.update(self._build_operation_timeout(operation_timeout))\n elif self.default_operation_timeout is not None:\n headers.update(self.default_operation_timeout)\n\n headers.update(self._build_selectors(resource.selectors))\n headers.update(self._build_options(resource.options))\n headers.update(self._build_max_envelope(max_envelope_size))\n headers.update(self._build_locale(locale))\n return headers\n"
] | class Session(object):
"""
Factory object for building sessions and connection options
"""
def __init__(self, endpoint, username, password, **kwargs):
# transport = Session._build_transport(endpoint, auth, username, password)
# Store the endpoint and the service we will use to invoke it
self.endpoint = endpoint
# False == No CredSSP
self.service = Service(endpoint, username, password, True)
# The user can set override some defaults for the Session, they can also be overridden on each request
self.max_envelope = self._build_max_envelope(kwargs.get('max_envelope_size', Session.MaxEnvelopeSize))
self.locale = self._build_locale(kwargs.get('locale', Session.Locale))
# The operation timeout header overrides the timeout set on the server. Some users may prefer to
# use the servers default timeout, so this header will only be included if the user explicitly sets
# an operation timeout.
if 'operation_timeout' in kwargs:
self.default_operation_timeout = self._build_operation_timeout(kwargs.get('operation_timeout'))
else:
self.default_operation_timeout = None
def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.GetAction, operation_timeout, max_envelope_size, locale)
self.service.invoke.set_options(tsoapheaders=headers)
return self.service.invoke
def put(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
headers = None
return self.service.invoke(headers, obj)
def create(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CreateAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
def command(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CommandAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
def recieve(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.ReceiveAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
@staticmethod
def _build_selectors(selectors):
# Build the WSMan SelectorSet Element from the selector dictionary
selector_set = []
for selector_name in selectors.iterkeys():
selector_value = selectors[selector_name]
selector_set.append({'#text': str(selector_value), '@Name': selector_name})
return {'w:SelectorSet': {'w:Selector': selector_set}}
@staticmethod
# TODO add mustcomply attribute to element
def _build_options(options):
option_set = []
for name, (value, must_comply) in options.iteritems():
must_comply = bool(must_comply)
option_set.append({'#text': str(value), '@Name': name})
return {'w:OptionSet': {'w:Option': option_set}}
def _build_operation_timeout(self, operation_timeout):
if operation_timeout is None:
return self.default_operation_timeout
else:
return {'w:OperationTimeout': 'PT{0}S'.format(operation_timeout)}
def _build_max_envelope(self, max_envelope_size):
if max_envelope_size is None:
return self.max_envelope
else:
return {'w:MaxEnvelopeSize': '{0}'.format(max_envelope_size)}
def _build_locale(self, locale):
if locale is None:
return self.locale
else:
return {'Locale': {"@xml:lang": "en-US"}}
def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):
headers = OrderedDict([
('a:To', self.endpoint),
('a:ReplyTo', Session.Address),
('w:ResourceURI', resource.url),
('a:MessageID', format(uuid.uuid4())),
('a:Action', action)]
)
# TODO: Implement support for Microsoft XPRESS compression
# https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/
# wsman-xpress-remote-shell-compression?forum=os_windowsprotocols
# headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})
# only include the operation timeout if the user specified one when the class was instantiated
# or if the user explicitly set one when invoking a method.
if operation_timeout is not None:
headers.update(self._build_operation_timeout(operation_timeout))
elif self.default_operation_timeout is not None:
headers.update(self.default_operation_timeout)
headers.update(self._build_selectors(resource.selectors))
headers.update(self._build_options(resource.options))
headers.update(self._build_max_envelope(max_envelope_size))
headers.update(self._build_locale(locale))
return headers
|
ianclegg/winrmlib | winrmlib/api/session.py | Session.create | python | def create(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CreateAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj) | resource can be a URL or a ResourceLocator | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L81-L91 | [
"def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):\n headers = OrderedDict([\n ('a:To', self.endpoint),\n ('a:ReplyTo', Session.Address),\n ('w:ResourceURI', resource.url),\n ('a:MessageID', format(uuid.uuid4())),\n ('a:Action', action)]\n )\n # TODO: Implement support for Microsoft XPRESS compression\n # https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/\n # wsman-xpress-remote-shell-compression?forum=os_windowsprotocols\n\n # headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})\n # only include the operation timeout if the user specified one when the class was instantiated\n # or if the user explicitly set one when invoking a method.\n if operation_timeout is not None:\n headers.update(self._build_operation_timeout(operation_timeout))\n elif self.default_operation_timeout is not None:\n headers.update(self.default_operation_timeout)\n\n headers.update(self._build_selectors(resource.selectors))\n headers.update(self._build_options(resource.options))\n headers.update(self._build_max_envelope(max_envelope_size))\n headers.update(self._build_locale(locale))\n return headers\n"
] | class Session(object):
"""
Factory object for building sessions and connection options
"""
def __init__(self, endpoint, username, password, **kwargs):
# transport = Session._build_transport(endpoint, auth, username, password)
# Store the endpoint and the service we will use to invoke it
self.endpoint = endpoint
# False == No CredSSP
self.service = Service(endpoint, username, password, True)
# The user can set override some defaults for the Session, they can also be overridden on each request
self.max_envelope = self._build_max_envelope(kwargs.get('max_envelope_size', Session.MaxEnvelopeSize))
self.locale = self._build_locale(kwargs.get('locale', Session.Locale))
# The operation timeout header overrides the timeout set on the server. Some users may prefer to
# use the servers default timeout, so this header will only be included if the user explicitly sets
# an operation timeout.
if 'operation_timeout' in kwargs:
self.default_operation_timeout = self._build_operation_timeout(kwargs.get('operation_timeout'))
else:
self.default_operation_timeout = None
def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.GetAction, operation_timeout, max_envelope_size, locale)
self.service.invoke.set_options(tsoapheaders=headers)
return self.service.invoke
def put(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
headers = None
return self.service.invoke(headers, obj)
def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.DeleteAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, None)
def command(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CommandAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
def recieve(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.ReceiveAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
@staticmethod
def _build_selectors(selectors):
# Build the WSMan SelectorSet Element from the selector dictionary
selector_set = []
for selector_name in selectors.iterkeys():
selector_value = selectors[selector_name]
selector_set.append({'#text': str(selector_value), '@Name': selector_name})
return {'w:SelectorSet': {'w:Selector': selector_set}}
@staticmethod
# TODO add mustcomply attribute to element
def _build_options(options):
option_set = []
for name, (value, must_comply) in options.iteritems():
must_comply = bool(must_comply)
option_set.append({'#text': str(value), '@Name': name})
return {'w:OptionSet': {'w:Option': option_set}}
def _build_operation_timeout(self, operation_timeout):
if operation_timeout is None:
return self.default_operation_timeout
else:
return {'w:OperationTimeout': 'PT{0}S'.format(operation_timeout)}
def _build_max_envelope(self, max_envelope_size):
if max_envelope_size is None:
return self.max_envelope
else:
return {'w:MaxEnvelopeSize': '{0}'.format(max_envelope_size)}
def _build_locale(self, locale):
if locale is None:
return self.locale
else:
return {'Locale': {"@xml:lang": "en-US"}}
def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):
headers = OrderedDict([
('a:To', self.endpoint),
('a:ReplyTo', Session.Address),
('w:ResourceURI', resource.url),
('a:MessageID', format(uuid.uuid4())),
('a:Action', action)]
)
# TODO: Implement support for Microsoft XPRESS compression
# https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/
# wsman-xpress-remote-shell-compression?forum=os_windowsprotocols
# headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})
# only include the operation timeout if the user specified one when the class was instantiated
# or if the user explicitly set one when invoking a method.
if operation_timeout is not None:
headers.update(self._build_operation_timeout(operation_timeout))
elif self.default_operation_timeout is not None:
headers.update(self.default_operation_timeout)
headers.update(self._build_selectors(resource.selectors))
headers.update(self._build_options(resource.options))
headers.update(self._build_max_envelope(max_envelope_size))
headers.update(self._build_locale(locale))
return headers
|
ianclegg/winrmlib | winrmlib/api/session.py | Session.command | python | def command(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CommandAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj) | resource can be a URL or a ResourceLocator | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L93-L103 | [
"def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):\n headers = OrderedDict([\n ('a:To', self.endpoint),\n ('a:ReplyTo', Session.Address),\n ('w:ResourceURI', resource.url),\n ('a:MessageID', format(uuid.uuid4())),\n ('a:Action', action)]\n )\n # TODO: Implement support for Microsoft XPRESS compression\n # https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/\n # wsman-xpress-remote-shell-compression?forum=os_windowsprotocols\n\n # headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})\n # only include the operation timeout if the user specified one when the class was instantiated\n # or if the user explicitly set one when invoking a method.\n if operation_timeout is not None:\n headers.update(self._build_operation_timeout(operation_timeout))\n elif self.default_operation_timeout is not None:\n headers.update(self.default_operation_timeout)\n\n headers.update(self._build_selectors(resource.selectors))\n headers.update(self._build_options(resource.options))\n headers.update(self._build_max_envelope(max_envelope_size))\n headers.update(self._build_locale(locale))\n return headers\n"
] | class Session(object):
"""
Factory object for building sessions and connection options
"""
def __init__(self, endpoint, username, password, **kwargs):
# transport = Session._build_transport(endpoint, auth, username, password)
# Store the endpoint and the service we will use to invoke it
self.endpoint = endpoint
# False == No CredSSP
self.service = Service(endpoint, username, password, True)
# The user can set override some defaults for the Session, they can also be overridden on each request
self.max_envelope = self._build_max_envelope(kwargs.get('max_envelope_size', Session.MaxEnvelopeSize))
self.locale = self._build_locale(kwargs.get('locale', Session.Locale))
# The operation timeout header overrides the timeout set on the server. Some users may prefer to
# use the servers default timeout, so this header will only be included if the user explicitly sets
# an operation timeout.
if 'operation_timeout' in kwargs:
self.default_operation_timeout = self._build_operation_timeout(kwargs.get('operation_timeout'))
else:
self.default_operation_timeout = None
def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.GetAction, operation_timeout, max_envelope_size, locale)
self.service.invoke.set_options(tsoapheaders=headers)
return self.service.invoke
def put(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
headers = None
return self.service.invoke(headers, obj)
def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.DeleteAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, None)
def create(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CreateAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
def recieve(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.ReceiveAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
@staticmethod
def _build_selectors(selectors):
# Build the WSMan SelectorSet Element from the selector dictionary
selector_set = []
for selector_name in selectors.iterkeys():
selector_value = selectors[selector_name]
selector_set.append({'#text': str(selector_value), '@Name': selector_name})
return {'w:SelectorSet': {'w:Selector': selector_set}}
@staticmethod
# TODO add mustcomply attribute to element
def _build_options(options):
option_set = []
for name, (value, must_comply) in options.iteritems():
must_comply = bool(must_comply)
option_set.append({'#text': str(value), '@Name': name})
return {'w:OptionSet': {'w:Option': option_set}}
def _build_operation_timeout(self, operation_timeout):
if operation_timeout is None:
return self.default_operation_timeout
else:
return {'w:OperationTimeout': 'PT{0}S'.format(operation_timeout)}
def _build_max_envelope(self, max_envelope_size):
if max_envelope_size is None:
return self.max_envelope
else:
return {'w:MaxEnvelopeSize': '{0}'.format(max_envelope_size)}
def _build_locale(self, locale):
if locale is None:
return self.locale
else:
return {'Locale': {"@xml:lang": "en-US"}}
def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):
headers = OrderedDict([
('a:To', self.endpoint),
('a:ReplyTo', Session.Address),
('w:ResourceURI', resource.url),
('a:MessageID', format(uuid.uuid4())),
('a:Action', action)]
)
# TODO: Implement support for Microsoft XPRESS compression
# https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/
# wsman-xpress-remote-shell-compression?forum=os_windowsprotocols
# headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})
# only include the operation timeout if the user specified one when the class was instantiated
# or if the user explicitly set one when invoking a method.
if operation_timeout is not None:
headers.update(self._build_operation_timeout(operation_timeout))
elif self.default_operation_timeout is not None:
headers.update(self.default_operation_timeout)
headers.update(self._build_selectors(resource.selectors))
headers.update(self._build_options(resource.options))
headers.update(self._build_max_envelope(max_envelope_size))
headers.update(self._build_locale(locale))
return headers
|
ianclegg/winrmlib | winrmlib/api/session.py | Session.recieve | python | def recieve(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.ReceiveAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj) | resource can be a URL or a ResourceLocator | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L105-L115 | [
"def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):\n headers = OrderedDict([\n ('a:To', self.endpoint),\n ('a:ReplyTo', Session.Address),\n ('w:ResourceURI', resource.url),\n ('a:MessageID', format(uuid.uuid4())),\n ('a:Action', action)]\n )\n # TODO: Implement support for Microsoft XPRESS compression\n # https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/\n # wsman-xpress-remote-shell-compression?forum=os_windowsprotocols\n\n # headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})\n # only include the operation timeout if the user specified one when the class was instantiated\n # or if the user explicitly set one when invoking a method.\n if operation_timeout is not None:\n headers.update(self._build_operation_timeout(operation_timeout))\n elif self.default_operation_timeout is not None:\n headers.update(self.default_operation_timeout)\n\n headers.update(self._build_selectors(resource.selectors))\n headers.update(self._build_options(resource.options))\n headers.update(self._build_max_envelope(max_envelope_size))\n headers.update(self._build_locale(locale))\n return headers\n"
] | class Session(object):
"""
Factory object for building sessions and connection options
"""
def __init__(self, endpoint, username, password, **kwargs):
# transport = Session._build_transport(endpoint, auth, username, password)
# Store the endpoint and the service we will use to invoke it
self.endpoint = endpoint
# False == No CredSSP
self.service = Service(endpoint, username, password, True)
# The user can set override some defaults for the Session, they can also be overridden on each request
self.max_envelope = self._build_max_envelope(kwargs.get('max_envelope_size', Session.MaxEnvelopeSize))
self.locale = self._build_locale(kwargs.get('locale', Session.Locale))
# The operation timeout header overrides the timeout set on the server. Some users may prefer to
# use the servers default timeout, so this header will only be included if the user explicitly sets
# an operation timeout.
if 'operation_timeout' in kwargs:
self.default_operation_timeout = self._build_operation_timeout(kwargs.get('operation_timeout'))
else:
self.default_operation_timeout = None
def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.GetAction, operation_timeout, max_envelope_size, locale)
self.service.invoke.set_options(tsoapheaders=headers)
return self.service.invoke
def put(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
headers = None
return self.service.invoke(headers, obj)
def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.DeleteAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, None)
def create(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CreateAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
def command(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CommandAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
@staticmethod
def _build_selectors(selectors):
# Build the WSMan SelectorSet Element from the selector dictionary
selector_set = []
for selector_name in selectors.iterkeys():
selector_value = selectors[selector_name]
selector_set.append({'#text': str(selector_value), '@Name': selector_name})
return {'w:SelectorSet': {'w:Selector': selector_set}}
@staticmethod
# TODO add mustcomply attribute to element
def _build_options(options):
option_set = []
for name, (value, must_comply) in options.iteritems():
must_comply = bool(must_comply)
option_set.append({'#text': str(value), '@Name': name})
return {'w:OptionSet': {'w:Option': option_set}}
def _build_operation_timeout(self, operation_timeout):
if operation_timeout is None:
return self.default_operation_timeout
else:
return {'w:OperationTimeout': 'PT{0}S'.format(operation_timeout)}
def _build_max_envelope(self, max_envelope_size):
if max_envelope_size is None:
return self.max_envelope
else:
return {'w:MaxEnvelopeSize': '{0}'.format(max_envelope_size)}
def _build_locale(self, locale):
if locale is None:
return self.locale
else:
return {'Locale': {"@xml:lang": "en-US"}}
def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):
headers = OrderedDict([
('a:To', self.endpoint),
('a:ReplyTo', Session.Address),
('w:ResourceURI', resource.url),
('a:MessageID', format(uuid.uuid4())),
('a:Action', action)]
)
# TODO: Implement support for Microsoft XPRESS compression
# https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/
# wsman-xpress-remote-shell-compression?forum=os_windowsprotocols
# headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})
# only include the operation timeout if the user specified one when the class was instantiated
# or if the user explicitly set one when invoking a method.
if operation_timeout is not None:
headers.update(self._build_operation_timeout(operation_timeout))
elif self.default_operation_timeout is not None:
headers.update(self.default_operation_timeout)
headers.update(self._build_selectors(resource.selectors))
headers.update(self._build_options(resource.options))
headers.update(self._build_max_envelope(max_envelope_size))
headers.update(self._build_locale(locale))
return headers
|
ianclegg/winrmlib | winrmlib/api/service.py | Service.invoke | python | def invoke(self, headers, body):
xml = Service._create_request(headers, body)
try:
response = self.session.post(self.endpoint, verify=False, data=xml)
logging.debug(response.content)
except Exception as e:
traceback.print_exc()
raise WSManException(e)
if response.status_code == 200:
return Service._parse_response(response.content)
if response.status_code == 401:
raise WSManAuthenticationException('the remote host rejected authentication')
raise WSManException('the remote host returned an unexpected http status code: %s' % response.status_code) | Invokes the soap service | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/service.py#L55-L74 | [
"def _create_request(headers, body):\n \"\"\"\n Create the SOAP 1.2 Envelope\n An ordered dictionary is required to ensure the same order is reflected in the XML, otherwise the\n SOAP Body element would appear before the Header element.\n \"\"\"\n envelope = OrderedDict()\n for (namespace, alias) in Service.Namespaces.items():\n envelope['@xmlns:' + alias] = namespace\n envelope['soap:Header'] = headers\n envelope['soap:Body'] = body\n return xmltodict.unparse({'soap:Envelope': envelope}, encoding='utf-8')\n",
"def _parse_response(xml):\n \"\"\"\n Attempt to parse the SOAP response and return a python object\n Raise a WSManException if a Fault is found\n \"\"\"\n try:\n soap_response = xmltodict.parse(xml, process_namespaces=True, namespaces=Service.Namespaces)\n except Exception:\n logging.debug('unable to parse the xml response: %s', xml)\n raise WSManException(\"the remote host returned an invalid soap response\")\n\n # the delete response has an empty body\n body = soap_response['soap:Envelope']['soap:Body']\n if body is not None and 'soap:Fault' in body:\n raise WSManOperationException(body['soap:Fault']['soap:Reason']['soap:Text']['#text'])\n return body\n"
] | class Service(object):
"""
SOAP Service
"""
def __init__(self, endpoint, username, password, delegation=False, **kwargs):
"""
# Kerberos authentication does not require a password if the MIT kerberos GSS-API mechanism already has a
# valid service ticket for the WSMAN service on the target server. However if the callee wishes to use
# credential delegation with Kerberos they must still provide a password (see Microsoft [MS-CSSP] Protocol)
#
# Credential delegation requires an authentication mechanism which provide message integrity and confidentiality
# such as NTLM or Kerberos; basic authentication cannot be used.
#
# Kerberos requires the username in UPN (RFC xxxx) form. UPN or NetBIOS usernames can be used whith NTLM
#
# when TCP connection is closed, its auto-opened
#
"""
self.session = kwargs.get('session', Session())
self.endpoint = endpoint
self.session.auth = Service._determine_auth_mechanism(username, password, delegation)
@staticmethod
def _determine_auth_mechanism(username, password, delegation):
"""
if the username contains at '@' sign we will use kerberos
if the username contains a '/ we will use ntlm
either NTLM or Kerberos. In fact its basically always Negotiate.
"""
if re.match('(.*)@(.+)', username) is not None:
if delegation is True:
raise Exception('Kerberos is not yet supported, specify the username in <domain>\<username> form for NTLM')
else:
raise Exception('Kerberos is not yet supported, specify the username in <domain>>\<username> form for NTLM')
# check for NT format 'domain\username' a blank domain or username is invalid
legacy = re.match('(.*)\\\\(.*)', username)
if legacy is not None:
if not legacy.group(1):
raise Exception('Please specify the Windows domain for user in <domain>\<username> format')
if not legacy.group(2):
raise Exception('Please specify the Username of the user in <domain>\<username> format')
if delegation is True:
return HttpCredSSPAuth(legacy.group(1), legacy.group(2), password)
else:
return HttpNtlmAuth(legacy.group(1), legacy.group(2), password)
#return HttpCredSSPAuth("SERVER2012", "Administrator", password)
# attempt NTLM (local account, not domain) - if username is '' then we try anonymous NTLM auth
# as if anyone will configure that - uf!
return HttpNtlmAuth('', username, password)
@staticmethod
def _create_request(headers, body):
"""
Create the SOAP 1.2 Envelope
An ordered dictionary is required to ensure the same order is reflected in the XML, otherwise the
SOAP Body element would appear before the Header element.
"""
envelope = OrderedDict()
for (namespace, alias) in Service.Namespaces.items():
envelope['@xmlns:' + alias] = namespace
envelope['soap:Header'] = headers
envelope['soap:Body'] = body
return xmltodict.unparse({'soap:Envelope': envelope}, encoding='utf-8')
@staticmethod
def _parse_response(xml):
"""
Attempt to parse the SOAP response and return a python object
Raise a WSManException if a Fault is found
"""
try:
soap_response = xmltodict.parse(xml, process_namespaces=True, namespaces=Service.Namespaces)
except Exception:
logging.debug('unable to parse the xml response: %s', xml)
raise WSManException("the remote host returned an invalid soap response")
# the delete response has an empty body
body = soap_response['soap:Envelope']['soap:Body']
if body is not None and 'soap:Fault' in body:
raise WSManOperationException(body['soap:Fault']['soap:Reason']['soap:Text']['#text'])
return body
|
ianclegg/winrmlib | winrmlib/api/service.py | Service._determine_auth_mechanism | python | def _determine_auth_mechanism(username, password, delegation):
if re.match('(.*)@(.+)', username) is not None:
if delegation is True:
raise Exception('Kerberos is not yet supported, specify the username in <domain>\<username> form for NTLM')
else:
raise Exception('Kerberos is not yet supported, specify the username in <domain>>\<username> form for NTLM')
# check for NT format 'domain\username' a blank domain or username is invalid
legacy = re.match('(.*)\\\\(.*)', username)
if legacy is not None:
if not legacy.group(1):
raise Exception('Please specify the Windows domain for user in <domain>\<username> format')
if not legacy.group(2):
raise Exception('Please specify the Username of the user in <domain>\<username> format')
if delegation is True:
return HttpCredSSPAuth(legacy.group(1), legacy.group(2), password)
else:
return HttpNtlmAuth(legacy.group(1), legacy.group(2), password)
#return HttpCredSSPAuth("SERVER2012", "Administrator", password)
# attempt NTLM (local account, not domain) - if username is '' then we try anonymous NTLM auth
# as if anyone will configure that - uf!
return HttpNtlmAuth('', username, password) | if the username contains at '@' sign we will use kerberos
if the username contains a '/ we will use ntlm
either NTLM or Kerberos. In fact its basically always Negotiate. | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/service.py#L77-L104 | null | class Service(object):
"""
SOAP Service
"""
def __init__(self, endpoint, username, password, delegation=False, **kwargs):
"""
# Kerberos authentication does not require a password if the MIT kerberos GSS-API mechanism already has a
# valid service ticket for the WSMAN service on the target server. However if the callee wishes to use
# credential delegation with Kerberos they must still provide a password (see Microsoft [MS-CSSP] Protocol)
#
# Credential delegation requires an authentication mechanism which provide message integrity and confidentiality
# such as NTLM or Kerberos; basic authentication cannot be used.
#
# Kerberos requires the username in UPN (RFC xxxx) form. UPN or NetBIOS usernames can be used whith NTLM
#
# when TCP connection is closed, its auto-opened
#
"""
self.session = kwargs.get('session', Session())
self.endpoint = endpoint
self.session.auth = Service._determine_auth_mechanism(username, password, delegation)
def invoke(self, headers, body):
"""
Invokes the soap service
"""
xml = Service._create_request(headers, body)
try:
response = self.session.post(self.endpoint, verify=False, data=xml)
logging.debug(response.content)
except Exception as e:
traceback.print_exc()
raise WSManException(e)
if response.status_code == 200:
return Service._parse_response(response.content)
if response.status_code == 401:
raise WSManAuthenticationException('the remote host rejected authentication')
raise WSManException('the remote host returned an unexpected http status code: %s' % response.status_code)
@staticmethod
@staticmethod
def _create_request(headers, body):
"""
Create the SOAP 1.2 Envelope
An ordered dictionary is required to ensure the same order is reflected in the XML, otherwise the
SOAP Body element would appear before the Header element.
"""
envelope = OrderedDict()
for (namespace, alias) in Service.Namespaces.items():
envelope['@xmlns:' + alias] = namespace
envelope['soap:Header'] = headers
envelope['soap:Body'] = body
return xmltodict.unparse({'soap:Envelope': envelope}, encoding='utf-8')
@staticmethod
def _parse_response(xml):
"""
Attempt to parse the SOAP response and return a python object
Raise a WSManException if a Fault is found
"""
try:
soap_response = xmltodict.parse(xml, process_namespaces=True, namespaces=Service.Namespaces)
except Exception:
logging.debug('unable to parse the xml response: %s', xml)
raise WSManException("the remote host returned an invalid soap response")
# the delete response has an empty body
body = soap_response['soap:Envelope']['soap:Body']
if body is not None and 'soap:Fault' in body:
raise WSManOperationException(body['soap:Fault']['soap:Reason']['soap:Text']['#text'])
return body
|
ianclegg/winrmlib | winrmlib/api/service.py | Service._create_request | python | def _create_request(headers, body):
envelope = OrderedDict()
for (namespace, alias) in Service.Namespaces.items():
envelope['@xmlns:' + alias] = namespace
envelope['soap:Header'] = headers
envelope['soap:Body'] = body
return xmltodict.unparse({'soap:Envelope': envelope}, encoding='utf-8') | Create the SOAP 1.2 Envelope
An ordered dictionary is required to ensure the same order is reflected in the XML, otherwise the
SOAP Body element would appear before the Header element. | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/service.py#L107-L118 | null | class Service(object):
"""
SOAP Service
"""
def __init__(self, endpoint, username, password, delegation=False, **kwargs):
"""
# Kerberos authentication does not require a password if the MIT kerberos GSS-API mechanism already has a
# valid service ticket for the WSMAN service on the target server. However if the callee wishes to use
# credential delegation with Kerberos they must still provide a password (see Microsoft [MS-CSSP] Protocol)
#
# Credential delegation requires an authentication mechanism which provide message integrity and confidentiality
# such as NTLM or Kerberos; basic authentication cannot be used.
#
# Kerberos requires the username in UPN (RFC xxxx) form. UPN or NetBIOS usernames can be used whith NTLM
#
# when TCP connection is closed, its auto-opened
#
"""
self.session = kwargs.get('session', Session())
self.endpoint = endpoint
self.session.auth = Service._determine_auth_mechanism(username, password, delegation)
def invoke(self, headers, body):
"""
Invokes the soap service
"""
xml = Service._create_request(headers, body)
try:
response = self.session.post(self.endpoint, verify=False, data=xml)
logging.debug(response.content)
except Exception as e:
traceback.print_exc()
raise WSManException(e)
if response.status_code == 200:
return Service._parse_response(response.content)
if response.status_code == 401:
raise WSManAuthenticationException('the remote host rejected authentication')
raise WSManException('the remote host returned an unexpected http status code: %s' % response.status_code)
@staticmethod
def _determine_auth_mechanism(username, password, delegation):
"""
if the username contains at '@' sign we will use kerberos
if the username contains a '/ we will use ntlm
either NTLM or Kerberos. In fact its basically always Negotiate.
"""
if re.match('(.*)@(.+)', username) is not None:
if delegation is True:
raise Exception('Kerberos is not yet supported, specify the username in <domain>\<username> form for NTLM')
else:
raise Exception('Kerberos is not yet supported, specify the username in <domain>>\<username> form for NTLM')
# check for NT format 'domain\username' a blank domain or username is invalid
legacy = re.match('(.*)\\\\(.*)', username)
if legacy is not None:
if not legacy.group(1):
raise Exception('Please specify the Windows domain for user in <domain>\<username> format')
if not legacy.group(2):
raise Exception('Please specify the Username of the user in <domain>\<username> format')
if delegation is True:
return HttpCredSSPAuth(legacy.group(1), legacy.group(2), password)
else:
return HttpNtlmAuth(legacy.group(1), legacy.group(2), password)
#return HttpCredSSPAuth("SERVER2012", "Administrator", password)
# attempt NTLM (local account, not domain) - if username is '' then we try anonymous NTLM auth
# as if anyone will configure that - uf!
return HttpNtlmAuth('', username, password)
@staticmethod
@staticmethod
def _parse_response(xml):
"""
Attempt to parse the SOAP response and return a python object
Raise a WSManException if a Fault is found
"""
try:
soap_response = xmltodict.parse(xml, process_namespaces=True, namespaces=Service.Namespaces)
except Exception:
logging.debug('unable to parse the xml response: %s', xml)
raise WSManException("the remote host returned an invalid soap response")
# the delete response has an empty body
body = soap_response['soap:Envelope']['soap:Body']
if body is not None and 'soap:Fault' in body:
raise WSManOperationException(body['soap:Fault']['soap:Reason']['soap:Text']['#text'])
return body
|
ianclegg/winrmlib | winrmlib/api/service.py | Service._parse_response | python | def _parse_response(xml):
try:
soap_response = xmltodict.parse(xml, process_namespaces=True, namespaces=Service.Namespaces)
except Exception:
logging.debug('unable to parse the xml response: %s', xml)
raise WSManException("the remote host returned an invalid soap response")
# the delete response has an empty body
body = soap_response['soap:Envelope']['soap:Body']
if body is not None and 'soap:Fault' in body:
raise WSManOperationException(body['soap:Fault']['soap:Reason']['soap:Text']['#text'])
return body | Attempt to parse the SOAP response and return a python object
Raise a WSManException if a Fault is found | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/service.py#L121-L136 | null | class Service(object):
"""
SOAP Service
"""
def __init__(self, endpoint, username, password, delegation=False, **kwargs):
"""
# Kerberos authentication does not require a password if the MIT kerberos GSS-API mechanism already has a
# valid service ticket for the WSMAN service on the target server. However if the callee wishes to use
# credential delegation with Kerberos they must still provide a password (see Microsoft [MS-CSSP] Protocol)
#
# Credential delegation requires an authentication mechanism which provide message integrity and confidentiality
# such as NTLM or Kerberos; basic authentication cannot be used.
#
# Kerberos requires the username in UPN (RFC xxxx) form. UPN or NetBIOS usernames can be used whith NTLM
#
# when TCP connection is closed, its auto-opened
#
"""
self.session = kwargs.get('session', Session())
self.endpoint = endpoint
self.session.auth = Service._determine_auth_mechanism(username, password, delegation)
def invoke(self, headers, body):
"""
Invokes the soap service
"""
xml = Service._create_request(headers, body)
try:
response = self.session.post(self.endpoint, verify=False, data=xml)
logging.debug(response.content)
except Exception as e:
traceback.print_exc()
raise WSManException(e)
if response.status_code == 200:
return Service._parse_response(response.content)
if response.status_code == 401:
raise WSManAuthenticationException('the remote host rejected authentication')
raise WSManException('the remote host returned an unexpected http status code: %s' % response.status_code)
@staticmethod
def _determine_auth_mechanism(username, password, delegation):
"""
if the username contains at '@' sign we will use kerberos
if the username contains a '/ we will use ntlm
either NTLM or Kerberos. In fact its basically always Negotiate.
"""
if re.match('(.*)@(.+)', username) is not None:
if delegation is True:
raise Exception('Kerberos is not yet supported, specify the username in <domain>\<username> form for NTLM')
else:
raise Exception('Kerberos is not yet supported, specify the username in <domain>>\<username> form for NTLM')
# check for NT format 'domain\username' a blank domain or username is invalid
legacy = re.match('(.*)\\\\(.*)', username)
if legacy is not None:
if not legacy.group(1):
raise Exception('Please specify the Windows domain for user in <domain>\<username> format')
if not legacy.group(2):
raise Exception('Please specify the Username of the user in <domain>\<username> format')
if delegation is True:
return HttpCredSSPAuth(legacy.group(1), legacy.group(2), password)
else:
return HttpNtlmAuth(legacy.group(1), legacy.group(2), password)
#return HttpCredSSPAuth("SERVER2012", "Administrator", password)
# attempt NTLM (local account, not domain) - if username is '' then we try anonymous NTLM auth
# as if anyone will configure that - uf!
return HttpNtlmAuth('', username, password)
@staticmethod
def _create_request(headers, body):
"""
Create the SOAP 1.2 Envelope
An ordered dictionary is required to ensure the same order is reflected in the XML, otherwise the
SOAP Body element would appear before the Header element.
"""
envelope = OrderedDict()
for (namespace, alias) in Service.Namespaces.items():
envelope['@xmlns:' + alias] = namespace
envelope['soap:Header'] = headers
envelope['soap:Body'] = body
return xmltodict.unparse({'soap:Envelope': envelope}, encoding='utf-8')
@staticmethod
|
ianclegg/winrmlib | winrmlib/client.py | WinRmClient.create_session | python | def create_session():
shell = CommandShell('http://192.168.137.238:5985/wsman', 'Administrator', 'Pa55w0rd')
shell.open()
command_id = shell.run('ipconfig', ['/all'])
(stdout, stderr, exit_code) = shell.receive(command_id)
sys.stdout.write(stdout.strip() + '\r\n')
shell.close()
return None | shell = CommandShell('http://192.168.145.132:5985/wsman', 'Administrator', 'Pa55w0rd') | train | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/client.py#L53-L64 | [
"def open(self, input_streams=['stdin'], output_streams=['stderr', 'stdout']):\n \"\"\"\n Opens the remote shell\n \"\"\"\n shell = dict()\n shell['rsp:InputStreams'] = \" \".join(input_streams)\n shell['rsp:OutputStreams'] = \" \".join(output_streams)\n shell['rsp:IdleTimeout'] = str(self.idle_timeout)\n\n if self.working_directory is not None:\n shell['rsp:WorkingDirectory'] = str(self.working_directory)\n\n if self.environment is not None:\n variables = []\n for key, value in self.environment.items():\n variables.append({'#text': str(value), '@Name': key})\n shell['rsp:Environment'] = {'Variable': variables}\n\n response = self.session.create(self.resource, {'rsp:Shell': shell})\n self.__shell_id = response['rsp:Shell']['rsp:ShellId']\n",
"def run(self, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False):\n \"\"\"This function does something.\n :param command: The command to be executed\n :type name: str.\n :param arguments: A list of arguments to be passed to the command\n :type state: str.\n :returns: int -- the return code.\n :raises: AttributeError, KeyError\n\n iclegg: blocking i/o operations are slow, doesnt Python have a moden 'async' mechanism\n rather than replying on 80's style callbacks?\n \"\"\"\n logging.info('running command: ' + command)\n resource = ResourceLocator(CommandShell.ShellResource)\n resource.add_selector('ShellId', self.__shell_id)\n resource.add_option('WINRS_SKIP_CMD_SHELL', ['FALSE', 'TRUE'][bool(skip_cmd_shell)], True)\n resource.add_option('WINRS_CONSOLEMODE_STDIN', ['FALSE', 'TRUE'][bool(console_mode_stdin)], True)\n\n command = OrderedDict([('rsp:Command', command)])\n command['rsp:Arguments'] = list(arguments)\n\n response = self.session.command(resource, {'rsp:CommandLine': command})\n command_id = response['rsp:CommandResponse']['rsp:CommandId']\n logging.info('receive command: ' + command_id)\n return command_id\n",
"def receive(self, command_id, streams=('stdout', 'stderr'), command_timeout=60):\n \"\"\"\n Recieves data\n :param command_id:\n :param streams:\n :param command_timeout:\n :return:\n \"\"\"\n logging.info('receive command: ' + command_id)\n response_streams = dict.fromkeys(streams, '')\n (complete, exit_code) = self._receive_poll(command_id, response_streams)\n while not complete:\n (complete, exit_code) = self._receive_poll(command_id, response_streams)\n\n # This retains some compatibility with pywinrm\n if sorted(response_streams.keys()) == sorted(['stderr', 'stdout']):\n return response_streams['stdout'], response_streams['stderr'], exit_code\n else:\n return response_streams, exit_code\n",
"def close(self):\n \"\"\"\n Closes pipe\n :return:\n \"\"\"\n resource = ResourceLocator(CommandShell.ShellResource)\n resource.add_selector('ShellId', self.__shell_id)\n self.session.delete(resource)\n"
] | class WinRmClient(object):
"""
Factory object for building sessions and connection options
"""
@staticmethod
|
schneiderfelipe/pyrrole | pyrrole/core.py | _parse_chemical_equation | python | def _parse_chemical_equation(value):
arrow = _pp.oneOf('-> <- <=>').setResultsName('arrow')
species = _pp.Word(_pp.printables).setResultsName('species')
coefficient = (_pp.Optional(_pp.Word(_pp.nums), default=1)
.setParseAction(_pp.tokenMap(int))
.setResultsName('coefficient'))
group_ = _pp.Group(coefficient + _pp.Optional(_pp.Suppress('*')) + species)
reactants = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
.setResultsName('reactants'))
products = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
.setResultsName('products'))
grammar = reactants + arrow + products
parsed = grammar.parseString(value).asDict()
if parsed['arrow'] == '<-':
parsed['reactants'], parsed['products'] \
= parsed['products'], parsed['reactants']
parsed['arrow'] = '->'
return parsed | Parse the chemical equation mini-language.
See the docstring of `ChemicalEquation` for more.
Parameters
----------
value : `str`
A string in chemical equation mini-language.
Returns
-------
mapping
A mapping in the format specified by the mini-language (see notes on
`ChemicalEquation`).
Examples
--------
>>> from pyrrole.core import _parse_chemical_equation
>>> parsed = _parse_chemical_equation('4 A + 3 B <- 2 C + D')
>>> parsed['arrow']
'->'
>>> parsed['products'][1]['species']
'B'
>>> parsed['reactants'][0]['coefficient']
2 | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L24-L72 | null | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for handling chemical equations and systems."""
import re
import numbers as _numbers
try:
from collections.abc import Iterable as _Iterable
from collections.abc import Mapping as _Mapping
from collections.abc import Sequence as _Sequence
except ImportError:
from collections import Iterable as _Iterable
from collections import Mapping as _Mapping
from collections import Sequence as _Sequence
import warnings as _warnings
import numpy as _np
import pandas as _pd
import networkx as _nx
import pyparsing as _pp
def _get_chemical_equation_piece(species_list, coefficients):
"""
Produce a string from chemical species and their coefficients.
Parameters
----------
species_list : iterable of `str`
Iterable of chemical species.
coefficients : iterable of `float`
Nonzero stoichiometric coefficients. The length of `species_list` and
`coefficients` must be the same. Negative values are made positive and
zeros are ignored along with their respective species.
Examples
--------
>>> from pyrrole.core import _get_chemical_equation_piece
>>> _get_chemical_equation_piece(["AcOH"], [2])
'2 AcOH'
>>> _get_chemical_equation_piece(["AcO-", "H+"], [-1, -1])
'AcO- + H+'
>>> _get_chemical_equation_piece("ABCD", [-2, -1, 0, -1])
'2 A + B + D'
"""
def _get_token(species, coefficient):
if coefficient == 1:
return '{}'.format(species)
else:
return '{:g} {}'.format(coefficient, species)
bag = []
for species, coefficient in zip(species_list, coefficients):
if coefficient < 0:
coefficient = -coefficient
if coefficient > 0:
bag.append(_get_token(species, coefficient))
return '{}'.format(' + '.join(bag))
def _check_data(data):
"""
Check a data object for inconsistencies.
Parameters
----------
data : `pandas.DataFrame`
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
Warns
-----
UserWarning
Warned if a ground state species has one or more imaginary vibrational
frequencies, or if a transition state species has zero, two or more
imaginary vibrational frequencies.
Examples
--------
>>> import pandas as pd
>>> from pyrrole.core import _check_data
>>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]},
... {'name': 'B', 'vibfreqs': [0., -1., 2.]},
... {'name': 'C', 'vibfreqs': [0., -1., -2.]},
... {'name': 'A#', 'vibfreqs': [0., 1., 2.]},
... {'name': 'C#', 'vibfreqs': [0., -2., -1.]},
... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}])
... .set_index('name'))
>>> _check_data(data)
"""
if "vibfreqs" in data.columns:
for species in data.index:
vibfreqs = data.loc[species, "vibfreqs"]
nimagvibfreqs = _np.sum(_np.array(vibfreqs) < 0)
if species[-1] == '#' and nimagvibfreqs != 1:
_warnings.warn("'{}' should have 1 imaginary vibfreqs but {} "
"found".format(species, nimagvibfreqs))
elif species[-1] != '#' and nimagvibfreqs != 0:
_warnings.warn("'{}' should have no imaginary vibfreqs but {} "
"found".format(species, nimagvibfreqs))
class ChemicalEquation:
"""
An object for manipulating chemical equations in a way similar to vectors.
This class provides an abstraction for chemical equations, generalizing
equilibria, reactions, phase transitions and others. Conceptually,
`ChemicalEquation` works like a vector that can be manipulated and operated
upon. This allows the calculation of reaction free energies from data about
chemical species, for instance.
Parameters
----------
value : `str`, `ChemicalEquation`, mapping or `Series`
A string in chemical equation mini-language (see notes below), another
`ChemicalEquation` object (which is copied) or either a mapping or
`Series` from chemical species (`str`) to *signed* stoichiometric
coefficients. See examples below.
data : `pandas.DataFrame`, optional
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
arrow : `str`, optional
Arrow symbol to use if `value` is a mapping or `Series`,
ignored otherwise.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Attributes
----------
arrow : `str`
Arrow symbol that separates `reactants` and `products` in
a chemical equation. It is always equal to either ``'->'`` or
``'<=>'``.
coefficient : mapping of `str` to `float`
A mapping relating each `species` to its signed
stoichiometric coefficient. Values are positive (negative) for
`products` (`reactants`).
products : iterable of `str`
Chemical product species, i.e., those on the right-hand side of the
equation.
reactants : iterable of `str`
Chemical reactant species, i.e., those on the left-hand side of the
equation.
species : iterable of `str`
All species, i.e., union of all `reactants` and `products`.
Raises
------
ValueError
Raised if `arrow` is given or calculated to be something other than
``"->"`` or ``"<=>"`` (``"<-"`` is equivalent to ``"->"`` if
chemical equation is given as `str`).
TypeError
Raised if `value` is something other than `str`, mapping or
`pandas.Series`.
Notes
-----
Chemical equations in pyrrole are defined according to the following
mini-language (white spaces are ignored)::
equation ::= reactants arrow products
reactants, products ::= coefficient ['*'] species
['+' coefficient ['*'] species]*
coefficient ::= [integers] (defaults to 1)
species ::= mix of printable characters
arrow ::= '->' | '<-' | '<=>'
Examples
--------
>>> from pyrrole import ChemicalEquation
>>> ChemicalEquation('H2O <=> H+ + OH-')
ChemicalEquation('H2O <=> H+ + OH-')
Chemical equations are stored in the usual order, even when given inverted
(see notes above). This means that, although understood, ``'<-'`` is never
a value of `arrow`:
>>> equation = ChemicalEquation('2 CO <- CO2 + C')
>>> equation.arrow
'->'
>>> equation # stored in the usual order
ChemicalEquation('C + CO2 -> 2 CO')
Chemical species that appear in both reactants and products are simplified:
>>> ChemicalEquation('A + B -> 2 A')
ChemicalEquation('B -> A')
Chemical equations can be manipulated as entities in a vector space. For
instance, they can be added and subtracted, ...
>>> (-ChemicalEquation('AgCl(s) <=> Ag+(aq) + Cl-(aq)')
... +ChemicalEquation('NaCl(s) <=> Na+(aq) + Cl-(aq)')
... +ChemicalEquation('AgNO3(s) <=> Ag+(aq) + NO3-(aq)')
... -ChemicalEquation('NaNO3(s) <=> Na+(aq) + NO3-(aq)'))
ChemicalEquation('AgNO3(s) + NaCl(s) <=> AgCl(s) + NaNO3(s)')
... and also divided and multiplied by numbers:
>>> 10 * ChemicalEquation('CH4 + 2 O2 -> CO2 + 2 H2O') / 2
ChemicalEquation('5 CH4 + 10 O2 -> 5 CO2 + 10 H2O')
Stoichiometric coefficients are always *signed*, that is, positive for
`products` and negative for `reactants`:
>>> equation = ChemicalEquation('NaCl + AgNO3 -> NaNO3 + AgCl(v)')
>>> equation.coefficient['AgCl(v)'] # a product
1.0
>>> equation.coefficient['AgNO3'] # a reactant
-1.0
Convenient attributes make it easy to obtain iterables of chemical species.
>>> "AgCl(v)" in equation.reactants
False
>>> for species in equation.species:
... print(species)
AgCl(v)
AgNO3
NaCl
NaNO3
Although not recomended for everyday usage, a chemical equation can also be
created from a complete set of stoichiometric coefficients. This makes it
easy to convert other objects into a `ChemicalEquation` object:
>>> ChemicalEquation({'NaCl': -1, 'AgNO3': -1,
... 'NaNO3': 1, 'AgCl(v)': 1}, arrow='->')
ChemicalEquation('AgNO3 + NaCl -> AgCl(v) + NaNO3')
"""
def __init__(self, value, data=None, arrow=None, check_data=True):
"""See the docstring for this class."""
# TODO: make tests for usage of data.
if isinstance(value, ChemicalEquation):
self.arrow = value.arrow
self.coefficient = value.coefficient
# TODO: make a test for this if.
if data is None:
data = value.data
elif isinstance(value, str):
parsed = _parse_chemical_equation(value)
coefficient_products = _pd.Series({
product['species']: +product['coefficient']
for product in parsed['products']})
coefficient_reactants = _pd.Series({
reactant['species']: -reactant['coefficient']
for reactant in parsed['reactants']})
self.arrow = parsed['arrow']
self.coefficient = coefficient_reactants.add(coefficient_products,
fill_value=0)
elif isinstance(value, (_Mapping, _pd.Series)):
# TODO: make test.
if arrow not in {'->', '<=>'}:
raise ValueError("arrow must be either '->' or '<=>' ('{}' "
"given)".format(arrow))
self.arrow = arrow
self.coefficient = _pd.Series(value)
else:
raise TypeError("value must be either str, mapping or "
"Series ('{}' "
"given)".format(type(value).__name__))
self.coefficient = self.coefficient.rename(self.__str__())
self.data = data
if self.data is not None:
if not isinstance(self.data, _pd.DataFrame):
self.data = _pd.DataFrame(self.data)
if check_data:
_check_data(self.data.loc[self.species])
def _get_products(self):
return self.coefficient[self.coefficient > 0].index
products = property(_get_products)
def _get_reactants(self):
return self.coefficient[self.coefficient < 0].index
reactants = property(_get_reactants)
def _get_species(self):
return self.coefficient.index
species = property(_get_species)
def __add__(self, other):
"""Add chemical equations as if they were vectors."""
if not isinstance(other, ChemicalEquation):
raise NotImplementedError
return ChemicalEquation(self.coefficient.add(other.coefficient,
fill_value=0),
arrow=self.arrow)
def __sub__(self, other):
"""Subtract chemical equations as if they were vectors."""
if not isinstance(other, ChemicalEquation):
raise NotImplementedError
return ChemicalEquation(self.coefficient.sub(other.coefficient,
fill_value=0),
arrow=self.arrow)
def __mul__(self, other):
"""Multiply chemical equation by a number."""
if not isinstance(other, _numbers.Number):
raise NotImplementedError
return ChemicalEquation(self.coefficient.mul(other, fill_value=0),
arrow=self.arrow)
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide chemical equation by a number."""
if not isinstance(other, _numbers.Number):
raise NotImplementedError
return ChemicalEquation(self.coefficient.div(other, fill_value=0),
arrow=self.arrow)
def __div__(self, other):
"""Ensure "true" division always takes place."""
return self.__truediv__(other)
def __pos__(self):
"""Make unary plus operator be equivalent to multiply by one."""
return self
def __neg__(self):
"""Make unary minus operator be equivalent to multiply by minus one."""
return ChemicalEquation(self.coefficient.mul(-1, fill_value=0),
arrow=self.arrow)
def __eq__(self, other):
"""Compare chemical equations in terms of coefficients."""
diff = self.__sub__(other)
return all(diff.coefficient == 0)
def __repr__(self):
"""Build a string representation of this object."""
return "ChemicalEquation('{}')".format(self)
def __str__(self):
"""Build a unique, parsable string for this chemical equation."""
products = []
reactants = []
for species, coefficient in sorted(self.coefficient.items()):
if coefficient < 0:
bag = reactants
elif coefficient > 0:
bag = products
else:
continue
bag.append(_get_chemical_equation_piece([species], [coefficient]))
return '{} {} {}'.format(' + '.join(reactants), self.arrow,
' + '.join(products))
def to_series(self, only=None,
intensive_columns=["temperature", "pressure"],
check_data=True):
"""
Produce a data record for `ChemicalEquation`.
All possible linear differences for all numeric attributes are computed
and stored in the returned `pandas.Series` object (see examples below).
This allows for easy application and manipulation of
`Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical
equations (see examples below).
Parameters
----------
only : ``"reactants"``, ``"products"``, optional
Instead of the standard behaviour (difference of sums), sum numeric
attributes of either reactants or products only. If given, absolute
coefficients are used.
intensive_columns : iterable of `str`, optional
A set of column names representing intensive properties (e.g. bulk
properties) whose values are not summable. Those must be constant
throughout the chemical equation.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Returns
-------
series : `pandas.Series`
Data record of attribute differences, whose name is the canonical
string representation of the `ChemicalEquation` or, if `only` is
given, a string representing either reactants or products (see
examples below).
Raises
------
ValueError
Raised if `self.data` wasn't defined (e.g. is `None`), if `only`
is something other than ``"reactants"`` or ``"products"``, or if
two or more distinct values for an intensive property have been
found.
Examples
--------
>>> from pyrrole import ChemicalEquation
>>> from pyrrole.atoms import create_data, read_cclib
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out",
... "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out",
... "AcOH(aq)"))
>>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)",
... data)
>>> equilibrium.to_series()
charge 0.000000
enthalpy -0.010958
entropy -0.000198
freeenergy -0.010759
mult 0.000000
natom 0.000000
nbasis 0.000000
nmo 0.000000
pressure 1.000000
temperature 298.150000
Name: AcOH(g) <=> AcOH(aq), dtype: float64
Sums of either reactants or products can be computed:
>>> equilibrium.to_series("reactants")
charge 0.000000
enthalpy -228.533374
entropy 0.031135
freeenergy -228.564509
mult 1.000000
natom 8.000000
nbasis 68.000000
nmo 68.000000
pressure 1.000000
temperature 298.150000
Name: AcOH(g), dtype: float64
"""
if self.data is None:
# TODO: should an empty Series be returned?
raise ValueError("data not defined")
# TODO: find a way to keep categorical columns. Keep if they match?
columns = self.data.select_dtypes('number').columns
intensive_columns = [column for column in columns
if column in intensive_columns]
extensive_columns = [column for column in columns
if column not in intensive_columns]
columns = extensive_columns + intensive_columns
if only is None:
species = self.species
elif only == "reactants":
species = sorted(self.reactants)
elif only == "products":
species = sorted(self.products)
else:
raise ValueError("only must be either 'reactants' or 'products' "
"('{}' given)".format(only))
if check_data:
_check_data(self.data.loc[species])
if all([s in self.data.index for s in species]):
series = (self.data.loc[species, extensive_columns]
.mul(self.coefficient, axis="index").sum("index"))
for column in intensive_columns:
vals = self.data[column].unique()
if len(vals) > 1:
raise ValueError("different values for {}: "
"{}".format(column, vals))
series[column] = vals[0]
else:
series = _pd.Series(_np.nan, index=columns)
if only is None:
name = self.__str__()
else:
coefficients = self.coefficient[species]
name = _get_chemical_equation_piece(species, coefficients)
if only == "reactants":
series[extensive_columns] = -series[extensive_columns]
# Avoid negative zero
# (see https://stackoverflow.com/a/11010791/4039050)
series = series + 0.
return series.rename(name)
def _split_arrows(value):
"""
Split a string with sequential chemical equations into separate strings.
Strings in odd positions in the returned iterable represent sums of
chemical species (with possible stoichiometric coefficients). Strings in
even positions represent arrow symbols. See examples below.
Parameters
----------
value : `str`
A string with sequential chemical equations in the mini-language (see
notes on `ChemicalEquation`).
Returns
-------
iterable of `str`
An iterable of strings. Odd positions represent sums of chemical
species (with possible stoichiometric coefficients). Strings in even
positions represent arrow symbols. See examples below.
Notes
-----
Spaces are not striped from the returned strings (see examples below).
Examples
--------
>>> from pyrrole.core import _split_arrows
>>> _split_arrows('A -> B')
['A ', '->', ' B']
"""
return re.split(r"(->|<-|<=>)", value)
def _split_chemical_equations(value):
"""
Split a string with sequential chemical equations into separate strings.
Each string in the returned iterable represents a single chemical equation
of the input.
See the docstrings of `ChemicalEquation` and `ChemicalSystem` for more.
Parameters
----------
value : `str`
A string with sequential chemical equations in the mini-language (see
notes on `ChemicalEquation`).
Returns
-------
iterable of `str`
An iterable of strings in the format specified by the mini-language
(see notes on `ChemicalEquation`).
Examples
--------
>>> from pyrrole.core import _split_chemical_equations
>>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I')
['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I']
"""
pieces = _split_arrows(value)
return [(pieces[i] +
pieces[i + 1] +
pieces[i + 2]).strip()
for i in range(0, len(pieces) - 2, 2)]
# TODO: improve the examples for this class.
class ChemicalSystem:
"""
Abstraction for models consisting of a set of chemical equations.
Parameters
----------
values : `ChemicalEquation`, `str`, sequence of `ChemicalEquation` or `str`
Definitions of chemical equations. This can either be a single value or
an iterable and accepts anything that can become a `ChemicalEquation`,
or strings with consecutive equations (see examples below).
data : `pandas.DataFrame`, optional
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Attributes
----------
equations : iterable of `ChemicalEquation`
Stored `ChemicalEquation` objects.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> ChemicalSystem("A <=> B -> 2 C")
ChemicalSystem(["A <=> B", "B -> 2 C"])
Single chemical equations are also accepted, in which case the resulting
model has a single equation:
>>> ChemicalSystem(ChemicalEquation("A -> B"))
ChemicalSystem(["A -> B"])
Iterables can mix chemical equation definitions of different types:
>>> ChemicalSystem(["A -> B", "A -> C <- D",
... ChemicalEquation("E -> A")])
ChemicalSystem(["A -> B", "A -> C", "D -> C", "E -> A"])
"""
def __init__(self, values, data=None, check_data=True):
"""See the docstring for this class."""
if isinstance(values, str):
self.equations = list(map(ChemicalEquation,
_split_chemical_equations(values)))
elif isinstance(values, (_Iterable, _Sequence)):
self.equations = []
for value in values:
if isinstance(value, str):
self.equations.extend(map(ChemicalEquation,
_split_chemical_equations(value)))
else:
self.equations.append(ChemicalEquation(value))
else:
self.equations = [ChemicalEquation(values)]
self.data = data
if self.data is not None:
if not isinstance(self.data, _pd.DataFrame):
self.data = _pd.DataFrame(self.data)
if check_data:
_check_data(self.data)
for equation in self.equations:
# TODO: make a test for this if.
if equation.data is None:
equation.data = self.data
def __repr__(self):
"""Build a string representation of this object."""
return "ChemicalSystem([{}])".format(
", ".join(['"' + str(equation) + '"'
for equation in self.equations]))
def to_dataframe(self, *args, **kwargs):
"""
Produce a data table with records for all chemical equations.
All possible differences for numeric attributes are computed and stored
as columns in the returned `pandas.DataFrame` object (see examples
below), whose rows represent chemical equations.
In terms of behavior, this method can be seen as the `ChemicalEquation`
counterpart of `create_data`.
Returns
-------
dataframe : `pandas.DataFrame`
Data table with records of attribute differences for every single
`ChemicalEquation` object in the model.
Notes
-----
Further arguments and keywords are passed directly to
`ChemicalEquation.to_series`.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out",
... "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out",
... "AcOH(aq)"))
>>> data = data[["enthalpy", "entropy", "freeenergy"]]
>>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
>>> equilibrium.to_dataframe() # doctest: +NORMALIZE_WHITESPACE
enthalpy entropy freeenergy
chemical_equation
AcOH(g) <=> AcOH(aq) -0.010958 -0.000198 -0.010759
"""
dataframe = _pd.DataFrame([equation.to_series(*args, **kwargs)
for equation in self.equations])
dataframe.index.name = "chemical_equation"
return dataframe
def to_digraph(self, *args, **kwargs):
"""
Compute a directed graph for the chemical system.
Returns
-------
digraph : `networkx.DiGraph`
Graph nodes are reactants and/or products of chemical equations,
while edges represent the equations themselves. Double ended edges
are used to represent equilibria. Attributes are computed with
`ChemicalEquation.to_series` for each equation (see examples
below).
Notes
-----
Further arguments and keywords are passed directly to
`ChemicalEquation.to_series`.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
>>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
>>> digraph = equilibrium.to_digraph()
>>> sorted(digraph.nodes(data='freeenergy'))
[('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)]
>>> digraph.number_of_nodes()
2
>>> digraph.number_of_edges()
2
"""
# TODO: make test for this
digraph = _nx.DiGraph()
for equation in self.equations:
reactants, arrow, products = [value.strip() for value
in _split_arrows(str(equation))]
try:
attr = equation.to_series("reactants", *args,
**kwargs).to_dict()
except ValueError:
attr = dict()
digraph.add_node(reactants, **attr)
try:
attr = equation.to_series("products", *args,
**kwargs).to_dict()
except ValueError:
attr = dict()
digraph.add_node(products, **attr)
try:
attr = equation.to_series(*args, **kwargs).to_dict()
except ValueError:
attr = dict()
digraph.add_edge(reactants, products, **attr)
if arrow == '<=>':
digraph.add_edge(products, reactants, **attr)
return digraph
|
schneiderfelipe/pyrrole | pyrrole/core.py | _get_chemical_equation_piece | python | def _get_chemical_equation_piece(species_list, coefficients):
def _get_token(species, coefficient):
if coefficient == 1:
return '{}'.format(species)
else:
return '{:g} {}'.format(coefficient, species)
bag = []
for species, coefficient in zip(species_list, coefficients):
if coefficient < 0:
coefficient = -coefficient
if coefficient > 0:
bag.append(_get_token(species, coefficient))
return '{}'.format(' + '.join(bag)) | Produce a string from chemical species and their coefficients.
Parameters
----------
species_list : iterable of `str`
Iterable of chemical species.
coefficients : iterable of `float`
Nonzero stoichiometric coefficients. The length of `species_list` and
`coefficients` must be the same. Negative values are made positive and
zeros are ignored along with their respective species.
Examples
--------
>>> from pyrrole.core import _get_chemical_equation_piece
>>> _get_chemical_equation_piece(["AcOH"], [2])
'2 AcOH'
>>> _get_chemical_equation_piece(["AcO-", "H+"], [-1, -1])
'AcO- + H+'
>>> _get_chemical_equation_piece("ABCD", [-2, -1, 0, -1])
'2 A + B + D' | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L75-L111 | [
"def _get_token(species, coefficient):\n if coefficient == 1:\n return '{}'.format(species)\n else:\n return '{:g} {}'.format(coefficient, species)\n"
] | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for handling chemical equations and systems."""
import re
import numbers as _numbers
try:
from collections.abc import Iterable as _Iterable
from collections.abc import Mapping as _Mapping
from collections.abc import Sequence as _Sequence
except ImportError:
from collections import Iterable as _Iterable
from collections import Mapping as _Mapping
from collections import Sequence as _Sequence
import warnings as _warnings
import numpy as _np
import pandas as _pd
import networkx as _nx
import pyparsing as _pp
def _parse_chemical_equation(value):
"""
Parse the chemical equation mini-language.
See the docstring of `ChemicalEquation` for more.
Parameters
----------
value : `str`
A string in chemical equation mini-language.
Returns
-------
mapping
A mapping in the format specified by the mini-language (see notes on
`ChemicalEquation`).
Examples
--------
>>> from pyrrole.core import _parse_chemical_equation
>>> parsed = _parse_chemical_equation('4 A + 3 B <- 2 C + D')
>>> parsed['arrow']
'->'
>>> parsed['products'][1]['species']
'B'
>>> parsed['reactants'][0]['coefficient']
2
"""
arrow = _pp.oneOf('-> <- <=>').setResultsName('arrow')
species = _pp.Word(_pp.printables).setResultsName('species')
coefficient = (_pp.Optional(_pp.Word(_pp.nums), default=1)
.setParseAction(_pp.tokenMap(int))
.setResultsName('coefficient'))
group_ = _pp.Group(coefficient + _pp.Optional(_pp.Suppress('*')) + species)
reactants = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
.setResultsName('reactants'))
products = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
.setResultsName('products'))
grammar = reactants + arrow + products
parsed = grammar.parseString(value).asDict()
if parsed['arrow'] == '<-':
parsed['reactants'], parsed['products'] \
= parsed['products'], parsed['reactants']
parsed['arrow'] = '->'
return parsed
def _check_data(data):
    """
    Check a data object for inconsistencies.

    Parameters
    ----------
    data : `pandas.DataFrame`
        A `data` object, i.e., a table whose rows store information about
        chemical species, indexed by chemical species.

    Warns
    -----
    UserWarning
        Warned if a ground state species has one or more imaginary vibrational
        frequencies, or if a transition state species has zero, two or more
        imaginary vibrational frequencies.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole.core import _check_data
    >>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]},
    ...                       {'name': 'B', 'vibfreqs': [0., -1., 2.]},
    ...                       {'name': 'C', 'vibfreqs': [0., -1., -2.]},
    ...                       {'name': 'A#', 'vibfreqs': [0., 1., 2.]},
    ...                       {'name': 'C#', 'vibfreqs': [0., -2., -1.]},
    ...                       {'name': 'B#', 'vibfreqs': [0., -1., 2.]}])
    ...         .set_index('name'))
    >>> _check_data(data)
    """
    if "vibfreqs" not in data.columns:
        return
    for name in data.index:
        # Negative frequencies stand for imaginary vibrational modes.
        nimag = _np.sum(_np.array(data.loc[name, "vibfreqs"]) < 0)
        # A trailing '#' marks a transition state by convention.
        is_transition_state = name[-1] == '#'
        if is_transition_state and nimag != 1:
            _warnings.warn("'{}' should have 1 imaginary vibfreqs but {} "
                           "found".format(name, nimag))
        elif not is_transition_state and nimag != 0:
            _warnings.warn("'{}' should have no imaginary vibfreqs but {} "
                           "found".format(name, nimag))
class ChemicalEquation:
    """
    An object for manipulating chemical equations in a way similar to vectors.

    This class provides an abstraction for chemical equations, generalizing
    equilibria, reactions, phase transitions and others. Conceptually,
    `ChemicalEquation` works like a vector that can be manipulated and operated
    upon. This allows the calculation of reaction free energies from data about
    chemical species, for instance.

    Parameters
    ----------
    value : `str`, `ChemicalEquation`, mapping or `Series`
        A string in chemical equation mini-language (see notes below), another
        `ChemicalEquation` object (which is copied) or either a mapping or
        `Series` from chemical species (`str`) to *signed* stoichiometric
        coefficients. See examples below.
    data : `pandas.DataFrame`, optional
        A `data` object, i.e., a table whose rows store information about
        chemical species, indexed by chemical species.
    arrow : `str`, optional
        Arrow symbol to use if `value` is a mapping or `Series`,
        ignored otherwise.
    check_data : `bool`, optional
        Whether to check data object for inconsistencies.

    Attributes
    ----------
    arrow : `str`
        Arrow symbol that separates `reactants` and `products` in
        a chemical equation. It is always equal to either ``'->'`` or
        ``'<=>'``.
    coefficient : mapping of `str` to `float`
        A mapping relating each `species` to its signed
        stoichiometric coefficient. Values are positive (negative) for
        `products` (`reactants`).
    products : iterable of `str`
        Chemical product species, i.e., those on the right-hand side of the
        equation.
    reactants : iterable of `str`
        Chemical reactant species, i.e., those on the left-hand side of the
        equation.
    species : iterable of `str`
        All species, i.e., union of all `reactants` and `products`.

    Raises
    ------
    ValueError
        Raised if `arrow` is given or calculated to be something other than
        ``"->"`` or ``"<=>"`` (``"<-"`` is equivalent to ``"->"`` if
        chemical equation is given as `str`).
    TypeError
        Raised if `value` is something other than `str`, mapping or
        `pandas.Series`.

    Notes
    -----
    Chemical equations in pyrrole are defined according to the following
    mini-language (white spaces are ignored)::

        equation ::= reactants arrow products
        reactants, products ::= coefficient ['*'] species
                                ['+' coefficient ['*'] species]*
        coefficient ::= [integers] (defaults to 1)
        species ::= mix of printable characters
        arrow ::= '->' | '<-' | '<=>'

    Examples
    --------
    >>> from pyrrole import ChemicalEquation
    >>> ChemicalEquation('H2O <=> H+ + OH-')
    ChemicalEquation('H2O <=> H+ + OH-')

    Chemical equations are stored in the usual order, even when given inverted
    (see notes above). This means that, although understood, ``'<-'`` is never
    a value of `arrow`:

    >>> equation = ChemicalEquation('2 CO <- CO2 + C')
    >>> equation.arrow
    '->'
    >>> equation  # stored in the usual order
    ChemicalEquation('C + CO2 -> 2 CO')

    Chemical species that appear in both reactants and products are simplified:

    >>> ChemicalEquation('A + B -> 2 A')
    ChemicalEquation('B -> A')

    Chemical equations can be manipulated as entities in a vector space. For
    instance, they can be added and subtracted, ...

    >>> (-ChemicalEquation('AgCl(s) <=> Ag+(aq) + Cl-(aq)')
    ...  +ChemicalEquation('NaCl(s) <=> Na+(aq) + Cl-(aq)')
    ...  +ChemicalEquation('AgNO3(s) <=> Ag+(aq) + NO3-(aq)')
    ...  -ChemicalEquation('NaNO3(s) <=> Na+(aq) + NO3-(aq)'))
    ChemicalEquation('AgNO3(s) + NaCl(s) <=> AgCl(s) + NaNO3(s)')

    ... and also divided and multiplied by numbers:

    >>> 10 * ChemicalEquation('CH4 + 2 O2 -> CO2 + 2 H2O') / 2
    ChemicalEquation('5 CH4 + 10 O2 -> 5 CO2 + 10 H2O')

    Stoichiometric coefficients are always *signed*, that is, positive for
    `products` and negative for `reactants`:

    >>> equation = ChemicalEquation('NaCl + AgNO3 -> NaNO3 + AgCl(v)')
    >>> equation.coefficient['AgCl(v)']  # a product
    1.0
    >>> equation.coefficient['AgNO3']  # a reactant
    -1.0

    Convenient attributes make it easy to obtain iterables of chemical species.

    >>> "AgCl(v)" in equation.reactants
    False
    >>> for species in equation.species:
    ...     print(species)
    AgCl(v)
    AgNO3
    NaCl
    NaNO3

    Although not recomended for everyday usage, a chemical equation can also be
    created from a complete set of stoichiometric coefficients. This makes it
    easy to convert other objects into a `ChemicalEquation` object:

    >>> ChemicalEquation({'NaCl': -1, 'AgNO3': -1,
    ...                   'NaNO3': 1, 'AgCl(v)': 1}, arrow='->')
    ChemicalEquation('AgNO3 + NaCl -> AgCl(v) + NaNO3')
    """

    def __init__(self, value, data=None, arrow=None, check_data=True):
        """See the docstring for this class."""
        # TODO: make tests for usage of data.
        if isinstance(value, ChemicalEquation):
            self.arrow = value.arrow
            self.coefficient = value.coefficient
            # TODO: make a test for this if.
            if data is None:
                data = value.data
        elif isinstance(value, str):
            parsed = _parse_chemical_equation(value)
            coefficient_products = _pd.Series({
                product['species']: +product['coefficient']
                for product in parsed['products']})
            coefficient_reactants = _pd.Series({
                reactant['species']: -reactant['coefficient']
                for reactant in parsed['reactants']})
            self.arrow = parsed['arrow']
            # Adding with fill_value=0 merges both sides; species appearing
            # on both sides get their coefficients summed (simplification).
            self.coefficient = coefficient_reactants.add(coefficient_products,
                                                         fill_value=0)
        elif isinstance(value, (_Mapping, _pd.Series)):
            # TODO: make test.
            if arrow not in {'->', '<=>'}:
                raise ValueError("arrow must be either '->' or '<=>' ('{}' "
                                 "given)".format(arrow))
            self.arrow = arrow
            self.coefficient = _pd.Series(value)
        else:
            raise TypeError("value must be either str, mapping or "
                            "Series ('{}' "
                            "given)".format(type(value).__name__))
        # rename returns a new Series, so copied equations do not share state.
        self.coefficient = self.coefficient.rename(self.__str__())
        self.data = data
        if self.data is not None:
            if not isinstance(self.data, _pd.DataFrame):
                self.data = _pd.DataFrame(self.data)
            if check_data:
                _check_data(self.data.loc[self.species])

    def _get_products(self):
        return self.coefficient[self.coefficient > 0].index
    products = property(_get_products)

    def _get_reactants(self):
        return self.coefficient[self.coefficient < 0].index
    reactants = property(_get_reactants)

    def _get_species(self):
        return self.coefficient.index
    species = property(_get_species)

    def __add__(self, other):
        """Add chemical equations as if they were vectors."""
        if not isinstance(other, ChemicalEquation):
            # Returning NotImplemented (instead of raising) lets Python try
            # the reflected operation and raise a proper TypeError otherwise.
            return NotImplemented
        return ChemicalEquation(self.coefficient.add(other.coefficient,
                                                     fill_value=0),
                                arrow=self.arrow)

    def __sub__(self, other):
        """Subtract chemical equations as if they were vectors."""
        if not isinstance(other, ChemicalEquation):
            return NotImplemented
        return ChemicalEquation(self.coefficient.sub(other.coefficient,
                                                     fill_value=0),
                                arrow=self.arrow)

    def __mul__(self, other):
        """Multiply chemical equation by a number."""
        if not isinstance(other, _numbers.Number):
            return NotImplemented
        return ChemicalEquation(self.coefficient.mul(other, fill_value=0),
                                arrow=self.arrow)
    __rmul__ = __mul__

    def __truediv__(self, other):
        """Divide chemical equation by a number."""
        if not isinstance(other, _numbers.Number):
            return NotImplemented
        return ChemicalEquation(self.coefficient.div(other, fill_value=0),
                                arrow=self.arrow)

    def __div__(self, other):
        """Ensure "true" division always takes place."""
        return self.__truediv__(other)

    def __pos__(self):
        """Make unary plus operator be equivalent to multiply by one."""
        return self

    def __neg__(self):
        """Make unary minus operator be equivalent to multiply by minus one."""
        return ChemicalEquation(self.coefficient.mul(-1, fill_value=0),
                                arrow=self.arrow)

    def __eq__(self, other):
        """Compare chemical equations in terms of coefficients."""
        if not isinstance(other, ChemicalEquation):
            # Comparison with unrelated objects is simply "not equal" rather
            # than an error (Python then falls back to identity comparison).
            return NotImplemented
        diff = self.__sub__(other)
        return all(diff.coefficient == 0)

    def __repr__(self):
        """Build a string representation of this object."""
        return "ChemicalEquation('{}')".format(self)

    def __str__(self):
        """Build a unique, parsable string for this chemical equation."""
        products = []
        reactants = []
        # Sorting by species name makes the representation canonical.
        for species, coefficient in sorted(self.coefficient.items()):
            if coefficient < 0:
                bag = reactants
            elif coefficient > 0:
                bag = products
            else:
                continue
            bag.append(_get_chemical_equation_piece([species], [coefficient]))
        return '{} {} {}'.format(' + '.join(reactants), self.arrow,
                                 ' + '.join(products))

    def to_series(self, only=None,
                  intensive_columns=["temperature", "pressure"],
                  check_data=True):
        """
        Produce a data record for `ChemicalEquation`.

        All possible linear differences for all numeric attributes are computed
        and stored in the returned `pandas.Series` object (see examples below).
        This allows for easy application and manipulation of
        `Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical
        equations (see examples below).

        Parameters
        ----------
        only : ``"reactants"``, ``"products"``, optional
            Instead of the standard behaviour (difference of sums), sum numeric
            attributes of either reactants or products only. If given, absolute
            coefficients are used.
        intensive_columns : iterable of `str`, optional
            A set of column names representing intensive properties (e.g. bulk
            properties) whose values are not summable. Those must be constant
            throughout the chemical equation.
        check_data : `bool`, optional
            Whether to check data object for inconsistencies.

        Returns
        -------
        series : `pandas.Series`
            Data record of attribute differences, whose name is the canonical
            string representation of the `ChemicalEquation` or, if `only` is
            given, a string representing either reactants or products (see
            examples below).

        Raises
        ------
        ValueError
            Raised if `self.data` wasn't defined (e.g. is `None`), if `only`
            is something other than ``"reactants"`` or ``"products"``, or if
            two or more distinct values for an intensive property have been
            found.

        Examples
        --------
        >>> from pyrrole import ChemicalEquation
        >>> from pyrrole.atoms import create_data, read_cclib
        >>> data = create_data(
        ...     read_cclib("data/acetate/acetic_acid.out",
        ...                "AcOH(g)"),
        ...     read_cclib("data/acetate/acetic_acid@water.out",
        ...                "AcOH(aq)"))
        >>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)",
        ...                               data)
        >>> equilibrium.to_series()
        charge         0.000000
        enthalpy      -0.010958
        entropy       -0.000198
        freeenergy    -0.010759
        mult           0.000000
        natom          0.000000
        nbasis         0.000000
        nmo            0.000000
        pressure       1.000000
        temperature  298.150000
        Name: AcOH(g) <=> AcOH(aq), dtype: float64

        Sums of either reactants or products can be computed:

        >>> equilibrium.to_series("reactants")
        charge         0.000000
        enthalpy    -228.533374
        entropy        0.031135
        freeenergy  -228.564509
        mult           1.000000
        natom          8.000000
        nbasis        68.000000
        nmo           68.000000
        pressure       1.000000
        temperature  298.150000
        Name: AcOH(g), dtype: float64
        """
        if self.data is None:
            # TODO: should an empty Series be returned?
            raise ValueError("data not defined")

        # TODO: find a way to keep categorical columns. Keep if they match?
        columns = self.data.select_dtypes('number').columns
        intensive_columns = [column for column in columns
                             if column in intensive_columns]
        extensive_columns = [column for column in columns
                             if column not in intensive_columns]
        columns = extensive_columns + intensive_columns

        if only is None:
            species = self.species
        elif only == "reactants":
            species = sorted(self.reactants)
        elif only == "products":
            species = sorted(self.products)
        else:
            raise ValueError("only must be either 'reactants' or 'products' "
                             "('{}' given)".format(only))

        if check_data:
            _check_data(self.data.loc[species])

        if all(s in self.data.index for s in species):
            # Weighted sum: signed coefficients implement the difference of
            # sums required by Hess's law.
            series = (self.data.loc[species, extensive_columns]
                      .mul(self.coefficient, axis="index").sum("index"))
            for column in intensive_columns:
                # NOTE(review): uniqueness is checked over the whole table,
                # not only the species of this equation — confirm intended.
                vals = self.data[column].unique()
                if len(vals) > 1:
                    raise ValueError("different values for {}: "
                                     "{}".format(column, vals))
                series[column] = vals[0]
        else:
            # Missing species: every extensive value is undefined.
            series = _pd.Series(_np.nan, index=columns)

        if only is None:
            name = self.__str__()
        else:
            coefficients = self.coefficient[species]
            name = _get_chemical_equation_piece(species, coefficients)
            if only == "reactants":
                # Reactant coefficients are negative; flip the sign so the
                # result is a plain (absolute-coefficient) sum.
                series[extensive_columns] = -series[extensive_columns]
                # Avoid negative zero
                # (see https://stackoverflow.com/a/11010791/4039050)
                series = series + 0.
        return series.rename(name)
def _split_arrows(value):
    """
    Split a string with sequential chemical equations at arrow symbols.

    Strings in odd positions in the returned iterable represent sums of
    chemical species (with possible stoichiometric coefficients). Strings in
    even positions represent arrow symbols. See examples below.

    Parameters
    ----------
    value : `str`
        A string with sequential chemical equations in the mini-language (see
        notes on `ChemicalEquation`).

    Returns
    -------
    iterable of `str`
        An iterable of strings. Odd positions represent sums of chemical
        species (with possible stoichiometric coefficients). Strings in even
        positions represent arrow symbols. See examples below.

    Notes
    -----
    Spaces are not striped from the returned strings (see examples below).

    Examples
    --------
    >>> from pyrrole.core import _split_arrows
    >>> _split_arrows('A -> B')
    ['A ', '->', ' B']
    """
    # The capturing group makes `split` keep the arrows, interleaved with
    # the text around them.
    arrow_pattern = re.compile(r"(->|<-|<=>)")
    return arrow_pattern.split(value)
def _split_chemical_equations(value):
    """
    Split a string with sequential chemical equations into separate strings.

    Each string in the returned iterable represents a single chemical equation
    of the input.

    See the docstrings of `ChemicalEquation` and `ChemicalSystem` for more.

    Parameters
    ----------
    value : `str`
        A string with sequential chemical equations in the mini-language (see
        notes on `ChemicalEquation`).

    Returns
    -------
    iterable of `str`
        An iterable of strings in the format specified by the mini-language
        (see notes on `ChemicalEquation`).

    Examples
    --------
    >>> from pyrrole.core import _split_chemical_equations
    >>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I')
    ['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I']
    """
    tokens = _split_arrows(value)
    # Consecutive equations share their middle sides: each equation is a
    # (left side, arrow, right side) triple starting at every other token.
    equations = []
    for start in range(0, len(tokens) - 2, 2):
        left, arrow, right = tokens[start:start + 3]
        equations.append((left + arrow + right).strip())
    return equations
# TODO: improve the examples for this class.
class ChemicalSystem:
    """
    Abstraction for models consisting of a set of chemical equations.

    Parameters
    ----------
    values : `ChemicalEquation`, `str`, sequence of `ChemicalEquation` or `str`
        Definitions of chemical equations. This can either be a single value or
        an iterable and accepts anything that can become a `ChemicalEquation`,
        or strings with consecutive equations (see examples below).
    data : `pandas.DataFrame`, optional
        A `data` object, i.e., a table whose rows store information about
        chemical species, indexed by chemical species.
    check_data : `bool`, optional
        Whether to check data object for inconsistencies.

    Attributes
    ----------
    equations : iterable of `ChemicalEquation`
        Stored `ChemicalEquation` objects.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> ChemicalSystem("A <=> B -> 2 C")
    ChemicalSystem(["A <=> B", "B -> 2 C"])

    Single chemical equations are also accepted, in which case the resulting
    model has a single equation:

    >>> ChemicalSystem(ChemicalEquation("A -> B"))
    ChemicalSystem(["A -> B"])

    Iterables can mix chemical equation definitions of different types:

    >>> ChemicalSystem(["A -> B", "A -> C <- D",
    ...                 ChemicalEquation("E -> A")])
    ChemicalSystem(["A -> B", "A -> C", "D -> C", "E -> A"])
    """

    def __init__(self, values, data=None, check_data=True):
        """See the docstring for this class."""
        if isinstance(values, str):
            self.equations = [ChemicalEquation(definition) for definition
                              in _split_chemical_equations(values)]
        elif isinstance(values, (_Iterable, _Sequence)):
            self.equations = []
            for item in values:
                if isinstance(item, str):
                    # A single string may hold several chained equations.
                    self.equations += [ChemicalEquation(definition)
                                       for definition
                                       in _split_chemical_equations(item)]
                else:
                    self.equations.append(ChemicalEquation(item))
        else:
            self.equations = [ChemicalEquation(values)]

        self.data = data
        if self.data is not None:
            if not isinstance(self.data, _pd.DataFrame):
                self.data = _pd.DataFrame(self.data)
            if check_data:
                _check_data(self.data)
            for equation in self.equations:
                # TODO: make a test for this if.
                if equation.data is None:
                    equation.data = self.data

    def __repr__(self):
        """Build a string representation of this object."""
        quoted = ['"{}"'.format(equation) for equation in self.equations]
        return "ChemicalSystem([{}])".format(", ".join(quoted))

    def to_dataframe(self, *args, **kwargs):
        """
        Produce a data table with records for all chemical equations.

        All possible differences for numeric attributes are computed and stored
        as columns in the returned `pandas.DataFrame` object (see examples
        below), whose rows represent chemical equations.

        In terms of behavior, this method can be seen as the `ChemicalEquation`
        counterpart of `create_data`.

        Returns
        -------
        dataframe : `pandas.DataFrame`
            Data table with records of attribute differences for every single
            `ChemicalEquation` object in the model.

        Notes
        -----
        Further arguments and keywords are passed directly to
        `ChemicalEquation.to_series`.

        Examples
        --------
        >>> from pyrrole import ChemicalSystem
        >>> from pyrrole.atoms import create_data, read_cclib
        >>> data = create_data(
        ...     read_cclib("data/acetate/acetic_acid.out",
        ...                "AcOH(g)"),
        ...     read_cclib("data/acetate/acetic_acid@water.out",
        ...                "AcOH(aq)"))
        >>> data = data[["enthalpy", "entropy", "freeenergy"]]
        >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
        >>> equilibrium.to_dataframe()  # doctest: +NORMALIZE_WHITESPACE
                              enthalpy   entropy  freeenergy
        chemical_equation
        AcOH(g) <=> AcOH(aq) -0.010958 -0.000198   -0.010759
        """
        records = [equation.to_series(*args, **kwargs)
                   for equation in self.equations]
        table = _pd.DataFrame(records)
        table.index.name = "chemical_equation"
        return table

    def to_digraph(self, *args, **kwargs):
        """
        Compute a directed graph for the chemical system.

        Returns
        -------
        digraph : `networkx.DiGraph`
            Graph nodes are reactants and/or products of chemical equations,
            while edges represent the equations themselves. Double ended edges
            are used to represent equilibria. Attributes are computed with
            `ChemicalEquation.to_series` for each equation (see examples
            below).

        Notes
        -----
        Further arguments and keywords are passed directly to
        `ChemicalEquation.to_series`.

        Examples
        --------
        >>> from pyrrole import ChemicalSystem
        >>> from pyrrole.atoms import create_data, read_cclib
        >>> data = create_data(
        ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
        ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
        >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
        >>> digraph = equilibrium.to_digraph()
        >>> sorted(digraph.nodes(data='freeenergy'))
        [('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)]
        >>> digraph.number_of_nodes()
        2
        >>> digraph.number_of_edges()
        2
        """
        # TODO: make test for this
        digraph = _nx.DiGraph()
        for equation in self.equations:
            lhs, arrow, rhs = [piece.strip() for piece
                               in _split_arrows(str(equation))]
            for node, side in ((lhs, "reactants"), (rhs, "products")):
                # Missing data makes to_series raise ValueError; fall back
                # to an empty attribute set in that case.
                try:
                    attributes = equation.to_series(side, *args,
                                                    **kwargs).to_dict()
                except ValueError:
                    attributes = dict()
                digraph.add_node(node, **attributes)
            try:
                attributes = equation.to_series(*args, **kwargs).to_dict()
            except ValueError:
                attributes = dict()
            digraph.add_edge(lhs, rhs, **attributes)
            if arrow == '<=>':
                # Equilibria get a second, reversed edge.
                digraph.add_edge(rhs, lhs, **attributes)
        return digraph
|
schneiderfelipe/pyrrole | pyrrole/core.py | _check_data | python | def _check_data(data):
if "vibfreqs" in data.columns:
for species in data.index:
vibfreqs = data.loc[species, "vibfreqs"]
nimagvibfreqs = _np.sum(_np.array(vibfreqs) < 0)
if species[-1] == '#' and nimagvibfreqs != 1:
_warnings.warn("'{}' should have 1 imaginary vibfreqs but {} "
"found".format(species, nimagvibfreqs))
elif species[-1] != '#' and nimagvibfreqs != 0:
_warnings.warn("'{}' should have no imaginary vibfreqs but {} "
"found".format(species, nimagvibfreqs)) | Check a data object for inconsistencies.
Parameters
----------
data : `pandas.DataFrame`
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
Warns
-----
UserWarning
Warned if a ground state species has one or more imaginary vibrational
frequencies, or if a transition state species has zero, two or more
imaginary vibrational frequencies.
Examples
--------
>>> import pandas as pd
>>> from pyrrole.core import _check_data
>>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]},
... {'name': 'B', 'vibfreqs': [0., -1., 2.]},
... {'name': 'C', 'vibfreqs': [0., -1., -2.]},
... {'name': 'A#', 'vibfreqs': [0., 1., 2.]},
... {'name': 'C#', 'vibfreqs': [0., -2., -1.]},
... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}])
... .set_index('name'))
>>> _check_data(data) | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L114-L154 | null | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for handling chemical equations and systems."""
import re
import numbers as _numbers
try:
from collections.abc import Iterable as _Iterable
from collections.abc import Mapping as _Mapping
from collections.abc import Sequence as _Sequence
except ImportError:
from collections import Iterable as _Iterable
from collections import Mapping as _Mapping
from collections import Sequence as _Sequence
import warnings as _warnings
import numpy as _np
import pandas as _pd
import networkx as _nx
import pyparsing as _pp
def _parse_chemical_equation(value):
    """
    Parse the chemical equation mini-language.

    See the docstring of `ChemicalEquation` for more.

    Parameters
    ----------
    value : `str`
        A string in chemical equation mini-language.

    Returns
    -------
    mapping
        A mapping in the format specified by the mini-language (see notes on
        `ChemicalEquation`).

    Examples
    --------
    >>> from pyrrole.core import _parse_chemical_equation
    >>> parsed = _parse_chemical_equation('4 A + 3 B <- 2 C + D')
    >>> parsed['arrow']
    '->'
    >>> parsed['products'][1]['species']
    'B'
    >>> parsed['reactants'][0]['coefficient']
    2
    """
    # Grammar pieces: an optional integer coefficient (defaulting to 1), an
    # optional '*' separator and a species token of printable characters.
    coefficient_token = (_pp.Optional(_pp.Word(_pp.nums), default=1)
                         .setParseAction(_pp.tokenMap(int))
                         .setResultsName('coefficient'))
    species_token = _pp.Word(_pp.printables).setResultsName('species')
    term = _pp.Group(coefficient_token
                     + _pp.Optional(_pp.Suppress('*'))
                     + species_token)
    side = term + _pp.ZeroOrMore(_pp.Suppress('+') + term)
    arrow_token = _pp.oneOf('-> <- <=>').setResultsName('arrow')
    # setResultsName returns a copy, so both sides are independent elements.
    grammar = (side.setResultsName('reactants')
               + arrow_token
               + side.setResultsName('products'))

    result = grammar.parseString(value).asDict()
    # Normalize '<-' to '->' by exchanging both sides of the equation.
    if result['arrow'] == '<-':
        result['reactants'], result['products'] = (result['products'],
                                                   result['reactants'])
        result['arrow'] = '->'
    return result
def _get_chemical_equation_piece(species_list, coefficients):
    """
    Produce a string from chemical species and their coefficients.

    Parameters
    ----------
    species_list : iterable of `str`
        Iterable of chemical species.
    coefficients : iterable of `float`
        Nonzero stoichiometric coefficients. The length of `species_list` and
        `coefficients` must be the same. Negative values are made positive and
        zeros are ignored along with their respective species.

    Examples
    --------
    >>> from pyrrole.core import _get_chemical_equation_piece
    >>> _get_chemical_equation_piece(["AcOH"], [2])
    '2 AcOH'
    >>> _get_chemical_equation_piece(["AcO-", "H+"], [-1, -1])
    'AcO- + H+'
    >>> _get_chemical_equation_piece("ABCD", [-2, -1, 0, -1])
    '2 A + B + D'
    """
    def _format_term(name, magnitude):
        # A unit coefficient is left implicit ('AcOH' rather than '1 AcOH').
        if magnitude == 1:
            return '{}'.format(name)
        return '{:g} {}'.format(magnitude, name)

    terms = []
    for name, coefficient in zip(species_list, coefficients):
        magnitude = -coefficient if coefficient < 0 else coefficient
        if magnitude > 0:
            terms.append(_format_term(name, magnitude))
    return ' + '.join(terms)
class ChemicalEquation:
"""
An object for manipulating chemical equations in a way similar to vectors.
This class provides an abstraction for chemical equations, generalizing
equilibria, reactions, phase transitions and others. Conceptually,
`ChemicalEquation` works like a vector that can be manipulated and operated
upon. This allows the calculation of reaction free energies from data about
chemical species, for instance.
Parameters
----------
value : `str`, `ChemicalEquation`, mapping or `Series`
A string in chemical equation mini-language (see notes below), another
`ChemicalEquation` object (which is copied) or either a mapping or
`Series` from chemical species (`str`) to *signed* stoichiometric
coefficients. See examples below.
data : `pandas.DataFrame`, optional
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
arrow : `str`, optional
Arrow symbol to use if `value` is a mapping or `Series`,
ignored otherwise.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Attributes
----------
arrow : `str`
Arrow symbol that separates `reactants` and `products` in
a chemical equation. It is always equal to either ``'->'`` or
``'<=>'``.
coefficient : mapping of `str` to `float`
A mapping relating each `species` to its signed
stoichiometric coefficient. Values are positive (negative) for
`products` (`reactants`).
products : iterable of `str`
Chemical product species, i.e., those on the right-hand side of the
equation.
reactants : iterable of `str`
Chemical reactant species, i.e., those on the left-hand side of the
equation.
species : iterable of `str`
All species, i.e., union of all `reactants` and `products`.
Raises
------
ValueError
Raised if `arrow` is given or calculated to be something other than
``"->"`` or ``"<=>"`` (``"<-"`` is equivalent to ``"->"`` if
chemical equation is given as `str`).
TypeError
Raised if `value` is something other than `str`, mapping or
`pandas.Series`.
Notes
-----
Chemical equations in pyrrole are defined according to the following
mini-language (white spaces are ignored)::
equation ::= reactants arrow products
reactants, products ::= coefficient ['*'] species
['+' coefficient ['*'] species]*
coefficient ::= [integers] (defaults to 1)
species ::= mix of printable characters
arrow ::= '->' | '<-' | '<=>'
Examples
--------
>>> from pyrrole import ChemicalEquation
>>> ChemicalEquation('H2O <=> H+ + OH-')
ChemicalEquation('H2O <=> H+ + OH-')
Chemical equations are stored in the usual order, even when given inverted
(see notes above). This means that, although understood, ``'<-'`` is never
a value of `arrow`:
>>> equation = ChemicalEquation('2 CO <- CO2 + C')
>>> equation.arrow
'->'
>>> equation # stored in the usual order
ChemicalEquation('C + CO2 -> 2 CO')
Chemical species that appear in both reactants and products are simplified:
>>> ChemicalEquation('A + B -> 2 A')
ChemicalEquation('B -> A')
Chemical equations can be manipulated as entities in a vector space. For
instance, they can be added and subtracted, ...
>>> (-ChemicalEquation('AgCl(s) <=> Ag+(aq) + Cl-(aq)')
... +ChemicalEquation('NaCl(s) <=> Na+(aq) + Cl-(aq)')
... +ChemicalEquation('AgNO3(s) <=> Ag+(aq) + NO3-(aq)')
... -ChemicalEquation('NaNO3(s) <=> Na+(aq) + NO3-(aq)'))
ChemicalEquation('AgNO3(s) + NaCl(s) <=> AgCl(s) + NaNO3(s)')
... and also divided and multiplied by numbers:
>>> 10 * ChemicalEquation('CH4 + 2 O2 -> CO2 + 2 H2O') / 2
ChemicalEquation('5 CH4 + 10 O2 -> 5 CO2 + 10 H2O')
Stoichiometric coefficients are always *signed*, that is, positive for
`products` and negative for `reactants`:
>>> equation = ChemicalEquation('NaCl + AgNO3 -> NaNO3 + AgCl(v)')
>>> equation.coefficient['AgCl(v)'] # a product
1.0
>>> equation.coefficient['AgNO3'] # a reactant
-1.0
Convenient attributes make it easy to obtain iterables of chemical species.
>>> "AgCl(v)" in equation.reactants
False
>>> for species in equation.species:
... print(species)
AgCl(v)
AgNO3
NaCl
NaNO3
Although not recomended for everyday usage, a chemical equation can also be
created from a complete set of stoichiometric coefficients. This makes it
easy to convert other objects into a `ChemicalEquation` object:
>>> ChemicalEquation({'NaCl': -1, 'AgNO3': -1,
... 'NaNO3': 1, 'AgCl(v)': 1}, arrow='->')
ChemicalEquation('AgNO3 + NaCl -> AgCl(v) + NaNO3')
"""
def __init__(self, value, data=None, arrow=None, check_data=True):
"""See the docstring for this class."""
# TODO: make tests for usage of data.
if isinstance(value, ChemicalEquation):
self.arrow = value.arrow
self.coefficient = value.coefficient
# TODO: make a test for this if.
if data is None:
data = value.data
elif isinstance(value, str):
parsed = _parse_chemical_equation(value)
coefficient_products = _pd.Series({
product['species']: +product['coefficient']
for product in parsed['products']})
coefficient_reactants = _pd.Series({
reactant['species']: -reactant['coefficient']
for reactant in parsed['reactants']})
self.arrow = parsed['arrow']
self.coefficient = coefficient_reactants.add(coefficient_products,
fill_value=0)
elif isinstance(value, (_Mapping, _pd.Series)):
# TODO: make test.
if arrow not in {'->', '<=>'}:
raise ValueError("arrow must be either '->' or '<=>' ('{}' "
"given)".format(arrow))
self.arrow = arrow
self.coefficient = _pd.Series(value)
else:
raise TypeError("value must be either str, mapping or "
"Series ('{}' "
"given)".format(type(value).__name__))
self.coefficient = self.coefficient.rename(self.__str__())
self.data = data
if self.data is not None:
if not isinstance(self.data, _pd.DataFrame):
self.data = _pd.DataFrame(self.data)
if check_data:
_check_data(self.data.loc[self.species])
def _get_products(self):
return self.coefficient[self.coefficient > 0].index
products = property(_get_products)
def _get_reactants(self):
return self.coefficient[self.coefficient < 0].index
reactants = property(_get_reactants)
def _get_species(self):
return self.coefficient.index
species = property(_get_species)
def __add__(self, other):
"""Add chemical equations as if they were vectors."""
if not isinstance(other, ChemicalEquation):
raise NotImplementedError
return ChemicalEquation(self.coefficient.add(other.coefficient,
fill_value=0),
arrow=self.arrow)
def __sub__(self, other):
"""Subtract chemical equations as if they were vectors."""
if not isinstance(other, ChemicalEquation):
raise NotImplementedError
return ChemicalEquation(self.coefficient.sub(other.coefficient,
fill_value=0),
arrow=self.arrow)
def __mul__(self, other):
"""Multiply chemical equation by a number."""
if not isinstance(other, _numbers.Number):
raise NotImplementedError
return ChemicalEquation(self.coefficient.mul(other, fill_value=0),
arrow=self.arrow)
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide chemical equation by a number."""
if not isinstance(other, _numbers.Number):
raise NotImplementedError
return ChemicalEquation(self.coefficient.div(other, fill_value=0),
arrow=self.arrow)
def __div__(self, other):
"""Ensure "true" division always takes place."""
return self.__truediv__(other)
def __pos__(self):
    """Make unary plus operator be equivalent to multiply by one."""
    return self

def __neg__(self):
    """Make unary minus operator be equivalent to multiply by minus one."""
    # Flipping all signs swaps the roles of reactants and products.
    flipped = self.coefficient.mul(-1, fill_value=0)
    return ChemicalEquation(flipped, arrow=self.arrow)
def __eq__(self, other):
    """
    Compare chemical equations in terms of coefficients.

    Two equations are equal when the difference of their signed
    stoichiometric coefficients is zero for every species.
    """
    if not isinstance(other, ChemicalEquation):
        # Return NotImplemented (instead of raising NotImplementedError,
        # as __sub__ would) so that ``equation == other_object``
        # evaluates to False via Python's fallback, per the data model.
        return NotImplemented
    # NOTE(review): defining __eq__ leaves the class unhashable in
    # Python 3 (__hash__ is set to None) — confirm instances are never
    # used as dict keys or set members.
    diff = self.__sub__(other)
    return all(diff.coefficient == 0)
def __repr__(self):
    """Build a string representation of this object."""
    text = str(self)
    return "ChemicalEquation('{}')".format(text)
def __str__(self):
    """Build a unique, parsable string for this chemical equation."""
    left = []   # reactant terms (negative coefficients)
    right = []  # product terms (positive coefficients)
    # Sorting by species name makes the representation canonical.
    for name, coeff in sorted(self.coefficient.items()):
        if coeff > 0:
            side = right
        elif coeff < 0:
            side = left
        else:
            # Zero coefficients contribute nothing to either side.
            continue
        side.append(_get_chemical_equation_piece([name], [coeff]))
    return '{} {} {}'.format(' + '.join(left), self.arrow,
                             ' + '.join(right))
def to_series(self, only=None,
              intensive_columns=["temperature", "pressure"],
              check_data=True):
    """
    Produce a data record for `ChemicalEquation`.

    All possible linear differences for all numeric attributes are computed
    and stored in the returned `pandas.Series` object (see examples below).
    This allows for easy application and manipulation of
    `Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical
    equations (see examples below).

    Parameters
    ----------
    only : ``"reactants"``, ``"products"``, optional
        Instead of the standard behaviour (difference of sums), sum numeric
        attributes of either reactants or products only. If given, absolute
        coefficients are used.
    intensive_columns : iterable of `str`, optional
        A set of column names representing intensive properties (e.g. bulk
        properties) whose values are not summable. Those must be constant
        throughout the chemical equation.
    check_data : `bool`, optional
        Whether to check data object for inconsistencies.

    Returns
    -------
    series : `pandas.Series`
        Data record of attribute differences, whose name is the canonical
        string representation of the `ChemicalEquation` or, if `only` is
        given, a string representing either reactants or products (see
        examples below).

    Raises
    ------
    ValueError
        Raised if `self.data` wasn't defined (e.g. is `None`), if `only`
        is something other than ``"reactants"`` or ``"products"``, or if
        two or more distinct values for an intensive property have been
        found among the species taking part in the equation.

    Examples
    --------
    >>> from pyrrole import ChemicalEquation
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out",
    ...                "AcOH(g)"),
    ...     read_cclib("data/acetate/acetic_acid@water.out",
    ...                "AcOH(aq)"))
    >>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)",
    ...                                data)
    >>> equilibrium.to_series()
    charge           0.000000
    enthalpy        -0.010958
    entropy         -0.000198
    freeenergy      -0.010759
    mult             0.000000
    natom            0.000000
    nbasis           0.000000
    nmo              0.000000
    pressure         1.000000
    temperature    298.150000
    Name: AcOH(g) <=> AcOH(aq), dtype: float64

    Sums of either reactants or products can be computed:

    >>> equilibrium.to_series("reactants")
    charge           0.000000
    enthalpy      -228.533374
    entropy          0.031135
    freeenergy    -228.564509
    mult             1.000000
    natom            8.000000
    nbasis          68.000000
    nmo             68.000000
    pressure         1.000000
    temperature    298.150000
    Name: AcOH(g), dtype: float64

    """
    if self.data is None:
        # TODO: should an empty Series be returned?
        raise ValueError("data not defined")
    # TODO: find a way to keep categorical columns. Keep if they match?
    columns = self.data.select_dtypes('number').columns
    # Partition the numeric columns into intensive (non-summable) and
    # extensive (summable) ones, preserving the table's column order.
    intensive_columns = [column for column in columns
                         if column in intensive_columns]
    extensive_columns = [column for column in columns
                         if column not in intensive_columns]
    columns = extensive_columns + intensive_columns
    if only is None:
        species = self.species
    elif only == "reactants":
        species = sorted(self.reactants)
    elif only == "products":
        species = sorted(self.products)
    else:
        raise ValueError("only must be either 'reactants' or 'products' "
                         "('{}' given)".format(only))
    if check_data:
        _check_data(self.data.loc[species])
    if all(s in self.data.index for s in species):
        # Hess's law: weight each species' extensive properties by its
        # signed stoichiometric coefficient and sum over species.
        series = (self.data.loc[species, extensive_columns]
                  .mul(self.coefficient, axis="index").sum("index"))
        for column in intensive_columns:
            # Intensive properties cannot be summed; they must be a single
            # value for the species taking part in this equation. Restrict
            # the constancy check to those species (previously the whole
            # data table was scanned, which raised spuriously when
            # unrelated entries held different values).
            vals = self.data.loc[species, column].unique()
            if len(vals) > 1:
                raise ValueError("different values for {}: "
                                 "{}".format(column, vals))
            series[column] = vals[0]
    else:
        # At least one species has no data row: return an all-NaN record.
        series = _pd.Series(_np.nan, index=columns)
    if only is None:
        name = self.__str__()
    else:
        coefficients = self.coefficient[species]
        name = _get_chemical_equation_piece(species, coefficients)
        if only == "reactants":
            # Reactant coefficients are negative by convention; flip the
            # sign so sums over reactants are reported with positive
            # (absolute) weights.
            series[extensive_columns] = -series[extensive_columns]
    # Avoid negative zero
    # (see https://stackoverflow.com/a/11010791/4039050)
    series = series + 0.
    return series.rename(name)
def _split_arrows(value):
"""
Split a string with sequential chemical equations into separate strings.
Strings in odd positions in the returned iterable represent sums of
chemical species (with possible stoichiometric coefficients). Strings in
even positions represent arrow symbols. See examples below.
Parameters
----------
value : `str`
A string with sequential chemical equations in the mini-language (see
notes on `ChemicalEquation`).
Returns
-------
iterable of `str`
An iterable of strings. Odd positions represent sums of chemical
species (with possible stoichiometric coefficients). Strings in even
positions represent arrow symbols. See examples below.
Notes
-----
Spaces are not striped from the returned strings (see examples below).
Examples
--------
>>> from pyrrole.core import _split_arrows
>>> _split_arrows('A -> B')
['A ', '->', ' B']
"""
return re.split(r"(->|<-|<=>)", value)
def _split_chemical_equations(value):
"""
Split a string with sequential chemical equations into separate strings.
Each string in the returned iterable represents a single chemical equation
of the input.
See the docstrings of `ChemicalEquation` and `ChemicalSystem` for more.
Parameters
----------
value : `str`
A string with sequential chemical equations in the mini-language (see
notes on `ChemicalEquation`).
Returns
-------
iterable of `str`
An iterable of strings in the format specified by the mini-language
(see notes on `ChemicalEquation`).
Examples
--------
>>> from pyrrole.core import _split_chemical_equations
>>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I')
['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I']
"""
pieces = _split_arrows(value)
return [(pieces[i] +
pieces[i + 1] +
pieces[i + 2]).strip()
for i in range(0, len(pieces) - 2, 2)]
# TODO: improve the examples for this class.
class ChemicalSystem:
    """
    Abstraction for models consisting of a set of chemical equations.

    Parameters
    ----------
    values : `ChemicalEquation`, `str`, sequence of `ChemicalEquation` or `str`
        Definitions of chemical equations. This can either be a single value or
        an iterable and accepts anything that can become a `ChemicalEquation`,
        or strings with consecutive equations (see examples below).
    data : `pandas.DataFrame`, optional
        A `data` object, i.e., a table whose rows store information about
        chemical species, indexed by chemical species.
    check_data : `bool`, optional
        Whether to check data object for inconsistencies.

    Attributes
    ----------
    equations : iterable of `ChemicalEquation`
        Stored `ChemicalEquation` objects.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> ChemicalSystem("A <=> B -> 2 C")
    ChemicalSystem(["A <=> B", "B -> 2 C"])

    Single chemical equations are also accepted, in which case the resulting
    model has a single equation:

    >>> ChemicalSystem(ChemicalEquation("A -> B"))
    ChemicalSystem(["A -> B"])

    Iterables can mix chemical equation definitions of different types:

    >>> ChemicalSystem(["A -> B", "A -> C <- D",
    ...                 ChemicalEquation("E -> A")])
    ChemicalSystem(["A -> B", "A -> C", "D -> C", "E -> A"])

    """

    def __init__(self, values, data=None, check_data=True):
        """See the docstring for this class."""
        if isinstance(values, str):
            # A single string may hold several chained equations
            # (e.g. "A -> B -> C"); split into one ChemicalEquation each.
            self.equations = list(map(ChemicalEquation,
                                      _split_chemical_equations(values)))
        elif isinstance(values, (_Iterable, _Sequence)):
            self.equations = []
            for value in values:
                if isinstance(value, str):
                    # Strings inside the iterable may also chain equations.
                    self.equations.extend(map(ChemicalEquation,
                                              _split_chemical_equations(value)))
                else:
                    self.equations.append(ChemicalEquation(value))
        else:
            # Anything else that ChemicalEquation accepts becomes a
            # single-equation system.
            self.equations = [ChemicalEquation(values)]
        self.data = data
        if self.data is not None:
            if not isinstance(self.data, _pd.DataFrame):
                self.data = _pd.DataFrame(self.data)
            if check_data:
                _check_data(self.data)
            for equation in self.equations:
                # TODO: make a test for this if.
                if equation.data is None:
                    # Equations without their own data table inherit the
                    # system-wide one.
                    equation.data = self.data

    def __repr__(self):
        """Build a string representation of this object."""
        return "ChemicalSystem([{}])".format(
            ", ".join(['"' + str(equation) + '"'
                       for equation in self.equations]))

    def to_dataframe(self, *args, **kwargs):
        """
        Produce a data table with records for all chemical equations.

        All possible differences for numeric attributes are computed and stored
        as columns in the returned `pandas.DataFrame` object (see examples
        below), whose rows represent chemical equations.

        In terms of behavior, this method can be seen as the `ChemicalEquation`
        counterpart of `create_data`.

        Returns
        -------
        dataframe : `pandas.DataFrame`
            Data table with records of attribute differences for every single
            `ChemicalEquation` object in the model.

        Notes
        -----
        Further arguments and keywords are passed directly to
        `ChemicalEquation.to_series`.

        Examples
        --------
        >>> from pyrrole import ChemicalSystem
        >>> from pyrrole.atoms import create_data, read_cclib
        >>> data = create_data(
        ...     read_cclib("data/acetate/acetic_acid.out",
        ...                "AcOH(g)"),
        ...     read_cclib("data/acetate/acetic_acid@water.out",
        ...                "AcOH(aq)"))
        >>> data = data[["enthalpy", "entropy", "freeenergy"]]
        >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
        >>> equilibrium.to_dataframe()  # doctest: +NORMALIZE_WHITESPACE
                              enthalpy   entropy  freeenergy
        chemical_equation
        AcOH(g) <=> AcOH(aq) -0.010958 -0.000198   -0.010759

        """
        # One row per equation; to_series computes the Hess's-law record.
        dataframe = _pd.DataFrame([equation.to_series(*args, **kwargs)
                                   for equation in self.equations])
        dataframe.index.name = "chemical_equation"
        return dataframe

    def to_digraph(self, *args, **kwargs):
        """
        Compute a directed graph for the chemical system.

        Returns
        -------
        digraph : `networkx.DiGraph`
            Graph nodes are reactants and/or products of chemical equations,
            while edges represent the equations themselves. Double ended edges
            are used to represent equilibria. Attributes are computed with
            `ChemicalEquation.to_series` for each equation (see examples
            below).

        Notes
        -----
        Further arguments and keywords are passed directly to
        `ChemicalEquation.to_series`.

        Examples
        --------
        >>> from pyrrole import ChemicalSystem
        >>> from pyrrole.atoms import create_data, read_cclib
        >>> data = create_data(
        ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
        ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
        >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
        >>> digraph = equilibrium.to_digraph()
        >>> sorted(digraph.nodes(data='freeenergy'))
        [('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)]
        >>> digraph.number_of_nodes()
        2
        >>> digraph.number_of_edges()
        2

        """
        # TODO: make test for this
        digraph = _nx.DiGraph()
        for equation in self.equations:
            # The canonical string splits into exactly one
            # (reactants, arrow, products) triple.
            reactants, arrow, products = [value.strip() for value
                                          in _split_arrows(str(equation))]
            # to_series raises ValueError when no data is attached; in
            # that case fall back to attribute-less nodes/edges.
            try:
                attr = equation.to_series("reactants", *args,
                                          **kwargs).to_dict()
            except ValueError:
                attr = dict()
            digraph.add_node(reactants, **attr)
            try:
                attr = equation.to_series("products", *args,
                                          **kwargs).to_dict()
            except ValueError:
                attr = dict()
            digraph.add_node(products, **attr)
            try:
                attr = equation.to_series(*args, **kwargs).to_dict()
            except ValueError:
                attr = dict()
            digraph.add_edge(reactants, products, **attr)
            if arrow == '<=>':
                # Equilibria are represented by edges in both directions.
                digraph.add_edge(products, reactants, **attr)
        return digraph
|
schneiderfelipe/pyrrole | pyrrole/core.py | _split_chemical_equations | python | def _split_chemical_equations(value):
pieces = _split_arrows(value)
return [(pieces[i] +
pieces[i + 1] +
pieces[i + 2]).strip()
for i in range(0, len(pieces) - 2, 2)] | Split a string with sequential chemical equations into separate strings.
Each string in the returned iterable represents a single chemical equation
of the input.
See the docstrings of `ChemicalEquation` and `ChemicalSystem` for more.
Parameters
----------
value : `str`
A string with sequential chemical equations in the mini-language (see
notes on `ChemicalEquation`).
Returns
-------
iterable of `str`
An iterable of strings in the format specified by the mini-language
(see notes on `ChemicalEquation`).
Examples
--------
>>> from pyrrole.core import _split_chemical_equations
>>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I')
['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I'] | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L584-L615 | [
"def _split_arrows(value):\n \"\"\"\n Split a string with sequential chemical equations into separate strings.\n\n Strings in odd positions in the returned iterable represent sums of\n chemical species (with possible stoichiometric coefficients). Strings in\n even positions represent arrow symbols. See examples below.\n\n Parameters\n ----------\n value : `str`\n A string with sequential chemical equations in the mini-language (see\n notes on `ChemicalEquation`).\n\n Returns\n -------\n iterable of `str`\n An iterable of strings. Odd positions represent sums of chemical\n species (with possible stoichiometric coefficients). Strings in even\n positions represent arrow symbols. See examples below.\n\n Notes\n -----\n Spaces are not striped from the returned strings (see examples below).\n\n Examples\n --------\n >>> from pyrrole.core import _split_arrows\n >>> _split_arrows('A -> B')\n ['A ', '->', ' B']\n\n \"\"\"\n return re.split(r\"(->|<-|<=>)\", value)\n"
] | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for handling chemical equations and systems."""
import re
import numbers as _numbers
try:
from collections.abc import Iterable as _Iterable
from collections.abc import Mapping as _Mapping
from collections.abc import Sequence as _Sequence
except ImportError:
from collections import Iterable as _Iterable
from collections import Mapping as _Mapping
from collections import Sequence as _Sequence
import warnings as _warnings
import numpy as _np
import pandas as _pd
import networkx as _nx
import pyparsing as _pp
def _parse_chemical_equation(value):
    """
    Parse the chemical equation mini-language.

    See the docstring of `ChemicalEquation` for more.

    Parameters
    ----------
    value : `str`
        A string in chemical equation mini-language.

    Returns
    -------
    mapping
        A mapping in the format specified by the mini-language (see notes on
        `ChemicalEquation`).

    Examples
    --------
    >>> from pyrrole.core import _parse_chemical_equation
    >>> parsed = _parse_chemical_equation('4 A + 3 B <- 2 C + D')
    >>> parsed['arrow']
    '->'
    >>> parsed['products'][1]['species']
    'B'
    >>> parsed['reactants'][0]['coefficient']
    2

    """
    # pyparsing grammar: an equation is "reactants arrow products".
    arrow = _pp.oneOf('-> <- <=>').setResultsName('arrow')
    species = _pp.Word(_pp.printables).setResultsName('species')
    # Stoichiometric coefficients are optional integers (default 1).
    coefficient = (_pp.Optional(_pp.Word(_pp.nums), default=1)
                   .setParseAction(_pp.tokenMap(int))
                   .setResultsName('coefficient'))
    # A term is "coefficient ['*'] species"; the '*' separator is optional
    # and suppressed from the results.
    group_ = _pp.Group(coefficient + _pp.Optional(_pp.Suppress('*')) + species)
    # Each side is one term plus zero or more '+'-separated terms.
    reactants = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
                 .setResultsName('reactants'))
    products = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
                .setResultsName('products'))
    grammar = reactants + arrow + products
    parsed = grammar.parseString(value).asDict()
    if parsed['arrow'] == '<-':
        # Normalize reversed equations: swap sides so that the stored
        # arrow is always '->' or '<=>'.
        parsed['reactants'], parsed['products'] \
            = parsed['products'], parsed['reactants']
        parsed['arrow'] = '->'
    return parsed
def _get_chemical_equation_piece(species_list, coefficients):
"""
Produce a string from chemical species and their coefficients.
Parameters
----------
species_list : iterable of `str`
Iterable of chemical species.
coefficients : iterable of `float`
Nonzero stoichiometric coefficients. The length of `species_list` and
`coefficients` must be the same. Negative values are made positive and
zeros are ignored along with their respective species.
Examples
--------
>>> from pyrrole.core import _get_chemical_equation_piece
>>> _get_chemical_equation_piece(["AcOH"], [2])
'2 AcOH'
>>> _get_chemical_equation_piece(["AcO-", "H+"], [-1, -1])
'AcO- + H+'
>>> _get_chemical_equation_piece("ABCD", [-2, -1, 0, -1])
'2 A + B + D'
"""
def _get_token(species, coefficient):
if coefficient == 1:
return '{}'.format(species)
else:
return '{:g} {}'.format(coefficient, species)
bag = []
for species, coefficient in zip(species_list, coefficients):
if coefficient < 0:
coefficient = -coefficient
if coefficient > 0:
bag.append(_get_token(species, coefficient))
return '{}'.format(' + '.join(bag))
def _check_data(data):
"""
Check a data object for inconsistencies.
Parameters
----------
data : `pandas.DataFrame`
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
Warns
-----
UserWarning
Warned if a ground state species has one or more imaginary vibrational
frequencies, or if a transition state species has zero, two or more
imaginary vibrational frequencies.
Examples
--------
>>> import pandas as pd
>>> from pyrrole.core import _check_data
>>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]},
... {'name': 'B', 'vibfreqs': [0., -1., 2.]},
... {'name': 'C', 'vibfreqs': [0., -1., -2.]},
... {'name': 'A#', 'vibfreqs': [0., 1., 2.]},
... {'name': 'C#', 'vibfreqs': [0., -2., -1.]},
... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}])
... .set_index('name'))
>>> _check_data(data)
"""
if "vibfreqs" in data.columns:
for species in data.index:
vibfreqs = data.loc[species, "vibfreqs"]
nimagvibfreqs = _np.sum(_np.array(vibfreqs) < 0)
if species[-1] == '#' and nimagvibfreqs != 1:
_warnings.warn("'{}' should have 1 imaginary vibfreqs but {} "
"found".format(species, nimagvibfreqs))
elif species[-1] != '#' and nimagvibfreqs != 0:
_warnings.warn("'{}' should have no imaginary vibfreqs but {} "
"found".format(species, nimagvibfreqs))
class ChemicalEquation:
"""
An object for manipulating chemical equations in a way similar to vectors.
This class provides an abstraction for chemical equations, generalizing
equilibria, reactions, phase transitions and others. Conceptually,
`ChemicalEquation` works like a vector that can be manipulated and operated
upon. This allows the calculation of reaction free energies from data about
chemical species, for instance.
Parameters
----------
value : `str`, `ChemicalEquation`, mapping or `Series`
A string in chemical equation mini-language (see notes below), another
`ChemicalEquation` object (which is copied) or either a mapping or
`Series` from chemical species (`str`) to *signed* stoichiometric
coefficients. See examples below.
data : `pandas.DataFrame`, optional
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
arrow : `str`, optional
Arrow symbol to use if `value` is a mapping or `Series`,
ignored otherwise.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Attributes
----------
arrow : `str`
Arrow symbol that separates `reactants` and `products` in
a chemical equation. It is always equal to either ``'->'`` or
``'<=>'``.
coefficient : mapping of `str` to `float`
A mapping relating each `species` to its signed
stoichiometric coefficient. Values are positive (negative) for
`products` (`reactants`).
products : iterable of `str`
Chemical product species, i.e., those on the right-hand side of the
equation.
reactants : iterable of `str`
Chemical reactant species, i.e., those on the left-hand side of the
equation.
species : iterable of `str`
All species, i.e., union of all `reactants` and `products`.
Raises
------
ValueError
Raised if `arrow` is given or calculated to be something other than
``"->"`` or ``"<=>"`` (``"<-"`` is equivalent to ``"->"`` if
chemical equation is given as `str`).
TypeError
Raised if `value` is something other than `str`, mapping or
`pandas.Series`.
Notes
-----
Chemical equations in pyrrole are defined according to the following
mini-language (white spaces are ignored)::
equation ::= reactants arrow products
reactants, products ::= coefficient ['*'] species
['+' coefficient ['*'] species]*
coefficient ::= [integers] (defaults to 1)
species ::= mix of printable characters
arrow ::= '->' | '<-' | '<=>'
Examples
--------
>>> from pyrrole import ChemicalEquation
>>> ChemicalEquation('H2O <=> H+ + OH-')
ChemicalEquation('H2O <=> H+ + OH-')
Chemical equations are stored in the usual order, even when given inverted
(see notes above). This means that, although understood, ``'<-'`` is never
a value of `arrow`:
>>> equation = ChemicalEquation('2 CO <- CO2 + C')
>>> equation.arrow
'->'
>>> equation # stored in the usual order
ChemicalEquation('C + CO2 -> 2 CO')
Chemical species that appear in both reactants and products are simplified:
>>> ChemicalEquation('A + B -> 2 A')
ChemicalEquation('B -> A')
Chemical equations can be manipulated as entities in a vector space. For
instance, they can be added and subtracted, ...
>>> (-ChemicalEquation('AgCl(s) <=> Ag+(aq) + Cl-(aq)')
... +ChemicalEquation('NaCl(s) <=> Na+(aq) + Cl-(aq)')
... +ChemicalEquation('AgNO3(s) <=> Ag+(aq) + NO3-(aq)')
... -ChemicalEquation('NaNO3(s) <=> Na+(aq) + NO3-(aq)'))
ChemicalEquation('AgNO3(s) + NaCl(s) <=> AgCl(s) + NaNO3(s)')
... and also divided and multiplied by numbers:
>>> 10 * ChemicalEquation('CH4 + 2 O2 -> CO2 + 2 H2O') / 2
ChemicalEquation('5 CH4 + 10 O2 -> 5 CO2 + 10 H2O')
Stoichiometric coefficients are always *signed*, that is, positive for
`products` and negative for `reactants`:
>>> equation = ChemicalEquation('NaCl + AgNO3 -> NaNO3 + AgCl(v)')
>>> equation.coefficient['AgCl(v)'] # a product
1.0
>>> equation.coefficient['AgNO3'] # a reactant
-1.0
Convenient attributes make it easy to obtain iterables of chemical species.
>>> "AgCl(v)" in equation.reactants
False
>>> for species in equation.species:
... print(species)
AgCl(v)
AgNO3
NaCl
NaNO3
Although not recomended for everyday usage, a chemical equation can also be
created from a complete set of stoichiometric coefficients. This makes it
easy to convert other objects into a `ChemicalEquation` object:
>>> ChemicalEquation({'NaCl': -1, 'AgNO3': -1,
... 'NaNO3': 1, 'AgCl(v)': 1}, arrow='->')
ChemicalEquation('AgNO3 + NaCl -> AgCl(v) + NaNO3')
"""
def __init__(self, value, data=None, arrow=None, check_data=True):
    """See the docstring for this class."""
    # TODO: make tests for usage of data.
    if isinstance(value, ChemicalEquation):
        # Copy construction: reuse the other equation's arrow and
        # coefficients; its data table is inherited unless overridden.
        self.arrow = value.arrow
        self.coefficient = value.coefficient
        # TODO: make a test for this if.
        if data is None:
            data = value.data
    elif isinstance(value, str):
        # Parse the mini-language: products get positive and reactants
        # negative signed stoichiometric coefficients.
        parsed = _parse_chemical_equation(value)
        coefficient_products = _pd.Series({
            product['species']: +product['coefficient']
            for product in parsed['products']})
        coefficient_reactants = _pd.Series({
            reactant['species']: -reactant['coefficient']
            for reactant in parsed['reactants']})
        self.arrow = parsed['arrow']
        # Adding both sides cancels species appearing on each, e.g.
        # 'A + B -> 2 A' simplifies to 'B -> A'.
        self.coefficient = coefficient_reactants.add(coefficient_products,
                                                     fill_value=0)
    elif isinstance(value, (_Mapping, _pd.Series)):
        # TODO: make test.
        # Signed coefficients carry the sides, so the arrow must be
        # supplied explicitly here ('<-' is not accepted).
        if arrow not in {'->', '<=>'}:
            raise ValueError("arrow must be either '->' or '<=>' ('{}' "
                             "given)".format(arrow))
        self.arrow = arrow
        self.coefficient = _pd.Series(value)
    else:
        raise TypeError("value must be either str, mapping or "
                        "Series ('{}' "
                        "given)".format(type(value).__name__))
    # Name the coefficient Series after the canonical equation string.
    self.coefficient = self.coefficient.rename(self.__str__())
    self.data = data
    if self.data is not None:
        if not isinstance(self.data, _pd.DataFrame):
            self.data = _pd.DataFrame(self.data)
        if check_data:
            # Validate only the rows for species in this equation.
            _check_data(self.data.loc[self.species])
def _get_products(self):
return self.coefficient[self.coefficient > 0].index
products = property(_get_products)
def _get_reactants(self):
return self.coefficient[self.coefficient < 0].index
reactants = property(_get_reactants)
def _get_species(self):
return self.coefficient.index
species = property(_get_species)
def __add__(self, other):
"""Add chemical equations as if they were vectors."""
if not isinstance(other, ChemicalEquation):
raise NotImplementedError
return ChemicalEquation(self.coefficient.add(other.coefficient,
fill_value=0),
arrow=self.arrow)
def __sub__(self, other):
"""Subtract chemical equations as if they were vectors."""
if not isinstance(other, ChemicalEquation):
raise NotImplementedError
return ChemicalEquation(self.coefficient.sub(other.coefficient,
fill_value=0),
arrow=self.arrow)
def __mul__(self, other):
"""Multiply chemical equation by a number."""
if not isinstance(other, _numbers.Number):
raise NotImplementedError
return ChemicalEquation(self.coefficient.mul(other, fill_value=0),
arrow=self.arrow)
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide chemical equation by a number."""
if not isinstance(other, _numbers.Number):
raise NotImplementedError
return ChemicalEquation(self.coefficient.div(other, fill_value=0),
arrow=self.arrow)
def __div__(self, other):
"""Ensure "true" division always takes place."""
return self.__truediv__(other)
def __pos__(self):
"""Make unary plus operator be equivalent to multiply by one."""
return self
def __neg__(self):
"""Make unary minus operator be equivalent to multiply by minus one."""
return ChemicalEquation(self.coefficient.mul(-1, fill_value=0),
arrow=self.arrow)
def __eq__(self, other):
"""Compare chemical equations in terms of coefficients."""
diff = self.__sub__(other)
return all(diff.coefficient == 0)
def __repr__(self):
"""Build a string representation of this object."""
return "ChemicalEquation('{}')".format(self)
def __str__(self):
"""Build a unique, parsable string for this chemical equation."""
products = []
reactants = []
for species, coefficient in sorted(self.coefficient.items()):
if coefficient < 0:
bag = reactants
elif coefficient > 0:
bag = products
else:
continue
bag.append(_get_chemical_equation_piece([species], [coefficient]))
return '{} {} {}'.format(' + '.join(reactants), self.arrow,
' + '.join(products))
def to_series(self, only=None,
intensive_columns=["temperature", "pressure"],
check_data=True):
"""
Produce a data record for `ChemicalEquation`.
All possible linear differences for all numeric attributes are computed
and stored in the returned `pandas.Series` object (see examples below).
This allows for easy application and manipulation of
`Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical
equations (see examples below).
Parameters
----------
only : ``"reactants"``, ``"products"``, optional
Instead of the standard behaviour (difference of sums), sum numeric
attributes of either reactants or products only. If given, absolute
coefficients are used.
intensive_columns : iterable of `str`, optional
A set of column names representing intensive properties (e.g. bulk
properties) whose values are not summable. Those must be constant
throughout the chemical equation.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Returns
-------
series : `pandas.Series`
Data record of attribute differences, whose name is the canonical
string representation of the `ChemicalEquation` or, if `only` is
given, a string representing either reactants or products (see
examples below).
Raises
------
ValueError
Raised if `self.data` wasn't defined (e.g. is `None`), if `only`
is something other than ``"reactants"`` or ``"products"``, or if
two or more distinct values for an intensive property have been
found.
Examples
--------
>>> from pyrrole import ChemicalEquation
>>> from pyrrole.atoms import create_data, read_cclib
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out",
... "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out",
... "AcOH(aq)"))
>>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)",
... data)
>>> equilibrium.to_series()
charge 0.000000
enthalpy -0.010958
entropy -0.000198
freeenergy -0.010759
mult 0.000000
natom 0.000000
nbasis 0.000000
nmo 0.000000
pressure 1.000000
temperature 298.150000
Name: AcOH(g) <=> AcOH(aq), dtype: float64
Sums of either reactants or products can be computed:
>>> equilibrium.to_series("reactants")
charge 0.000000
enthalpy -228.533374
entropy 0.031135
freeenergy -228.564509
mult 1.000000
natom 8.000000
nbasis 68.000000
nmo 68.000000
pressure 1.000000
temperature 298.150000
Name: AcOH(g), dtype: float64
"""
if self.data is None:
# TODO: should an empty Series be returned?
raise ValueError("data not defined")
# TODO: find a way to keep categorical columns. Keep if they match?
columns = self.data.select_dtypes('number').columns
intensive_columns = [column for column in columns
if column in intensive_columns]
extensive_columns = [column for column in columns
if column not in intensive_columns]
columns = extensive_columns + intensive_columns
if only is None:
species = self.species
elif only == "reactants":
species = sorted(self.reactants)
elif only == "products":
species = sorted(self.products)
else:
raise ValueError("only must be either 'reactants' or 'products' "
"('{}' given)".format(only))
if check_data:
_check_data(self.data.loc[species])
if all([s in self.data.index for s in species]):
series = (self.data.loc[species, extensive_columns]
.mul(self.coefficient, axis="index").sum("index"))
for column in intensive_columns:
vals = self.data[column].unique()
if len(vals) > 1:
raise ValueError("different values for {}: "
"{}".format(column, vals))
series[column] = vals[0]
else:
series = _pd.Series(_np.nan, index=columns)
if only is None:
name = self.__str__()
else:
coefficients = self.coefficient[species]
name = _get_chemical_equation_piece(species, coefficients)
if only == "reactants":
series[extensive_columns] = -series[extensive_columns]
# Avoid negative zero
# (see https://stackoverflow.com/a/11010791/4039050)
series = series + 0.
return series.rename(name)
def _split_arrows(value):
"""
Split a string with sequential chemical equations into separate strings.
Strings in odd positions in the returned iterable represent sums of
chemical species (with possible stoichiometric coefficients). Strings in
even positions represent arrow symbols. See examples below.
Parameters
----------
value : `str`
A string with sequential chemical equations in the mini-language (see
notes on `ChemicalEquation`).
Returns
-------
iterable of `str`
An iterable of strings. Odd positions represent sums of chemical
species (with possible stoichiometric coefficients). Strings in even
positions represent arrow symbols. See examples below.
Notes
-----
Spaces are not striped from the returned strings (see examples below).
Examples
--------
>>> from pyrrole.core import _split_arrows
>>> _split_arrows('A -> B')
['A ', '->', ' B']
"""
return re.split(r"(->|<-|<=>)", value)
# TODO: improve the examples for this class.
class ChemicalSystem:
    """
    Abstraction for models consisting of a set of chemical equations.

    Parameters
    ----------
    values : `ChemicalEquation`, `str`, sequence of `ChemicalEquation` or `str`
        Definitions of chemical equations. This can either be a single value or
        an iterable and accepts anything that can become a `ChemicalEquation`,
        or strings with consecutive equations (see examples below).
    data : `pandas.DataFrame`, optional
        A `data` object, i.e., a table whose rows store information about
        chemical species, indexed by chemical species.
    check_data : `bool`, optional
        Whether to check data object for inconsistencies.

    Attributes
    ----------
    equations : iterable of `ChemicalEquation`
        Stored `ChemicalEquation` objects.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> ChemicalSystem("A <=> B -> 2 C")
    ChemicalSystem(["A <=> B", "B -> 2 C"])

    Single chemical equations are also accepted, in which case the resulting
    model has a single equation:

    >>> ChemicalSystem(ChemicalEquation("A -> B"))
    ChemicalSystem(["A -> B"])

    Iterables can mix chemical equation definitions of different types:

    >>> ChemicalSystem(["A -> B", "A -> C <- D",
    ...                 ChemicalEquation("E -> A")])
    ChemicalSystem(["A -> B", "A -> C", "D -> C", "E -> A"])
    """

    def __init__(self, values, data=None, check_data=True):
        """See the docstring for this class."""
        if isinstance(values, str):
            # A single string may chain several equations
            # (e.g. "A <=> B -> 2 C"); split and parse each one.
            self.equations = list(map(ChemicalEquation,
                                      _split_chemical_equations(values)))
        elif isinstance(values, (_Iterable, _Sequence)):
            self.equations = []
            for value in values:
                if isinstance(value, str):
                    # Each string element may itself chain equations.
                    self.equations.extend(map(ChemicalEquation,
                                              _split_chemical_equations(value)))
                else:
                    self.equations.append(ChemicalEquation(value))
        else:
            # Anything else ChemicalEquation accepts (e.g. a single
            # ChemicalEquation) becomes a one-equation system.
            self.equations = [ChemicalEquation(values)]
        self.data = data
        if self.data is not None:
            if not isinstance(self.data, _pd.DataFrame):
                self.data = _pd.DataFrame(self.data)
            if check_data:
                _check_data(self.data)
            for equation in self.equations:
                # TODO: make a test for this if.
                # Propagate system-level data only to equations that
                # don't already carry their own.
                if equation.data is None:
                    equation.data = self.data

    def __repr__(self):
        """Build a string representation of this object."""
        return "ChemicalSystem([{}])".format(
            ", ".join(['"' + str(equation) + '"'
                       for equation in self.equations]))

    def to_dataframe(self, *args, **kwargs):
        """
        Produce a data table with records for all chemical equations.

        All possible differences for numeric attributes are computed and stored
        as columns in the returned `pandas.DataFrame` object (see examples
        below), whose rows represent chemical equations.

        In terms of behavior, this method can be seen as the `ChemicalEquation`
        counterpart of `create_data`.

        Returns
        -------
        dataframe : `pandas.DataFrame`
            Data table with records of attribute differences for every single
            `ChemicalEquation` object in the model.

        Notes
        -----
        Further arguments and keywords are passed directly to
        `ChemicalEquation.to_series`.

        Examples
        --------
        >>> from pyrrole import ChemicalSystem
        >>> from pyrrole.atoms import create_data, read_cclib
        >>> data = create_data(
        ...     read_cclib("data/acetate/acetic_acid.out",
        ...                "AcOH(g)"),
        ...     read_cclib("data/acetate/acetic_acid@water.out",
        ...                "AcOH(aq)"))
        >>> data = data[["enthalpy", "entropy", "freeenergy"]]
        >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
        >>> equilibrium.to_dataframe()  # doctest: +NORMALIZE_WHITESPACE
                              enthalpy   entropy  freeenergy
        chemical_equation
        AcOH(g) <=> AcOH(aq) -0.010958 -0.000198   -0.010759
        """
        # One row per equation; to_series computes the attribute
        # differences for each.
        dataframe = _pd.DataFrame([equation.to_series(*args, **kwargs)
                                   for equation in self.equations])
        dataframe.index.name = "chemical_equation"
        return dataframe

    def to_digraph(self, *args, **kwargs):
        """
        Compute a directed graph for the chemical system.

        Returns
        -------
        digraph : `networkx.DiGraph`
            Graph nodes are reactants and/or products of chemical equations,
            while edges represent the equations themselves. Double ended edges
            are used to represent equilibria. Attributes are computed with
            `ChemicalEquation.to_series` for each equation (see examples
            below).

        Notes
        -----
        Further arguments and keywords are passed directly to
        `ChemicalEquation.to_series`.

        Examples
        --------
        >>> from pyrrole import ChemicalSystem
        >>> from pyrrole.atoms import create_data, read_cclib
        >>> data = create_data(
        ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
        ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
        >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
        >>> digraph = equilibrium.to_digraph()
        >>> sorted(digraph.nodes(data='freeenergy'))
        [('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)]
        >>> digraph.number_of_nodes()
        2
        >>> digraph.number_of_edges()
        2
        """
        # TODO: make test for this
        digraph = _nx.DiGraph()
        for equation in self.equations:
            # The canonical string "reactants arrow products" is split
            # back into its three pieces to name the graph nodes.
            reactants, arrow, products = [value.strip() for value
                                          in _split_arrows(str(equation))]
            try:
                attr = equation.to_series("reactants", *args,
                                          **kwargs).to_dict()
            except ValueError:
                # to_series raises ValueError when no data is attached;
                # fall back to an attribute-less node.
                attr = dict()
            digraph.add_node(reactants, **attr)
            try:
                attr = equation.to_series("products", *args,
                                          **kwargs).to_dict()
            except ValueError:
                attr = dict()
            digraph.add_node(products, **attr)
            try:
                attr = equation.to_series(*args, **kwargs).to_dict()
            except ValueError:
                attr = dict()
            digraph.add_edge(reactants, products, **attr)
            if arrow == '<=>':
                # Equilibria get a reverse edge with the same attributes.
                digraph.add_edge(products, reactants, **attr)
        return digraph
|
schneiderfelipe/pyrrole | pyrrole/core.py | ChemicalEquation.to_series | python | def to_series(self, only=None,
intensive_columns=["temperature", "pressure"],
check_data=True):
if self.data is None:
# TODO: should an empty Series be returned?
raise ValueError("data not defined")
# TODO: find a way to keep categorical columns. Keep if they match?
columns = self.data.select_dtypes('number').columns
intensive_columns = [column for column in columns
if column in intensive_columns]
extensive_columns = [column for column in columns
if column not in intensive_columns]
columns = extensive_columns + intensive_columns
if only is None:
species = self.species
elif only == "reactants":
species = sorted(self.reactants)
elif only == "products":
species = sorted(self.products)
else:
raise ValueError("only must be either 'reactants' or 'products' "
"('{}' given)".format(only))
if check_data:
_check_data(self.data.loc[species])
if all([s in self.data.index for s in species]):
series = (self.data.loc[species, extensive_columns]
.mul(self.coefficient, axis="index").sum("index"))
for column in intensive_columns:
vals = self.data[column].unique()
if len(vals) > 1:
raise ValueError("different values for {}: "
"{}".format(column, vals))
series[column] = vals[0]
else:
series = _pd.Series(_np.nan, index=columns)
if only is None:
name = self.__str__()
else:
coefficients = self.coefficient[species]
name = _get_chemical_equation_piece(species, coefficients)
if only == "reactants":
series[extensive_columns] = -series[extensive_columns]
# Avoid negative zero
# (see https://stackoverflow.com/a/11010791/4039050)
series = series + 0.
return series.rename(name) | Produce a data record for `ChemicalEquation`.
All possible linear differences for all numeric attributes are computed
and stored in the returned `pandas.Series` object (see examples below).
This allows for easy application and manipulation of
`Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical
equations (see examples below).
Parameters
----------
only : ``"reactants"``, ``"products"``, optional
Instead of the standard behaviour (difference of sums), sum numeric
attributes of either reactants or products only. If given, absolute
coefficients are used.
intensive_columns : iterable of `str`, optional
A set of column names representing intensive properties (e.g. bulk
properties) whose values are not summable. Those must be constant
throughout the chemical equation.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Returns
-------
series : `pandas.Series`
Data record of attribute differences, whose name is the canonical
string representation of the `ChemicalEquation` or, if `only` is
given, a string representing either reactants or products (see
examples below).
Raises
------
ValueError
Raised if `self.data` wasn't defined (e.g. is `None`), if `only`
is something other than ``"reactants"`` or ``"products"``, or if
two or more distinct values for an intensive property have been
found.
Examples
--------
>>> from pyrrole import ChemicalEquation
>>> from pyrrole.atoms import create_data, read_cclib
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out",
... "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out",
... "AcOH(aq)"))
>>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)",
... data)
>>> equilibrium.to_series()
charge 0.000000
enthalpy -0.010958
entropy -0.000198
freeenergy -0.010759
mult 0.000000
natom 0.000000
nbasis 0.000000
nmo 0.000000
pressure 1.000000
temperature 298.150000
Name: AcOH(g) <=> AcOH(aq), dtype: float64
Sums of either reactants or products can be computed:
>>> equilibrium.to_series("reactants")
charge 0.000000
enthalpy -228.533374
entropy 0.031135
freeenergy -228.564509
mult 1.000000
natom 8.000000
nbasis 68.000000
nmo 68.000000
pressure 1.000000
temperature 298.150000
Name: AcOH(g), dtype: float64 | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L416-L546 | [
"def _get_chemical_equation_piece(species_list, coefficients):\n \"\"\"\n Produce a string from chemical species and their coefficients.\n\n Parameters\n ----------\n species_list : iterable of `str`\n Iterable of chemical species.\n coefficients : iterable of `float`\n Nonzero stoichiometric coefficients. The length of `species_list` and\n `coefficients` must be the same. Negative values are made positive and\n zeros are ignored along with their respective species.\n\n Examples\n --------\n >>> from pyrrole.core import _get_chemical_equation_piece\n >>> _get_chemical_equation_piece([\"AcOH\"], [2])\n '2 AcOH'\n >>> _get_chemical_equation_piece([\"AcO-\", \"H+\"], [-1, -1])\n 'AcO- + H+'\n >>> _get_chemical_equation_piece(\"ABCD\", [-2, -1, 0, -1])\n '2 A + B + D'\n\n \"\"\"\n def _get_token(species, coefficient):\n if coefficient == 1:\n return '{}'.format(species)\n else:\n return '{:g} {}'.format(coefficient, species)\n\n bag = []\n for species, coefficient in zip(species_list, coefficients):\n if coefficient < 0:\n coefficient = -coefficient\n if coefficient > 0:\n bag.append(_get_token(species, coefficient))\n return '{}'.format(' + '.join(bag))\n",
"def _check_data(data):\n \"\"\"\n Check a data object for inconsistencies.\n\n Parameters\n ----------\n data : `pandas.DataFrame`\n A `data` object, i.e., a table whose rows store information about\n chemical species, indexed by chemical species.\n\n Warns\n -----\n UserWarning\n Warned if a ground state species has one or more imaginary vibrational\n frequencies, or if a transition state species has zero, two or more\n imaginary vibrational frequencies.\n\n Examples\n --------\n >>> import pandas as pd\n >>> from pyrrole.core import _check_data\n >>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]},\n ... {'name': 'B', 'vibfreqs': [0., -1., 2.]},\n ... {'name': 'C', 'vibfreqs': [0., -1., -2.]},\n ... {'name': 'A#', 'vibfreqs': [0., 1., 2.]},\n ... {'name': 'C#', 'vibfreqs': [0., -2., -1.]},\n ... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}])\n ... .set_index('name'))\n >>> _check_data(data)\n\n \"\"\"\n if \"vibfreqs\" in data.columns:\n for species in data.index:\n vibfreqs = data.loc[species, \"vibfreqs\"]\n nimagvibfreqs = _np.sum(_np.array(vibfreqs) < 0)\n if species[-1] == '#' and nimagvibfreqs != 1:\n _warnings.warn(\"'{}' should have 1 imaginary vibfreqs but {} \"\n \"found\".format(species, nimagvibfreqs))\n elif species[-1] != '#' and nimagvibfreqs != 0:\n _warnings.warn(\"'{}' should have no imaginary vibfreqs but {} \"\n \"found\".format(species, nimagvibfreqs))\n",
"def __str__(self):\n \"\"\"Build a unique, parsable string for this chemical equation.\"\"\"\n products = []\n reactants = []\n for species, coefficient in sorted(self.coefficient.items()):\n if coefficient < 0:\n bag = reactants\n elif coefficient > 0:\n bag = products\n else:\n continue\n bag.append(_get_chemical_equation_piece([species], [coefficient]))\n\n return '{} {} {}'.format(' + '.join(reactants), self.arrow,\n ' + '.join(products))\n"
] | class ChemicalEquation:
"""
An object for manipulating chemical equations in a way similar to vectors.
This class provides an abstraction for chemical equations, generalizing
equilibria, reactions, phase transitions and others. Conceptually,
`ChemicalEquation` works like a vector that can be manipulated and operated
upon. This allows the calculation of reaction free energies from data about
chemical species, for instance.
Parameters
----------
value : `str`, `ChemicalEquation`, mapping or `Series`
A string in chemical equation mini-language (see notes below), another
`ChemicalEquation` object (which is copied) or either a mapping or
`Series` from chemical species (`str`) to *signed* stoichiometric
coefficients. See examples below.
data : `pandas.DataFrame`, optional
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
arrow : `str`, optional
Arrow symbol to use if `value` is a mapping or `Series`,
ignored otherwise.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Attributes
----------
arrow : `str`
Arrow symbol that separates `reactants` and `products` in
a chemical equation. It is always equal to either ``'->'`` or
``'<=>'``.
coefficient : mapping of `str` to `float`
A mapping relating each `species` to its signed
stoichiometric coefficient. Values are positive (negative) for
`products` (`reactants`).
products : iterable of `str`
Chemical product species, i.e., those on the right-hand side of the
equation.
reactants : iterable of `str`
Chemical reactant species, i.e., those on the left-hand side of the
equation.
species : iterable of `str`
All species, i.e., union of all `reactants` and `products`.
Raises
------
ValueError
Raised if `arrow` is given or calculated to be something other than
``"->"`` or ``"<=>"`` (``"<-"`` is equivalent to ``"->"`` if
chemical equation is given as `str`).
TypeError
Raised if `value` is something other than `str`, mapping or
`pandas.Series`.
Notes
-----
Chemical equations in pyrrole are defined according to the following
mini-language (white spaces are ignored)::
equation ::= reactants arrow products
reactants, products ::= coefficient ['*'] species
['+' coefficient ['*'] species]*
coefficient ::= [integers] (defaults to 1)
species ::= mix of printable characters
arrow ::= '->' | '<-' | '<=>'
Examples
--------
>>> from pyrrole import ChemicalEquation
>>> ChemicalEquation('H2O <=> H+ + OH-')
ChemicalEquation('H2O <=> H+ + OH-')
Chemical equations are stored in the usual order, even when given inverted
(see notes above). This means that, although understood, ``'<-'`` is never
a value of `arrow`:
>>> equation = ChemicalEquation('2 CO <- CO2 + C')
>>> equation.arrow
'->'
>>> equation # stored in the usual order
ChemicalEquation('C + CO2 -> 2 CO')
Chemical species that appear in both reactants and products are simplified:
>>> ChemicalEquation('A + B -> 2 A')
ChemicalEquation('B -> A')
Chemical equations can be manipulated as entities in a vector space. For
instance, they can be added and subtracted, ...
>>> (-ChemicalEquation('AgCl(s) <=> Ag+(aq) + Cl-(aq)')
... +ChemicalEquation('NaCl(s) <=> Na+(aq) + Cl-(aq)')
... +ChemicalEquation('AgNO3(s) <=> Ag+(aq) + NO3-(aq)')
... -ChemicalEquation('NaNO3(s) <=> Na+(aq) + NO3-(aq)'))
ChemicalEquation('AgNO3(s) + NaCl(s) <=> AgCl(s) + NaNO3(s)')
... and also divided and multiplied by numbers:
>>> 10 * ChemicalEquation('CH4 + 2 O2 -> CO2 + 2 H2O') / 2
ChemicalEquation('5 CH4 + 10 O2 -> 5 CO2 + 10 H2O')
Stoichiometric coefficients are always *signed*, that is, positive for
`products` and negative for `reactants`:
>>> equation = ChemicalEquation('NaCl + AgNO3 -> NaNO3 + AgCl(v)')
>>> equation.coefficient['AgCl(v)'] # a product
1.0
>>> equation.coefficient['AgNO3'] # a reactant
-1.0
Convenient attributes make it easy to obtain iterables of chemical species.
>>> "AgCl(v)" in equation.reactants
False
>>> for species in equation.species:
... print(species)
AgCl(v)
AgNO3
NaCl
NaNO3
Although not recomended for everyday usage, a chemical equation can also be
created from a complete set of stoichiometric coefficients. This makes it
easy to convert other objects into a `ChemicalEquation` object:
>>> ChemicalEquation({'NaCl': -1, 'AgNO3': -1,
... 'NaNO3': 1, 'AgCl(v)': 1}, arrow='->')
ChemicalEquation('AgNO3 + NaCl -> AgCl(v) + NaNO3')
"""
def __init__(self, value, data=None, arrow=None, check_data=True):
    """See the docstring for this class."""
    # TODO: make tests for usage of data.
    if isinstance(value, ChemicalEquation):
        # Copy constructor: reuse arrow, coefficients and (unless
        # overridden) the attached data table.
        self.arrow = value.arrow
        self.coefficient = value.coefficient
        # TODO: make a test for this if.
        if data is None:
            data = value.data
    elif isinstance(value, str):
        # Parse the chemical-equation mini-language. By convention,
        # products get positive and reactants negative coefficients.
        parsed = _parse_chemical_equation(value)
        coefficient_products = _pd.Series({
            product['species']: +product['coefficient']
            for product in parsed['products']})
        coefficient_reactants = _pd.Series({
            reactant['species']: -reactant['coefficient']
            for reactant in parsed['reactants']})
        self.arrow = parsed['arrow']
        # add(..., fill_value=0) also cancels species appearing on both
        # sides (e.g. "A + B -> 2 A" simplifies to "B -> A").
        self.coefficient = coefficient_reactants.add(coefficient_products,
                                                     fill_value=0)
    elif isinstance(value, (_Mapping, _pd.Series)):
        # TODO: make test.
        # A raw coefficient mapping carries no arrow, so one must be
        # supplied explicitly.
        if arrow not in {'->', '<=>'}:
            raise ValueError("arrow must be either '->' or '<=>' ('{}' "
                             "given)".format(arrow))
        self.arrow = arrow
        self.coefficient = _pd.Series(value)
    else:
        raise TypeError("value must be either str, mapping or "
                        "Series ('{}' "
                        "given)".format(type(value).__name__))
    # Name the Series after the canonical equation string.
    self.coefficient = self.coefficient.rename(self.__str__())
    self.data = data
    if self.data is not None:
        if not isinstance(self.data, _pd.DataFrame):
            self.data = _pd.DataFrame(self.data)
        if check_data:
            # Only validate rows for species actually in this equation.
            _check_data(self.data.loc[self.species])
def _get_products(self):
    # Species with positive coefficients (right-hand side).
    return self.coefficient[self.coefficient > 0].index
products = property(_get_products)

def _get_reactants(self):
    # Species with negative coefficients (left-hand side).
    return self.coefficient[self.coefficient < 0].index
reactants = property(_get_reactants)

def _get_species(self):
    # Every species taking part in the equation, either side.
    return self.coefficient.index
species = property(_get_species)
def __add__(self, other):
    """
    Add chemical equations as if they were vectors.

    Coefficients are summed species-wise (missing species count as
    zero); the left operand's arrow symbol is kept.

    Parameters
    ----------
    other : `ChemicalEquation`
        Equation to add to this one.

    Returns
    -------
    `ChemicalEquation`
        The summed equation.
    """
    if not isinstance(other, ChemicalEquation):
        # Return NotImplemented (rather than raising NotImplementedError,
        # which is meant for abstract methods) so Python can try the
        # reflected operation and ultimately raise a proper TypeError.
        return NotImplemented
    return ChemicalEquation(self.coefficient.add(other.coefficient,
                                                 fill_value=0),
                            arrow=self.arrow)
def __sub__(self, other):
    """
    Subtract chemical equations as if they were vectors.

    Coefficients are subtracted species-wise (missing species count as
    zero); the left operand's arrow symbol is kept.

    Parameters
    ----------
    other : `ChemicalEquation`
        Equation to subtract from this one.

    Returns
    -------
    `ChemicalEquation`
        The difference equation.
    """
    if not isinstance(other, ChemicalEquation):
        # NotImplemented lets Python try the reflected operation and
        # raise a proper TypeError; NotImplementedError would abort it.
        return NotImplemented
    return ChemicalEquation(self.coefficient.sub(other.coefficient,
                                                 fill_value=0),
                            arrow=self.arrow)
def __mul__(self, other):
    """
    Multiply chemical equation by a number.

    Parameters
    ----------
    other : number
        Scalar by which every stoichiometric coefficient is multiplied.

    Returns
    -------
    `ChemicalEquation`
        The scaled equation (arrow symbol unchanged).
    """
    if not isinstance(other, _numbers.Number):
        # NotImplemented (not NotImplementedError) is the documented
        # protocol for "this operand type is unsupported".
        return NotImplemented
    return ChemicalEquation(self.coefficient.mul(other, fill_value=0),
                            arrow=self.arrow)
# Scalar multiplication commutes, so reuse __mul__ for "number * equation".
__rmul__ = __mul__
def __truediv__(self, other):
    """
    Divide chemical equation by a number.

    Parameters
    ----------
    other : number
        Scalar by which every stoichiometric coefficient is divided.

    Returns
    -------
    `ChemicalEquation`
        The scaled equation (arrow symbol unchanged).
    """
    if not isinstance(other, _numbers.Number):
        # NotImplemented (not NotImplementedError) is the documented
        # protocol for "this operand type is unsupported".
        return NotImplemented
    return ChemicalEquation(self.coefficient.div(other, fill_value=0),
                            arrow=self.arrow)

def __div__(self, other):
    """Ensure "true" division always takes place (Python 2 hook)."""
    return self.__truediv__(other)
def __pos__(self):
    """Make unary plus operator be equivalent to multiply by one."""
    # Returning self (no copy) is fine as long as equations are treated
    # as immutable, which the rest of the class assumes.
    return self
def __neg__(self):
    """Make unary minus operator be equivalent to multiply by minus one."""
    # Delegate to scalar multiplication so negation and ``* -1`` can
    # never drift apart.
    return self.__mul__(-1)
def __eq__(self, other):
    """
    Compare chemical equations in terms of coefficients.

    Two equations are equal when the species-wise difference of their
    coefficients is zero everywhere.
    """
    if not isinstance(other, ChemicalEquation):
        # Comparing against a foreign type must not raise: returning
        # NotImplemented lets Python fall back to the other operand's
        # __eq__ and, ultimately, evaluate to False.
        return NotImplemented
    diff = self.__sub__(other)
    return all(diff.coefficient == 0)
def __repr__(self):
    """Build a string representation of this object."""
    # The canonical equation string makes the repr round-trippable.
    equation_text = str(self)
    return "ChemicalEquation('" + equation_text + "')"
def __str__(self):
    """Build a unique, parsable string for this chemical equation."""
    left_terms = []
    right_terms = []
    # Sorting the (species, coefficient) pairs makes the output
    # deterministic and therefore usable as a canonical name.
    for name, coeff in sorted(self.coefficient.items()):
        # Negative coefficients are reactants, positive ones products;
        # zero (and NaN) entries are omitted entirely.
        if coeff < 0:
            left_terms.append(
                _get_chemical_equation_piece([name], [coeff]))
        elif coeff > 0:
            right_terms.append(
                _get_chemical_equation_piece([name], [coeff]))
    return '{} {} {}'.format(' + '.join(left_terms), self.arrow,
                             ' + '.join(right_terms))
|
schneiderfelipe/pyrrole | pyrrole/core.py | ChemicalSystem.to_dataframe | python | def to_dataframe(self, *args, **kwargs):
dataframe = _pd.DataFrame([equation.to_series(*args, **kwargs)
for equation in self.equations])
dataframe.index.name = "chemical_equation"
return dataframe | Produce a data table with records for all chemical equations.
All possible differences for numeric attributes are computed and stored
as columns in the returned `pandas.DataFrame` object (see examples
below), whose rows represent chemical equations.
In terms of behavior, this method can be seen as the `ChemicalEquation`
counterpart of `create_data`.
Returns
-------
dataframe : `pandas.DataFrame`
Data table with records of attribute differences for every single
`ChemicalEquation` object in the model.
Notes
-----
Further arguments and keywords are passed directly to
`ChemicalEquation.to_series`.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out",
... "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out",
... "AcOH(aq)"))
>>> data = data[["enthalpy", "entropy", "freeenergy"]]
>>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
>>> equilibrium.to_dataframe() # doctest: +NORMALIZE_WHITESPACE
enthalpy entropy freeenergy
chemical_equation
AcOH(g) <=> AcOH(aq) -0.010958 -0.000198 -0.010759 | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L694-L736 | null | class ChemicalSystem:
"""
Abstraction for models consisting of a set of chemical equations.
Parameters
----------
values : `ChemicalEquation`, `str`, sequence of `ChemicalEquation` or `str`
Definitions of chemical equations. This can either be a single value or
an iterable and accepts anything that can become a `ChemicalEquation`,
or strings with consecutive equations (see examples below).
data : `pandas.DataFrame`, optional
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Attributes
----------
equations : iterable of `ChemicalEquation`
Stored `ChemicalEquation` objects.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> ChemicalSystem("A <=> B -> 2 C")
ChemicalSystem(["A <=> B", "B -> 2 C"])
Single chemical equations are also accepted, in which case the resulting
model has a single equation:
>>> ChemicalSystem(ChemicalEquation("A -> B"))
ChemicalSystem(["A -> B"])
Iterables can mix chemical equation definitions of different types:
>>> ChemicalSystem(["A -> B", "A -> C <- D",
... ChemicalEquation("E -> A")])
ChemicalSystem(["A -> B", "A -> C", "D -> C", "E -> A"])
"""
def __init__(self, values, data=None, check_data=True):
"""See the docstring for this class."""
if isinstance(values, str):
self.equations = list(map(ChemicalEquation,
_split_chemical_equations(values)))
elif isinstance(values, (_Iterable, _Sequence)):
self.equations = []
for value in values:
if isinstance(value, str):
self.equations.extend(map(ChemicalEquation,
_split_chemical_equations(value)))
else:
self.equations.append(ChemicalEquation(value))
else:
self.equations = [ChemicalEquation(values)]
self.data = data
if self.data is not None:
if not isinstance(self.data, _pd.DataFrame):
self.data = _pd.DataFrame(self.data)
if check_data:
_check_data(self.data)
for equation in self.equations:
# TODO: make a test for this if.
if equation.data is None:
equation.data = self.data
def __repr__(self):
"""Build a string representation of this object."""
return "ChemicalSystem([{}])".format(
", ".join(['"' + str(equation) + '"'
for equation in self.equations]))
def to_digraph(self, *args, **kwargs):
"""
Compute a directed graph for the chemical system.
Returns
-------
digraph : `networkx.DiGraph`
Graph nodes are reactants and/or products of chemical equations,
while edges represent the equations themselves. Double ended edges
are used to represent equilibria. Attributes are computed with
`ChemicalEquation.to_series` for each equation (see examples
below).
Notes
-----
Further arguments and keywords are passed directly to
`ChemicalEquation.to_series`.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
>>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
>>> digraph = equilibrium.to_digraph()
>>> sorted(digraph.nodes(data='freeenergy'))
[('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)]
>>> digraph.number_of_nodes()
2
>>> digraph.number_of_edges()
2
"""
# TODO: make test for this
digraph = _nx.DiGraph()
for equation in self.equations:
reactants, arrow, products = [value.strip() for value
in _split_arrows(str(equation))]
try:
attr = equation.to_series("reactants", *args,
**kwargs).to_dict()
except ValueError:
attr = dict()
digraph.add_node(reactants, **attr)
try:
attr = equation.to_series("products", *args,
**kwargs).to_dict()
except ValueError:
attr = dict()
digraph.add_node(products, **attr)
try:
attr = equation.to_series(*args, **kwargs).to_dict()
except ValueError:
attr = dict()
digraph.add_edge(reactants, products, **attr)
if arrow == '<=>':
digraph.add_edge(products, reactants, **attr)
return digraph
|
schneiderfelipe/pyrrole | pyrrole/core.py | ChemicalSystem.to_digraph | python | def to_digraph(self, *args, **kwargs):
# TODO: make test for this
digraph = _nx.DiGraph()
for equation in self.equations:
reactants, arrow, products = [value.strip() for value
in _split_arrows(str(equation))]
try:
attr = equation.to_series("reactants", *args,
**kwargs).to_dict()
except ValueError:
attr = dict()
digraph.add_node(reactants, **attr)
try:
attr = equation.to_series("products", *args,
**kwargs).to_dict()
except ValueError:
attr = dict()
digraph.add_node(products, **attr)
try:
attr = equation.to_series(*args, **kwargs).to_dict()
except ValueError:
attr = dict()
digraph.add_edge(reactants, products, **attr)
if arrow == '<=>':
digraph.add_edge(products, reactants, **attr)
return digraph | Compute a directed graph for the chemical system.
Returns
-------
digraph : `networkx.DiGraph`
Graph nodes are reactants and/or products of chemical equations,
while edges represent the equations themselves. Double ended edges
are used to represent equilibria. Attributes are computed with
`ChemicalEquation.to_series` for each equation (see examples
below).
Notes
-----
Further arguments and keywords are passed directly to
`ChemicalEquation.to_series`.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
>>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
>>> digraph = equilibrium.to_digraph()
>>> sorted(digraph.nodes(data='freeenergy'))
[('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)]
>>> digraph.number_of_nodes()
2
>>> digraph.number_of_edges()
2 | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L738-L801 | [
"def _split_arrows(value):\n \"\"\"\n Split a string with sequential chemical equations into separate strings.\n\n Strings in odd positions in the returned iterable represent sums of\n chemical species (with possible stoichiometric coefficients). Strings in\n even positions represent arrow symbols. See examples below.\n\n Parameters\n ----------\n value : `str`\n A string with sequential chemical equations in the mini-language (see\n notes on `ChemicalEquation`).\n\n Returns\n -------\n iterable of `str`\n An iterable of strings. Odd positions represent sums of chemical\n species (with possible stoichiometric coefficients). Strings in even\n positions represent arrow symbols. See examples below.\n\n Notes\n -----\n Spaces are not striped from the returned strings (see examples below).\n\n Examples\n --------\n >>> from pyrrole.core import _split_arrows\n >>> _split_arrows('A -> B')\n ['A ', '->', ' B']\n\n \"\"\"\n return re.split(r\"(->|<-|<=>)\", value)\n"
] | class ChemicalSystem:
"""
Abstraction for models consisting of a set of chemical equations.
Parameters
----------
values : `ChemicalEquation`, `str`, sequence of `ChemicalEquation` or `str`
Definitions of chemical equations. This can either be a single value or
an iterable and accepts anything that can become a `ChemicalEquation`,
or strings with consecutive equations (see examples below).
data : `pandas.DataFrame`, optional
A `data` object, i.e., a table whose rows store information about
chemical species, indexed by chemical species.
check_data : `bool`, optional
Whether to check data object for inconsistencies.
Attributes
----------
equations : iterable of `ChemicalEquation`
Stored `ChemicalEquation` objects.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> ChemicalSystem("A <=> B -> 2 C")
ChemicalSystem(["A <=> B", "B -> 2 C"])
Single chemical equations are also accepted, in which case the resulting
model has a single equation:
>>> ChemicalSystem(ChemicalEquation("A -> B"))
ChemicalSystem(["A -> B"])
Iterables can mix chemical equation definitions of different types:
>>> ChemicalSystem(["A -> B", "A -> C <- D",
... ChemicalEquation("E -> A")])
ChemicalSystem(["A -> B", "A -> C", "D -> C", "E -> A"])
"""
def __init__(self, values, data=None, check_data=True):
"""See the docstring for this class."""
if isinstance(values, str):
self.equations = list(map(ChemicalEquation,
_split_chemical_equations(values)))
elif isinstance(values, (_Iterable, _Sequence)):
self.equations = []
for value in values:
if isinstance(value, str):
self.equations.extend(map(ChemicalEquation,
_split_chemical_equations(value)))
else:
self.equations.append(ChemicalEquation(value))
else:
self.equations = [ChemicalEquation(values)]
self.data = data
if self.data is not None:
if not isinstance(self.data, _pd.DataFrame):
self.data = _pd.DataFrame(self.data)
if check_data:
_check_data(self.data)
for equation in self.equations:
# TODO: make a test for this if.
if equation.data is None:
equation.data = self.data
def __repr__(self):
"""Build a string representation of this object."""
return "ChemicalSystem([{}])".format(
", ".join(['"' + str(equation) + '"'
for equation in self.equations]))
def to_dataframe(self, *args, **kwargs):
"""
Produce a data table with records for all chemical equations.
All possible differences for numeric attributes are computed and stored
as columns in the returned `pandas.DataFrame` object (see examples
below), whose rows represent chemical equations.
In terms of behavior, this method can be seen as the `ChemicalEquation`
counterpart of `create_data`.
Returns
-------
dataframe : `pandas.DataFrame`
Data table with records of attribute differences for every single
`ChemicalEquation` object in the model.
Notes
-----
Further arguments and keywords are passed directly to
`ChemicalEquation.to_series`.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out",
... "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out",
... "AcOH(aq)"))
>>> data = data[["enthalpy", "entropy", "freeenergy"]]
>>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
>>> equilibrium.to_dataframe() # doctest: +NORMALIZE_WHITESPACE
enthalpy entropy freeenergy
chemical_equation
AcOH(g) <=> AcOH(aq) -0.010958 -0.000198 -0.010759
"""
dataframe = _pd.DataFrame([equation.to_series(*args, **kwargs)
for equation in self.equations])
dataframe.index.name = "chemical_equation"
return dataframe
|
schneiderfelipe/pyrrole | pyrrole/atoms.py | read_cclib | python | def read_cclib(value, name=None):
if isinstance(value, _logfileparser.Logfile):
# TODO: test this case.
jobfilename = value.filename
ccdata = value.parse()
elif isinstance(value, _data.ccData):
# TODO: test this case.
jobfilename = None
ccdata = value
else:
# TODO: test this case.
ccobj = _cclib.ccopen(value)
jobfilename = ccobj.filename
ccdata = ccobj.parse()
if name is None:
name = jobfilename
attributes = ccdata.getattributes()
attributes.update({
'name': name,
'jobfilename': jobfilename,
})
return Atoms(attributes) | Create an `Atoms` object from data attributes parsed by cclib.
`cclib <https://cclib.github.io/>`_ is an open source library, written in
Python, for parsing and interpreting the results (logfiles) of
computational chemistry packages.
Parameters
----------
value : `str`, `cclib.parser.logfileparser.Logfile`, `cclib.parser.data.ccData`
A path to a logfile, or either a cclib job object (i.e., from
`cclib.ccopen`), or cclib data object (i.e., from ``job.parse()``).
name : `str`, optional
Name for chemical species. If not given, this is set to the logfile
path, if known. Chemical equations mention this name when refering to
the returned object.
Returns
-------
molecule : `Atoms`
All attributes obtainable by cclib are made available as attributes in
the returned object.
Examples
--------
>>> from pyrrole.atoms import read_cclib
>>> molecule = read_cclib('data/pyrrolate/pyrrole.out')
>>> molecule.atomnos
array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32)
>>> molecule.charge
0 | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L491-L548 | null | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for handling molecular structures and their attributes."""
import os as _os
import re as _re
import numpy as _np
import pandas as _pd
import cclib as _cclib
import pybel as _pb
import openbabel as _ob
from cclib.parser import data as _data
from cclib.parser import logfileparser as _logfileparser
from cclib.bridge.cclib2openbabel import makecclib as _makecclib
from cclib.bridge.cclib2openbabel import makeopenbabel as _makeopenbabel
class Atoms:
"""
Abstraction of fixed molecular structures as sequences of atoms.
Properties are stored in `Atoms.attributes`, which follow naming and unit
conventions of the `cclib project <https://cclib.github.io/>`_ as close as
possible (see `read_cclib` for producing `Atoms` out of cclib objects or
logfiles).
Parameters
----------
attributes : mapping or `pandas.Series`
A mapping containing data attributes whose keys follow the naming
conventions of the `cclib project <https://cclib.github.io/>`_ for
properties as close as possible. Any mapping of attributes that can be
converted to a `pandas.Series` object is accepted.
Attributes
----------
attribute : `pandas.Series`
Molecular properties. Keys follow the naming practice of the `cclib
project <https://cclib.github.io/>`_ for properties as close as
possible. The object is named during initialization after `Atoms.name`
(see notes below).
Notes
-----
This class is intended to be used directly in very simple cases only. For
complex situations, use factory functions such as `read_cclib`.
`Atoms.name` is set to `Atoms.jobfilename` if `Atoms.name` does not exist.
Examples
--------
An `Atoms` object can be instantiated by giving any mapping of
attributes that can be converted to a `pandas.Series` object:
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
Attributes can be accessed by their keys, as given during initialization:
>>> dioxygen.mult
3
>>> dioxygen.atomcoords[-1][1, 2]
1.21
Two `Atoms` objects are considered equal if their attributes match:
>>> dioxygen_copy = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
If printed, `Atoms` objects produce coordinates in
`xyz format <https://en.wikipedia.org/wiki/XYZ_file_format>`_:
>>> from pyrrole.atoms import read_pybel
>>> print(read_pybel("data/pyrrolate/pyrrole.xyz", "pyrrole"))
C -1.11468 0.00000 0.33059
C -0.70848 0.00000 -0.97739
C 0.70848 0.00000 -0.97739
C 1.11468 0.00000 0.33059
N 0.00000 0.00000 1.11189
H -2.10267 0.00000 0.75908
H -1.35484 0.00000 -1.83956
H 1.35484 0.00000 -1.83956
H 2.10267 0.00000 0.75908
H 0.00000 0.00000 2.11476
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
"""
    def __init__(self, attributes):
        """See the docstring for this class."""
        # TODO: make idempotent (allow receiving Atoms objects).
        self.attributes = _pd.Series(attributes)
        # TODO: properly document used attributes.
        # Name the underlying series after the molecule: prefer an explicit
        # "name" attribute and fall back to the originating job file name.
        if "name" in self.attributes:
            # TODO: make a test for this if.
            self.attributes.rename(self.name, inplace=True)
        elif "jobfilename" in self.attributes:
            self.attributes.rename(self.jobfilename, inplace=True)
        if "atomcoords" in self.attributes:
            # Normalize coordinates so atomcoords[-1] is always a 2-D
            # geometry: a single geometry gets wrapped in a length-1 list.
            self.attributes["atomcoords"] = _np.asanyarray(self.atomcoords)
            if len(self.atomcoords.shape) < 3:
                self.attributes["atomcoords"] = [self.atomcoords]
        self.name = self.attributes.name
def __getattr__(self, value):
"""Wrap `Atoms.value` into `Atoms.attributes['value']`."""
return self.attributes[value]
def __eq__(self, other):
"""Compare molecules in terms of attributes."""
# Expensive but definitely correct.
return self.attributes.to_json() == other.attributes.to_json()
def __str__(self):
"""Build a string with Cartesian coordinates for this molecule."""
# TODO: make a read_string that is able to read the results of this
# function.
return self.to_string('xyz')
def split(self, pattern=None):
r"""
Break molecule up into constituent fragments.
By default (i.e., if `pattern` is `None`), each disconnected fragment
is returned as a separate new `Atoms` object. This uses OpenBabel
(through `OBMol.Separate`) and might not preserve atom order, depending
on your version of the library.
Parameters
----------
pattern : iterable of iterable of `int`, optional
Groupings of atoms into molecule fragments. Each element of
`pattern` should be an iterable whose members are atom indices (see
example below).
Returns
-------
fragments : iterable of `Atoms`
Examples
--------
>>> from pyrrole import atoms
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
"Natural fragmentation" is the default behaviour, i.e. all disconnected
fragments are returned:
>>> for frag in water_dimer.split():
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
O 1.21457 0.03172 -0.27623
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
Precise fragment grouping can be achieved by explicitly indicating
which atoms belong to which fragments:
>>> for frag in water_dimer.split([range(3), (5, 4), [3]]):
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
O 1.21457 0.03172 -0.27623
<BLANKLINE>
"""
molecule_pybel = self.to_pybel()
if pattern is None:
fragments = [read_pybel(frag)
for frag in molecule_pybel.OBMol.Separate()]
else:
fragments = []
for group in pattern:
fragment_obmol = _pb.ob.OBMol()
for i in group:
obatom = molecule_pybel.OBMol.GetAtomById(i)
fragment_obmol.InsertAtom(obatom)
fragments.append(fragment_obmol)
fragments = [read_pybel(frag) for frag in fragments]
return fragments
def to_series(self):
"""
Produce a data record of attributes as a `pandas.Series` object.
This method is useful for producing many data tables in pyrrole. See
`ChemicalEquation.to_series` and `ChemicalSystem.to_dataframe` for
more.
Returns
-------
series : `pandas.Series`
Data record of attributes.
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'mult': 3,
... 'name': 'dioxygen'})
>>> dioxygen.to_series()
atomcoords [[[0.0, 0.0, 0.0], [0.0, 0.0, 1.21]]]
atomnos [8, 8]
mult 3
name dioxygen
Name: dioxygen, dtype: object
This method can be used to produce a copy of an `Atoms` object:
>>> dioxygen_copy = Atoms(dioxygen.to_series())
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
"""
return self.attributes
def to_pybel(self):
"""
Produce a Pybel Molecule object.
It is based on the capabilities of OpenBabel through Pybel. The present
object must have at least `atomcoords`, `atomnos`, `charge` and `mult`
defined.
Returns
-------
`pybel.Molecule`
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
>>> mol = dioxygen.to_pybel()
>>> mol.molwt
31.9988
"""
# TODO: This only exports last geometry by default.
obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge,
self.mult)
title = self.name or ""
if 'scfenergies' in self.attributes:
title += ", scfenergy={} eV".format(self.scfenergies[-1])
obmol.SetTitle(title)
# TODO: make a test for this function.
return _pb.Molecule(obmol)
def to_string(self, format="smi", dialect=None, with_header=False,
fragment_id=None, constraints=None):
r"""
Produce a string representation of the molecule.
This function wraps and extends the functionality of OpenBabel (which
is accessible through `to_pybel`). Many chemical formats can thus be
output (see the `pybel.outformats` variable for a list of available
output formats).
Parameters
----------
format : `str`, optional
Chemical file format of the returned string representation (see
examples below).
dialect : `str`, optional
Format dialect. This encompasses enhancements provided for some
subformats. If ``"standard"`` or `None`, the output provided by
OpenBabel is used with no or minimal modification. See notes below.
with_header : `bool`, optional
If `format` encompasses a header, allow it in the returned string.
This would be, for instance, the first two lines of data for
``format="xyz"`` (see examples below). This might not work with all
dialects and/or formats.
fragment_id : `str`, optional
Indentify molecular fragments (see examples below). This might not
work with all dialects and/or formats.
constraints : iterable object of `int`
Set cartesian constraints for selected atoms (see examples below).
This might not work with all dialects and/or formats.
Returns
-------
`str`
String representation of molecule in the specified format and/or
dialect.
Raises
------
KeyError
Raised if `dialect` value is currently not supported or if
`fragment_id` is given with a currently not supported `dialect`
value.
Notes
-----
Format dialects are subformats that support extended functionality.
Currently supported dialects are:
- for ``format="xyz"``:
- ``"ADF"``, ``"ORCA"``.
Examples
--------
>>> from pyrrole import atoms
>>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
By default, a SMILES string is returned:
>>> dioxygen.to_string()
'O=O\tdioxygen'
Cartesian coordinates can be produced with ``format="xyz"``, which is
equivalent to printing an `Atoms` instance:
>>> print(dioxygen.to_string("xyz"))
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Header lines are disabled by default (for ``format="xyz"``, for
example, the header stores the number of atoms in the molecule and a
comment or title line), but this can be reversed with
``with_header=True``:
>>> print(dioxygen.to_string("xyz", with_header=True))
2
dioxygen
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Coordinates for packages such as GAMESS and MOPAC are also supported:
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
>>> print(water_dimer.to_string("gamin"))
O 8.0 -1.6289300000 -0.0413800000 0.3713700000
H 1.0 -0.6980300000 -0.0916800000 0.0933700000
H 1.0 -2.0666300000 -0.7349800000 -0.1366300000
O 8.0 1.2145700000 0.0317200000 -0.2762300000
H 1.0 1.4492700000 0.9167200000 -0.5857300000
H 1.0 1.7297700000 -0.0803800000 0.5338700000
>>> print(water_dimer.to_string("mop"))
O -1.62893 1 -0.04138 1 0.37137 1
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 1 0.03172 1 -0.27623 1
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Constraining of cartesian coordinates works with MOPAC format:
>>> print(water_dimer.to_string("mop", constraints=(0, 3)))
O -1.62893 0 -0.04138 0 0.37137 0
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 0 0.03172 0 -0.27623 0
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Fragment identification is supported for ``"ADF"`` and ``"ORCA"``
dialects:
>>> print(water_dimer.to_string("xyz", dialect="ADF",
... fragment_id="dimer"))
O -1.62893 -0.04138 0.37137 f=dimer
H -0.69803 -0.09168 0.09337 f=dimer
H -2.06663 -0.73498 -0.13663 f=dimer
O 1.21457 0.03172 -0.27623 f=dimer
H 1.44927 0.91672 -0.58573 f=dimer
H 1.72977 -0.08038 0.53387 f=dimer
>>> print(water_dimer.to_string("xyz", dialect="ORCA",
... fragment_id=1))
O(1) -1.62893 -0.04138 0.37137
H(1) -0.69803 -0.09168 0.09337
H(1) -2.06663 -0.73498 -0.13663
O(1) 1.21457 0.03172 -0.27623
H(1) 1.44927 0.91672 -0.58573
H(1) 1.72977 -0.08038 0.53387
"""
s = self.to_pybel().write(format).strip()
if dialect is None:
dialect = "standard"
dialect = dialect.lower()
if format == "xyz":
natom, comment, body = s.split("\n", 2)
if dialect in {"adf", "orca", "standard"}:
if fragment_id is not None:
if dialect == "adf":
body = \
"\n".join(["{} f={}".format(line,
fragment_id)
for line in body.split("\n")])
elif dialect == "orca":
fragment_id = "({})".format(fragment_id)
body = \
"\n".join([line.replace(" " * len(fragment_id),
fragment_id, 1)
for line in body.split("\n")])
else:
raise KeyError("fragment_id currently not supported "
"with dialect '{}'".format(dialect))
else:
raise KeyError("dialect '{}' currently not "
"supported".format(dialect))
if with_header:
s = "\n".join([natom, comment, body])
else:
s = body
elif format == "gamin":
lines = s.split("\n")
begin = "\n".join([line.strip() for line in lines[:5]])
body = "\n".join([line.strip() for line in lines[5:-1]])
if with_header:
s = "\n".join([begin, body])
else:
s = body
elif format == "mop":
chunks = s.split("\n", 2)
begin = "\n".join([line.strip() for line in chunks[:2]])
body = chunks[2].strip()
if constraints is not None:
body = body.split("\n")
for i in constraints:
body[i] = _re.sub(' 1( |$)', ' 0\g<1>', body[i])
body = "\n".join(body)
if with_header:
s = "\n".join([begin, body])
else:
s = body
return s.strip()
def read_pybel(value, name=None):
    """
    Create an `Atoms` object from content parsed by Pybel.

    `Pybel <https://openbabel.org/docs/dev/UseTheLibrary/Python_Pybel.html>`_
    is a Python module that simplifies access to the OpenBabel API, a chemical
    toolbox designed to speak the many languages of chemical data.

    Parameters
    ----------
    value : `str`, `pybel.Molecule`, `openbabel.OBMol`
        A path to a file, or either a Pybel Molecule object, or OpenBabel
        OBMol.
    name : `str`, optional
        Name for chemical species. If not given, this is set to the file path,
        if known. Chemical equations mention this name when referring to the
        returned object.

    Returns
    -------
    molecule : `Atoms`
        All attributes convertible from Pybel to cclib are made available as
        attributes in the returned object.

    Notes
    -----
    The following attributes are converted from Pybel to cclib: `atomcoords`,
    `atommasses`, `atomnos`, `natom`, `charge` and `mult`. One must keep in
    mind that `charge` and `mult` are not always reliable, since these are
    often calculated from atomic formal charges.

    Examples
    --------
    >>> from pyrrole.atoms import read_pybel
    >>> molecule = read_pybel('data/pyrrolate/pyrrole.xyz')
    >>> molecule.atomnos
    array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32)
    >>> molecule.natom
    10
    >>> molecule.charge
    0
    """
    if isinstance(value, _pb.Molecule):
        # TODO: test this case.
        jobfilename = None
        charge, mult = value.charge, value.spin
        ccdata = _makecclib(value.OBMol)
    elif isinstance(value, _ob.OBMol):
        # TODO: test this case.
        jobfilename = None
        charge = value.GetTotalCharge()
        mult = value.GetTotalSpinMultiplicity()
        ccdata = _makecclib(value)
    else:
        # Assume a file path; the extension selects the OpenBabel reader.
        jobfilename = value
        extension = _os.path.splitext(jobfilename)[1]
        # TODO: This only reads first structure.
        molecule = next(_pb.readfile(extension[1:], jobfilename))
        charge, mult = molecule.charge, molecule.spin
        ccdata = _makecclib(molecule.OBMol)
    attributes = ccdata.getattributes()
    attributes.update({
        'name': name if name is not None else jobfilename,
        'jobfilename': jobfilename,
        'charge': charge,
        'mult': mult
    })
    return Atoms(attributes)
def create_data(*args):
    """
    Produce a single data object from an arbitrary number of different objects.

    This function returns a single `pandas.DataFrame` object from a collection
    of `Atoms` and `pandas.DataFrame` objects. The returned object, already
    indexed by `Atoms.name`, can be promptly used by e.g. `ChemicalSystem`.

    Parameters
    ----------
    *args : `pandas.DataFrame` or `Atoms`-like
        All positional arguments are assumed to be sources of data.
        `Atoms`-like objects (i.e. any object accepted by the `Atoms`
        constructor) become single row records in the final returned
        data object. `pandas.DataFrame` data table objects, on the other hand,
        are concatenated together.

    Returns
    -------
    dataframe : `pandas.DataFrame`
        Resulting tabular data object, indexed by `Atoms.name`; if no column
        with this name exists at indexing time, a new column (with `None`
        values) is created for the purpose of indexing.

    Notes
    -----
    The returned `pandas.DataFrame` will be indexed by `Atoms.name` (see
    examples below), which might be the same as `Atoms.jobfilename` if no name
    was given to the constructor of `Atoms` (e.g. mapping).

    Examples
    --------
    >>> from pyrrole.atoms import Atoms, create_data, read_cclib
    >>> pyrrole = read_cclib('data/pyrrolate/pyrrole.out', 'pyrrole')
    >>> pyrrolate = read_cclib('data/pyrrolate/pyrrolate.out')
    >>> data = create_data(pyrrole, pyrrolate)
    >>> data['charge']
    name
    pyrrole                         0
    data/pyrrolate/pyrrolate.out   -1
    Name: charge, dtype: int64
    """
    def _as_indexed_frame(data):
        # One-row frame from an Atoms-like object; frames pass through.
        if not isinstance(data, _pd.DataFrame):
            try:
                record = data.to_series()
            except AttributeError:
                record = Atoms(data).to_series()
            data = _pd.DataFrame([record])
        # Guarantee a "name" column so the final set_index cannot fail.
        if data.index.name != "name":
            if "name" not in data.columns:
                data["name"] = None
            data = data.set_index("name")
        return data.reset_index()

    frames = [_as_indexed_frame(arg) for arg in args]
    return _pd.concat(frames, sort=False).set_index("name")
|
schneiderfelipe/pyrrole | pyrrole/atoms.py | read_pybel | python | def read_pybel(value, name=None):
if isinstance(value, _pb.Molecule):
# TODO: test this case.
jobfilename = None
charge, mult = value.charge, value.spin
ccdata = _makecclib(value.OBMol)
elif isinstance(value, _ob.OBMol):
# TODO: test this case.
jobfilename = None
charge, mult = value.GetTotalCharge(), value.GetTotalSpinMultiplicity()
ccdata = _makecclib(value)
else:
# TODO: test this case.
jobfilename = value
_, jobfilename_ext = _os.path.splitext(jobfilename)
# TODO: This only reads first structure.
mol = next(_pb.readfile(jobfilename_ext[1:], jobfilename))
charge, mult = mol.charge, mol.spin
ccdata = _makecclib(mol.OBMol)
if name is None:
name = jobfilename
attributes = ccdata.getattributes()
attributes.update({
'name': name,
'jobfilename': jobfilename,
'charge': charge,
'mult': mult
})
return Atoms(attributes) | Create an `Atoms` object from content parsed by Pybel.
`Pybel <https://openbabel.org/docs/dev/UseTheLibrary/Python_Pybel.html>`_
is a Python module that simplifies access to the OpenBabel API, a chemical
toolbox designed to speak the many languages of chemical data. It’s an
open, collaborative project allowing anyone to search, convert, analyze, or
store data from molecular modeling, chemistry, solid-state materials,
biochemistry, and related areas.
Parameters
----------
value : `str`, `pybel.Molecule`, `openbabel.OBMol`
A path to a file, or either a Pybel Molecule object, or OpenBabel
OBMol.
name : `str`, optional
Name for chemical species. If not given, this is set to the file path,
if known. Chemical equations mention this name when referring to the
returned object.
Returns
-------
molecule : `Atoms`
All attributes convertible from Pybel to cclib are made available as
attributes in the returned object.
Notes
-----
The following attributes are converted from Pybel to cclib: `atomcoords`,
`atommasses`, `atomnos`, `natom`, `charge` and `mult`. One must keep in
mind that `charge` and `mult` are not always reliable, since these are
often calculated from atomic formal charges.
Examples
--------
>>> from pyrrole.atoms import read_pybel
>>> molecule = read_pybel('data/pyrrolate/pyrrole.xyz')
>>> molecule.atomnos
array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32)
>>> molecule.natom
10
>>> molecule.charge
0 | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L551-L628 | null | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for handling molecular structures and their attributes."""
import os as _os
import re as _re
import numpy as _np
import pandas as _pd
import cclib as _cclib
import pybel as _pb
import openbabel as _ob
from cclib.parser import data as _data
from cclib.parser import logfileparser as _logfileparser
from cclib.bridge.cclib2openbabel import makecclib as _makecclib
from cclib.bridge.cclib2openbabel import makeopenbabel as _makeopenbabel
class Atoms:
"""
Abstraction of fixed molecular structures as sequences of atoms.
Properties are stored in `Atoms.attributes`, which follow naming and unit
conventions of the `cclib project <https://cclib.github.io/>`_ as close as
possible (see `read_cclib` for producing `Atoms` out of cclib objects or
logfiles).
Parameters
----------
attributes : mapping or `pandas.Series`
A mapping containing data attributes whose keys follow the naming
conventions of the `cclib project <https://cclib.github.io/>`_ for
properties as close as possible. Any mapping of attributes that can be
converted to a `pandas.Series` object is accepted.
Attributes
----------
attribute : `pandas.Series`
Molecular properties. Keys follow the naming practice of the `cclib
project <https://cclib.github.io/>`_ for properties as close as
possible. The object is named during initialization after `Atoms.name`
(see notes below).
Notes
-----
This class is intended to be used directly in very simple cases only. For
complex situations, use factory functions such as `read_cclib`.
`Atoms.name` is set to `Atoms.jobfilename` if `Atoms.name` does not exist.
Examples
--------
An `Atoms` object can be instantiated by giving any mapping of
attributes that can be converted to a `pandas.Series` object:
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
Attributes can be accessed by their keys, as given during initialization:
>>> dioxygen.mult
3
>>> dioxygen.atomcoords[-1][1, 2]
1.21
Two `Atoms` objects are considered equal if their attributes match:
>>> dioxygen_copy = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
If printed, `Atoms` objects produce coordinates in
`xyz format <https://en.wikipedia.org/wiki/XYZ_file_format>`_:
>>> from pyrrole.atoms import read_pybel
>>> print(read_pybel("data/pyrrolate/pyrrole.xyz", "pyrrole"))
C -1.11468 0.00000 0.33059
C -0.70848 0.00000 -0.97739
C 0.70848 0.00000 -0.97739
C 1.11468 0.00000 0.33059
N 0.00000 0.00000 1.11189
H -2.10267 0.00000 0.75908
H -1.35484 0.00000 -1.83956
H 1.35484 0.00000 -1.83956
H 2.10267 0.00000 0.75908
H 0.00000 0.00000 2.11476
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
"""
    def __init__(self, attributes):
        """See the docstring for this class."""
        # TODO: make idempotent (allow receiving Atoms objects).
        self.attributes = _pd.Series(attributes)
        # TODO: properly document used attributes.
        # Name the underlying series after the molecule: prefer an explicit
        # "name" attribute and fall back to the originating job file name.
        if "name" in self.attributes:
            # TODO: make a test for this if.
            self.attributes.rename(self.name, inplace=True)
        elif "jobfilename" in self.attributes:
            self.attributes.rename(self.jobfilename, inplace=True)
        if "atomcoords" in self.attributes:
            # Normalize coordinates so atomcoords[-1] is always a 2-D
            # geometry: a single geometry gets wrapped in a length-1 list.
            self.attributes["atomcoords"] = _np.asanyarray(self.atomcoords)
            if len(self.atomcoords.shape) < 3:
                self.attributes["atomcoords"] = [self.atomcoords]
        self.name = self.attributes.name
def __getattr__(self, value):
"""Wrap `Atoms.value` into `Atoms.attributes['value']`."""
return self.attributes[value]
def __eq__(self, other):
"""Compare molecules in terms of attributes."""
# Expensive but definitely correct.
return self.attributes.to_json() == other.attributes.to_json()
def __str__(self):
"""Build a string with Cartesian coordinates for this molecule."""
# TODO: make a read_string that is able to read the results of this
# function.
return self.to_string('xyz')
def split(self, pattern=None):
r"""
Break molecule up into constituent fragments.
By default (i.e., if `pattern` is `None`), each disconnected fragment
is returned as a separate new `Atoms` object. This uses OpenBabel
(through `OBMol.Separate`) and might not preserve atom order, depending
on your version of the library.
Parameters
----------
pattern : iterable of iterable of `int`, optional
Groupings of atoms into molecule fragments. Each element of
`pattern` should be an iterable whose members are atom indices (see
example below).
Returns
-------
fragments : iterable of `Atoms`
Examples
--------
>>> from pyrrole import atoms
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
"Natural fragmentation" is the default behaviour, i.e. all disconnected
fragments are returned:
>>> for frag in water_dimer.split():
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
O 1.21457 0.03172 -0.27623
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
Precise fragment grouping can be achieved by explicitly indicating
which atoms belong to which fragments:
>>> for frag in water_dimer.split([range(3), (5, 4), [3]]):
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
O 1.21457 0.03172 -0.27623
<BLANKLINE>
"""
molecule_pybel = self.to_pybel()
if pattern is None:
fragments = [read_pybel(frag)
for frag in molecule_pybel.OBMol.Separate()]
else:
fragments = []
for group in pattern:
fragment_obmol = _pb.ob.OBMol()
for i in group:
obatom = molecule_pybel.OBMol.GetAtomById(i)
fragment_obmol.InsertAtom(obatom)
fragments.append(fragment_obmol)
fragments = [read_pybel(frag) for frag in fragments]
return fragments
def to_series(self):
"""
Produce a data record of attributes as a `pandas.Series` object.
This method is useful for producing many data tables in pyrrole. See
`ChemicalEquation.to_series` and `ChemicalSystem.to_dataframe` for
more.
Returns
-------
series : `pandas.Series`
Data record of attributes.
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'mult': 3,
... 'name': 'dioxygen'})
>>> dioxygen.to_series()
atomcoords [[[0.0, 0.0, 0.0], [0.0, 0.0, 1.21]]]
atomnos [8, 8]
mult 3
name dioxygen
Name: dioxygen, dtype: object
This method can be used to produce a copy of an `Atoms` object:
>>> dioxygen_copy = Atoms(dioxygen.to_series())
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
"""
return self.attributes
def to_pybel(self):
"""
Produce a Pybel Molecule object.
It is based on the capabilities of OpenBabel through Pybel. The present
object must have at least `atomcoords`, `atomnos`, `charge` and `mult`
defined.
Returns
-------
`pybel.Molecule`
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
>>> mol = dioxygen.to_pybel()
>>> mol.molwt
31.9988
"""
# TODO: This only exports last geometry by default.
obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge,
self.mult)
title = self.name or ""
if 'scfenergies' in self.attributes:
title += ", scfenergy={} eV".format(self.scfenergies[-1])
obmol.SetTitle(title)
# TODO: make a test for this function.
return _pb.Molecule(obmol)
def to_string(self, format="smi", dialect=None, with_header=False,
fragment_id=None, constraints=None):
r"""
Produce a string representation of the molecule.
This function wraps and extends the functionality of OpenBabel (which
is accessible through `to_pybel`). Many chemical formats can thus be
output (see the `pybel.outformats` variable for a list of available
output formats).
Parameters
----------
format : `str`, optional
Chemical file format of the returned string representation (see
examples below).
dialect : `str`, optional
Format dialect. This encompasses enhancements provided for some
subformats. If ``"standard"`` or `None`, the output provided by
OpenBabel is used with no or minimal modification. See notes below.
with_header : `bool`, optional
If `format` encompasses a header, allow it in the returned string.
This would be, for instance, the first two lines of data for
``format="xyz"`` (see examples below). This might not work with all
dialects and/or formats.
fragment_id : `str`, optional
Indentify molecular fragments (see examples below). This might not
work with all dialects and/or formats.
constraints : iterable object of `int`
Set cartesian constraints for selected atoms (see examples below).
This might not work with all dialects and/or formats.
Returns
-------
`str`
String representation of molecule in the specified format and/or
dialect.
Raises
------
KeyError
Raised if `dialect` value is currently not supported or if
`fragment_id` is given with a currently not supported `dialect`
value.
Notes
-----
Format dialects are subformats that support extended functionality.
Currently supported dialects are:
- for ``format="xyz"``:
- ``"ADF"``, ``"ORCA"``.
Examples
--------
>>> from pyrrole import atoms
>>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
By default, a SMILES string is returned:
>>> dioxygen.to_string()
'O=O\tdioxygen'
Cartesian coordinates can be produced with ``format="xyz"``, which is
equivalent to printing an `Atoms` instance:
>>> print(dioxygen.to_string("xyz"))
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Header lines are disabled by default (for ``format="xyz"``, for
example, the header stores the number of atoms in the molecule and a
comment or title line), but this can be reversed with
``with_header=True``:
>>> print(dioxygen.to_string("xyz", with_header=True))
2
dioxygen
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Coordinates for packages such as GAMESS and MOPAC are also supported:
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
>>> print(water_dimer.to_string("gamin"))
O 8.0 -1.6289300000 -0.0413800000 0.3713700000
H 1.0 -0.6980300000 -0.0916800000 0.0933700000
H 1.0 -2.0666300000 -0.7349800000 -0.1366300000
O 8.0 1.2145700000 0.0317200000 -0.2762300000
H 1.0 1.4492700000 0.9167200000 -0.5857300000
H 1.0 1.7297700000 -0.0803800000 0.5338700000
>>> print(water_dimer.to_string("mop"))
O -1.62893 1 -0.04138 1 0.37137 1
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 1 0.03172 1 -0.27623 1
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Constraining of cartesian coordinates works with MOPAC format:
>>> print(water_dimer.to_string("mop", constraints=(0, 3)))
O -1.62893 0 -0.04138 0 0.37137 0
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 0 0.03172 0 -0.27623 0
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Fragment identification is supported for ``"ADF"`` and ``"ORCA"``
dialects:
>>> print(water_dimer.to_string("xyz", dialect="ADF",
... fragment_id="dimer"))
O -1.62893 -0.04138 0.37137 f=dimer
H -0.69803 -0.09168 0.09337 f=dimer
H -2.06663 -0.73498 -0.13663 f=dimer
O 1.21457 0.03172 -0.27623 f=dimer
H 1.44927 0.91672 -0.58573 f=dimer
H 1.72977 -0.08038 0.53387 f=dimer
>>> print(water_dimer.to_string("xyz", dialect="ORCA",
... fragment_id=1))
O(1) -1.62893 -0.04138 0.37137
H(1) -0.69803 -0.09168 0.09337
H(1) -2.06663 -0.73498 -0.13663
O(1) 1.21457 0.03172 -0.27623
H(1) 1.44927 0.91672 -0.58573
H(1) 1.72977 -0.08038 0.53387
"""
s = self.to_pybel().write(format).strip()
if dialect is None:
dialect = "standard"
dialect = dialect.lower()
if format == "xyz":
natom, comment, body = s.split("\n", 2)
if dialect in {"adf", "orca", "standard"}:
if fragment_id is not None:
if dialect == "adf":
body = \
"\n".join(["{} f={}".format(line,
fragment_id)
for line in body.split("\n")])
elif dialect == "orca":
fragment_id = "({})".format(fragment_id)
body = \
"\n".join([line.replace(" " * len(fragment_id),
fragment_id, 1)
for line in body.split("\n")])
else:
raise KeyError("fragment_id currently not supported "
"with dialect '{}'".format(dialect))
else:
raise KeyError("dialect '{}' currently not "
"supported".format(dialect))
if with_header:
s = "\n".join([natom, comment, body])
else:
s = body
elif format == "gamin":
lines = s.split("\n")
begin = "\n".join([line.strip() for line in lines[:5]])
body = "\n".join([line.strip() for line in lines[5:-1]])
if with_header:
s = "\n".join([begin, body])
else:
s = body
elif format == "mop":
chunks = s.split("\n", 2)
begin = "\n".join([line.strip() for line in chunks[:2]])
body = chunks[2].strip()
if constraints is not None:
body = body.split("\n")
for i in constraints:
body[i] = _re.sub(' 1( |$)', ' 0\g<1>', body[i])
body = "\n".join(body)
if with_header:
s = "\n".join([begin, body])
else:
s = body
return s.strip()
def read_cclib(value, name=None):
    """
    Create an `Atoms` object from data attributes parsed by cclib.

    `cclib <https://cclib.github.io/>`_ is an open source library, written
    in Python, for parsing and interpreting the results (logfiles) of
    computational chemistry packages.

    Parameters
    ----------
    value : `str`, `cclib.parser.logfileparser.Logfile`, `cclib.parser.data.ccData`
        A path to a logfile, or either a cclib job object (i.e., from
        `cclib.ccopen`), or a cclib data object (i.e., from
        ``job.parse()``).
    name : `str`, optional
        Name for the chemical species. If not given, this is set to the
        logfile path, if known. Chemical equations mention this name when
        refering to the returned object.

    Returns
    -------
    molecule : `Atoms`
        All attributes obtainable by cclib are made available as attributes
        in the returned object.
    """
    if isinstance(value, _data.ccData):
        # TODO: test this case.
        # Already-parsed data carries no file path.
        path, ccdata = None, value
    elif isinstance(value, _logfileparser.Logfile):
        # TODO: test this case.
        path, ccdata = value.filename, value.parse()
    else:
        # TODO: test this case.
        # Assume a path-like value and let cclib choose the parser.
        job = _cclib.ccopen(value)
        path, ccdata = job.filename, job.parse()

    attributes = ccdata.getattributes()
    attributes['name'] = name if name is not None else path
    attributes['jobfilename'] = path
    return Atoms(attributes)
def create_data(*args):
    """
    Produce a single data object from an arbitrary number of objects.

    `Atoms`-like arguments (i.e. anything accepted by the `Atoms`
    constructor) become single-row records, while `pandas.DataFrame`
    arguments are concatenated as-is. The returned table is indexed by
    `Atoms.name`; if no ``name`` column exists at indexing time, one filled
    with `None` is created for the purpose.

    Parameters
    ----------
    *args : `pandas.DataFrame` or `Atoms`-like
        All positional arguments are assumed to be sources of data.

    Returns
    -------
    dataframe : `pandas.DataFrame`
        Resulting tabular data object, indexed by `Atoms.name` (which might
        equal `Atoms.jobfilename` if no explicit name was given).
    """
    def _as_frame(obj):
        # Promote anything that is not already a table to a one-row frame.
        if not isinstance(obj, _pd.DataFrame):
            try:
                obj = _pd.DataFrame([obj.to_series()])
            except AttributeError:
                obj = _pd.DataFrame([Atoms(obj).to_series()])
        # Normalize so every frame exposes a "name" column for indexing.
        if obj.index.name != "name":
            if "name" not in obj.columns:
                obj["name"] = None
            obj = obj.set_index("name")
        return obj.reset_index()

    frames = [_as_frame(arg) for arg in args]
    combined = _pd.concat(frames, sort=False)
    return combined.set_index("name")
|
schneiderfelipe/pyrrole | pyrrole/atoms.py | create_data | python | def create_data(*args):
def _prepare_data(data):
if not isinstance(data, _pd.DataFrame):
try:
data = _pd.DataFrame([data.to_series()])
except AttributeError:
data = _pd.DataFrame([Atoms(data).to_series()])
if data.index.name != "name":
if "name" not in data.columns:
data["name"] = None
data = data.set_index("name")
return data.reset_index()
args = map(_prepare_data, args)
dataframe = _pd.concat(args, sort=False)
return dataframe.set_index("name") | Produce a single data object from an arbitrary number of different objects.
This function returns a single `pandas.DataFrame` object from a collection
of `Atoms` and `pandas.DataFrame` objects. The returned object, already
indexed by `Atoms.name`, can be promptly used by e.g. `ChemicalSystem`.
Parameters
----------
*args : `pandas.DataFrame` or `Atoms`-like
All positional arguments are assumed to be sources of data.
`Atoms`-like objects (i.e. any object accepted by the `Atoms`
constructor) become single row records in the final returned
data object. `pandas.DataFrame` data table objects, on the other hand,
are concatenated together (by using `pandas.DataFrame.concat`).
Returns
-------
dataframe : `pandas.DataFrame`
Resulting tabular data object. The returned object is guaranteed to be
indexed by `Atoms.name`; if no column with this name exists at
indexing time, a new column (with `None` values) is created for the
purpose of indexing.
Notes
-----
The returned `pandas.DataFrame` will be indexed by `Atoms.name` (see
examples below), which might be the same as `Atoms.jobfilename` if no name
was given to the constructor of `Atoms` (e.g. mapping).
Examples
--------
>>> from pyrrole.atoms import Atoms, create_data, read_cclib
>>> pyrrole = read_cclib('data/pyrrolate/pyrrole.out', 'pyrrole')
>>> pyrrolate = read_cclib('data/pyrrolate/pyrrolate.out')
>>> data = create_data(pyrrole, pyrrolate)
>>> data['charge']
name
pyrrole 0
data/pyrrolate/pyrrolate.out -1
Name: charge, dtype: int64 | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L631-L690 | null | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for handling molecular structures and their attributes."""
import os as _os
import re as _re
import numpy as _np
import pandas as _pd
import cclib as _cclib
import pybel as _pb
import openbabel as _ob
from cclib.parser import data as _data
from cclib.parser import logfileparser as _logfileparser
from cclib.bridge.cclib2openbabel import makecclib as _makecclib
from cclib.bridge.cclib2openbabel import makeopenbabel as _makeopenbabel
class Atoms:
"""
Abstraction of fixed molecular structures as sequences of atoms.
Properties are stored in `Atoms.attributes`, which follow naming and unit
conventions of the `cclib project <https://cclib.github.io/>`_ as close as
possible (see `read_cclib` for producing `Atoms` out of cclib objects or
logfiles).
Parameters
----------
attributes : mapping or `pandas.Series`
A mapping containing data attributes whose keys follow the naming
conventions of the `cclib project <https://cclib.github.io/>`_ for
properties as close as possible. Any mapping of attributes that can be
converted to a `pandas.Series` object is accepted.
Attributes
----------
attribute : `pandas.Series`
Molecular properties. Keys follow the naming practice of the `cclib
project <https://cclib.github.io/>`_ for properties as close as
possible. The object is named during initialization after `Atoms.name`
(see notes below).
Notes
-----
This class is intended to be used directly in very simple cases only. For
complex situations, use factory functions such as `read_cclib`.
`Atoms.name` is set to `Atoms.jobfilename` if `Atoms.name` does not exist.
Examples
--------
An `Atoms` object can be instantiated by giving any mapping of
attributes that can be converted to a `pandas.Series` object:
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
Attributes can be accessed by their keys, as given during initialization:
>>> dioxygen.mult
3
>>> dioxygen.atomcoords[-1][1, 2]
1.21
Two `Atoms` objects are considered equal if their attributes match:
>>> dioxygen_copy = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
If printed, `Atoms` objects produce coordinates in
`xyz format <https://en.wikipedia.org/wiki/XYZ_file_format>`_:
>>> from pyrrole.atoms import read_pybel
>>> print(read_pybel("data/pyrrolate/pyrrole.xyz", "pyrrole"))
C -1.11468 0.00000 0.33059
C -0.70848 0.00000 -0.97739
C 0.70848 0.00000 -0.97739
C 1.11468 0.00000 0.33059
N 0.00000 0.00000 1.11189
H -2.10267 0.00000 0.75908
H -1.35484 0.00000 -1.83956
H 1.35484 0.00000 -1.83956
H 2.10267 0.00000 0.75908
H 0.00000 0.00000 2.11476
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
"""
def __init__(self, attributes):
"""See the docstring for this class."""
# TODO: make idempotent (allow receiving Atoms objects).
self.attributes = _pd.Series(attributes)
# TODO: properly document used attributes.
if "name" in self.attributes:
# TODO: make a test for this if.
self.attributes.rename(self.name, inplace=True)
elif "jobfilename" in self.attributes:
self.attributes.rename(self.jobfilename, inplace=True)
if "atomcoords" in self.attributes:
self.attributes["atomcoords"] = _np.asanyarray(self.atomcoords)
if len(self.atomcoords.shape) < 3:
self.attributes["atomcoords"] = [self.atomcoords]
self.name = self.attributes.name
def __getattr__(self, value):
"""Wrap `Atoms.value` into `Atoms.attributes['value']`."""
return self.attributes[value]
def __eq__(self, other):
"""Compare molecules in terms of attributes."""
# Expensive but definitely correct.
return self.attributes.to_json() == other.attributes.to_json()
def __str__(self):
"""Build a string with Cartesian coordinates for this molecule."""
# TODO: make a read_string that is able to read the results of this
# function.
return self.to_string('xyz')
def split(self, pattern=None):
r"""
Break molecule up into constituent fragments.
By default (i.e., if `pattern` is `None`), each disconnected fragment
is returned as a separate new `Atoms` object. This uses OpenBabel
(through `OBMol.Separate`) and might not preserve atom order, depending
on your version of the library.
Parameters
----------
pattern : iterable of iterable of `int`, optional
Groupings of atoms into molecule fragments. Each element of
`pattern` should be an iterable whose members are atom indices (see
example below).
Returns
-------
fragments : iterable of `Atoms`
Examples
--------
>>> from pyrrole import atoms
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
"Natural fragmentation" is the default behaviour, i.e. all disconnected
fragments are returned:
>>> for frag in water_dimer.split():
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
O 1.21457 0.03172 -0.27623
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
Precise fragment grouping can be achieved by explicitly indicating
which atoms belong to which fragments:
>>> for frag in water_dimer.split([range(3), (5, 4), [3]]):
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
O 1.21457 0.03172 -0.27623
<BLANKLINE>
"""
molecule_pybel = self.to_pybel()
if pattern is None:
fragments = [read_pybel(frag)
for frag in molecule_pybel.OBMol.Separate()]
else:
fragments = []
for group in pattern:
fragment_obmol = _pb.ob.OBMol()
for i in group:
obatom = molecule_pybel.OBMol.GetAtomById(i)
fragment_obmol.InsertAtom(obatom)
fragments.append(fragment_obmol)
fragments = [read_pybel(frag) for frag in fragments]
return fragments
def to_series(self):
"""
Produce a data record of attributes as a `pandas.Series` object.
This method is useful for producing many data tables in pyrrole. See
`ChemicalEquation.to_series` and `ChemicalSystem.to_dataframe` for
more.
Returns
-------
series : `pandas.Series`
Data record of attributes.
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'mult': 3,
... 'name': 'dioxygen'})
>>> dioxygen.to_series()
atomcoords [[[0.0, 0.0, 0.0], [0.0, 0.0, 1.21]]]
atomnos [8, 8]
mult 3
name dioxygen
Name: dioxygen, dtype: object
This method can be used to produce a copy of an `Atoms` object:
>>> dioxygen_copy = Atoms(dioxygen.to_series())
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
"""
return self.attributes
def to_pybel(self):
"""
Produce a Pybel Molecule object.
It is based on the capabilities of OpenBabel through Pybel. The present
object must have at least `atomcoords`, `atomnos`, `charge` and `mult`
defined.
Returns
-------
`pybel.Molecule`
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
>>> mol = dioxygen.to_pybel()
>>> mol.molwt
31.9988
"""
# TODO: This only exports last geometry by default.
obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge,
self.mult)
title = self.name or ""
if 'scfenergies' in self.attributes:
title += ", scfenergy={} eV".format(self.scfenergies[-1])
obmol.SetTitle(title)
# TODO: make a test for this function.
return _pb.Molecule(obmol)
def to_string(self, format="smi", dialect=None, with_header=False,
fragment_id=None, constraints=None):
r"""
Produce a string representation of the molecule.
This function wraps and extends the functionality of OpenBabel (which
is accessible through `to_pybel`). Many chemical formats can thus be
output (see the `pybel.outformats` variable for a list of available
output formats).
Parameters
----------
format : `str`, optional
Chemical file format of the returned string representation (see
examples below).
dialect : `str`, optional
Format dialect. This encompasses enhancements provided for some
subformats. If ``"standard"`` or `None`, the output provided by
OpenBabel is used with no or minimal modification. See notes below.
with_header : `bool`, optional
If `format` encompasses a header, allow it in the returned string.
This would be, for instance, the first two lines of data for
``format="xyz"`` (see examples below). This might not work with all
dialects and/or formats.
fragment_id : `str`, optional
Indentify molecular fragments (see examples below). This might not
work with all dialects and/or formats.
constraints : iterable object of `int`
Set cartesian constraints for selected atoms (see examples below).
This might not work with all dialects and/or formats.
Returns
-------
`str`
String representation of molecule in the specified format and/or
dialect.
Raises
------
KeyError
Raised if `dialect` value is currently not supported or if
`fragment_id` is given with a currently not supported `dialect`
value.
Notes
-----
Format dialects are subformats that support extended functionality.
Currently supported dialects are:
- for ``format="xyz"``:
- ``"ADF"``, ``"ORCA"``.
Examples
--------
>>> from pyrrole import atoms
>>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
By default, a SMILES string is returned:
>>> dioxygen.to_string()
'O=O\tdioxygen'
Cartesian coordinates can be produced with ``format="xyz"``, which is
equivalent to printing an `Atoms` instance:
>>> print(dioxygen.to_string("xyz"))
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Header lines are disabled by default (for ``format="xyz"``, for
example, the header stores the number of atoms in the molecule and a
comment or title line), but this can be reversed with
``with_header=True``:
>>> print(dioxygen.to_string("xyz", with_header=True))
2
dioxygen
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Coordinates for packages such as GAMESS and MOPAC are also supported:
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
>>> print(water_dimer.to_string("gamin"))
O 8.0 -1.6289300000 -0.0413800000 0.3713700000
H 1.0 -0.6980300000 -0.0916800000 0.0933700000
H 1.0 -2.0666300000 -0.7349800000 -0.1366300000
O 8.0 1.2145700000 0.0317200000 -0.2762300000
H 1.0 1.4492700000 0.9167200000 -0.5857300000
H 1.0 1.7297700000 -0.0803800000 0.5338700000
>>> print(water_dimer.to_string("mop"))
O -1.62893 1 -0.04138 1 0.37137 1
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 1 0.03172 1 -0.27623 1
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Constraining of cartesian coordinates works with MOPAC format:
>>> print(water_dimer.to_string("mop", constraints=(0, 3)))
O -1.62893 0 -0.04138 0 0.37137 0
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 0 0.03172 0 -0.27623 0
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Fragment identification is supported for ``"ADF"`` and ``"ORCA"``
dialects:
>>> print(water_dimer.to_string("xyz", dialect="ADF",
... fragment_id="dimer"))
O -1.62893 -0.04138 0.37137 f=dimer
H -0.69803 -0.09168 0.09337 f=dimer
H -2.06663 -0.73498 -0.13663 f=dimer
O 1.21457 0.03172 -0.27623 f=dimer
H 1.44927 0.91672 -0.58573 f=dimer
H 1.72977 -0.08038 0.53387 f=dimer
>>> print(water_dimer.to_string("xyz", dialect="ORCA",
... fragment_id=1))
O(1) -1.62893 -0.04138 0.37137
H(1) -0.69803 -0.09168 0.09337
H(1) -2.06663 -0.73498 -0.13663
O(1) 1.21457 0.03172 -0.27623
H(1) 1.44927 0.91672 -0.58573
H(1) 1.72977 -0.08038 0.53387
"""
s = self.to_pybel().write(format).strip()
if dialect is None:
dialect = "standard"
dialect = dialect.lower()
if format == "xyz":
natom, comment, body = s.split("\n", 2)
if dialect in {"adf", "orca", "standard"}:
if fragment_id is not None:
if dialect == "adf":
body = \
"\n".join(["{} f={}".format(line,
fragment_id)
for line in body.split("\n")])
elif dialect == "orca":
fragment_id = "({})".format(fragment_id)
body = \
"\n".join([line.replace(" " * len(fragment_id),
fragment_id, 1)
for line in body.split("\n")])
else:
raise KeyError("fragment_id currently not supported "
"with dialect '{}'".format(dialect))
else:
raise KeyError("dialect '{}' currently not "
"supported".format(dialect))
if with_header:
s = "\n".join([natom, comment, body])
else:
s = body
elif format == "gamin":
lines = s.split("\n")
begin = "\n".join([line.strip() for line in lines[:5]])
body = "\n".join([line.strip() for line in lines[5:-1]])
if with_header:
s = "\n".join([begin, body])
else:
s = body
elif format == "mop":
chunks = s.split("\n", 2)
begin = "\n".join([line.strip() for line in chunks[:2]])
body = chunks[2].strip()
if constraints is not None:
body = body.split("\n")
for i in constraints:
body[i] = _re.sub(' 1( |$)', ' 0\g<1>', body[i])
body = "\n".join(body)
if with_header:
s = "\n".join([begin, body])
else:
s = body
return s.strip()
def read_cclib(value, name=None):
    """
    Create an `Atoms` object from data attributes parsed by cclib.

    `cclib <https://cclib.github.io/>`_ is an open source library, written
    in Python, for parsing and interpreting the results (logfiles) of
    computational chemistry packages.

    Parameters
    ----------
    value : `str`, `cclib.parser.logfileparser.Logfile`, `cclib.parser.data.ccData`
        A path to a logfile, or either a cclib job object (i.e., from
        `cclib.ccopen`), or a cclib data object (i.e., from
        ``job.parse()``).
    name : `str`, optional
        Name for the chemical species. If not given, this is set to the
        logfile path, if known. Chemical equations mention this name when
        refering to the returned object.

    Returns
    -------
    molecule : `Atoms`
        All attributes obtainable by cclib are made available as attributes
        in the returned object.
    """
    if isinstance(value, _data.ccData):
        # TODO: test this case.
        # Already-parsed data carries no file path.
        path, ccdata = None, value
    elif isinstance(value, _logfileparser.Logfile):
        # TODO: test this case.
        path, ccdata = value.filename, value.parse()
    else:
        # TODO: test this case.
        # Assume a path-like value and let cclib choose the parser.
        job = _cclib.ccopen(value)
        path, ccdata = job.filename, job.parse()

    attributes = ccdata.getattributes()
    attributes['name'] = name if name is not None else path
    attributes['jobfilename'] = path
    return Atoms(attributes)
def read_pybel(value, name=None):
    """
    Create an `Atoms` object from content parsed by Pybel.

    `Pybel <https://openbabel.org/docs/dev/UseTheLibrary/Python_Pybel.html>`_
    is a Python module that simplifies access to the OpenBabel API, a
    chemical toolbox designed to speak the many languages of chemical data.

    Parameters
    ----------
    value : `str`, `pybel.Molecule`, `openbabel.OBMol`
        A path to a file, or either a Pybel Molecule object, or OpenBabel
        OBMol.
    name : `str`, optional
        Name for chemical species. If not given, this is set to the file
        path, if known. Chemical equations mention this name when refering
        to the returned object.

    Returns
    -------
    molecule : `Atoms`
        All attributes convertible from Pybel to cclib are made available
        as attributes in the returned object.

    Notes
    -----
    The following attributes are converted from Pybel to cclib:
    `atomcoords`, `atommasses`, `atomnos`, `natom`, `charge` and `mult`.
    One must keep in mind that `charge` and `mult` are not always reliable,
    since these are often calculated from atomic formal charges.
    """
    if isinstance(value, _pb.Molecule):
        # TODO: test this case.
        path = None
        total_charge, multiplicity = value.charge, value.spin
        ccdata = _makecclib(value.OBMol)
    elif isinstance(value, _ob.OBMol):
        # TODO: test this case.
        path = None
        total_charge = value.GetTotalCharge()
        multiplicity = value.GetTotalSpinMultiplicity()
        ccdata = _makecclib(value)
    else:
        # TODO: test this case.
        path = value
        # Infer the file format from the extension (without the dot).
        extension = _os.path.splitext(path)[1][1:]
        # TODO: This only reads first structure.
        mol = next(_pb.readfile(extension, path))
        total_charge, multiplicity = mol.charge, mol.spin
        ccdata = _makecclib(mol.OBMol)

    attributes = ccdata.getattributes()
    attributes.update({
        'name': name if name is not None else path,
        'jobfilename': path,
        'charge': total_charge,
        'mult': multiplicity,
    })
    return Atoms(attributes)
|
schneiderfelipe/pyrrole | pyrrole/atoms.py | Atoms.split | python | def split(self, pattern=None):
r"""
Break molecule up into constituent fragments.
By default (i.e., if `pattern` is `None`), each disconnected fragment
is returned as a separate new `Atoms` object. This uses OpenBabel
(through `OBMol.Separate`) and might not preserve atom order, depending
on your version of the library.
Parameters
----------
pattern : iterable of iterable of `int`, optional
Groupings of atoms into molecule fragments. Each element of
`pattern` should be an iterable whose members are atom indices (see
example below).
Returns
-------
fragments : iterable of `Atoms`
Examples
--------
>>> from pyrrole import atoms
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
"Natural fragmentation" is the default behaviour, i.e. all disconnected
fragments are returned:
>>> for frag in water_dimer.split():
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
O 1.21457 0.03172 -0.27623
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
Precise fragment grouping can be achieved by explicitly indicating
which atoms belong to which fragments:
>>> for frag in water_dimer.split([range(3), (5, 4), [3]]):
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
O 1.21457 0.03172 -0.27623
<BLANKLINE>
"""
molecule_pybel = self.to_pybel()
if pattern is None:
fragments = [read_pybel(frag)
for frag in molecule_pybel.OBMol.Separate()]
else:
fragments = []
for group in pattern:
fragment_obmol = _pb.ob.OBMol()
for i in group:
obatom = molecule_pybel.OBMol.GetAtomById(i)
fragment_obmol.InsertAtom(obatom)
fragments.append(fragment_obmol)
fragments = [read_pybel(frag) for frag in fragments]
return fragments | r"""
Break molecule up into constituent fragments.
By default (i.e., if `pattern` is `None`), each disconnected fragment
is returned as a separate new `Atoms` object. This uses OpenBabel
(through `OBMol.Separate`) and might not preserve atom order, depending
on your version of the library.
Parameters
----------
pattern : iterable of iterable of `int`, optional
Groupings of atoms into molecule fragments. Each element of
`pattern` should be an iterable whose members are atom indices (see
example below).
Returns
-------
fragments : iterable of `Atoms`
Examples
--------
>>> from pyrrole import atoms
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
"Natural fragmentation" is the default behaviour, i.e. all disconnected
fragments are returned:
>>> for frag in water_dimer.split():
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
O 1.21457 0.03172 -0.27623
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
Precise fragment grouping can be achieved by explicitly indicating
which atoms belong to which fragments:
>>> for frag in water_dimer.split([range(3), (5, 4), [3]]):
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
O 1.21457 0.03172 -0.27623
<BLANKLINE> | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L140-L212 | [
"def to_pybel(self):\n \"\"\"\n Produce a Pybel Molecule object.\n\n It is based on the capabilities of OpenBabel through Pybel. The present\n object must have at least `atomcoords`, `atomnos`, `charge` and `mult`\n defined.\n\n Returns\n -------\n `pybel.Molecule`\n\n Examples\n --------\n >>> from pyrrole.atoms import Atoms\n >>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],\n ... [0., 0., 1.21]],\n ... 'atomnos': [8, 8],\n ... 'charge': 0,\n ... 'mult': 3,\n ... 'name': 'dioxygen'})\n >>> mol = dioxygen.to_pybel()\n >>> mol.molwt\n 31.9988\n\n \"\"\"\n # TODO: This only exports last geometry by default.\n obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge,\n self.mult)\n\n title = self.name or \"\"\n if 'scfenergies' in self.attributes:\n title += \", scfenergy={} eV\".format(self.scfenergies[-1])\n obmol.SetTitle(title)\n\n # TODO: make a test for this function.\n return _pb.Molecule(obmol)\n"
] | class Atoms:
"""
Abstraction of fixed molecular structures as sequences of atoms.
Properties are stored in `Atoms.attributes`, which follow naming and unit
conventions of the `cclib project <https://cclib.github.io/>`_ as close as
possible (see `read_cclib` for producing `Atoms` out of cclib objects or
logfiles).
Parameters
----------
attributes : mapping or `pandas.Series`
A mapping containing data attributes whose keys follow the naming
conventions of the `cclib project <https://cclib.github.io/>`_ for
properties as close as possible. Any mapping of attributes that can be
converted to a `pandas.Series` object is accepted.
Attributes
----------
attribute : `pandas.Series`
Molecular properties. Keys follow the naming practice of the `cclib
project <https://cclib.github.io/>`_ for properties as close as
possible. The object is named during initialization after `Atoms.name`
(see notes below).
Notes
-----
This class is intended to be used directly in very simple cases only. For
complex situations, use factory functions such as `read_cclib`.
`Atoms.name` is set to `Atoms.jobfilename` if `Atoms.name` does not exist.
Examples
--------
An `Atoms` object can be instantiated by giving any mapping of
attributes that can be converted to a `pandas.Series` object:
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
Attributes can be accessed by their keys, as given during initialization:
>>> dioxygen.mult
3
>>> dioxygen.atomcoords[-1][1, 2]
1.21
Two `Atoms` objects are considered equal if their attributes match:
>>> dioxygen_copy = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
If printed, `Atoms` objects produce coordinates in
`xyz format <https://en.wikipedia.org/wiki/XYZ_file_format>`_:
>>> from pyrrole.atoms import read_pybel
>>> print(read_pybel("data/pyrrolate/pyrrole.xyz", "pyrrole"))
C -1.11468 0.00000 0.33059
C -0.70848 0.00000 -0.97739
C 0.70848 0.00000 -0.97739
C 1.11468 0.00000 0.33059
N 0.00000 0.00000 1.11189
H -2.10267 0.00000 0.75908
H -1.35484 0.00000 -1.83956
H 1.35484 0.00000 -1.83956
H 2.10267 0.00000 0.75908
H 0.00000 0.00000 2.11476
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
"""
def __init__(self, attributes):
"""See the docstring for this class."""
# TODO: make idempotent (allow receiving Atoms objects).
self.attributes = _pd.Series(attributes)
# TODO: properly document used attributes.
if "name" in self.attributes:
# TODO: make a test for this if.
self.attributes.rename(self.name, inplace=True)
elif "jobfilename" in self.attributes:
self.attributes.rename(self.jobfilename, inplace=True)
if "atomcoords" in self.attributes:
self.attributes["atomcoords"] = _np.asanyarray(self.atomcoords)
if len(self.atomcoords.shape) < 3:
self.attributes["atomcoords"] = [self.atomcoords]
self.name = self.attributes.name
def __getattr__(self, value):
"""Wrap `Atoms.value` into `Atoms.attributes['value']`."""
return self.attributes[value]
def __eq__(self, other):
"""Compare molecules in terms of attributes."""
# Expensive but definitely correct.
return self.attributes.to_json() == other.attributes.to_json()
def __str__(self):
"""Build a string with Cartesian coordinates for this molecule."""
# TODO: make a read_string that is able to read the results of this
# function.
return self.to_string('xyz')
def to_series(self):
"""
Produce a data record of attributes as a `pandas.Series` object.
This method is useful for producing many data tables in pyrrole. See
`ChemicalEquation.to_series` and `ChemicalSystem.to_dataframe` for
more.
Returns
-------
series : `pandas.Series`
Data record of attributes.
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'mult': 3,
... 'name': 'dioxygen'})
>>> dioxygen.to_series()
atomcoords [[[0.0, 0.0, 0.0], [0.0, 0.0, 1.21]]]
atomnos [8, 8]
mult 3
name dioxygen
Name: dioxygen, dtype: object
This method can be used to produce a copy of an `Atoms` object:
>>> dioxygen_copy = Atoms(dioxygen.to_series())
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
"""
return self.attributes
def to_pybel(self):
"""
Produce a Pybel Molecule object.
It is based on the capabilities of OpenBabel through Pybel. The present
object must have at least `atomcoords`, `atomnos`, `charge` and `mult`
defined.
Returns
-------
`pybel.Molecule`
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
>>> mol = dioxygen.to_pybel()
>>> mol.molwt
31.9988
"""
# TODO: This only exports last geometry by default.
obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge,
self.mult)
title = self.name or ""
if 'scfenergies' in self.attributes:
title += ", scfenergy={} eV".format(self.scfenergies[-1])
obmol.SetTitle(title)
# TODO: make a test for this function.
return _pb.Molecule(obmol)
def to_string(self, format="smi", dialect=None, with_header=False,
fragment_id=None, constraints=None):
r"""
Produce a string representation of the molecule.
This function wraps and extends the functionality of OpenBabel (which
is accessible through `to_pybel`). Many chemical formats can thus be
output (see the `pybel.outformats` variable for a list of available
output formats).
Parameters
----------
format : `str`, optional
Chemical file format of the returned string representation (see
examples below).
dialect : `str`, optional
Format dialect. This encompasses enhancements provided for some
subformats. If ``"standard"`` or `None`, the output provided by
OpenBabel is used with no or minimal modification. See notes below.
with_header : `bool`, optional
If `format` encompasses a header, allow it in the returned string.
This would be, for instance, the first two lines of data for
``format="xyz"`` (see examples below). This might not work with all
dialects and/or formats.
fragment_id : `str`, optional
Indentify molecular fragments (see examples below). This might not
work with all dialects and/or formats.
constraints : iterable object of `int`
Set cartesian constraints for selected atoms (see examples below).
This might not work with all dialects and/or formats.
Returns
-------
`str`
String representation of molecule in the specified format and/or
dialect.
Raises
------
KeyError
Raised if `dialect` value is currently not supported or if
`fragment_id` is given with a currently not supported `dialect`
value.
Notes
-----
Format dialects are subformats that support extended functionality.
Currently supported dialects are:
- for ``format="xyz"``:
- ``"ADF"``, ``"ORCA"``.
Examples
--------
>>> from pyrrole import atoms
>>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
By default, a SMILES string is returned:
>>> dioxygen.to_string()
'O=O\tdioxygen'
Cartesian coordinates can be produced with ``format="xyz"``, which is
equivalent to printing an `Atoms` instance:
>>> print(dioxygen.to_string("xyz"))
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Header lines are disabled by default (for ``format="xyz"``, for
example, the header stores the number of atoms in the molecule and a
comment or title line), but this can be reversed with
``with_header=True``:
>>> print(dioxygen.to_string("xyz", with_header=True))
2
dioxygen
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Coordinates for packages such as GAMESS and MOPAC are also supported:
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
>>> print(water_dimer.to_string("gamin"))
O 8.0 -1.6289300000 -0.0413800000 0.3713700000
H 1.0 -0.6980300000 -0.0916800000 0.0933700000
H 1.0 -2.0666300000 -0.7349800000 -0.1366300000
O 8.0 1.2145700000 0.0317200000 -0.2762300000
H 1.0 1.4492700000 0.9167200000 -0.5857300000
H 1.0 1.7297700000 -0.0803800000 0.5338700000
>>> print(water_dimer.to_string("mop"))
O -1.62893 1 -0.04138 1 0.37137 1
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 1 0.03172 1 -0.27623 1
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Constraining of cartesian coordinates works with MOPAC format:
>>> print(water_dimer.to_string("mop", constraints=(0, 3)))
O -1.62893 0 -0.04138 0 0.37137 0
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 0 0.03172 0 -0.27623 0
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Fragment identification is supported for ``"ADF"`` and ``"ORCA"``
dialects:
>>> print(water_dimer.to_string("xyz", dialect="ADF",
... fragment_id="dimer"))
O -1.62893 -0.04138 0.37137 f=dimer
H -0.69803 -0.09168 0.09337 f=dimer
H -2.06663 -0.73498 -0.13663 f=dimer
O 1.21457 0.03172 -0.27623 f=dimer
H 1.44927 0.91672 -0.58573 f=dimer
H 1.72977 -0.08038 0.53387 f=dimer
>>> print(water_dimer.to_string("xyz", dialect="ORCA",
... fragment_id=1))
O(1) -1.62893 -0.04138 0.37137
H(1) -0.69803 -0.09168 0.09337
H(1) -2.06663 -0.73498 -0.13663
O(1) 1.21457 0.03172 -0.27623
H(1) 1.44927 0.91672 -0.58573
H(1) 1.72977 -0.08038 0.53387
"""
s = self.to_pybel().write(format).strip()
if dialect is None:
dialect = "standard"
dialect = dialect.lower()
if format == "xyz":
natom, comment, body = s.split("\n", 2)
if dialect in {"adf", "orca", "standard"}:
if fragment_id is not None:
if dialect == "adf":
body = \
"\n".join(["{} f={}".format(line,
fragment_id)
for line in body.split("\n")])
elif dialect == "orca":
fragment_id = "({})".format(fragment_id)
body = \
"\n".join([line.replace(" " * len(fragment_id),
fragment_id, 1)
for line in body.split("\n")])
else:
raise KeyError("fragment_id currently not supported "
"with dialect '{}'".format(dialect))
else:
raise KeyError("dialect '{}' currently not "
"supported".format(dialect))
if with_header:
s = "\n".join([natom, comment, body])
else:
s = body
elif format == "gamin":
lines = s.split("\n")
begin = "\n".join([line.strip() for line in lines[:5]])
body = "\n".join([line.strip() for line in lines[5:-1]])
if with_header:
s = "\n".join([begin, body])
else:
s = body
elif format == "mop":
chunks = s.split("\n", 2)
begin = "\n".join([line.strip() for line in chunks[:2]])
body = chunks[2].strip()
if constraints is not None:
body = body.split("\n")
for i in constraints:
body[i] = _re.sub(' 1( |$)', ' 0\g<1>', body[i])
body = "\n".join(body)
if with_header:
s = "\n".join([begin, body])
else:
s = body
return s.strip()
|
schneiderfelipe/pyrrole | pyrrole/atoms.py | Atoms.to_pybel | python | def to_pybel(self):
# TODO: This only exports last geometry by default.
obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge,
self.mult)
title = self.name or ""
if 'scfenergies' in self.attributes:
title += ", scfenergy={} eV".format(self.scfenergies[-1])
obmol.SetTitle(title)
# TODO: make a test for this function.
return _pb.Molecule(obmol) | Produce a Pybel Molecule object.
It is based on the capabilities of OpenBabel through Pybel. The present
object must have at least `atomcoords`, `atomnos`, `charge` and `mult`
defined.
Returns
-------
`pybel.Molecule`
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
>>> mol = dioxygen.to_pybel()
>>> mol.molwt
31.9988 | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L253-L289 | null | class Atoms:
"""
Abstraction of fixed molecular structures as sequences of atoms.
Properties are stored in `Atoms.attributes`, which follow naming and unit
conventions of the `cclib project <https://cclib.github.io/>`_ as close as
possible (see `read_cclib` for producing `Atoms` out of cclib objects or
logfiles).
Parameters
----------
attributes : mapping or `pandas.Series`
A mapping containing data attributes whose keys follow the naming
conventions of the `cclib project <https://cclib.github.io/>`_ for
properties as close as possible. Any mapping of attributes that can be
converted to a `pandas.Series` object is accepted.
Attributes
----------
attribute : `pandas.Series`
Molecular properties. Keys follow the naming practice of the `cclib
project <https://cclib.github.io/>`_ for properties as close as
possible. The object is named during initialization after `Atoms.name`
(see notes below).
Notes
-----
This class is intended to be used directly in very simple cases only. For
complex situations, use factory functions such as `read_cclib`.
`Atoms.name` is set to `Atoms.jobfilename` if `Atoms.name` does not exist.
Examples
--------
An `Atoms` object can be instantiated by giving any mapping of
attributes that can be converted to a `pandas.Series` object:
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
Attributes can be accessed by their keys, as given during initialization:
>>> dioxygen.mult
3
>>> dioxygen.atomcoords[-1][1, 2]
1.21
Two `Atoms` objects are considered equal if their attributes match:
>>> dioxygen_copy = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
If printed, `Atoms` objects produce coordinates in
`xyz format <https://en.wikipedia.org/wiki/XYZ_file_format>`_:
>>> from pyrrole.atoms import read_pybel
>>> print(read_pybel("data/pyrrolate/pyrrole.xyz", "pyrrole"))
C -1.11468 0.00000 0.33059
C -0.70848 0.00000 -0.97739
C 0.70848 0.00000 -0.97739
C 1.11468 0.00000 0.33059
N 0.00000 0.00000 1.11189
H -2.10267 0.00000 0.75908
H -1.35484 0.00000 -1.83956
H 1.35484 0.00000 -1.83956
H 2.10267 0.00000 0.75908
H 0.00000 0.00000 2.11476
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
"""
def __init__(self, attributes):
"""See the docstring for this class."""
# TODO: make idempotent (allow receiving Atoms objects).
self.attributes = _pd.Series(attributes)
# TODO: properly document used attributes.
if "name" in self.attributes:
# TODO: make a test for this if.
self.attributes.rename(self.name, inplace=True)
elif "jobfilename" in self.attributes:
self.attributes.rename(self.jobfilename, inplace=True)
if "atomcoords" in self.attributes:
self.attributes["atomcoords"] = _np.asanyarray(self.atomcoords)
if len(self.atomcoords.shape) < 3:
self.attributes["atomcoords"] = [self.atomcoords]
self.name = self.attributes.name
def __getattr__(self, value):
"""Wrap `Atoms.value` into `Atoms.attributes['value']`."""
return self.attributes[value]
def __eq__(self, other):
"""Compare molecules in terms of attributes."""
# Expensive but definitely correct.
return self.attributes.to_json() == other.attributes.to_json()
def __str__(self):
"""Build a string with Cartesian coordinates for this molecule."""
# TODO: make a read_string that is able to read the results of this
# function.
return self.to_string('xyz')
def split(self, pattern=None):
r"""
Break molecule up into constituent fragments.
By default (i.e., if `pattern` is `None`), each disconnected fragment
is returned as a separate new `Atoms` object. This uses OpenBabel
(through `OBMol.Separate`) and might not preserve atom order, depending
on your version of the library.
Parameters
----------
pattern : iterable of iterable of `int`, optional
Groupings of atoms into molecule fragments. Each element of
`pattern` should be an iterable whose members are atom indices (see
example below).
Returns
-------
fragments : iterable of `Atoms`
Examples
--------
>>> from pyrrole import atoms
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
"Natural fragmentation" is the default behaviour, i.e. all disconnected
fragments are returned:
>>> for frag in water_dimer.split():
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
O 1.21457 0.03172 -0.27623
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
Precise fragment grouping can be achieved by explicitly indicating
which atoms belong to which fragments:
>>> for frag in water_dimer.split([range(3), (5, 4), [3]]):
... print("{}\n".format(frag))
O -1.62893 -0.04138 0.37137
H -0.69803 -0.09168 0.09337
H -2.06663 -0.73498 -0.13663
<BLANKLINE>
H 1.72977 -0.08038 0.53387
H 1.44927 0.91672 -0.58573
<BLANKLINE>
O 1.21457 0.03172 -0.27623
<BLANKLINE>
"""
molecule_pybel = self.to_pybel()
if pattern is None:
fragments = [read_pybel(frag)
for frag in molecule_pybel.OBMol.Separate()]
else:
fragments = []
for group in pattern:
fragment_obmol = _pb.ob.OBMol()
for i in group:
obatom = molecule_pybel.OBMol.GetAtomById(i)
fragment_obmol.InsertAtom(obatom)
fragments.append(fragment_obmol)
fragments = [read_pybel(frag) for frag in fragments]
return fragments
def to_series(self):
"""
Produce a data record of attributes as a `pandas.Series` object.
This method is useful for producing many data tables in pyrrole. See
`ChemicalEquation.to_series` and `ChemicalSystem.to_dataframe` for
more.
Returns
-------
series : `pandas.Series`
Data record of attributes.
Examples
--------
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'mult': 3,
... 'name': 'dioxygen'})
>>> dioxygen.to_series()
atomcoords [[[0.0, 0.0, 0.0], [0.0, 0.0, 1.21]]]
atomnos [8, 8]
mult 3
name dioxygen
Name: dioxygen, dtype: object
This method can be used to produce a copy of an `Atoms` object:
>>> dioxygen_copy = Atoms(dioxygen.to_series())
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
"""
return self.attributes
def to_string(self, format="smi", dialect=None, with_header=False,
fragment_id=None, constraints=None):
r"""
Produce a string representation of the molecule.
This function wraps and extends the functionality of OpenBabel (which
is accessible through `to_pybel`). Many chemical formats can thus be
output (see the `pybel.outformats` variable for a list of available
output formats).
Parameters
----------
format : `str`, optional
Chemical file format of the returned string representation (see
examples below).
dialect : `str`, optional
Format dialect. This encompasses enhancements provided for some
subformats. If ``"standard"`` or `None`, the output provided by
OpenBabel is used with no or minimal modification. See notes below.
with_header : `bool`, optional
If `format` encompasses a header, allow it in the returned string.
This would be, for instance, the first two lines of data for
``format="xyz"`` (see examples below). This might not work with all
dialects and/or formats.
fragment_id : `str`, optional
Indentify molecular fragments (see examples below). This might not
work with all dialects and/or formats.
constraints : iterable object of `int`
Set cartesian constraints for selected atoms (see examples below).
This might not work with all dialects and/or formats.
Returns
-------
`str`
String representation of molecule in the specified format and/or
dialect.
Raises
------
KeyError
Raised if `dialect` value is currently not supported or if
`fragment_id` is given with a currently not supported `dialect`
value.
Notes
-----
Format dialects are subformats that support extended functionality.
Currently supported dialects are:
- for ``format="xyz"``:
- ``"ADF"``, ``"ORCA"``.
Examples
--------
>>> from pyrrole import atoms
>>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
By default, a SMILES string is returned:
>>> dioxygen.to_string()
'O=O\tdioxygen'
Cartesian coordinates can be produced with ``format="xyz"``, which is
equivalent to printing an `Atoms` instance:
>>> print(dioxygen.to_string("xyz"))
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Header lines are disabled by default (for ``format="xyz"``, for
example, the header stores the number of atoms in the molecule and a
comment or title line), but this can be reversed with
``with_header=True``:
>>> print(dioxygen.to_string("xyz", with_header=True))
2
dioxygen
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Coordinates for packages such as GAMESS and MOPAC are also supported:
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
>>> print(water_dimer.to_string("gamin"))
O 8.0 -1.6289300000 -0.0413800000 0.3713700000
H 1.0 -0.6980300000 -0.0916800000 0.0933700000
H 1.0 -2.0666300000 -0.7349800000 -0.1366300000
O 8.0 1.2145700000 0.0317200000 -0.2762300000
H 1.0 1.4492700000 0.9167200000 -0.5857300000
H 1.0 1.7297700000 -0.0803800000 0.5338700000
>>> print(water_dimer.to_string("mop"))
O -1.62893 1 -0.04138 1 0.37137 1
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 1 0.03172 1 -0.27623 1
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Constraining of cartesian coordinates works with MOPAC format:
>>> print(water_dimer.to_string("mop", constraints=(0, 3)))
O -1.62893 0 -0.04138 0 0.37137 0
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 0 0.03172 0 -0.27623 0
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Fragment identification is supported for ``"ADF"`` and ``"ORCA"``
dialects:
>>> print(water_dimer.to_string("xyz", dialect="ADF",
... fragment_id="dimer"))
O -1.62893 -0.04138 0.37137 f=dimer
H -0.69803 -0.09168 0.09337 f=dimer
H -2.06663 -0.73498 -0.13663 f=dimer
O 1.21457 0.03172 -0.27623 f=dimer
H 1.44927 0.91672 -0.58573 f=dimer
H 1.72977 -0.08038 0.53387 f=dimer
>>> print(water_dimer.to_string("xyz", dialect="ORCA",
... fragment_id=1))
O(1) -1.62893 -0.04138 0.37137
H(1) -0.69803 -0.09168 0.09337
H(1) -2.06663 -0.73498 -0.13663
O(1) 1.21457 0.03172 -0.27623
H(1) 1.44927 0.91672 -0.58573
H(1) 1.72977 -0.08038 0.53387
"""
s = self.to_pybel().write(format).strip()
if dialect is None:
dialect = "standard"
dialect = dialect.lower()
if format == "xyz":
natom, comment, body = s.split("\n", 2)
if dialect in {"adf", "orca", "standard"}:
if fragment_id is not None:
if dialect == "adf":
body = \
"\n".join(["{} f={}".format(line,
fragment_id)
for line in body.split("\n")])
elif dialect == "orca":
fragment_id = "({})".format(fragment_id)
body = \
"\n".join([line.replace(" " * len(fragment_id),
fragment_id, 1)
for line in body.split("\n")])
else:
raise KeyError("fragment_id currently not supported "
"with dialect '{}'".format(dialect))
else:
raise KeyError("dialect '{}' currently not "
"supported".format(dialect))
if with_header:
s = "\n".join([natom, comment, body])
else:
s = body
elif format == "gamin":
lines = s.split("\n")
begin = "\n".join([line.strip() for line in lines[:5]])
body = "\n".join([line.strip() for line in lines[5:-1]])
if with_header:
s = "\n".join([begin, body])
else:
s = body
elif format == "mop":
chunks = s.split("\n", 2)
begin = "\n".join([line.strip() for line in chunks[:2]])
body = chunks[2].strip()
if constraints is not None:
body = body.split("\n")
for i in constraints:
body[i] = _re.sub(' 1( |$)', ' 0\g<1>', body[i])
body = "\n".join(body)
if with_header:
s = "\n".join([begin, body])
else:
s = body
return s.strip()
|
schneiderfelipe/pyrrole | pyrrole/atoms.py | Atoms.to_string | python | def to_string(self, format="smi", dialect=None, with_header=False,
fragment_id=None, constraints=None):
r"""
Produce a string representation of the molecule.
This function wraps and extends the functionality of OpenBabel (which
is accessible through `to_pybel`). Many chemical formats can thus be
output (see the `pybel.outformats` variable for a list of available
output formats).
Parameters
----------
format : `str`, optional
Chemical file format of the returned string representation (see
examples below).
dialect : `str`, optional
Format dialect. This encompasses enhancements provided for some
subformats. If ``"standard"`` or `None`, the output provided by
OpenBabel is used with no or minimal modification. See notes below.
with_header : `bool`, optional
If `format` encompasses a header, allow it in the returned string.
This would be, for instance, the first two lines of data for
``format="xyz"`` (see examples below). This might not work with all
dialects and/or formats.
fragment_id : `str`, optional
Indentify molecular fragments (see examples below). This might not
work with all dialects and/or formats.
constraints : iterable object of `int`
Set cartesian constraints for selected atoms (see examples below).
This might not work with all dialects and/or formats.
Returns
-------
`str`
String representation of molecule in the specified format and/or
dialect.
Raises
------
KeyError
Raised if `dialect` value is currently not supported or if
`fragment_id` is given with a currently not supported `dialect`
value.
Notes
-----
Format dialects are subformats that support extended functionality.
Currently supported dialects are:
- for ``format="xyz"``:
- ``"ADF"``, ``"ORCA"``.
Examples
--------
>>> from pyrrole import atoms
>>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
By default, a SMILES string is returned:
>>> dioxygen.to_string()
'O=O\tdioxygen'
Cartesian coordinates can be produced with ``format="xyz"``, which is
equivalent to printing an `Atoms` instance:
>>> print(dioxygen.to_string("xyz"))
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Header lines are disabled by default (for ``format="xyz"``, for
example, the header stores the number of atoms in the molecule and a
comment or title line), but this can be reversed with
``with_header=True``:
>>> print(dioxygen.to_string("xyz", with_header=True))
2
dioxygen
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Coordinates for packages such as GAMESS and MOPAC are also supported:
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
>>> print(water_dimer.to_string("gamin"))
O 8.0 -1.6289300000 -0.0413800000 0.3713700000
H 1.0 -0.6980300000 -0.0916800000 0.0933700000
H 1.0 -2.0666300000 -0.7349800000 -0.1366300000
O 8.0 1.2145700000 0.0317200000 -0.2762300000
H 1.0 1.4492700000 0.9167200000 -0.5857300000
H 1.0 1.7297700000 -0.0803800000 0.5338700000
>>> print(water_dimer.to_string("mop"))
O -1.62893 1 -0.04138 1 0.37137 1
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 1 0.03172 1 -0.27623 1
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Constraining of cartesian coordinates works with MOPAC format:
>>> print(water_dimer.to_string("mop", constraints=(0, 3)))
O -1.62893 0 -0.04138 0 0.37137 0
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 0 0.03172 0 -0.27623 0
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Fragment identification is supported for ``"ADF"`` and ``"ORCA"``
dialects:
>>> print(water_dimer.to_string("xyz", dialect="ADF",
... fragment_id="dimer"))
O -1.62893 -0.04138 0.37137 f=dimer
H -0.69803 -0.09168 0.09337 f=dimer
H -2.06663 -0.73498 -0.13663 f=dimer
O 1.21457 0.03172 -0.27623 f=dimer
H 1.44927 0.91672 -0.58573 f=dimer
H 1.72977 -0.08038 0.53387 f=dimer
>>> print(water_dimer.to_string("xyz", dialect="ORCA",
... fragment_id=1))
O(1) -1.62893 -0.04138 0.37137
H(1) -0.69803 -0.09168 0.09337
H(1) -2.06663 -0.73498 -0.13663
O(1) 1.21457 0.03172 -0.27623
H(1) 1.44927 0.91672 -0.58573
H(1) 1.72977 -0.08038 0.53387
"""
s = self.to_pybel().write(format).strip()
if dialect is None:
dialect = "standard"
dialect = dialect.lower()
if format == "xyz":
natom, comment, body = s.split("\n", 2)
if dialect in {"adf", "orca", "standard"}:
if fragment_id is not None:
if dialect == "adf":
body = \
"\n".join(["{} f={}".format(line,
fragment_id)
for line in body.split("\n")])
elif dialect == "orca":
fragment_id = "({})".format(fragment_id)
body = \
"\n".join([line.replace(" " * len(fragment_id),
fragment_id, 1)
for line in body.split("\n")])
else:
raise KeyError("fragment_id currently not supported "
"with dialect '{}'".format(dialect))
else:
raise KeyError("dialect '{}' currently not "
"supported".format(dialect))
if with_header:
s = "\n".join([natom, comment, body])
else:
s = body
elif format == "gamin":
lines = s.split("\n")
begin = "\n".join([line.strip() for line in lines[:5]])
body = "\n".join([line.strip() for line in lines[5:-1]])
if with_header:
s = "\n".join([begin, body])
else:
s = body
elif format == "mop":
chunks = s.split("\n", 2)
begin = "\n".join([line.strip() for line in chunks[:2]])
body = chunks[2].strip()
if constraints is not None:
body = body.split("\n")
for i in constraints:
body[i] = _re.sub(' 1( |$)', ' 0\g<1>', body[i])
body = "\n".join(body)
if with_header:
s = "\n".join([begin, body])
else:
s = body
return s.strip() | r"""
Produce a string representation of the molecule.
This function wraps and extends the functionality of OpenBabel (which
is accessible through `to_pybel`). Many chemical formats can thus be
output (see the `pybel.outformats` variable for a list of available
output formats).
Parameters
----------
format : `str`, optional
Chemical file format of the returned string representation (see
examples below).
dialect : `str`, optional
Format dialect. This encompasses enhancements provided for some
subformats. If ``"standard"`` or `None`, the output provided by
OpenBabel is used with no or minimal modification. See notes below.
with_header : `bool`, optional
If `format` encompasses a header, allow it in the returned string.
This would be, for instance, the first two lines of data for
``format="xyz"`` (see examples below). This might not work with all
dialects and/or formats.
fragment_id : `str`, optional
Indentify molecular fragments (see examples below). This might not
work with all dialects and/or formats.
constraints : iterable object of `int`
Set cartesian constraints for selected atoms (see examples below).
This might not work with all dialects and/or formats.
Returns
-------
`str`
String representation of molecule in the specified format and/or
dialect.
Raises
------
KeyError
Raised if `dialect` value is currently not supported or if
`fragment_id` is given with a currently not supported `dialect`
value.
Notes
-----
Format dialects are subformats that support extended functionality.
Currently supported dialects are:
- for ``format="xyz"``:
- ``"ADF"``, ``"ORCA"``.
Examples
--------
>>> from pyrrole import atoms
>>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'atomnos': [8, 8],
... 'charge': 0,
... 'mult': 3,
... 'name': 'dioxygen'})
By default, a SMILES string is returned:
>>> dioxygen.to_string()
'O=O\tdioxygen'
Cartesian coordinates can be produced with ``format="xyz"``, which is
equivalent to printing an `Atoms` instance:
>>> print(dioxygen.to_string("xyz"))
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Header lines are disabled by default (for ``format="xyz"``, for
example, the header stores the number of atoms in the molecule and a
comment or title line), but this can be reversed with
``with_header=True``:
>>> print(dioxygen.to_string("xyz", with_header=True))
2
dioxygen
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
Coordinates for packages such as GAMESS and MOPAC are also supported:
>>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
>>> print(water_dimer.to_string("gamin"))
O 8.0 -1.6289300000 -0.0413800000 0.3713700000
H 1.0 -0.6980300000 -0.0916800000 0.0933700000
H 1.0 -2.0666300000 -0.7349800000 -0.1366300000
O 8.0 1.2145700000 0.0317200000 -0.2762300000
H 1.0 1.4492700000 0.9167200000 -0.5857300000
H 1.0 1.7297700000 -0.0803800000 0.5338700000
>>> print(water_dimer.to_string("mop"))
O -1.62893 1 -0.04138 1 0.37137 1
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 1 0.03172 1 -0.27623 1
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Constraining of cartesian coordinates works with MOPAC format:
>>> print(water_dimer.to_string("mop", constraints=(0, 3)))
O -1.62893 0 -0.04138 0 0.37137 0
H -0.69803 1 -0.09168 1 0.09337 1
H -2.06663 1 -0.73498 1 -0.13663 1
O 1.21457 0 0.03172 0 -0.27623 0
H 1.44927 1 0.91672 1 -0.58573 1
H 1.72977 1 -0.08038 1 0.53387 1
Fragment identification is supported for ``"ADF"`` and ``"ORCA"``
dialects:
>>> print(water_dimer.to_string("xyz", dialect="ADF",
... fragment_id="dimer"))
O -1.62893 -0.04138 0.37137 f=dimer
H -0.69803 -0.09168 0.09337 f=dimer
H -2.06663 -0.73498 -0.13663 f=dimer
O 1.21457 0.03172 -0.27623 f=dimer
H 1.44927 0.91672 -0.58573 f=dimer
H 1.72977 -0.08038 0.53387 f=dimer
>>> print(water_dimer.to_string("xyz", dialect="ORCA",
... fragment_id=1))
O(1) -1.62893 -0.04138 0.37137
H(1) -0.69803 -0.09168 0.09337
H(1) -2.06663 -0.73498 -0.13663
O(1) 1.21457 0.03172 -0.27623
H(1) 1.44927 0.91672 -0.58573
H(1) 1.72977 -0.08038 0.53387 | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L291-L488 | [
"def to_pybel(self):\n \"\"\"\n Produce a Pybel Molecule object.\n\n It is based on the capabilities of OpenBabel through Pybel. The present\n object must have at least `atomcoords`, `atomnos`, `charge` and `mult`\n defined.\n\n Returns\n -------\n `pybel.Molecule`\n\n Examples\n --------\n >>> from pyrrole.atoms import Atoms\n >>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],\n ... [0., 0., 1.21]],\n ... 'atomnos': [8, 8],\n ... 'charge': 0,\n ... 'mult': 3,\n ... 'name': 'dioxygen'})\n >>> mol = dioxygen.to_pybel()\n >>> mol.molwt\n 31.9988\n\n \"\"\"\n # TODO: This only exports last geometry by default.\n obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge,\n self.mult)\n\n title = self.name or \"\"\n if 'scfenergies' in self.attributes:\n title += \", scfenergy={} eV\".format(self.scfenergies[-1])\n obmol.SetTitle(title)\n\n # TODO: make a test for this function.\n return _pb.Molecule(obmol)\n"
] | class Atoms:
"""
Abstraction of fixed molecular structures as sequences of atoms.
Properties are stored in `Atoms.attributes`, which follow naming and unit
conventions of the `cclib project <https://cclib.github.io/>`_ as close as
possible (see `read_cclib` for producing `Atoms` out of cclib objects or
logfiles).
Parameters
----------
attributes : mapping or `pandas.Series`
A mapping containing data attributes whose keys follow the naming
conventions of the `cclib project <https://cclib.github.io/>`_ for
properties as close as possible. Any mapping of attributes that can be
converted to a `pandas.Series` object is accepted.
Attributes
----------
attribute : `pandas.Series`
Molecular properties. Keys follow the naming practice of the `cclib
project <https://cclib.github.io/>`_ for properties as close as
possible. The object is named during initialization after `Atoms.name`
(see notes below).
Notes
-----
This class is intended to be used directly in very simple cases only. For
complex situations, use factory functions such as `read_cclib`.
`Atoms.name` is set to `Atoms.jobfilename` if `Atoms.name` does not exist.
Examples
--------
An `Atoms` object can be instantiated by giving any mapping of
attributes that can be converted to a `pandas.Series` object:
>>> from pyrrole.atoms import Atoms
>>> dioxygen = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
Attributes can be accessed by their keys, as given during initialization:
>>> dioxygen.mult
3
>>> dioxygen.atomcoords[-1][1, 2]
1.21
Two `Atoms` objects are considered equal if their attributes match:
>>> dioxygen_copy = Atoms({
... 'atomcoords': [[0., 0., 0.],
... [0., 0., 1.21]],
... 'charge': 0,
... 'mult': 3,
... 'atomnos': [8, 8]})
>>> dioxygen == dioxygen_copy
True
>>> dioxygen is dioxygen_copy
False
If printed, `Atoms` objects produce coordinates in
`xyz format <https://en.wikipedia.org/wiki/XYZ_file_format>`_:
>>> from pyrrole.atoms import read_pybel
>>> print(read_pybel("data/pyrrolate/pyrrole.xyz", "pyrrole"))
C -1.11468 0.00000 0.33059
C -0.70848 0.00000 -0.97739
C 0.70848 0.00000 -0.97739
C 1.11468 0.00000 0.33059
N 0.00000 0.00000 1.11189
H -2.10267 0.00000 0.75908
H -1.35484 0.00000 -1.83956
H 1.35484 0.00000 -1.83956
H 2.10267 0.00000 0.75908
H 0.00000 0.00000 2.11476
>>> print(dioxygen)
O 0.00000 0.00000 0.00000
O 0.00000 0.00000 1.21000
"""
def __init__(self, attributes):
"""See the docstring for this class."""
# TODO: make idempotent (allow receiving Atoms objects).
self.attributes = _pd.Series(attributes)
# TODO: properly document used attributes.
if "name" in self.attributes:
# TODO: make a test for this if.
self.attributes.rename(self.name, inplace=True)
elif "jobfilename" in self.attributes:
self.attributes.rename(self.jobfilename, inplace=True)
if "atomcoords" in self.attributes:
self.attributes["atomcoords"] = _np.asanyarray(self.atomcoords)
if len(self.atomcoords.shape) < 3:
self.attributes["atomcoords"] = [self.atomcoords]
self.name = self.attributes.name
def __getattr__(self, value):
    """Wrap `Atoms.value` into `Atoms.attributes['value']`.

    Parameters
    ----------
    value : str
        Name of the attribute to look up in `self.attributes`.

    Returns
    -------
    object
        The stored attribute value.

    Raises
    ------
    AttributeError
        If `value` is not stored in `self.attributes`. Translating the
        underlying `KeyError` into `AttributeError` keeps Python's
        attribute protocol intact, so `hasattr`, `getattr` with a
        default, copying and pickling behave as expected.
    """
    if value == "attributes":
        # Guard against infinite recursion when `attributes` itself is
        # missing (e.g. during unpickling, before `__init__` ran).
        raise AttributeError(value)
    try:
        return self.attributes[value]
    except KeyError:
        raise AttributeError(value)
def __eq__(self, other):
    """Compare molecules in terms of attributes."""
    # Serialize both attribute sets and compare the JSON payloads.
    # Expensive, but a definitely correct equality check.
    mine = self.attributes.to_json()
    theirs = other.attributes.to_json()
    return mine == theirs
def __str__(self):
    """Build a string with Cartesian coordinates for this molecule."""
    # TODO: make a read_string that is able to read the results of this
    # function.
    # Delegates to `to_string` with the xyz format; judging from the
    # class docstring examples, the output carries no atom-count header
    # lines — confirm against `to_string`.
    return self.to_string('xyz')
def split(self, pattern=None):
    r"""
    Break molecule up into constituent fragments.
    By default (i.e., if `pattern` is `None`), each disconnected fragment
    is returned as a separate new `Atoms` object. This uses OpenBabel
    (through `OBMol.Separate`) and might not preserve atom order, depending
    on your version of the library.
    Parameters
    ----------
    pattern : iterable of iterable of `int`, optional
        Groupings of atoms into molecule fragments. Each element of
        `pattern` should be an iterable whose members are atom indices (see
        example below).
    Returns
    -------
    fragments : iterable of `Atoms`
    Examples
    --------
    >>> from pyrrole import atoms
    >>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")
    "Natural fragmentation" is the default behaviour, i.e. all disconnected
    fragments are returned:
    >>> for frag in water_dimer.split():
    ...     print("{}\n".format(frag))
    O -1.62893 -0.04138 0.37137
    H -0.69803 -0.09168 0.09337
    H -2.06663 -0.73498 -0.13663
    <BLANKLINE>
    O 1.21457 0.03172 -0.27623
    H 1.72977 -0.08038 0.53387
    H 1.44927 0.91672 -0.58573
    <BLANKLINE>
    Precise fragment grouping can be achieved by explicitly indicating
    which atoms belong to which fragments:
    >>> for frag in water_dimer.split([range(3), (5, 4), [3]]):
    ...     print("{}\n".format(frag))
    O -1.62893 -0.04138 0.37137
    H -0.69803 -0.09168 0.09337
    H -2.06663 -0.73498 -0.13663
    <BLANKLINE>
    H 1.72977 -0.08038 0.53387
    H 1.44927 0.91672 -0.58573
    <BLANKLINE>
    O 1.21457 0.03172 -0.27623
    <BLANKLINE>
    """
    molecule_pybel = self.to_pybel()
    if pattern is None:
        # Let OpenBabel itself find the disconnected components.
        fragments = [read_pybel(frag)
                     for frag in molecule_pybel.OBMol.Separate()]
    else:
        # Build one OBMol per group by copying the requested atoms over,
        # then wrap every fragment back into an `Atoms` object.
        fragments = []
        for group in pattern:
            fragment_obmol = _pb.ob.OBMol()
            for i in group:
                obatom = molecule_pybel.OBMol.GetAtomById(i)
                fragment_obmol.InsertAtom(obatom)
            fragments.append(fragment_obmol)
        fragments = [read_pybel(frag) for frag in fragments]
    return fragments
def to_series(self):
    """
    Produce a data record of attributes as a `pandas.Series` object.
    This method is useful for producing many data tables in pyrrole. See
    `ChemicalEquation.to_series` and `ChemicalSystem.to_dataframe` for
    more.
    Returns
    -------
    series : `pandas.Series`
        Data record of attributes. Note that this is the object's own
        underlying Series, not a copy: mutating it mutates the `Atoms`
        object as well.
    Examples
    --------
    >>> from pyrrole.atoms import Atoms
    >>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
    ...                                  [0., 0., 1.21]],
    ...                   'atomnos': [8, 8],
    ...                   'mult': 3,
    ...                   'name': 'dioxygen'})
    >>> dioxygen.to_series()
    atomcoords [[[0.0, 0.0, 0.0], [0.0, 0.0, 1.21]]]
    atomnos [8, 8]
    mult 3
    name dioxygen
    Name: dioxygen, dtype: object
    This method can be used to produce a copy of an `Atoms` object:
    >>> dioxygen_copy = Atoms(dioxygen.to_series())
    >>> dioxygen == dioxygen_copy
    True
    >>> dioxygen is dioxygen_copy
    False
    """
    # Returns the live attribute Series (no defensive copy).
    return self.attributes
def to_pybel(self):
    """
    Produce a Pybel Molecule object.
    It is based on the capabilities of OpenBabel through Pybel. The present
    object must have at least `atomcoords`, `atomnos`, `charge` and `mult`
    defined.
    Returns
    -------
    `pybel.Molecule`
    Examples
    --------
    >>> from pyrrole.atoms import Atoms
    >>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.],
    ...                                  [0., 0., 1.21]],
    ...                   'atomnos': [8, 8],
    ...                   'charge': 0,
    ...                   'mult': 3,
    ...                   'name': 'dioxygen'})
    >>> mol = dioxygen.to_pybel()
    >>> mol.molwt
    31.9988
    """
    # TODO: This only exports last geometry by default.
    obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge,
                           self.mult)
    # Use the molecule name as the OBMol title, appending the last SCF
    # energy when one is available.
    title = self.name or ""
    if 'scfenergies' in self.attributes:
        title += ", scfenergy={} eV".format(self.scfenergies[-1])
    obmol.SetTitle(title)
    # TODO: make a test for this function.
    return _pb.Molecule(obmol)
|
schneiderfelipe/pyrrole | pyrrole/drawing.py | _longest_common_subsequence | python | def _longest_common_subsequence(x, y):
m = len(x)
n = len(y)
# L[i, j] will contain the length of the longest common subsequence of
# x[0..i - 1] and y[0..j - 1].
L = _np.zeros((m + 1, n + 1), dtype=int)
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
continue
elif x[i - 1] == y[j - 1]:
L[i, j] = L[i - 1, j - 1] + 1
else:
L[i, j] = max(L[i - 1, j], L[i, j - 1])
ret = []
i, j = m, n
while i > 0 and j > 0:
# If current character in x and y are same, then current character is
# part of the longest common subsequence.
if x[i - 1] == y[j - 1]:
ret.append(x[i - 1])
i, j = i - 1, j - 1
# If not same, then find the larger of two and go in the direction of
# larger value.
elif L[i - 1, j] > L[i, j - 1]:
i -= 1
else:
j -= 1
return ret[::-1] | Return the longest common subsequence between two sequences.
Parameters
----------
x, y : sequence
Returns
-------
sequence
Longest common subsequence of x and y.
Examples
--------
>>> _longest_common_subsequence("AGGTAB", "GXTXAYB")
['G', 'T', 'A', 'B']
>>> _longest_common_subsequence(["A", "GA", "G", "T", "A", "B"],
... ["GA", "X", "T", "X", "A", "Y", "B"])
['GA', 'T', 'A', 'B'] | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/drawing.py#L24-L78 | null | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for drawing chemical diagrams."""
# TODO: revisit all examples in this module.
import os
import collections as _collections
import numpy as _np
import networkx as _nx
import matplotlib as _mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
_mpl.use('Agg')
import matplotlib.cbook as _cb # noqa
import matplotlib.pyplot as _plt # noqa
from matplotlib.colors import Colormap as _Colormap # noqa
from matplotlib.colors import colorConverter as _colorConverter # noqa
from matplotlib.collections import LineCollection as _LineCollection # noqa
def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
    """
    Position all nodes of graph stacked on top of each other.

    Every node gets ``x == 0``; its ``y`` is taken from the node attribute
    named by `height` (or zero when `height` is `None`). Dimensions beyond
    the second are filled with zeros.

    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        Node attribute holding the numerical height (default
        ``'freeenergy'``); `None` sets all heights to zero.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout (default is the
        origin).
    dim : `int`
        Dimension of layout.

    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> from pyrrole.drawing import tower_layout
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
    ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
    >>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
    ...            .to_digraph())
    >>> layout = tower_layout(digraph)
    >>> layout['AcOH(g)']
    array([ 0. , -228.56450866])
    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)
    count = len(graph)
    if count == 0:
        return {}
    if count == 1:
        return {_nx.utils.arbitrary_element(graph): center}
    extra_dims = max(0, dim - 2)
    if height is None:
        heights = _np.zeros(count)
    else:
        heights = _np.array([value for _, value in graph.nodes(data=height)])
    coords = _np.column_stack([_np.zeros((count, 1)), heights,
                               _np.zeros((count, extra_dims))])
    if scale is not None:
        coords = _nx.drawing.layout.rescale_layout(coords,
                                                   scale=scale) + center
    # TODO: make test
    return dict(zip(graph, coords))
def diagram_layout(graph, height='freeenergy', sources=None, targets=None,
                   pos=None, scale=None, center=None, dim=2):
    """
    Position nodes such that paths are highlighted, from left to right.
    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        The node attribute that holds the numerical value used for the node
        height. This defaults to ``'freeenergy'``. If `None`, all node heights
        are set to zero.
    sources : `list` of `str`
        All simple paths starting at members of `sources` are considered.
        Defaults to all nodes of graph.
    targets : `list` of `str`
        All simple paths ending at members of `targets` are considered.
        Defaults to all nodes of graph.
    pos : mapping, optional
        Initial positions for nodes as a mapping with node as keys and
        values as a coordinate `list` or `tuple`. If not specified (default),
        initial positions are computed with `tower_layout`.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is the
        origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are set to
        zero in the returned positions.
    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.
    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import diagram_layout
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> layout = diagram_layout(digraph)
    >>> layout['mCARB2']
    array([ 3. , -19.8])
    Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
    >>> layout = diagram_layout(digraph, scale=1)
    >>> layout['mTS1'][1] <= 1.
    True
    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)
    num_nodes = len(graph)
    if num_nodes == 0:
        return {}
    elif num_nodes == 1:
        return {_nx.utils.arbitrary_element(graph): center}
    if sources is None:
        sources = graph.nodes()
    if targets is None:
        targets = graph.nodes()
    # Enumerate every simple path between the chosen endpoints. NOTE(review):
    # with the all-nodes defaults this can be very expensive on dense graphs
    # (simple-path enumeration is exponential in the worst case).
    simple_paths = [path for source in set(sources) for target in set(targets)
                    for path in _nx.all_simple_paths(graph, source, target)]
    if pos is None:
        # Start from a vertical stack; x-coordinates are then pushed right.
        pos = tower_layout(graph, height=height, scale=None, center=center,
                           dim=dim)
    # A node's x-coordinate becomes its furthest position along any simple
    # path that passes through it, so paths read left to right.
    for path in simple_paths:
        for n, step in enumerate(path):
            if pos[step][0] < n:
                pos[step][0] = n
    if scale is not None:
        pos_arr = _np.array([pos[node] for node in graph])
        pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
                                                    scale=scale) + center
        pos = dict(zip(graph, pos_arr))
    # TODO: make test
    return pos
def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7,
                       node_color='k', style='solid', alpha=1.0, cmap=None,
                       vmin=None, vmax=None, ax=None, label=None):
    """
    Draw nodes of graph.

    Each node is drawn as a horizontal line at ``y = pos[node][1]``,
    spanning from ``x - node_size/2`` to ``x + node_size/2``, where
    ``x = pos[node][0]``.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and length-2 positions as values.
        Computed with `diagram_layout` if not specified.
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Width of node lines (default is ``.7``). An array must match
        `nodelist` in length.
    node_color : color `str`, or array of `float`
        Single color format `str` (default ``'k'``), a sequence of colors
        (one per node), or numeric values mapped through `cmap` with
        `vmin`/`vmax`. See `matplotlib.axes.Axes.hlines` for details.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Line style (default is ``'solid'``).
    alpha : `float` or array of `float`, optional
        Node transparency (default ``1.0``). An array is applied
        color-wise via ``networkx.drawing.apply_alpha``.
    cmap : Matplotlib colormap, optional
        Colormap for mapping numeric `node_color` intensities.
    vmin, vmax : `float`, optional
        Node colormap scaling limits.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.

    Returns
    -------
    `matplotlib.collections.LineCollection` or `None`
        `LineCollection` of the nodes, or `None` when there is nothing
        to draw.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.
    """
    if ax is None:
        ax = _plt.gca()
    if nodelist is None:
        nodelist = list(graph.nodes())
    if not nodelist:  # empty nodelist, no drawing
        return None
    if pos is None:
        pos = diagram_layout(graph)
    try:
        xy = _np.asarray([pos[v] for v in nodelist])
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')
    # `collections.Iterable` was removed in Python 3.10; the ABC lives in
    # `collections.abc`.
    if isinstance(alpha, _collections.abc.Iterable):
        node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist, cmap,
                                             vmin, vmax)
        # Alpha is now baked into the per-node colors.
        alpha = None
    node_collection = ax.hlines(xy[:, 1],
                                xy[:, 0] - node_size/2.,
                                xy[:, 0] + node_size/2.,
                                colors=node_color,
                                linestyles=style,
                                label=label,
                                alpha=alpha,
                                cmap=cmap)
    node_collection.set_zorder(2)  # nodes go in front of edges
    return node_collection
def draw_diagram_edges(graph, pos=None, edgelist=None, width=1.0,
                       edge_color='k', style='dashed', alpha=1.0,
                       edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None,
                       label=None, nodelist=None, node_size=.7):
    """
    Draw edges of graph.

    Each edge is drawn as a line from the right end of the source node
    level to the left end of the target node level.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and length-2 positions as values.
        Computed with `diagram_layout` if not specified.
    edgelist : collection of edge `tuple`
        Draw only specified edges (default is ``graph.edges()``).
    width : `float`, or array of `float`
        Line width of edges (default is ``1.0``).
    edge_color : color `str`, or array of `float`
        Single color format `str` (default ``'k'``), a sequence of colors
        (one per edge), or numeric values mapped through `edge_cmap` with
        `edge_vmin`/`edge_vmax`.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Edge line style (default is ``'dashed'``).
    alpha : `float`, optional
        The edge transparency (default is ``1.0``).
    edge_cmap : Matplotlib colormap, optional
        Colormap for mapping numeric edge intensities.
    edge_vmin, edge_vmax : `float`, optional
        Edge colormap scaling limits.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Width of the node lines the edges attach to (default is ``.7``).

    Returns
    -------
    `matplotlib.collections.LineCollection` or `None`
        `LineCollection` of the edges, or `None` when there is nothing
        to draw.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.
    ValueError
        Raised if `edge_color` contains something other than color names
        (one or a list of one per edge) or numbers.
    """
    if ax is None:
        ax = _plt.gca()
    if edgelist is None:
        edgelist = list(graph.edges())
    if not edgelist:  # no edges!
        return None
    if nodelist is None:
        nodelist = list(graph.nodes())
    if pos is None:
        pos = diagram_layout(graph)
    try:
        # Set edge positions: from the right tip of the source level to
        # the left tip of the target level.
        edge_pos = _np.asarray([(pos[e[0]] + node_size/2.,
                                 pos[e[1]] - node_size/2.) for e in edgelist])
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')
    # `matplotlib.cbook.iterable` was removed in matplotlib 3.3; use
    # `numpy.iterable` instead.
    if not _np.iterable(width):
        lw = (width,)
    else:
        lw = width
    if not isinstance(edge_color, str) \
            and _np.iterable(edge_color) \
            and len(edge_color) == len(edge_pos):
        # `np.alltrue` was removed in NumPy 2.0; `np.all` is equivalent.
        if _np.all([isinstance(c, str) for c in edge_color]):
            # (should check ALL elements)
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([_colorConverter.to_rgba(c, alpha)
                                 for c in edge_color])
        elif _np.all([not isinstance(c, str) for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if _np.all([_np.iterable(c) and len(c) in (3, 4)
                        for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must contain color names or numbers')
    else:
        if isinstance(edge_color, str) or len(edge_color) == 1:
            edge_colors = (_colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a color or list of one color '
                             ' per edge')
    edge_collection = _LineCollection(edge_pos,
                                      colors=edge_colors,
                                      linewidths=lw,
                                      antialiaseds=(1,),
                                      linestyle=style,
                                      transOffset=ax.transData)
    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)
    # `matplotlib.cbook.is_numlike` was removed in matplotlib 3.1; a plain
    # numeric isinstance check covers the float/int case it was used for.
    if isinstance(alpha, (int, float)):
        edge_collection.set_alpha(alpha)
    if edge_colors is None:
        # Numeric colors: map them through the colormap.
        if edge_cmap is not None:
            assert isinstance(edge_cmap, _Colormap)
        edge_collection.set_array(_np.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()
    ax.autoscale_view()
    return edge_collection
def draw_diagram_labels(graph, pos=None, labels=None, font_size=12,
                        font_color='k', font_family='sans-serif',
                        font_weight='normal', alpha=1.0, bbox=None, ax=None,
                        offset=None, **kwds):
    """
    Draw node labels of graph.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and length-2 positions as values.
        Computed with `diagram_layout` if not specified.
    labels : mapping, optional
        Node labels in a mapping keyed by node of text labels. Defaults
        to the node names themselves.
    font_size : `int`, optional
        Font size for text labels (default is ``12``).
    font_color : `str`, optional
        Font color `str` (default is ``'k'``, i.e., black).
    font_family : `str`, optional
        Font family (default is ``'sans-serif'``).
    font_weight : `str`, optional
        Font weight (default is ``'normal'``).
    alpha : `float`, optional
        The text transparency (default is ``1.0``).
    bbox : mapping, optional
        Matplotlib bbox properties, forwarded to
        `matplotlib.axes.Axes.text`.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    offset : array-like or `str`, optional
        Label positions are summed to this before drawing. Defaults to
        the zero vector. If `str`, can be either ``'above'`` (equivalent
        to ``(0, 1.5)``) or ``'below'`` (equivalent to ``(0, -1.5)``).

    Returns
    -------
    mapping
        Mapping of `matplotlib.text.Text` labels keyed on the nodes.

    Raises
    ------
    ValueError
        If `offset` is a string other than ``'above'`` or ``'below'``.
    """
    if ax is None:
        ax = _plt.gca()
    if labels is None:
        labels = dict((n, n) for n in graph.nodes())
    if pos is None:
        pos = diagram_layout(graph)
    # Resolve the offset first. Testing `isinstance` before comparing with
    # string literals avoids elementwise comparison when `offset` is a
    # numpy array, and lets us reject typos early instead of failing
    # obscurely in the arithmetic below.
    if offset is None:
        offset = _np.array([0., 0.])
    elif isinstance(offset, str):
        if offset == "above":
            offset = _np.array([0., 1.5])
        elif offset == "below":
            offset = _np.array([0., -1.5])
        else:
            raise ValueError("offset must be 'above' or 'below', "
                             "got {!r}".format(offset))
    # set optional alignment
    horizontalalignment = kwds.get('horizontalalignment', 'center')
    verticalalignment = kwds.get('verticalalignment', 'center')
    text_items = {}  # there is no text collection so we'll fake one
    for n, label in labels.items():
        (x, y) = _np.asanyarray(pos[n]) + _np.asanyarray(offset)
        if not isinstance(label, str):
            label = str(label)  # this makes "1" and 1 labeled the same
        t = ax.text(x, y, label,
                    size=font_size,
                    color=font_color,
                    family=font_family,
                    weight=font_weight,
                    alpha=alpha,
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    transform=ax.transData,
                    bbox=bbox,
                    clip_on=True)
        text_items[n] = t
    return text_items
def draw_diagram(graph, pos=None, with_labels=True, offset=None, **kwds):
    """
    Draw a diagram for graph using Matplotlib.

    Draw graph as a simple energy diagram: node levels, connecting edges
    and (optionally) node labels. Further keywords are forwarded to
    `draw_diagram_nodes` and `draw_diagram_edges`; when `pos` is `None`
    they also reach `diagram_layout`, and when `with_labels` is `True`,
    `draw_diagram_labels` as well.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and length-2 positions as values.
        Computed with `diagram_layout` if not specified.
    with_labels : `bool`, optional
        Set to `True` (default) to draw labels on the nodes.
    offset : array-like or `str`, optional
        Label positions are summed to this before drawing. Defaults to
        ``'below'``. See `draw_diagram_labels` for more.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mTS1", "freeenergy": 28.5},
    ...      {"name": "mCARB1", "freeenergy": -9.7}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mTS1 -> mCARB1"], data)
    >>> draw_diagram(system.to_digraph())
    """
    if pos is None:
        pos = diagram_layout(graph, **kwds)  # default to diagram layout
    draw_diagram_nodes(graph, pos, **kwds)
    draw_diagram_edges(graph, pos, **kwds)
    if with_labels:
        # TODO: This changes the default behaviour of draw_diagram_labels.
        label_offset = "below" if offset is None else offset
        draw_diagram_labels(graph, pos, offset=label_offset, **kwds)
    _plt.draw_if_interactive()
|
schneiderfelipe/pyrrole | pyrrole/drawing.py | tower_layout | python | def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
# TODO: private function of packages should not be used.
graph, center = _nx.drawing.layout._process_params(graph, center, dim)
num_nodes = len(graph)
if num_nodes == 0:
return {}
elif num_nodes == 1:
return {_nx.utils.arbitrary_element(graph): center}
paddims = max(0, (dim - 2))
if height is None:
y = _np.zeros(len(graph))
else:
y = _np.array([data for node, data in graph.nodes(data=height)])
pos_arr = _np.column_stack([_np.zeros((num_nodes, 1)), y,
_np.zeros((num_nodes, paddims))])
if scale is not None:
pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
scale=scale) + center
pos = dict(zip(graph, pos_arr))
# TODO: make test
return pos | Position all nodes of graph stacked on top of each other.
Parameters
----------
graph : `networkx.Graph` or `list` of nodes
A position will be assigned to every node in graph.
height : `str` or `None`, optional
The node attribute that holds the numerical value used for the node
height. This defaults to ``'freeenergy'``. If `None`, all node heights
are set to zero.
scale : number, optional
Scale factor for positions.
center : array-like, optional
Coordinate pair around which to center the layout. Default is the
origin.
dim : `int`
Dimension of layout. If `dim` > 2, the remaining dimensions are set to
zero in the returned positions.
Returns
-------
pos : mapping
A mapping of positions keyed by node.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> from pyrrole.drawing import tower_layout
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
>>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
... .to_digraph())
>>> layout = tower_layout(digraph)
>>> layout['AcOH(g)']
array([ 0. , -228.56450866])
Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
>>> layout = tower_layout(digraph, scale=1)
>>> layout['AcOH(g)'][1] <= 1.
True | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/drawing.py#L81-L152 | null | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for drawing chemical diagrams."""
# TODO: revisit all examples in this module.
import os
import collections as _collections
import numpy as _np
import networkx as _nx
import matplotlib as _mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
_mpl.use('Agg')
import matplotlib.cbook as _cb # noqa
import matplotlib.pyplot as _plt # noqa
from matplotlib.colors import Colormap as _Colormap # noqa
from matplotlib.colors import colorConverter as _colorConverter # noqa
from matplotlib.collections import LineCollection as _LineCollection # noqa
def _longest_common_subsequence(x, y):
    """
    Return the longest common subsequence between two sequences.
    Parameters
    ----------
    x, y : sequence
    Returns
    -------
    sequence
        Longest common subsequence of x and y.
    Examples
    --------
    >>> _longest_common_subsequence("AGGTAB", "GXTXAYB")
    ['G', 'T', 'A', 'B']
    >>> _longest_common_subsequence(["A", "GA", "G", "T", "A", "B"],
    ...                             ["GA", "X", "T", "X", "A", "Y", "B"])
    ['GA', 'T', 'A', 'B']
    """
    m, n = len(x), len(y)
    # table[i][j] holds the length of the longest common subsequence of
    # x[0..i - 1] and y[0..j - 1].
    table = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    # Walk back from the bottom-right corner, collecting matching items.
    result = []
    i, j = m, n
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            # Current items match, so they belong to the subsequence.
            result.append(x[i - 1])
            i, j = i - 1, j - 1
        elif table[i - 1][j] > table[i][j - 1]:
            # Otherwise follow the direction of the larger partial result.
            i -= 1
        else:
            j -= 1
    result.reverse()
    return result
def diagram_layout(graph, height='freeenergy', sources=None, targets=None,
                   pos=None, scale=None, center=None, dim=2):
    """
    Position nodes such that paths are highlighted, from left to right.
    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        The node attribute that holds the numerical value used for the node
        height. This defaults to ``'freeenergy'``. If `None`, all node heights
        are set to zero.
    sources : `list` of `str`
        All simple paths starting at members of `sources` are considered.
        Defaults to all nodes of graph.
    targets : `list` of `str`
        All simple paths ending at members of `targets` are considered.
        Defaults to all nodes of graph.
    pos : mapping, optional
        Initial positions for nodes as a mapping with node as keys and
        values as a coordinate `list` or `tuple`. If not specified (default),
        initial positions are computed with `tower_layout`.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is the
        origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are set to
        zero in the returned positions.
    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.
    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import diagram_layout
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> layout = diagram_layout(digraph)
    >>> layout['mCARB2']
    array([ 3. , -19.8])
    Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
    >>> layout = diagram_layout(digraph, scale=1)
    >>> layout['mTS1'][1] <= 1.
    True
    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)
    num_nodes = len(graph)
    if num_nodes == 0:
        return {}
    elif num_nodes == 1:
        return {_nx.utils.arbitrary_element(graph): center}
    if sources is None:
        sources = graph.nodes()
    if targets is None:
        targets = graph.nodes()
    # Enumerate every simple path between the chosen endpoints. NOTE(review):
    # with the all-nodes defaults this can be very expensive on dense graphs
    # (simple-path enumeration is exponential in the worst case).
    simple_paths = [path for source in set(sources) for target in set(targets)
                    for path in _nx.all_simple_paths(graph, source, target)]
    if pos is None:
        # Start from a vertical stack; x-coordinates are then pushed right.
        pos = tower_layout(graph, height=height, scale=None, center=center,
                           dim=dim)
    # A node's x-coordinate becomes its furthest position along any simple
    # path that passes through it, so paths read left to right.
    for path in simple_paths:
        for n, step in enumerate(path):
            if pos[step][0] < n:
                pos[step][0] = n
    if scale is not None:
        pos_arr = _np.array([pos[node] for node in graph])
        pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
                                                    scale=scale) + center
        pos = dict(zip(graph, pos_arr))
    # TODO: make test
    return pos
def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7,
                       node_color='k', style='solid', alpha=1.0, cmap=None,
                       vmin=None, vmax=None, ax=None, label=None):
    """
    Draw nodes of graph.

    This draws only the nodes of graph as horizontal lines at each
    ``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where
    ``x = pos[0]``.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default), a diagram layout
        positioning will be computed. See `networkx.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Size of nodes (default is ``.7``). If an array is specified it must be
        the same length as nodelist.
    node_color : color `str`, or array of `float`
        Node color. Can be a single color format `str` (default is ``'k'``), or
        a sequence of colors with the same length as nodelist. If numeric
        values are specified they will be mapped to colors using the `cmap` and
        `vmin`, `vmax` parameters. See `matplotlib.hlines` for more details.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Edge line style (default is ``'solid'``). See `matplotlib.hlines` for
        more details.
    alpha : `float` or array of `float`, optional
        The node transparency. This can be a single alpha value (default is
        ``1.0``), in which case it will be applied to all the nodes of color.
        Otherwise, if it is an array, the elements of alpha will be applied to
        the colors in order (cycling through alpha multiple times if
        necessary).
    cmap : Matplotlib colormap, optional
        Colormap name or Colormap instance for mapping intensities of nodes.
    vmin : `float`, optional
        Minimum for node colormap scaling.
    vmax : `float`, optional
        Maximum for node colormap scaling.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.

    Returns
    -------
    `matplotlib.collections.LineCollection`
        `LineCollection` of the nodes, or `None` when there is nothing to
        draw.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram_nodes
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> nodes = draw_diagram_nodes(digraph)

    """
    # ``collections.Iterable`` was removed in Python 3.10; the ABC has lived
    # in ``collections.abc`` since Python 3.3, so import it from there.
    from collections.abc import Iterable

    if ax is None:
        ax = _plt.gca()
    if nodelist is None:
        nodelist = list(graph.nodes())
    if not nodelist:  # empty nodelist, no drawing
        return None
    if pos is None:
        pos = diagram_layout(graph)

    try:
        xy = _np.asarray([pos[v] for v in nodelist])
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')

    if isinstance(alpha, Iterable):
        # Per-node alphas are folded into the colors themselves, then the
        # scalar alpha is disabled so it does not override them.
        node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist, cmap,
                                             vmin, vmax)
        alpha = None
    # Each node becomes a horizontal segment of length ``node_size`` centered
    # horizontally at its position.
    node_collection = ax.hlines(xy[:, 1],
                                xy[:, 0] - node_size/2.,
                                xy[:, 0] + node_size/2.,
                                colors=node_color,
                                linestyles=style,
                                label=label,
                                cmap=cmap)
    node_collection.set_zorder(2)  # nodes go in front of edges
    return node_collection
def draw_diagram_edges(graph, pos=None, edgelist=None, width=1.0,
                       edge_color='k', style='dashed', alpha=1.0,
                       edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None,
                       label=None, nodelist=None, node_size=.7):
    """
    Draw edges of graph.

    This draws only the edges of a graph.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default), a diagram layout
        positioning will be computed. See `networkx.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    edgelist : collection of edge `tuple`
        Draw only specified edges (default is ``graph.edges()``).
    width : `float`, or array of `float`
        Line width of edges (default is ``1.0``).
    edge_color : color `str`, or array of `float`
        Edge color. Can be a single color format `str` (default is ``'k'``),
        or a sequence of colors with the same length as edgelist. If numeric
        values are specified they will be mapped to colors using the
        `edge_cmap` and `edge_vmin`, `edge_vmax` parameters.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Edge line style (default is ``'dashed'``). See `matplotlib.hlines` for
        more details.
    alpha : `float`, optional
        The edge transparency (default is ``1.0``).
    edge_cmap : Matplotlib colormap, optional
        Colormap for mapping intensities of edges.
    edge_vmin : `float`, optional
        Minimum for edge colormap scaling.
    edge_vmax : `float`, optional
        Maximum for edge colormap scaling.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Size of nodes (default is ``.7``). If an array is specified it must be
        the same length as nodelist.

    Returns
    -------
    `matplotlib.collections.LineCollection`
        `LineCollection` of the edges, or `None` when there is nothing to
        draw.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.
    ValueError
        Raised if `edge_color` contains something other than color names (one
        or a list of one per edge) or numbers.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram_edges
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> edges = draw_diagram_edges(digraph)

    """
    # ``matplotlib.cbook.iterable`` and ``matplotlib.cbook.is_numlike`` were
    # removed in Matplotlib 3.1; use ``numpy.iterable`` and ``numbers``
    # instead.
    import numbers

    if ax is None:
        ax = _plt.gca()
    if edgelist is None:
        edgelist = list(graph.edges())
    if not edgelist:  # no edges!
        return None
    if nodelist is None:
        nodelist = list(graph.nodes())
    if pos is None:
        pos = diagram_layout(graph)

    try:
        # Each edge starts at the right end of the source node's segment and
        # finishes at the left end of the target node's segment.
        edge_pos = _np.asarray([(pos[e[0]] + node_size/2.,
                                 pos[e[1]] - node_size/2.) for e in edgelist])
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')

    if not _np.iterable(width):
        lw = (width,)
    else:
        lw = width

    # ``numpy.alltrue`` was removed in NumPy 2.0; ``numpy.all`` is the
    # canonical spelling.
    if not isinstance(edge_color, str) \
            and _np.iterable(edge_color) \
            and len(edge_color) == len(edge_pos):
        if _np.all([isinstance(c, str) for c in edge_color]):
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([_colorConverter.to_rgba(c, alpha)
                                 for c in edge_color])
        elif _np.all([not isinstance(c, str) for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if _np.all([_np.iterable(c) and len(c) in (3, 4)
                        for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must contain color names or numbers')
    else:
        if isinstance(edge_color, str) or len(edge_color) == 1:
            edge_colors = (_colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a color or list of one color '
                             ' per edge')

    # NOTE: ``transOffset`` is only meaningful together with ``offsets``
    # (unused here); passing it alone is deprecated in Matplotlib >= 3.6, so
    # it is intentionally omitted.
    edge_collection = _LineCollection(edge_pos,
                                      colors=edge_colors,
                                      linewidths=lw,
                                      antialiaseds=(1,),
                                      linestyle=style)
    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)

    if isinstance(alpha, numbers.Number):
        edge_collection.set_alpha(alpha)
    if edge_colors is None:
        # Numeric colors are mapped through the (optional) colormap.
        if edge_cmap is not None:
            assert(isinstance(edge_cmap, _Colormap))
        edge_collection.set_array(_np.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()
    ax.autoscale_view()
    return edge_collection
def draw_diagram_labels(graph, pos=None, labels=None, font_size=12,
                        font_color='k', font_family='sans-serif',
                        font_weight='normal', alpha=1.0, bbox=None, ax=None,
                        offset=None, **kwds):
    """
    Draw node labels of graph.

    This draws only the node labels of a graph.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default), a diagram layout
        positioning will be computed. See `networkx.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    labels : mapping, optional
        Node labels in a mapping keyed by node of text labels.
    font_size : `int`, optional
        Font size for text labels (default is ``12``).
    font_color : `str`, optional
        Font color `str` (default is ``'k'``, i.e., black).
    font_family : `str`, optional
        Font family (default is ``'sans-serif'``).
    font_weight : `str`, optional
        Font weight (default is ``'normal'``).
    alpha : `float`, optional
        The text transparency (default is ``1.0``).
    bbox : mapping, optional
        Properties for the box drawn around each label, forwarded to
        `matplotlib.axes.Axes.text` as its ``bbox`` argument.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    offset : array-like or `str`, optional
        Label positions are summed to this before drawing. Defaults to zero
        vector. If `str`, can be either ``'above'`` (equivalent to
        ``(0, 1.5)``) or ``'below'`` (equivalent to ``(0, -1.5)``).

    Returns
    -------
    mapping
        Mapping of labels keyed on the nodes.

    Raises
    ------
    ValueError
        Raised if `offset` is a string other than ``'above'`` or ``'below'``.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram_labels
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> edges = draw_diagram_labels(digraph, font_color='blue',
    ...                             offset="below")
    >>> labels = {k: "{:g}".format(v)
    ...           for k, v in digraph.nodes(data='freeenergy')}
    >>> edges = draw_diagram_labels(digraph, labels=labels,
    ...                             offset="above")

    """
    if ax is None:
        ax = _plt.gca()
    if labels is None:
        labels = dict((n, n) for n in graph.nodes())
    if pos is None:
        pos = diagram_layout(graph)

    # Resolve the label offset. Comparing a user-supplied array against a
    # string (as in ``offset == "above"``) is fragile, so the type is checked
    # first; unknown strings now fail loudly instead of surfacing later as an
    # obscure TypeError during position arithmetic.
    if offset is None:
        offset = _np.array([0., 0.])
    elif isinstance(offset, str):
        if offset == "above":
            offset = _np.array([0., 1.5])
        elif offset == "below":
            offset = _np.array([0., -1.5])
        else:
            raise ValueError("offset must be 'above', 'below' or array-like, "
                             "got {!r}".format(offset))

    # set optional alignment
    horizontalalignment = kwds.get('horizontalalignment', 'center')
    verticalalignment = kwds.get('verticalalignment', 'center')

    text_items = {}  # there is no text collection so we'll fake one
    for n, label in labels.items():
        (x, y) = _np.asanyarray(pos[n]) + _np.asanyarray(offset)
        if not isinstance(label, str):
            label = str(label)  # this makes "1" and 1 labeled the same
        t = ax.text(x, y, label,
                    size=font_size,
                    color=font_color,
                    family=font_family,
                    weight=font_weight,
                    alpha=alpha,
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    transform=ax.transData,
                    bbox=bbox,
                    clip_on=True)
        text_items[n] = t
    return text_items
def draw_diagram(graph, pos=None, with_labels=True, offset=None, **kwds):
    """
    Draw a diagram for graph using Matplotlib.

    Draw graph as a simple energy diagram with Matplotlib with options for node
    positions, labeling, titles, and many other drawing features. See examples
    below.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default) a diagram layout
        positioning will be computed. See `networkx.drawing.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    with_labels : `bool`, optional
        Set to `True` (default) to draw labels on the nodes.
    offset : array-like or `str`, optional
        Label positions are summed to this before drawing. Defaults to
        ``'below'``. See `draw_diagram_labels` for more.

    Raises
    ------
    ValueError
        Raised if an unknown keyword argument is received.

    Notes
    -----
    Further keywords are routed to `draw_diagram_nodes`, `draw_diagram_edges`
    and (if `with_labels` is `True`) `draw_diagram_labels`, each receiving
    only the arguments it accepts. If `pos` is `None`, layout keywords are
    likewise forwarded to `diagram_layout`.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram, draw_diagram_labels
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> draw_diagram(digraph)
    >>> labels = {k: "{:g}".format(v)
    ...           for k, v in digraph.nodes(data='freeenergy')}
    >>> edges = draw_diagram_labels(digraph, labels=labels,
    ...                             offset="above")

    """
    # Forwarding **kwds wholesale would raise TypeError in whichever helper
    # does not accept a given keyword, so arguments are routed per callee
    # (same strategy as ``networkx.draw_networkx``).
    valid_layout_kwds = {'height', 'sources', 'targets', 'scale', 'center',
                         'dim'}
    valid_node_kwds = {'nodelist', 'node_size', 'node_color', 'style',
                       'alpha', 'cmap', 'vmin', 'vmax', 'ax', 'label'}
    valid_edge_kwds = {'edgelist', 'width', 'edge_color', 'style', 'alpha',
                       'edge_cmap', 'edge_vmin', 'edge_vmax', 'ax', 'label',
                       'nodelist', 'node_size'}
    valid_label_kwds = {'labels', 'font_size', 'font_color', 'font_family',
                        'font_weight', 'alpha', 'bbox', 'ax',
                        'horizontalalignment', 'verticalalignment'}

    valid_kwds = (valid_layout_kwds | valid_node_kwds | valid_edge_kwds
                  | valid_label_kwds)
    invalid_kwds = set(kwds) - valid_kwds
    if invalid_kwds:
        raise ValueError("received invalid argument(s): "
                         "{}".format(', '.join(sorted(invalid_kwds))))

    if pos is None:
        layout_kwds = {k: v for k, v in kwds.items()
                       if k in valid_layout_kwds}
        pos = diagram_layout(graph, **layout_kwds)  # default to diagram layout

    node_kwds = {k: v for k, v in kwds.items() if k in valid_node_kwds}
    edge_kwds = {k: v for k, v in kwds.items() if k in valid_edge_kwds}
    node_collection = draw_diagram_nodes(graph, pos, **node_kwds)  # noqa
    edge_collection = draw_diagram_edges(graph, pos, **edge_kwds)  # noqa

    if with_labels:
        if offset is None:
            # TODO: This changes the default behaviour of
            # draw_diagram_labels.
            offset = "below"
        label_kwds = {k: v for k, v in kwds.items() if k in valid_label_kwds}
        draw_diagram_labels(graph, pos, offset=offset, **label_kwds)
    _plt.draw_if_interactive()
|
schneiderfelipe/pyrrole | pyrrole/drawing.py | diagram_layout | python | def diagram_layout(graph, height='freeenergy', sources=None, targets=None,
pos=None, scale=None, center=None, dim=2):
# TODO: private function of packages should not be used.
graph, center = _nx.drawing.layout._process_params(graph, center, dim)
num_nodes = len(graph)
if num_nodes == 0:
return {}
elif num_nodes == 1:
return {_nx.utils.arbitrary_element(graph): center}
if sources is None:
sources = graph.nodes()
if targets is None:
targets = graph.nodes()
simple_paths = [path for source in set(sources) for target in set(targets)
for path in _nx.all_simple_paths(graph, source, target)]
if pos is None:
pos = tower_layout(graph, height=height, scale=None, center=center,
dim=dim)
for path in simple_paths:
for n, step in enumerate(path):
if pos[step][0] < n:
pos[step][0] = n
if scale is not None:
pos_arr = _np.array([pos[node] for node in graph])
pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
scale=scale) + center
pos = dict(zip(graph, pos_arr))
# TODO: make test
return pos | Position nodes such that paths are highlighted, from left to right.
Parameters
----------
graph : `networkx.Graph` or `list` of nodes
A position will be assigned to every node in graph.
height : `str` or `None`, optional
The node attribute that holds the numerical value used for the node
height. This defaults to ``'freeenergy'``. If `None`, all node heights
are set to zero.
sources : `list` of `str`
All simple paths starting at members of `sources` are considered.
Defaults to all nodes of graph.
targets : `list` of `str`
All simple paths ending at members of `targets` are considered.
Defaults to all nodes of graph.
pos : mapping, optional
Initial positions for nodes as a mapping with node as keys and
values as a coordinate `list` or `tuple`. If not specified (default),
initial positions are computed with `tower_layout`.
scale : number, optional
Scale factor for positions.
center : array-like, optional
Coordinate pair around which to center the layout. Default is the
origin.
dim : `int`
Dimension of layout. If `dim` > 2, the remaining dimensions are set to
zero in the returned positions.
Returns
-------
pos : mapping
A mapping of positions keyed by node.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import diagram_layout
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> layout = diagram_layout(digraph)
>>> layout['mCARB2']
array([ 3. , -19.8])
Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
>>> layout = diagram_layout(digraph, scale=1)
>>> layout['mTS1'][1] <= 1.
True | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/drawing.py#L155-L254 | [
"def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):\n \"\"\"\n Position all nodes of graph stacked on top of each other.\n\n Parameters\n ----------\n graph : `networkx.Graph` or `list` of nodes\n A position will be assigned to every node in graph.\n height : `str` or `None`, optional\n The node attribute that holds the numerical value used for the node\n height. This defaults to ``'freeenergy'``. If `None`, all node heights\n are set to zero.\n scale : number, optional\n Scale factor for positions.\n center : array-like, optional\n Coordinate pair around which to center the layout. Default is the\n origin.\n dim : `int`\n Dimension of layout. If `dim` > 2, the remaining dimensions are set to\n zero in the returned positions.\n\n Returns\n -------\n pos : mapping\n A mapping of positions keyed by node.\n\n Examples\n --------\n >>> from pyrrole import ChemicalSystem\n >>> from pyrrole.atoms import create_data, read_cclib\n >>> from pyrrole.drawing import tower_layout\n >>> data = create_data(\n ... read_cclib(\"data/acetate/acetic_acid.out\", \"AcOH(g)\"),\n ... read_cclib(\"data/acetate/acetic_acid@water.out\", \"AcOH(aq)\"))\n >>> digraph = (ChemicalSystem(\"AcOH(g) <=> AcOH(aq)\", data)\n ... .to_digraph())\n >>> layout = tower_layout(digraph)\n >>> layout['AcOH(g)']\n array([ 0. 
, -228.56450866])\n\n Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:\n\n >>> layout = tower_layout(digraph, scale=1)\n >>> layout['AcOH(g)'][1] <= 1.\n True\n\n \"\"\"\n # TODO: private function of packages should not be used.\n graph, center = _nx.drawing.layout._process_params(graph, center, dim)\n\n num_nodes = len(graph)\n if num_nodes == 0:\n return {}\n elif num_nodes == 1:\n return {_nx.utils.arbitrary_element(graph): center}\n\n paddims = max(0, (dim - 2))\n\n if height is None:\n y = _np.zeros(len(graph))\n else:\n y = _np.array([data for node, data in graph.nodes(data=height)])\n pos_arr = _np.column_stack([_np.zeros((num_nodes, 1)), y,\n _np.zeros((num_nodes, paddims))])\n\n if scale is not None:\n pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,\n scale=scale) + center\n pos = dict(zip(graph, pos_arr))\n\n # TODO: make test\n return pos\n"
] | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for drawing chemical diagrams."""
# TODO: revisit all examples in this module.
import os
import collections as _collections
import numpy as _np
import networkx as _nx
import matplotlib as _mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
_mpl.use('Agg')
import matplotlib.cbook as _cb # noqa
import matplotlib.pyplot as _plt # noqa
from matplotlib.colors import Colormap as _Colormap # noqa
from matplotlib.colors import colorConverter as _colorConverter # noqa
from matplotlib.collections import LineCollection as _LineCollection # noqa
def _longest_common_subsequence(x, y):
"""
Return the longest common subsequence between two sequences.
Parameters
----------
x, y : sequence
Returns
-------
sequence
Longest common subsequence of x and y.
Examples
--------
>>> _longest_common_subsequence("AGGTAB", "GXTXAYB")
['G', 'T', 'A', 'B']
>>> _longest_common_subsequence(["A", "GA", "G", "T", "A", "B"],
... ["GA", "X", "T", "X", "A", "Y", "B"])
['GA', 'T', 'A', 'B']
"""
m = len(x)
n = len(y)
# L[i, j] will contain the length of the longest common subsequence of
# x[0..i - 1] and y[0..j - 1].
L = _np.zeros((m + 1, n + 1), dtype=int)
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
continue
elif x[i - 1] == y[j - 1]:
L[i, j] = L[i - 1, j - 1] + 1
else:
L[i, j] = max(L[i - 1, j], L[i, j - 1])
ret = []
i, j = m, n
while i > 0 and j > 0:
# If current character in x and y are same, then current character is
# part of the longest common subsequence.
if x[i - 1] == y[j - 1]:
ret.append(x[i - 1])
i, j = i - 1, j - 1
# If not same, then find the larger of two and go in the direction of
# larger value.
elif L[i - 1, j] > L[i, j - 1]:
i -= 1
else:
j -= 1
return ret[::-1]
def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
    """
    Position all nodes of graph stacked on top of each other.

    Every node gets ``x = 0``; its ``y`` is read from the node attribute
    named by `height` (or set to zero when `height` is `None`).

    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        The node attribute that holds the numerical value used for the node
        height. This defaults to ``'freeenergy'``. If `None`, all node heights
        are set to zero.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is the
        origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are set to
        zero in the returned positions.

    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> from pyrrole.drawing import tower_layout
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
    ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
    >>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
    ...            .to_digraph())
    >>> layout = tower_layout(digraph)
    >>> layout['AcOH(g)']
    array([   0.        , -228.56450866])

    Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:

    >>> layout = tower_layout(digraph, scale=1)
    >>> layout['AcOH(g)'][1] <= 1.
    True

    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)

    n_nodes = len(graph)
    if n_nodes == 0:
        return {}
    if n_nodes == 1:
        return {_nx.utils.arbitrary_element(graph): center}

    # Extra axes beyond the first two are padded with zeros.
    extra_dims = max(0, dim - 2)
    if height is None:
        heights = _np.zeros(n_nodes)
    else:
        heights = _np.array([attr for _, attr in graph.nodes(data=height)])
    coords = _np.column_stack([_np.zeros((n_nodes, 1)), heights,
                               _np.zeros((n_nodes, extra_dims))])

    if scale is not None:
        coords = _nx.drawing.layout.rescale_layout(coords,
                                                   scale=scale) + center

    # TODO: make test
    return dict(zip(graph, coords))
def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7,
node_color='k', style='solid', alpha=1.0, cmap=None,
vmin=None, vmax=None, ax=None, label=None):
"""
Draw nodes of graph.
This draws only the nodes of graph as horizontal lines at each
``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where
``x = pos[0]``.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default), a diagram layout
positioning will be computed. See `networkx.layout` and
`pyrrole.drawing` for functions that compute node positions.
nodelist : `list`, optional
Draw only specified nodes (default is ``graph.nodes()``).
node_size : scalar or array
Size of nodes (default is ``.7``). If an array is specified it must be
the same length as nodelist.
node_color : color `str`, or array of `float`
Node color. Can be a single color format `str` (default is ``'k'``), or
a sequence of colors with the same length as nodelist. If numeric
values are specified they will be mapped to colors using the `cmap` and
`vmin`, `vmax` parameters. See `matplotlib.hlines` for more details.
style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
Edge line style (default is ``'solid'``). See `matplotlib.hlines` for
more details.
alpha : `float` or array of `float`, optional
The node transparency. This can be a single alpha value (default is
``'1.0'``), in which case it will be applied to all the nodes of color.
Otherwise, if it is an array, the elements of alpha will be applied to
the colors in order (cycling through alpha multiple times if
necessary).
cmap : Matplotlib colormap, optional
Colormap name or Colormap instance for mapping intensities of nodes.
vmin : `float`, optional
Minimum for node colormap scaling.
vmax : `float`, optional
Maximum for node colormap scaling.
ax : `matplotlib.axes.Axes`, optional
Draw the graph in the specified Matplotlib axes.
label : `str`, optional
Label for legend.
Returns
-------
`matplotlib.collections.LineCollection`
`LineCollection` of the nodes.
Raises
------
networkx.NetworkXError
Raised if a node has no position or one with bad value.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram_nodes
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> nodes = draw_diagram_nodes(digraph)
"""
if ax is None:
ax = _plt.gca()
if nodelist is None:
nodelist = list(graph.nodes())
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
if pos is None:
pos = diagram_layout(graph)
try:
xy = _np.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise _nx.NetworkXError('Node {} has no position.'.format(e))
except ValueError:
raise _nx.NetworkXError('Bad value in node positions.')
if isinstance(alpha, _collections.Iterable):
node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist, cmap,
vmin, vmax)
alpha = None
node_collection = ax.hlines(xy[:, 1],
xy[:, 0] - node_size/2.,
xy[:, 0] + node_size/2.,
colors=node_color,
linestyles=style,
label=label,
cmap=cmap)
node_collection.set_zorder(2)
return node_collection
def draw_diagram_edges(graph, pos=None, edgelist=None, width=1.0,
edge_color='k', style='dashed', alpha=1.0,
edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None,
label=None, nodelist=None, node_size=.7):
"""
Draw edges of graph.
This draws only the edges of a graph.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default), a diagram layout
positioning will be computed. See `networkx.layout` and
`pyrrole.drawing` for functions that compute node positions.
edgelist : collection of edge `tuple`
Draw only specified edges (default is ``graph.edges()``).
width : `float`, or array of `float`
Line width of edges (default is ``1.0``).
edge_color : color `str`, or array of `float`
Edge color. Can be a single color format `str` (default is ``'r'``),
or a sequence of colors with the same length as edgelist. If numeric
values are specified they will be mapped to colors using the
`edge_cmap` and `edge_vmin`, `edge_vmax` parameters.
style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
Edge line style (default is ``'dashed'``). See `matplotlib.hlines` for
more details.
alpha : `float`, optional
The edge transparency (default is ``1.0``).
edge_cmap : Matplotlib colormap, optional
Colormap for mapping intensities of edges.
edge_vmin : `float`, optional
Minimum for edge colormap scaling.
edge_vmax : `float`, optional
Maximum for edge colormap scaling.
ax : `matplotlib.axes.Axes`, optional
Draw the graph in the specified Matplotlib axes.
label : `str`, optional
Label for legend.
nodelist : `list`, optional
Draw only specified nodes (default is ``graph.nodes()``).
node_size : scalar or array
Size of nodes (default is ``.7``). If an array is specified it must be
the same length as nodelist.
Returns
-------
`matplotlib.collections.LineCollection`
`LineCollection` of the edges.
Raises
------
networkx.NetworkXError
Raised if a node has no position or one with bad value.
ValueError
Raised if `edge_color` contains something other than color names (one
or a list of one per edge) or numbers.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram_edges
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> edges = draw_diagram_edges(digraph)
"""
if ax is None:
ax = _plt.gca()
if edgelist is None:
edgelist = list(graph.edges())
if not edgelist or len(edgelist) == 0: # no edges!
return None
if nodelist is None:
nodelist = list(graph.nodes())
if pos is None:
pos = diagram_layout(graph)
try:
# set edge positions
edge_pos = _np.asarray([(pos[e[0]] + node_size/2.,
pos[e[1]] - node_size/2.) for e in edgelist])
except KeyError as e:
raise _nx.NetworkXError('Node {} has no position.'.format(e))
except ValueError:
raise _nx.NetworkXError('Bad value in node positions.')
if not _cb.iterable(width):
lw = (width,)
else:
lw = width
if not isinstance(edge_color, str) \
and _cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if _np.alltrue([isinstance(c, str) for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([_colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif _np.alltrue([not isinstance(c, str) for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if _np.alltrue([_cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must contain color names or numbers')
else:
if isinstance(edge_color, str) or len(edge_color) == 1:
edge_colors = (_colorConverter.to_rgba(edge_color, alpha), )
else:
raise ValueError('edge_color must be a color or list of one color '
' per edge')
edge_collection = _LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset=ax.transData)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
if _cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, _Colormap))
edge_collection.set_array(_np.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
ax.autoscale_view()
return edge_collection
def draw_diagram_labels(graph, pos=None, labels=None, font_size=12,
                        font_color='k', font_family='sans-serif',
                        font_weight='normal', alpha=1.0, bbox=None, ax=None,
                        offset=None, **kwds):
    """
    Draw node labels of graph.

    This draws only the node labels of a graph.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default), a diagram layout
        positioning will be computed. See `networkx.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    labels : mapping, optional
        Node labels in a mapping keyed by node of text labels.
    font_size : `int`, optional
        Font size for text labels (default is ``12``).
    font_color : `str`, optional
        Font color `str` (default is ``'k'``, i.e., black).
    font_family : `str`, optional
        Font family (default is ``'sans-serif'``).
    font_weight : `str`, optional
        Font weight (default is ``'normal'``).
    alpha : `float`, optional
        The text transparency (default is ``1.0``).
    bbox : `dict`, optional
        Box properties forwarded to `matplotlib.axes.Axes.text` to draw a box
        around the label (default is `None`, i.e., no box).
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    offset : array-like or `str`, optional
        Label positions are summed to this before drawing. Defaults to zero
        vector. If `str`, can be either ``'above'`` (equivalent to
        ``(0, 1.5)``) or ``'below'`` (equivalent to ``(0, -1.5)``).

    Returns
    -------
    mapping
        Mapping of labels keyed on the nodes.

    Raises
    ------
    ValueError
        Raised if `offset` is a `str` other than ``'above'`` or ``'below'``.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram_labels
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> edges = draw_diagram_labels(digraph, font_color='blue',
    ...                             offset="below")
    >>> labels = {k: "{:g}".format(v)
    ...           for k, v in digraph.nodes(data='freeenergy')}
    >>> edges = draw_diagram_labels(digraph, labels=labels,
    ...                             offset="above")
    """
    if ax is None:
        ax = _plt.gca()
    if labels is None:
        labels = dict((n, n) for n in graph.nodes())
    if pos is None:
        pos = diagram_layout(graph)
    # Resolve offset into a length-2 vector. Checking isinstance(offset, str)
    # first avoids NumPy elementwise `==` comparisons (and their warnings)
    # when an array-like offset is given, and lets us reject typos early
    # instead of failing later with an opaque broadcasting error.
    if offset is None:
        offset = _np.array([0., 0.])
    elif isinstance(offset, str):
        if offset == "above":
            offset = _np.array([0., 1.5])
        elif offset == "below":
            offset = _np.array([0., -1.5])
        else:
            raise ValueError("offset must be 'above', 'below' or array-like, "
                             "got {!r}".format(offset))
    # set optional alignment
    horizontalalignment = kwds.get('horizontalalignment', 'center')
    verticalalignment = kwds.get('verticalalignment', 'center')
    text_items = {}  # there is no text collection so we'll fake one
    for n, label in labels.items():
        (x, y) = _np.asanyarray(pos[n]) + _np.asanyarray(offset)
        if not isinstance(label, str):
            label = str(label)  # this makes "1" and 1 labeled the same
        t = ax.text(x, y, label,
                    size=font_size,
                    color=font_color,
                    family=font_family,
                    weight=font_weight,
                    alpha=alpha,
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    transform=ax.transData,
                    bbox=bbox,
                    clip_on=True)
        text_items[n] = t
    return text_items
def draw_diagram(graph, pos=None, with_labels=True, offset=None, **kwds):
    """
    Draw a diagram for graph using Matplotlib.

    Draw graph as a simple energy diagram with Matplotlib with options for node
    positions, labeling, titles, and many other drawing features. See examples
    below.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default) a diagram layout
        positioning will be computed. See `networkx.drawing.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    with_labels : `bool`, optional
        Set to `True` (default) to draw labels on the nodes.
    offset : array-like or `str`, optional
        Label positions are summed to this before drawing. Defaults to
        ``'below'``. See `draw_diagram_labels` for more.

    Notes
    -----
    Further keywords are passed to `draw_diagram_nodes` and
    `draw_diagram_edges`. If `pos` is `None`, `diagram_layout` is also called
    and have keywords passed as well. The same happens with
    `draw_diagram_labels` if `with_labels` is `True`.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram, draw_diagram_labels
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> draw_diagram(digraph)
    >>> labels = {k: "{:g}".format(v)
    ...           for k, v in digraph.nodes(data='freeenergy')}
    >>> edges = draw_diagram_labels(digraph, labels=labels,
    ...                             offset="above")
    """
    # Compute a default layout only when the caller did not provide one.
    layout = pos if pos is not None else diagram_layout(graph, **kwds)

    node_collection = draw_diagram_nodes(graph, layout, **kwds)  # noqa
    edge_collection = draw_diagram_edges(graph, layout, **kwds)  # noqa

    if with_labels:
        # TODO: This changes the default behaviour of draw_diagram_labels.
        label_offset = "below" if offset is None else offset
        draw_diagram_labels(graph, layout, offset=label_offset, **kwds)

    _plt.draw_if_interactive()
|
schneiderfelipe/pyrrole | pyrrole/drawing.py | draw_diagram_nodes | python | def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7,
node_color='k', style='solid', alpha=1.0, cmap=None,
vmin=None, vmax=None, ax=None, label=None):
if ax is None:
ax = _plt.gca()
if nodelist is None:
nodelist = list(graph.nodes())
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
if pos is None:
pos = diagram_layout(graph)
try:
xy = _np.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise _nx.NetworkXError('Node {} has no position.'.format(e))
except ValueError:
raise _nx.NetworkXError('Bad value in node positions.')
if isinstance(alpha, _collections.Iterable):
node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist, cmap,
vmin, vmax)
alpha = None
node_collection = ax.hlines(xy[:, 1],
xy[:, 0] - node_size/2.,
xy[:, 0] + node_size/2.,
colors=node_color,
linestyles=style,
label=label,
cmap=cmap)
node_collection.set_zorder(2)
return node_collection | Draw nodes of graph.
This draws only the nodes of graph as horizontal lines at each
``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where
``x = pos[0]``.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default), a diagram layout
positioning will be computed. See `networkx.layout` and
`pyrrole.drawing` for functions that compute node positions.
nodelist : `list`, optional
Draw only specified nodes (default is ``graph.nodes()``).
node_size : scalar or array
Size of nodes (default is ``.7``). If an array is specified it must be
the same length as nodelist.
node_color : color `str`, or array of `float`
Node color. Can be a single color format `str` (default is ``'k'``), or
a sequence of colors with the same length as nodelist. If numeric
values are specified they will be mapped to colors using the `cmap` and
`vmin`, `vmax` parameters. See `matplotlib.hlines` for more details.
style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
Edge line style (default is ``'solid'``). See `matplotlib.hlines` for
more details.
alpha : `float` or array of `float`, optional
The node transparency. This can be a single alpha value (default is
``'1.0'``), in which case it will be applied to all the nodes of color.
Otherwise, if it is an array, the elements of alpha will be applied to
the colors in order (cycling through alpha multiple times if
necessary).
cmap : Matplotlib colormap, optional
Colormap name or Colormap instance for mapping intensities of nodes.
vmin : `float`, optional
Minimum for node colormap scaling.
vmax : `float`, optional
Maximum for node colormap scaling.
ax : `matplotlib.axes.Axes`, optional
Draw the graph in the specified Matplotlib axes.
label : `str`, optional
Label for legend.
Returns
-------
`matplotlib.collections.LineCollection`
`LineCollection` of the nodes.
Raises
------
networkx.NetworkXError
Raised if a node has no position or one with bad value.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram_nodes
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> nodes = draw_diagram_nodes(digraph) | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/drawing.py#L257-L371 | [
"def diagram_layout(graph, height='freeenergy', sources=None, targets=None,\n pos=None, scale=None, center=None, dim=2):\n \"\"\"\n Position nodes such that paths are highlighted, from left to right.\n\n Parameters\n ----------\n graph : `networkx.Graph` or `list` of nodes\n A position will be assigned to every node in graph.\n height : `str` or `None`, optional\n The node attribute that holds the numerical value used for the node\n height. This defaults to ``'freeenergy'``. If `None`, all node heights\n are set to zero.\n sources : `list` of `str`\n All simple paths starting at members of `sources` are considered.\n Defaults to all nodes of graph.\n targets : `list` of `str`\n All simple paths ending at members of `targets` are considered.\n Defaults to all nodes of graph.\n pos : mapping, optional\n Initial positions for nodes as a mapping with node as keys and\n values as a coordinate `list` or `tuple`. If not specified (default),\n initial positions are computed with `tower_layout`.\n scale : number, optional\n Scale factor for positions.\n center : array-like, optional\n Coordinate pair around which to center the layout. Default is the\n origin.\n dim : `int`\n Dimension of layout. If `dim` > 2, the remaining dimensions are set to\n zero in the returned positions.\n\n Returns\n -------\n pos : mapping\n A mapping of positions keyed by node.\n\n Examples\n --------\n >>> import pandas as pd\n >>> from pyrrole import ChemicalSystem\n >>> from pyrrole.drawing import diagram_layout\n >>> data = pd.DataFrame(\n ... [{\"name\": \"Separated_Reactants\", \"freeenergy\": 0.},\n ... {\"name\": \"mlC1\", \"freeenergy\": -5.4},\n ... {\"name\": \"mlC2\", \"freeenergy\": -15.6},\n ... {\"name\": \"mTS1\", \"freeenergy\": 28.5, \"color\": \"g\"},\n ... {\"name\": \"mCARB1\", \"freeenergy\": -9.7},\n ... {\"name\": \"mCARB2\", \"freeenergy\": -19.8},\n ... {\"name\": \"mCARBX\", \"freeenergy\": 20}]).set_index(\"name\")\n >>> system = ChemicalSystem(\n ... 
[\"Separated_Reactants -> mlC1 -> mTS1\",\n ... \"Separated_Reactants -> mlC2 -> mTS1\",\n ... \"mCARB2 <- mTS1 -> mCARB1\",\n ... \"Separated_Reactants -> mCARBX\"], data)\n >>> digraph = system.to_digraph()\n >>> layout = diagram_layout(digraph)\n >>> layout['mCARB2']\n array([ 3. , -19.8])\n\n Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:\n\n >>> layout = diagram_layout(digraph, scale=1)\n >>> layout['mTS1'][1] <= 1.\n True\n\n \"\"\"\n # TODO: private function of packages should not be used.\n graph, center = _nx.drawing.layout._process_params(graph, center, dim)\n\n num_nodes = len(graph)\n if num_nodes == 0:\n return {}\n elif num_nodes == 1:\n return {_nx.utils.arbitrary_element(graph): center}\n\n if sources is None:\n sources = graph.nodes()\n if targets is None:\n targets = graph.nodes()\n simple_paths = [path for source in set(sources) for target in set(targets)\n for path in _nx.all_simple_paths(graph, source, target)]\n\n if pos is None:\n pos = tower_layout(graph, height=height, scale=None, center=center,\n dim=dim)\n\n for path in simple_paths:\n for n, step in enumerate(path):\n if pos[step][0] < n:\n pos[step][0] = n\n\n if scale is not None:\n pos_arr = _np.array([pos[node] for node in graph])\n pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,\n scale=scale) + center\n pos = dict(zip(graph, pos_arr))\n\n # TODO: make test\n return pos\n"
] | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for drawing chemical diagrams."""
# TODO: revisit all examples in this module.
import os
import collections as _collections
import numpy as _np
import networkx as _nx
import matplotlib as _mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
_mpl.use('Agg')
import matplotlib.cbook as _cb # noqa
import matplotlib.pyplot as _plt # noqa
from matplotlib.colors import Colormap as _Colormap # noqa
from matplotlib.colors import colorConverter as _colorConverter # noqa
from matplotlib.collections import LineCollection as _LineCollection # noqa
def _longest_common_subsequence(x, y):
    """
    Return the longest common subsequence between two sequences.

    Parameters
    ----------
    x, y : sequence

    Returns
    -------
    sequence
        Longest common subsequence of x and y.

    Examples
    --------
    >>> _longest_common_subsequence("AGGTAB", "GXTXAYB")
    ['G', 'T', 'A', 'B']
    >>> _longest_common_subsequence(["A", "GA", "G", "T", "A", "B"],
    ...                             ["GA", "X", "T", "X", "A", "Y", "B"])
    ['GA', 'T', 'A', 'B']
    """
    m, n = len(x), len(y)

    # lengths[i][j] holds the length of the longest common subsequence of
    # x[0..i - 1] and y[0..j - 1]; row/column zero stay at zero (empty prefix).
    lengths = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                lengths[i][j] = lengths[i - 1][j - 1] + 1
            else:
                lengths[i][j] = max(lengths[i - 1][j], lengths[i][j - 1])

    # Walk back from the bottom-right corner, collecting matched elements.
    subsequence = []
    i, j = m, n
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            # Current elements match, so they belong to the subsequence.
            subsequence.append(x[i - 1])
            i -= 1
            j -= 1
        elif lengths[i - 1][j] > lengths[i][j - 1]:
            # Follow the direction of the larger subproblem (ties go left,
            # matching the table-construction order).
            i -= 1
        else:
            j -= 1

    # Elements were collected back-to-front.
    subsequence.reverse()
    return subsequence
def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
    """
    Position all nodes of graph stacked on top of each other.

    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        The node attribute that holds the numerical value used for the node
        height. This defaults to ``'freeenergy'``. If `None`, all node heights
        are set to zero.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is the
        origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are set to
        zero in the returned positions.

    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> from pyrrole.drawing import tower_layout
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
    ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
    >>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
    ...            .to_digraph())
    >>> layout = tower_layout(digraph)
    >>> layout['AcOH(g)']
    array([   0.        , -228.56450866])

    Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:

    >>> layout = tower_layout(digraph, scale=1)
    >>> layout['AcOH(g)'][1] <= 1.
    True
    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)

    num_nodes = len(graph)
    if num_nodes == 0:
        return {}
    if num_nodes == 1:
        return {_nx.utils.arbitrary_element(graph): center}

    # Node heights come from a node attribute unless explicitly disabled.
    if height is None:
        heights = _np.zeros(num_nodes)
    else:
        heights = _np.array([value for _, value in graph.nodes(data=height)])

    # All nodes sit at x == 0; extra dimensions beyond the second are zeroed.
    extra_dims = max(0, dim - 2)
    coords = _np.column_stack([_np.zeros((num_nodes, 1)), heights,
                               _np.zeros((num_nodes, extra_dims))])

    if scale is not None:
        coords = _nx.drawing.layout.rescale_layout(coords,
                                                   scale=scale) + center

    # TODO: make test
    return dict(zip(graph, coords))
def diagram_layout(graph, height='freeenergy', sources=None, targets=None,
                   pos=None, scale=None, center=None, dim=2):
    """
    Position nodes such that paths are highlighted, from left to right.

    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        The node attribute that holds the numerical value used for the node
        height. This defaults to ``'freeenergy'``. If `None`, all node heights
        are set to zero.
    sources : `list` of `str`
        All simple paths starting at members of `sources` are considered.
        Defaults to all nodes of graph.
    targets : `list` of `str`
        All simple paths ending at members of `targets` are considered.
        Defaults to all nodes of graph.
    pos : mapping, optional
        Initial positions for nodes as a mapping with node as keys and
        values as a coordinate `list` or `tuple`. If not specified (default),
        initial positions are computed with `tower_layout`. The given mapping
        is not modified; positions are copied before adjustment.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is the
        origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are set to
        zero in the returned positions.

    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import diagram_layout
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> layout = diagram_layout(digraph)
    >>> layout['mCARB2']
    array([  3. , -19.8])

    Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:

    >>> layout = diagram_layout(digraph, scale=1)
    >>> layout['mTS1'][1] <= 1.
    True
    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)

    num_nodes = len(graph)
    if num_nodes == 0:
        return {}
    elif num_nodes == 1:
        return {_nx.utils.arbitrary_element(graph): center}

    if sources is None:
        sources = graph.nodes()
    if targets is None:
        targets = graph.nodes()
    # Every simple path between any source and any target is considered.
    simple_paths = [path for source in set(sources) for target in set(targets)
                    for path in _nx.all_simple_paths(graph, source, target)]

    if pos is None:
        pos = tower_layout(graph, height=height, scale=None, center=center,
                           dim=dim)
    else:
        # Copy user-supplied positions into mutable float arrays: the loop
        # below assigns to pos[step][0], which would fail for tuple positions
        # (allowed by the docstring) and would otherwise mutate the caller's
        # mapping in place.
        pos = {node: _np.array(p, dtype=float) for node, p in pos.items()}

    # Push each node at least as far right as its (largest) index along any
    # simple path, so paths read left to right.
    for path in simple_paths:
        for n, step in enumerate(path):
            if pos[step][0] < n:
                pos[step][0] = n

    if scale is not None:
        pos_arr = _np.array([pos[node] for node in graph])
        pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
                                                    scale=scale) + center
        pos = dict(zip(graph, pos_arr))

    # TODO: make test
    return pos
def draw_diagram_edges(graph, pos=None, edgelist=None, width=1.0,
                       edge_color='k', style='dashed', alpha=1.0,
                       edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None,
                       label=None, nodelist=None, node_size=.7):
    """
    Draw edges of graph.

    This draws only the edges of a graph.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default), a diagram layout
        positioning will be computed. See `networkx.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    edgelist : collection of edge `tuple`
        Draw only specified edges (default is ``graph.edges()``).
    width : `float`, or array of `float`
        Line width of edges (default is ``1.0``).
    edge_color : color `str`, or array of `float`
        Edge color. Can be a single color format `str` (default is ``'k'``),
        or a sequence of colors with the same length as edgelist. If numeric
        values are specified they will be mapped to colors using the
        `edge_cmap` and `edge_vmin`, `edge_vmax` parameters.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Edge line style (default is ``'dashed'``). See `matplotlib.hlines` for
        more details.
    alpha : `float`, optional
        The edge transparency (default is ``1.0``).
    edge_cmap : Matplotlib colormap, optional
        Colormap for mapping intensities of edges.
    edge_vmin : `float`, optional
        Minimum for edge colormap scaling.
    edge_vmax : `float`, optional
        Maximum for edge colormap scaling.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Size of nodes (default is ``.7``). If an array is specified it must be
        the same length as nodelist.

    Returns
    -------
    `matplotlib.collections.LineCollection`
        `LineCollection` of the edges.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.
    ValueError
        Raised if `edge_color` contains something other than color names (one
        or a list of one per edge) or numbers.
    TypeError
        Raised if `edge_cmap` is given but is not a Matplotlib colormap.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram_edges
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> edges = draw_diagram_edges(digraph)
    """
    # Local import keeps the module-level import block untouched.
    import numbers as _numbers

    if ax is None:
        ax = _plt.gca()
    if edgelist is None:
        edgelist = list(graph.edges())
    if not edgelist or len(edgelist) == 0:  # no edges!
        return None
    if nodelist is None:
        nodelist = list(graph.nodes())
    if pos is None:
        pos = diagram_layout(graph)
    try:
        # Set edge positions: each edge spans from the right end of the source
        # node's bar to the left end of the target node's bar.
        edge_pos = _np.asarray([(pos[e[0]] + node_size/2.,
                                 pos[e[1]] - node_size/2.) for e in edgelist])
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')
    # NOTE: _np.iterable replaces matplotlib.cbook.iterable (removed in
    # matplotlib 3.3); both simply test whether iter() succeeds.
    if not _np.iterable(width):
        lw = (width,)
    else:
        lw = width
    if not isinstance(edge_color, str) \
            and _np.iterable(edge_color) \
            and len(edge_color) == len(edge_pos):
        # _np.all replaces the deprecated alias _np.alltrue (removed in
        # NumPy 2.0).
        if _np.all([isinstance(c, str) for c in edge_color]):
            # (should check ALL elements)
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([_colorConverter.to_rgba(c, alpha)
                                 for c in edge_color])
        elif _np.all([not isinstance(c, str) for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if _np.all([_np.iterable(c) and len(c) in (3, 4)
                        for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must contain color names or numbers')
    else:
        if isinstance(edge_color, str) or len(edge_color) == 1:
            edge_colors = (_colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a color or list of one color '
                             ' per edge')
    edge_collection = _LineCollection(edge_pos,
                                      colors=edge_colors,
                                      linewidths=lw,
                                      antialiaseds=(1,),
                                      linestyle=style,
                                      transOffset=ax.transData)
    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)
    # isinstance(..., numbers.Number) replaces matplotlib.cbook.is_numlike
    # (removed in matplotlib 3.1).
    if isinstance(alpha, _numbers.Number):
        edge_collection.set_alpha(alpha)
    if edge_colors is None:
        # edge_color holds numbers to be mapped through the colormap.
        if edge_cmap is not None:
            # Explicit check instead of `assert`, which is stripped under -O.
            if not isinstance(edge_cmap, _Colormap):
                raise TypeError('edge_cmap must be a Matplotlib colormap.')
        edge_collection.set_array(_np.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()
    ax.autoscale_view()
    return edge_collection
def draw_diagram_labels(graph, pos=None, labels=None, font_size=12,
                        font_color='k', font_family='sans-serif',
                        font_weight='normal', alpha=1.0, bbox=None, ax=None,
                        offset=None, **kwds):
    """
    Draw node labels of graph.

    This draws only the node labels of a graph.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default), a diagram layout
        positioning will be computed. See `networkx.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    labels : mapping, optional
        Node labels in a mapping keyed by node of text labels.
    font_size : `int`, optional
        Font size for text labels (default is ``12``).
    font_color : `str`, optional
        Font color `str` (default is ``'k'``, i.e., black).
    font_family : `str`, optional
        Font family (default is ``'sans-serif'``).
    font_weight : `str`, optional
        Font weight (default is ``'normal'``).
    alpha : `float`, optional
        The text transparency (default is ``1.0``).
    bbox : `dict`, optional
        Box properties forwarded to `matplotlib.axes.Axes.text` (default is
        `None`, i.e., no box around labels).
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    offset : array-like or `str`, optional
        Label positions are summed to this before drawing. Defaults to zero
        vector. If `str`, can be either ``'above'`` (equivalent to
        ``(0, 1.5)``) or ``'below'`` (equivalent to ``(0, -1.5)``).

    Returns
    -------
    mapping
        Mapping of labels keyed on the nodes.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram_labels
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> edges = draw_diagram_labels(digraph, font_color='blue',
    ...                             offset="below")
    >>> labels = {k: "{:g}".format(v)
    ...           for k, v in digraph.nodes(data='freeenergy')}
    >>> edges = draw_diagram_labels(digraph, labels=labels,
    ...                             offset="above")
    """
    if ax is None:
        ax = _plt.gca()
    if labels is None:
        # By default every node is labeled with its own name.
        labels = dict((n, n) for n in graph.nodes())
    if pos is None:
        pos = diagram_layout(graph)
    # Resolve the textual offset shortcuts into concrete 2-vectors.
    # NOTE(review): a non-str array-like offset reaches the `==` comparisons
    # below; with NumPy arrays this triggers elementwise comparison — an
    # isinstance(offset, str) guard would be safer. Confirm intended inputs.
    if offset is None:
        offset = _np.array([0., 0.])
    elif offset == "above":
        offset = _np.array([0., 1.5])
    elif offset == "below":
        offset = _np.array([0., -1.5])
    # set optional alignment
    horizontalalignment = kwds.get('horizontalalignment', 'center')
    verticalalignment = kwds.get('verticalalignment', 'center')
    text_items = {}  # there is no text collection so we'll fake one
    for n, label in labels.items():
        # Shift each label position by the requested offset.
        (x, y) = _np.asanyarray(pos[n]) + _np.asanyarray(offset)
        if not isinstance(label, str):
            label = str(label)  # this makes "1" and 1 labeled the same
        t = ax.text(x, y, label,
                    size=font_size,
                    color=font_color,
                    family=font_family,
                    weight=font_weight,
                    alpha=alpha,
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    transform=ax.transData,
                    bbox=bbox,
                    clip_on=True)
        text_items[n] = t
    return text_items
def draw_diagram(graph, pos=None, with_labels=True, offset=None, **kwds):
"""
Draw a diagram for graph using Matplotlib.
Draw graph as a simple energy diagram with Matplotlib with options for node
positions, labeling, titles, and many other drawing features. See examples
below.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default) a diagram layout
positioning will be computed. See `networkx.drawing.layout` and
`pyrrole.drawing` for functions that compute node positions.
with_labels : `bool`, optional
Set to `True` (default) to draw labels on the nodes.
offset : array-like or `str`, optional
Label positions are summed to this before drawing. Defaults to
``'below'``. See `draw_diagram_labels` for more.
Notes
-----
Further keywords are passed to `draw_diagram_nodes` and
`draw_diagram_edges`. If `pos` is `None`, `diagram_layout` is also called
and have keywords passed as well. The same happens with
`draw_diagram_labels` if `with_labels` is `True`.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram, draw_diagram_labels
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> draw_diagram(digraph)
>>> labels = {k: "{:g}".format(v)
... for k, v in digraph.nodes(data='freeenergy')}
>>> edges = draw_diagram_labels(digraph, labels=labels,
... offset="above")
"""
if pos is None:
pos = diagram_layout(graph, **kwds) # default to diagram layout
node_collection = draw_diagram_nodes(graph, pos, **kwds) # noqa
edge_collection = draw_diagram_edges(graph, pos, **kwds) # noqa
if with_labels:
if offset is None:
# TODO: This changes the default behaviour of draw_diagram_labels.
offset = "below"
draw_diagram_labels(graph, pos, offset=offset, **kwds)
_plt.draw_if_interactive()
|
schneiderfelipe/pyrrole | pyrrole/drawing.py | draw_diagram_edges | python | def draw_diagram_edges(graph, pos=None, edgelist=None, width=1.0,
edge_color='k', style='dashed', alpha=1.0,
edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None,
label=None, nodelist=None, node_size=.7):
if ax is None:
ax = _plt.gca()
if edgelist is None:
edgelist = list(graph.edges())
if not edgelist or len(edgelist) == 0: # no edges!
return None
if nodelist is None:
nodelist = list(graph.nodes())
if pos is None:
pos = diagram_layout(graph)
try:
# set edge positions
edge_pos = _np.asarray([(pos[e[0]] + node_size/2.,
pos[e[1]] - node_size/2.) for e in edgelist])
except KeyError as e:
raise _nx.NetworkXError('Node {} has no position.'.format(e))
except ValueError:
raise _nx.NetworkXError('Bad value in node positions.')
if not _cb.iterable(width):
lw = (width,)
else:
lw = width
if not isinstance(edge_color, str) \
and _cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if _np.alltrue([isinstance(c, str) for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([_colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif _np.alltrue([not isinstance(c, str) for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if _np.alltrue([_cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must contain color names or numbers')
else:
if isinstance(edge_color, str) or len(edge_color) == 1:
edge_colors = (_colorConverter.to_rgba(edge_color, alpha), )
else:
raise ValueError('edge_color must be a color or list of one color '
' per edge')
edge_collection = _LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset=ax.transData)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
if _cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, _Colormap))
edge_collection.set_array(_np.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
ax.autoscale_view()
return edge_collection | Draw edges of graph.
This draws only the edges of a graph.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default), a diagram layout
positioning will be computed. See `networkx.layout` and
`pyrrole.drawing` for functions that compute node positions.
edgelist : collection of edge `tuple`
Draw only specified edges (default is ``graph.edges()``).
width : `float`, or array of `float`
Line width of edges (default is ``1.0``).
edge_color : color `str`, or array of `float`
Edge color. Can be a single color format `str` (default is ``'r'``),
or a sequence of colors with the same length as edgelist. If numeric
values are specified they will be mapped to colors using the
`edge_cmap` and `edge_vmin`, `edge_vmax` parameters.
style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
Edge line style (default is ``'dashed'``). See `matplotlib.hlines` for
more details.
alpha : `float`, optional
The edge transparency (default is ``1.0``).
edge_cmap : Matplotlib colormap, optional
Colormap for mapping intensities of edges.
edge_vmin : `float`, optional
Minimum for edge colormap scaling.
edge_vmax : `float`, optional
Maximum for edge colormap scaling.
ax : `matplotlib.axes.Axes`, optional
Draw the graph in the specified Matplotlib axes.
label : `str`, optional
Label for legend.
nodelist : `list`, optional
Draw only specified nodes (default is ``graph.nodes()``).
node_size : scalar or array
Size of nodes (default is ``.7``). If an array is specified it must be
the same length as nodelist.
Returns
-------
`matplotlib.collections.LineCollection`
`LineCollection` of the edges.
Raises
------
networkx.NetworkXError
Raised if a node has no position or one with bad value.
ValueError
Raised if `edge_color` contains something other than color names (one
or a list of one per edge) or numbers.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram_edges
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> edges = draw_diagram_edges(digraph) | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/drawing.py#L374-L536 | [
"def diagram_layout(graph, height='freeenergy', sources=None, targets=None,\n pos=None, scale=None, center=None, dim=2):\n \"\"\"\n Position nodes such that paths are highlighted, from left to right.\n\n Parameters\n ----------\n graph : `networkx.Graph` or `list` of nodes\n A position will be assigned to every node in graph.\n height : `str` or `None`, optional\n The node attribute that holds the numerical value used for the node\n height. This defaults to ``'freeenergy'``. If `None`, all node heights\n are set to zero.\n sources : `list` of `str`\n All simple paths starting at members of `sources` are considered.\n Defaults to all nodes of graph.\n targets : `list` of `str`\n All simple paths ending at members of `targets` are considered.\n Defaults to all nodes of graph.\n pos : mapping, optional\n Initial positions for nodes as a mapping with node as keys and\n values as a coordinate `list` or `tuple`. If not specified (default),\n initial positions are computed with `tower_layout`.\n scale : number, optional\n Scale factor for positions.\n center : array-like, optional\n Coordinate pair around which to center the layout. Default is the\n origin.\n dim : `int`\n Dimension of layout. If `dim` > 2, the remaining dimensions are set to\n zero in the returned positions.\n\n Returns\n -------\n pos : mapping\n A mapping of positions keyed by node.\n\n Examples\n --------\n >>> import pandas as pd\n >>> from pyrrole import ChemicalSystem\n >>> from pyrrole.drawing import diagram_layout\n >>> data = pd.DataFrame(\n ... [{\"name\": \"Separated_Reactants\", \"freeenergy\": 0.},\n ... {\"name\": \"mlC1\", \"freeenergy\": -5.4},\n ... {\"name\": \"mlC2\", \"freeenergy\": -15.6},\n ... {\"name\": \"mTS1\", \"freeenergy\": 28.5, \"color\": \"g\"},\n ... {\"name\": \"mCARB1\", \"freeenergy\": -9.7},\n ... {\"name\": \"mCARB2\", \"freeenergy\": -19.8},\n ... {\"name\": \"mCARBX\", \"freeenergy\": 20}]).set_index(\"name\")\n >>> system = ChemicalSystem(\n ... 
[\"Separated_Reactants -> mlC1 -> mTS1\",\n ... \"Separated_Reactants -> mlC2 -> mTS1\",\n ... \"mCARB2 <- mTS1 -> mCARB1\",\n ... \"Separated_Reactants -> mCARBX\"], data)\n >>> digraph = system.to_digraph()\n >>> layout = diagram_layout(digraph)\n >>> layout['mCARB2']\n array([ 3. , -19.8])\n\n Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:\n\n >>> layout = diagram_layout(digraph, scale=1)\n >>> layout['mTS1'][1] <= 1.\n True\n\n \"\"\"\n # TODO: private function of packages should not be used.\n graph, center = _nx.drawing.layout._process_params(graph, center, dim)\n\n num_nodes = len(graph)\n if num_nodes == 0:\n return {}\n elif num_nodes == 1:\n return {_nx.utils.arbitrary_element(graph): center}\n\n if sources is None:\n sources = graph.nodes()\n if targets is None:\n targets = graph.nodes()\n simple_paths = [path for source in set(sources) for target in set(targets)\n for path in _nx.all_simple_paths(graph, source, target)]\n\n if pos is None:\n pos = tower_layout(graph, height=height, scale=None, center=center,\n dim=dim)\n\n for path in simple_paths:\n for n, step in enumerate(path):\n if pos[step][0] < n:\n pos[step][0] = n\n\n if scale is not None:\n pos_arr = _np.array([pos[node] for node in graph])\n pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,\n scale=scale) + center\n pos = dict(zip(graph, pos_arr))\n\n # TODO: make test\n return pos\n"
] | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for drawing chemical diagrams."""
# TODO: revisit all examples in this module.
import os
import collections as _collections
import numpy as _np
import networkx as _nx
import matplotlib as _mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
_mpl.use('Agg')
import matplotlib.cbook as _cb # noqa
import matplotlib.pyplot as _plt # noqa
from matplotlib.colors import Colormap as _Colormap # noqa
from matplotlib.colors import colorConverter as _colorConverter # noqa
from matplotlib.collections import LineCollection as _LineCollection # noqa
def _longest_common_subsequence(x, y):
"""
Return the longest common subsequence between two sequences.
Parameters
----------
x, y : sequence
Returns
-------
sequence
Longest common subsequence of x and y.
Examples
--------
>>> _longest_common_subsequence("AGGTAB", "GXTXAYB")
['G', 'T', 'A', 'B']
>>> _longest_common_subsequence(["A", "GA", "G", "T", "A", "B"],
... ["GA", "X", "T", "X", "A", "Y", "B"])
['GA', 'T', 'A', 'B']
"""
m = len(x)
n = len(y)
# L[i, j] will contain the length of the longest common subsequence of
# x[0..i - 1] and y[0..j - 1].
L = _np.zeros((m + 1, n + 1), dtype=int)
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
continue
elif x[i - 1] == y[j - 1]:
L[i, j] = L[i - 1, j - 1] + 1
else:
L[i, j] = max(L[i - 1, j], L[i, j - 1])
ret = []
i, j = m, n
while i > 0 and j > 0:
# If current character in x and y are same, then current character is
# part of the longest common subsequence.
if x[i - 1] == y[j - 1]:
ret.append(x[i - 1])
i, j = i - 1, j - 1
# If not same, then find the larger of two and go in the direction of
# larger value.
elif L[i - 1, j] > L[i, j - 1]:
i -= 1
else:
j -= 1
return ret[::-1]
def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
"""
Position all nodes of graph stacked on top of each other.
Parameters
----------
graph : `networkx.Graph` or `list` of nodes
A position will be assigned to every node in graph.
height : `str` or `None`, optional
The node attribute that holds the numerical value used for the node
height. This defaults to ``'freeenergy'``. If `None`, all node heights
are set to zero.
scale : number, optional
Scale factor for positions.
center : array-like, optional
Coordinate pair around which to center the layout. Default is the
origin.
dim : `int`
Dimension of layout. If `dim` > 2, the remaining dimensions are set to
zero in the returned positions.
Returns
-------
pos : mapping
A mapping of positions keyed by node.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> from pyrrole.drawing import tower_layout
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
>>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
... .to_digraph())
>>> layout = tower_layout(digraph)
>>> layout['AcOH(g)']
array([ 0. , -228.56450866])
Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
>>> layout = tower_layout(digraph, scale=1)
>>> layout['AcOH(g)'][1] <= 1.
True
"""
# TODO: private function of packages should not be used.
graph, center = _nx.drawing.layout._process_params(graph, center, dim)
num_nodes = len(graph)
if num_nodes == 0:
return {}
elif num_nodes == 1:
return {_nx.utils.arbitrary_element(graph): center}
paddims = max(0, (dim - 2))
if height is None:
y = _np.zeros(len(graph))
else:
y = _np.array([data for node, data in graph.nodes(data=height)])
pos_arr = _np.column_stack([_np.zeros((num_nodes, 1)), y,
_np.zeros((num_nodes, paddims))])
if scale is not None:
pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
scale=scale) + center
pos = dict(zip(graph, pos_arr))
# TODO: make test
return pos
def diagram_layout(graph, height='freeenergy', sources=None, targets=None,
pos=None, scale=None, center=None, dim=2):
"""
Position nodes such that paths are highlighted, from left to right.
Parameters
----------
graph : `networkx.Graph` or `list` of nodes
A position will be assigned to every node in graph.
height : `str` or `None`, optional
The node attribute that holds the numerical value used for the node
height. This defaults to ``'freeenergy'``. If `None`, all node heights
are set to zero.
sources : `list` of `str`
All simple paths starting at members of `sources` are considered.
Defaults to all nodes of graph.
targets : `list` of `str`
All simple paths ending at members of `targets` are considered.
Defaults to all nodes of graph.
pos : mapping, optional
Initial positions for nodes as a mapping with node as keys and
values as a coordinate `list` or `tuple`. If not specified (default),
initial positions are computed with `tower_layout`.
scale : number, optional
Scale factor for positions.
center : array-like, optional
Coordinate pair around which to center the layout. Default is the
origin.
dim : `int`
Dimension of layout. If `dim` > 2, the remaining dimensions are set to
zero in the returned positions.
Returns
-------
pos : mapping
A mapping of positions keyed by node.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import diagram_layout
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> layout = diagram_layout(digraph)
>>> layout['mCARB2']
array([ 3. , -19.8])
Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
>>> layout = diagram_layout(digraph, scale=1)
>>> layout['mTS1'][1] <= 1.
True
"""
# TODO: private function of packages should not be used.
graph, center = _nx.drawing.layout._process_params(graph, center, dim)
num_nodes = len(graph)
if num_nodes == 0:
return {}
elif num_nodes == 1:
return {_nx.utils.arbitrary_element(graph): center}
if sources is None:
sources = graph.nodes()
if targets is None:
targets = graph.nodes()
simple_paths = [path for source in set(sources) for target in set(targets)
for path in _nx.all_simple_paths(graph, source, target)]
if pos is None:
pos = tower_layout(graph, height=height, scale=None, center=center,
dim=dim)
for path in simple_paths:
for n, step in enumerate(path):
if pos[step][0] < n:
pos[step][0] = n
if scale is not None:
pos_arr = _np.array([pos[node] for node in graph])
pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
scale=scale) + center
pos = dict(zip(graph, pos_arr))
# TODO: make test
return pos
def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7,
node_color='k', style='solid', alpha=1.0, cmap=None,
vmin=None, vmax=None, ax=None, label=None):
"""
Draw nodes of graph.
This draws only the nodes of graph as horizontal lines at each
``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where
``x = pos[0]``.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default), a diagram layout
positioning will be computed. See `networkx.layout` and
`pyrrole.drawing` for functions that compute node positions.
nodelist : `list`, optional
Draw only specified nodes (default is ``graph.nodes()``).
node_size : scalar or array
Size of nodes (default is ``.7``). If an array is specified it must be
the same length as nodelist.
node_color : color `str`, or array of `float`
Node color. Can be a single color format `str` (default is ``'k'``), or
a sequence of colors with the same length as nodelist. If numeric
values are specified they will be mapped to colors using the `cmap` and
`vmin`, `vmax` parameters. See `matplotlib.hlines` for more details.
style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
Edge line style (default is ``'solid'``). See `matplotlib.hlines` for
more details.
alpha : `float` or array of `float`, optional
The node transparency. This can be a single alpha value (default is
``'1.0'``), in which case it will be applied to all the nodes of color.
Otherwise, if it is an array, the elements of alpha will be applied to
the colors in order (cycling through alpha multiple times if
necessary).
cmap : Matplotlib colormap, optional
Colormap name or Colormap instance for mapping intensities of nodes.
vmin : `float`, optional
Minimum for node colormap scaling.
vmax : `float`, optional
Maximum for node colormap scaling.
ax : `matplotlib.axes.Axes`, optional
Draw the graph in the specified Matplotlib axes.
label : `str`, optional
Label for legend.
Returns
-------
`matplotlib.collections.LineCollection`
`LineCollection` of the nodes.
Raises
------
networkx.NetworkXError
Raised if a node has no position or one with bad value.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram_nodes
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> nodes = draw_diagram_nodes(digraph)
"""
if ax is None:
ax = _plt.gca()
if nodelist is None:
nodelist = list(graph.nodes())
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
if pos is None:
pos = diagram_layout(graph)
try:
xy = _np.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise _nx.NetworkXError('Node {} has no position.'.format(e))
except ValueError:
raise _nx.NetworkXError('Bad value in node positions.')
if isinstance(alpha, _collections.Iterable):
node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist, cmap,
vmin, vmax)
alpha = None
node_collection = ax.hlines(xy[:, 1],
xy[:, 0] - node_size/2.,
xy[:, 0] + node_size/2.,
colors=node_color,
linestyles=style,
label=label,
cmap=cmap)
node_collection.set_zorder(2)
return node_collection
def draw_diagram_labels(graph, pos=None, labels=None, font_size=12,
font_color='k', font_family='sans-serif',
font_weight='normal', alpha=1.0, bbox=None, ax=None,
offset=None, **kwds):
"""
Draw node labels of graph.
This draws only the node labels of a graph.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default), a diagram layout
positioning will be computed. See `networkx.layout` and
`pyrrole.drawing` for functions that compute node positions.
labels : mapping, optional
Node labels in a mapping keyed by node of text labels.
font_size : `int`, optional
Font size for text labels (default is ``12``).
font_color : `str`, optional
Font color `str` (default is ``'k'``, i.e., black).
font_family : `str`, optional
Font family (default is ``'sans-serif'``).
font_weight : `str`, optional
Font weight (default is ``'normal'``).
alpha : `float`, optional
The text transparency (default is ``1.0``).
ax : `matplotlib.axes.Axes`, optional
Draw the graph in the specified Matplotlib axes.
offset : array-like or `str`, optional
Label positions are summed to this before drawing. Defaults to zero
vector. If `str`, can be either ``'above'`` (equivalent to
``(0, 1.5)``) or ``'below'`` (equivalent to ``(0, -1.5)``).
Returns
-------
mapping
Mapping of labels keyed on the nodes.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram_labels
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> edges = draw_diagram_labels(digraph, font_color='blue',
... offset="below")
>>> labels = {k: "{:g}".format(v)
... for k, v in digraph.nodes(data='freeenergy')}
>>> edges = draw_diagram_labels(digraph, labels=labels,
... offset="above")
"""
if ax is None:
ax = _plt.gca()
if labels is None:
labels = dict((n, n) for n in graph.nodes())
if pos is None:
pos = diagram_layout(graph)
if offset is None:
offset = _np.array([0., 0.])
elif offset == "above":
offset = _np.array([0., 1.5])
elif offset == "below":
offset = _np.array([0., -1.5])
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = _np.asanyarray(pos[n]) + _np.asanyarray(offset)
if not isinstance(label, str):
label = str(label) # this makes "1" and 1 labeled the same
t = ax.text(x, y, label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=True)
text_items[n] = t
return text_items
def draw_diagram(graph, pos=None, with_labels=True, offset=None, **kwds):
"""
Draw a diagram for graph using Matplotlib.
Draw graph as a simple energy diagram with Matplotlib with options for node
positions, labeling, titles, and many other drawing features. See examples
below.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default) a diagram layout
positioning will be computed. See `networkx.drawing.layout` and
`pyrrole.drawing` for functions that compute node positions.
with_labels : `bool`, optional
Set to `True` (default) to draw labels on the nodes.
offset : array-like or `str`, optional
Label positions are summed to this before drawing. Defaults to
``'below'``. See `draw_diagram_labels` for more.
Notes
-----
Further keywords are passed to `draw_diagram_nodes` and
`draw_diagram_edges`. If `pos` is `None`, `diagram_layout` is also called
and have keywords passed as well. The same happens with
`draw_diagram_labels` if `with_labels` is `True`.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram, draw_diagram_labels
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> draw_diagram(digraph)
>>> labels = {k: "{:g}".format(v)
... for k, v in digraph.nodes(data='freeenergy')}
>>> edges = draw_diagram_labels(digraph, labels=labels,
... offset="above")
"""
if pos is None:
pos = diagram_layout(graph, **kwds) # default to diagram layout
node_collection = draw_diagram_nodes(graph, pos, **kwds) # noqa
edge_collection = draw_diagram_edges(graph, pos, **kwds) # noqa
if with_labels:
if offset is None:
# TODO: This changes the default behaviour of draw_diagram_labels.
offset = "below"
draw_diagram_labels(graph, pos, offset=offset, **kwds)
_plt.draw_if_interactive()
|
schneiderfelipe/pyrrole | pyrrole/drawing.py | draw_diagram_labels | python | def draw_diagram_labels(graph, pos=None, labels=None, font_size=12,
font_color='k', font_family='sans-serif',
font_weight='normal', alpha=1.0, bbox=None, ax=None,
offset=None, **kwds):
if ax is None:
ax = _plt.gca()
if labels is None:
labels = dict((n, n) for n in graph.nodes())
if pos is None:
pos = diagram_layout(graph)
if offset is None:
offset = _np.array([0., 0.])
elif offset == "above":
offset = _np.array([0., 1.5])
elif offset == "below":
offset = _np.array([0., -1.5])
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = _np.asanyarray(pos[n]) + _np.asanyarray(offset)
if not isinstance(label, str):
label = str(label) # this makes "1" and 1 labeled the same
t = ax.text(x, y, label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=True)
text_items[n] = t
return text_items | Draw node labels of graph.
This draws only the node labels of a graph.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default), a diagram layout
positioning will be computed. See `networkx.layout` and
`pyrrole.drawing` for functions that compute node positions.
labels : mapping, optional
Node labels in a mapping keyed by node of text labels.
font_size : `int`, optional
Font size for text labels (default is ``12``).
font_color : `str`, optional
Font color `str` (default is ``'k'``, i.e., black).
font_family : `str`, optional
Font family (default is ``'sans-serif'``).
font_weight : `str`, optional
Font weight (default is ``'normal'``).
alpha : `float`, optional
The text transparency (default is ``1.0``).
ax : `matplotlib.axes.Axes`, optional
Draw the graph in the specified Matplotlib axes.
offset : array-like or `str`, optional
Label positions are summed to this before drawing. Defaults to zero
vector. If `str`, can be either ``'above'`` (equivalent to
``(0, 1.5)``) or ``'below'`` (equivalent to ``(0, -1.5)``).
Returns
-------
mapping
Mapping of labels keyed on the nodes.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram_labels
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> edges = draw_diagram_labels(digraph, font_color='blue',
... offset="below")
>>> labels = {k: "{:g}".format(v)
... for k, v in digraph.nodes(data='freeenergy')}
>>> edges = draw_diagram_labels(digraph, labels=labels,
... offset="above") | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/drawing.py#L539-L646 | [
"def diagram_layout(graph, height='freeenergy', sources=None, targets=None,\n pos=None, scale=None, center=None, dim=2):\n \"\"\"\n Position nodes such that paths are highlighted, from left to right.\n\n Parameters\n ----------\n graph : `networkx.Graph` or `list` of nodes\n A position will be assigned to every node in graph.\n height : `str` or `None`, optional\n The node attribute that holds the numerical value used for the node\n height. This defaults to ``'freeenergy'``. If `None`, all node heights\n are set to zero.\n sources : `list` of `str`\n All simple paths starting at members of `sources` are considered.\n Defaults to all nodes of graph.\n targets : `list` of `str`\n All simple paths ending at members of `targets` are considered.\n Defaults to all nodes of graph.\n pos : mapping, optional\n Initial positions for nodes as a mapping with node as keys and\n values as a coordinate `list` or `tuple`. If not specified (default),\n initial positions are computed with `tower_layout`.\n scale : number, optional\n Scale factor for positions.\n center : array-like, optional\n Coordinate pair around which to center the layout. Default is the\n origin.\n dim : `int`\n Dimension of layout. If `dim` > 2, the remaining dimensions are set to\n zero in the returned positions.\n\n Returns\n -------\n pos : mapping\n A mapping of positions keyed by node.\n\n Examples\n --------\n >>> import pandas as pd\n >>> from pyrrole import ChemicalSystem\n >>> from pyrrole.drawing import diagram_layout\n >>> data = pd.DataFrame(\n ... [{\"name\": \"Separated_Reactants\", \"freeenergy\": 0.},\n ... {\"name\": \"mlC1\", \"freeenergy\": -5.4},\n ... {\"name\": \"mlC2\", \"freeenergy\": -15.6},\n ... {\"name\": \"mTS1\", \"freeenergy\": 28.5, \"color\": \"g\"},\n ... {\"name\": \"mCARB1\", \"freeenergy\": -9.7},\n ... {\"name\": \"mCARB2\", \"freeenergy\": -19.8},\n ... {\"name\": \"mCARBX\", \"freeenergy\": 20}]).set_index(\"name\")\n >>> system = ChemicalSystem(\n ... 
[\"Separated_Reactants -> mlC1 -> mTS1\",\n ... \"Separated_Reactants -> mlC2 -> mTS1\",\n ... \"mCARB2 <- mTS1 -> mCARB1\",\n ... \"Separated_Reactants -> mCARBX\"], data)\n >>> digraph = system.to_digraph()\n >>> layout = diagram_layout(digraph)\n >>> layout['mCARB2']\n array([ 3. , -19.8])\n\n Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:\n\n >>> layout = diagram_layout(digraph, scale=1)\n >>> layout['mTS1'][1] <= 1.\n True\n\n \"\"\"\n # TODO: private function of packages should not be used.\n graph, center = _nx.drawing.layout._process_params(graph, center, dim)\n\n num_nodes = len(graph)\n if num_nodes == 0:\n return {}\n elif num_nodes == 1:\n return {_nx.utils.arbitrary_element(graph): center}\n\n if sources is None:\n sources = graph.nodes()\n if targets is None:\n targets = graph.nodes()\n simple_paths = [path for source in set(sources) for target in set(targets)\n for path in _nx.all_simple_paths(graph, source, target)]\n\n if pos is None:\n pos = tower_layout(graph, height=height, scale=None, center=center,\n dim=dim)\n\n for path in simple_paths:\n for n, step in enumerate(path):\n if pos[step][0] < n:\n pos[step][0] = n\n\n if scale is not None:\n pos_arr = _np.array([pos[node] for node in graph])\n pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,\n scale=scale) + center\n pos = dict(zip(graph, pos_arr))\n\n # TODO: make test\n return pos\n"
] | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for drawing chemical diagrams."""
# TODO: revisit all examples in this module.
import os
import collections as _collections
import numpy as _np
import networkx as _nx
import matplotlib as _mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
_mpl.use('Agg')
import matplotlib.cbook as _cb # noqa
import matplotlib.pyplot as _plt # noqa
from matplotlib.colors import Colormap as _Colormap # noqa
from matplotlib.colors import colorConverter as _colorConverter # noqa
from matplotlib.collections import LineCollection as _LineCollection # noqa
def _longest_common_subsequence(x, y):
"""
Return the longest common subsequence between two sequences.
Parameters
----------
x, y : sequence
Returns
-------
sequence
Longest common subsequence of x and y.
Examples
--------
>>> _longest_common_subsequence("AGGTAB", "GXTXAYB")
['G', 'T', 'A', 'B']
>>> _longest_common_subsequence(["A", "GA", "G", "T", "A", "B"],
... ["GA", "X", "T", "X", "A", "Y", "B"])
['GA', 'T', 'A', 'B']
"""
m = len(x)
n = len(y)
# L[i, j] will contain the length of the longest common subsequence of
# x[0..i - 1] and y[0..j - 1].
L = _np.zeros((m + 1, n + 1), dtype=int)
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
continue
elif x[i - 1] == y[j - 1]:
L[i, j] = L[i - 1, j - 1] + 1
else:
L[i, j] = max(L[i - 1, j], L[i, j - 1])
ret = []
i, j = m, n
while i > 0 and j > 0:
# If current character in x and y are same, then current character is
# part of the longest common subsequence.
if x[i - 1] == y[j - 1]:
ret.append(x[i - 1])
i, j = i - 1, j - 1
# If not same, then find the larger of two and go in the direction of
# larger value.
elif L[i - 1, j] > L[i, j - 1]:
i -= 1
else:
j -= 1
return ret[::-1]
def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
    """
    Position all nodes of graph stacked on top of each other.

    Every node is placed at ``x = 0`` with its ``y`` taken from the `height`
    node attribute.

    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        The node attribute that holds the numerical value used for the node
        height. This defaults to ``'freeenergy'``. If `None`, all node heights
        are set to zero.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is the
        origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are set to
        zero in the returned positions.

    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> from pyrrole.drawing import tower_layout
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
    ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
    >>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
    ...            .to_digraph())
    >>> layout = tower_layout(digraph)
    >>> layout['AcOH(g)']
    array([   0.        , -228.56450866])

    Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:

    >>> layout = tower_layout(digraph, scale=1)
    >>> layout['AcOH(g)'][1] <= 1.
    True
    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)

    num_nodes = len(graph)
    if num_nodes == 0:
        return {}
    if num_nodes == 1:
        return {_nx.utils.arbitrary_element(graph): center}

    if height is None:
        heights = _np.zeros(num_nodes)
    else:
        heights = _np.array([value for _, value in graph.nodes(data=height)])

    # Coordinates are (x, y, 0, ...): x is zero for every node, y carries the
    # height attribute, extra dimensions (when dim > 2) are zero-padded.
    extra_dims = max(0, dim - 2)
    coordinates = _np.zeros((num_nodes, 2 + extra_dims))
    coordinates[:, 1] = heights

    if scale is not None:
        # NOTE(review): center is applied only when scale is given —
        # confirm that ignoring center for unscaled layouts is intended.
        coordinates = _nx.drawing.layout.rescale_layout(coordinates,
                                                        scale=scale) + center

    # TODO: make test
    return dict(zip(graph, coordinates))
def diagram_layout(graph, height='freeenergy', sources=None, targets=None,
                   pos=None, scale=None, center=None, dim=2):
    """
    Position nodes such that paths are highlighted, from left to right.

    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        The node attribute that holds the numerical value used for the node
        height. This defaults to ``'freeenergy'``. If `None`, all node heights
        are set to zero.
    sources : `list` of `str`
        All simple paths starting at members of `sources` are considered.
        Defaults to all nodes of graph.
    targets : `list` of `str`
        All simple paths ending at members of `targets` are considered.
        Defaults to all nodes of graph.
    pos : mapping, optional
        Initial positions for nodes as a mapping with node as keys and
        values as a coordinate `list` or `tuple`. If not specified (default),
        initial positions are computed with `tower_layout`. The given mapping
        is not modified.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is the
        origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are set to
        zero in the returned positions.

    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import diagram_layout
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> layout = diagram_layout(digraph)
    >>> layout['mCARB2']
    array([  3. , -19.8])

    Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:

    >>> layout = diagram_layout(digraph, scale=1)
    >>> layout['mTS1'][1] <= 1.
    True

    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)

    num_nodes = len(graph)
    if num_nodes == 0:
        return {}
    elif num_nodes == 1:
        return {_nx.utils.arbitrary_element(graph): center}

    if sources is None:
        sources = graph.nodes()
    if targets is None:
        targets = graph.nodes()
    simple_paths = [path for source in set(sources) for target in set(targets)
                    for path in _nx.all_simple_paths(graph, source, target)]

    if pos is None:
        pos = tower_layout(graph, height=height, scale=None, center=center,
                           dim=dim)
    else:
        # Copy values into mutable float arrays: the x-assignment below would
        # raise TypeError for tuple positions (documented as valid input) and
        # would otherwise mutate the caller's mapping in place.
        pos = {node: _np.array(coordinates, dtype=float)
               for node, coordinates in pos.items()}

    # Push each node at least as far right as its deepest position along any
    # simple path, so diagrams read left to right.
    for path in simple_paths:
        for n, step in enumerate(path):
            if pos[step][0] < n:
                pos[step][0] = n

    if scale is not None:
        pos_arr = _np.array([pos[node] for node in graph])
        pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
                                                    scale=scale) + center
        pos = dict(zip(graph, pos_arr))

    # TODO: make test
    return pos
def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7,
                       node_color='k', style='solid', alpha=1.0, cmap=None,
                       vmin=None, vmax=None, ax=None, label=None):
    """
    Draw nodes of graph.

    This draws only the nodes of graph as horizontal lines at each
    ``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where
    ``x = pos[0]``.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default), a diagram layout
        positioning will be computed. See `networkx.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Size of nodes (default is ``.7``). If an array is specified it must be
        the same length as nodelist.
    node_color : color `str`, or array of `float`
        Node color. Can be a single color format `str` (default is ``'k'``), or
        a sequence of colors with the same length as nodelist. If numeric
        values are specified they will be mapped to colors using the `cmap` and
        `vmin`, `vmax` parameters. See `matplotlib.hlines` for more details.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Edge line style (default is ``'solid'``). See `matplotlib.hlines` for
        more details.
    alpha : `float` or array of `float`, optional
        The node transparency. This can be a single alpha value (default is
        ``'1.0'``), in which case it will be applied to all the nodes of color.
        Otherwise, if it is an array, the elements of alpha will be applied to
        the colors in order (cycling through alpha multiple times if
        necessary).
    cmap : Matplotlib colormap, optional
        Colormap name or Colormap instance for mapping intensities of nodes.
    vmin : `float`, optional
        Minimum for node colormap scaling.
    vmax : `float`, optional
        Maximum for node colormap scaling.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.

    Returns
    -------
    `matplotlib.collections.LineCollection`
        `LineCollection` of the nodes.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram_nodes
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> nodes = draw_diagram_nodes(digraph)
    """
    if ax is None:
        ax = _plt.gca()

    if nodelist is None:
        nodelist = list(graph.nodes())

    if not nodelist or len(nodelist) == 0:  # empty nodelist, no drawing
        return None

    if pos is None:
        pos = diagram_layout(graph)

    try:
        xy = _np.asarray([pos[v] for v in nodelist])
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')

    # An iterable alpha is baked into per-node colors. numpy.iterable is used
    # because the collections.Iterable alias was removed in Python 3.10.
    if _np.iterable(alpha):
        node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist, cmap,
                                             vmin, vmax)
        alpha = None

    # Each node is a horizontal segment of length node_size centered at its x.
    node_collection = ax.hlines(xy[:, 1],
                                xy[:, 0] - node_size/2.,
                                xy[:, 0] + node_size/2.,
                                colors=node_color,
                                linestyles=style,
                                label=label,
                                cmap=cmap)

    node_collection.set_zorder(2)  # nodes go in front of edges
    return node_collection
def draw_diagram_edges(graph, pos=None, edgelist=None, width=1.0,
                       edge_color='k', style='dashed', alpha=1.0,
                       edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None,
                       label=None, nodelist=None, node_size=.7):
    """
    Draw edges of graph.

    This draws only the edges of a graph.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default), a diagram layout
        positioning will be computed. See `networkx.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    edgelist : collection of edge `tuple`
        Draw only specified edges (default is ``graph.edges()``).
    width : `float`, or array of `float`
        Line width of edges (default is ``1.0``).
    edge_color : color `str`, or array of `float`
        Edge color. Can be a single color format `str` (default is ``'r'``),
        or a sequence of colors with the same length as edgelist. If numeric
        values are specified they will be mapped to colors using the
        `edge_cmap` and `edge_vmin`, `edge_vmax` parameters.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Edge line style (default is ``'dashed'``). See `matplotlib.hlines` for
        more details.
    alpha : `float`, optional
        The edge transparency (default is ``1.0``).
    edge_cmap : Matplotlib colormap, optional
        Colormap for mapping intensities of edges.
    edge_vmin : `float`, optional
        Minimum for edge colormap scaling.
    edge_vmax : `float`, optional
        Maximum for edge colormap scaling.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Size of nodes (default is ``.7``). If an array is specified it must be
        the same length as nodelist.

    Returns
    -------
    `matplotlib.collections.LineCollection`
        `LineCollection` of the edges.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.
    ValueError
        Raised if `edge_color` contains something other than color names (one
        or a list of one per edge) or numbers.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram_edges
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> edges = draw_diagram_edges(digraph)
    """
    if ax is None:
        ax = _plt.gca()

    if edgelist is None:
        edgelist = list(graph.edges())

    if not edgelist or len(edgelist) == 0:  # no edges!
        return None

    if nodelist is None:
        nodelist = list(graph.nodes())

    if pos is None:
        pos = diagram_layout(graph)

    try:
        # Set edge positions: connect the right end of the source node's
        # horizontal segment to the left end of the target's. Only the
        # x-coordinate is shifted by node_size/2; adding the scalar to the
        # whole position vector (as previously done) also displaced the
        # y-coordinate of every endpoint by node_size/2.
        # NOTE(review): assumes a scalar node_size — confirm array support.
        half_size = node_size / 2.
        segments = []
        for e in edgelist:
            start = _np.array(pos[e[0]], dtype=float)
            stop = _np.array(pos[e[1]], dtype=float)
            start[0] += half_size
            stop[0] -= half_size
            segments.append((start, stop))
        edge_pos = _np.asarray(segments)
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')

    # matplotlib.cbook.iterable was removed in Matplotlib 3.1; numpy.iterable
    # is the drop-in replacement.
    if not _np.iterable(width):
        lw = (width,)
    else:
        lw = width

    if not isinstance(edge_color, str) \
            and _np.iterable(edge_color) \
            and len(edge_color) == len(edge_pos):
        if _np.all([isinstance(c, str) for c in edge_color]):
            # (should check ALL elements)
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([_colorConverter.to_rgba(c, alpha)
                                 for c in edge_color])
        elif _np.all([not isinstance(c, str) for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if _np.all([_np.iterable(c) and len(c) in (3, 4)
                        for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must contain color names or numbers')
    else:
        if isinstance(edge_color, str) or len(edge_color) == 1:
            edge_colors = (_colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a color or list of one color '
                             ' per edge')

    edge_collection = _LineCollection(edge_pos,
                                      colors=edge_colors,
                                      linewidths=lw,
                                      antialiaseds=(1,),
                                      linestyle=style,
                                      transOffset=ax.transData)

    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)

    # matplotlib.cbook.is_numlike was removed in Matplotlib 3.1; a plain
    # numeric isinstance check covers the same scalar-alpha case.
    if isinstance(alpha, (int, float)):
        edge_collection.set_alpha(alpha)

    if edge_colors is None:
        # Numeric edge_color values: map them through the colormap.
        if edge_cmap is not None:
            assert(isinstance(edge_cmap, _Colormap))
        edge_collection.set_array(_np.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()

    ax.autoscale_view()
    return edge_collection
def draw_diagram(graph, pos=None, with_labels=True, offset=None, **kwds):
    """
    Draw a diagram for graph using Matplotlib.

    Draw graph as a simple energy diagram with Matplotlib with options for node
    positions, labeling, titles, and many other drawing features. See examples
    below.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and positions as values. Positions should
        be sequences of length 2. If not specified (default) a diagram layout
        positioning will be computed. See `networkx.drawing.layout` and
        `pyrrole.drawing` for functions that compute node positions.
    with_labels : `bool`, optional
        Set to `True` (default) to draw labels on the nodes.
    offset : array-like or `str`, optional
        Label positions are summed to this before drawing. Defaults to
        ``'below'``. See `draw_diagram_labels` for more.

    Notes
    -----
    Further keywords are passed to `draw_diagram_nodes` and
    `draw_diagram_edges`. If `pos` is `None`, `diagram_layout` is also called
    and have keywords passed as well. The same happens with
    `draw_diagram_labels` if `with_labels` is `True`. Each callee only
    receives the keywords it understands.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import draw_diagram, draw_diagram_labels
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> digraph = system.to_digraph()
    >>> draw_diagram(digraph)
    >>> labels = {k: "{:g}".format(v)
    ...           for k, v in digraph.nodes(data='freeenergy')}
    >>> edges = draw_diagram_labels(digraph, labels=labels,
    ...                             offset="above")
    """
    # diagram_layout, draw_diagram_nodes and draw_diagram_edges do not accept
    # arbitrary keywords, so forwarding **kwds verbatim raised TypeError for
    # any styling keyword (e.g. ``node_color``). Split the keywords per
    # callee, mirroring the approach of networkx.draw_networkx.
    layout_keys = {'height', 'sources', 'targets', 'scale', 'center', 'dim'}
    node_keys = {'nodelist', 'node_size', 'node_color', 'style', 'alpha',
                 'cmap', 'vmin', 'vmax', 'ax', 'label'}
    edge_keys = {'edgelist', 'width', 'edge_color', 'style', 'alpha',
                 'edge_cmap', 'edge_vmin', 'edge_vmax', 'ax', 'label',
                 'nodelist', 'node_size'}

    if pos is None:
        # default to diagram layout
        pos = diagram_layout(graph, **{k: v for k, v in kwds.items()
                                       if k in layout_keys})
    node_collection = draw_diagram_nodes(  # noqa
        graph, pos, **{k: v for k, v in kwds.items() if k in node_keys})
    edge_collection = draw_diagram_edges(  # noqa
        graph, pos, **{k: v for k, v in kwds.items() if k in edge_keys})
    if with_labels:
        if offset is None:
            # TODO: This changes the default behaviour of draw_diagram_labels.
            offset = "below"
        # draw_diagram_labels takes **kwds itself, so pass everything on.
        draw_diagram_labels(graph, pos, offset=offset, **kwds)
    _plt.draw_if_interactive()
|
schneiderfelipe/pyrrole | pyrrole/drawing.py | draw_diagram | python | def draw_diagram(graph, pos=None, with_labels=True, offset=None, **kwds):
if pos is None:
pos = diagram_layout(graph, **kwds) # default to diagram layout
node_collection = draw_diagram_nodes(graph, pos, **kwds) # noqa
edge_collection = draw_diagram_edges(graph, pos, **kwds) # noqa
if with_labels:
if offset is None:
# TODO: This changes the default behaviour of draw_diagram_labels.
offset = "below"
draw_diagram_labels(graph, pos, offset=offset, **kwds)
_plt.draw_if_interactive() | Draw a diagram for graph using Matplotlib.
Draw graph as a simple energy diagram with Matplotlib with options for node
positions, labeling, titles, and many other drawing features. See examples
below.
Parameters
----------
graph : `networkx.Graph`
A NetworkX graph.
pos : mapping, optional
A mapping with nodes as keys and positions as values. Positions should
be sequences of length 2. If not specified (default) a diagram layout
positioning will be computed. See `networkx.drawing.layout` and
`pyrrole.drawing` for functions that compute node positions.
with_labels : `bool`, optional
Set to `True` (default) to draw labels on the nodes.
offset : array-like or `str`, optional
Label positions are summed to this before drawing. Defaults to
``'below'``. See `draw_diagram_labels` for more.
Notes
-----
Further keywords are passed to `draw_diagram_nodes` and
`draw_diagram_edges`. If `pos` is `None`, `diagram_layout` is also called
and have keywords passed as well. The same happens with
`draw_diagram_labels` if `with_labels` is `True`.
Examples
--------
>>> import pandas as pd
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.drawing import draw_diagram, draw_diagram_labels
>>> data = pd.DataFrame(
... [{"name": "Separated_Reactants", "freeenergy": 0.},
... {"name": "mlC1", "freeenergy": -5.4},
... {"name": "mlC2", "freeenergy": -15.6},
... {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
... {"name": "mCARB1", "freeenergy": -9.7},
... {"name": "mCARB2", "freeenergy": -19.8},
... {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
>>> system = ChemicalSystem(
... ["Separated_Reactants -> mlC1 -> mTS1",
... "Separated_Reactants -> mlC2 -> mTS1",
... "mCARB2 <- mTS1 -> mCARB1",
... "Separated_Reactants -> mCARBX"], data)
>>> digraph = system.to_digraph()
>>> draw_diagram(digraph)
>>> labels = {k: "{:g}".format(v)
... for k, v in digraph.nodes(data='freeenergy')}
>>> edges = draw_diagram_labels(digraph, labels=labels,
... offset="above") | train | https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/drawing.py#L649-L715 | [
"def diagram_layout(graph, height='freeenergy', sources=None, targets=None,\n pos=None, scale=None, center=None, dim=2):\n \"\"\"\n Position nodes such that paths are highlighted, from left to right.\n\n Parameters\n ----------\n graph : `networkx.Graph` or `list` of nodes\n A position will be assigned to every node in graph.\n height : `str` or `None`, optional\n The node attribute that holds the numerical value used for the node\n height. This defaults to ``'freeenergy'``. If `None`, all node heights\n are set to zero.\n sources : `list` of `str`\n All simple paths starting at members of `sources` are considered.\n Defaults to all nodes of graph.\n targets : `list` of `str`\n All simple paths ending at members of `targets` are considered.\n Defaults to all nodes of graph.\n pos : mapping, optional\n Initial positions for nodes as a mapping with node as keys and\n values as a coordinate `list` or `tuple`. If not specified (default),\n initial positions are computed with `tower_layout`.\n scale : number, optional\n Scale factor for positions.\n center : array-like, optional\n Coordinate pair around which to center the layout. Default is the\n origin.\n dim : `int`\n Dimension of layout. If `dim` > 2, the remaining dimensions are set to\n zero in the returned positions.\n\n Returns\n -------\n pos : mapping\n A mapping of positions keyed by node.\n\n Examples\n --------\n >>> import pandas as pd\n >>> from pyrrole import ChemicalSystem\n >>> from pyrrole.drawing import diagram_layout\n >>> data = pd.DataFrame(\n ... [{\"name\": \"Separated_Reactants\", \"freeenergy\": 0.},\n ... {\"name\": \"mlC1\", \"freeenergy\": -5.4},\n ... {\"name\": \"mlC2\", \"freeenergy\": -15.6},\n ... {\"name\": \"mTS1\", \"freeenergy\": 28.5, \"color\": \"g\"},\n ... {\"name\": \"mCARB1\", \"freeenergy\": -9.7},\n ... {\"name\": \"mCARB2\", \"freeenergy\": -19.8},\n ... {\"name\": \"mCARBX\", \"freeenergy\": 20}]).set_index(\"name\")\n >>> system = ChemicalSystem(\n ... 
[\"Separated_Reactants -> mlC1 -> mTS1\",\n ... \"Separated_Reactants -> mlC2 -> mTS1\",\n ... \"mCARB2 <- mTS1 -> mCARB1\",\n ... \"Separated_Reactants -> mCARBX\"], data)\n >>> digraph = system.to_digraph()\n >>> layout = diagram_layout(digraph)\n >>> layout['mCARB2']\n array([ 3. , -19.8])\n\n Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:\n\n >>> layout = diagram_layout(digraph, scale=1)\n >>> layout['mTS1'][1] <= 1.\n True\n\n \"\"\"\n # TODO: private function of packages should not be used.\n graph, center = _nx.drawing.layout._process_params(graph, center, dim)\n\n num_nodes = len(graph)\n if num_nodes == 0:\n return {}\n elif num_nodes == 1:\n return {_nx.utils.arbitrary_element(graph): center}\n\n if sources is None:\n sources = graph.nodes()\n if targets is None:\n targets = graph.nodes()\n simple_paths = [path for source in set(sources) for target in set(targets)\n for path in _nx.all_simple_paths(graph, source, target)]\n\n if pos is None:\n pos = tower_layout(graph, height=height, scale=None, center=center,\n dim=dim)\n\n for path in simple_paths:\n for n, step in enumerate(path):\n if pos[step][0] < n:\n pos[step][0] = n\n\n if scale is not None:\n pos_arr = _np.array([pos[node] for node in graph])\n pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,\n scale=scale) + center\n pos = dict(zip(graph, pos_arr))\n\n # TODO: make test\n return pos\n",
"def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7,\n node_color='k', style='solid', alpha=1.0, cmap=None,\n vmin=None, vmax=None, ax=None, label=None):\n \"\"\"\n Draw nodes of graph.\n\n This draws only the nodes of graph as horizontal lines at each\n ``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where\n ``x = pos[0]``.\n\n Parameters\n ----------\n graph : `networkx.Graph`\n A NetworkX graph.\n pos : mapping, optional\n A mapping with nodes as keys and positions as values. Positions should\n be sequences of length 2. If not specified (default), a diagram layout\n positioning will be computed. See `networkx.layout` and\n `pyrrole.drawing` for functions that compute node positions.\n nodelist : `list`, optional\n Draw only specified nodes (default is ``graph.nodes()``).\n node_size : scalar or array\n Size of nodes (default is ``.7``). If an array is specified it must be\n the same length as nodelist.\n node_color : color `str`, or array of `float`\n Node color. Can be a single color format `str` (default is ``'k'``), or\n a sequence of colors with the same length as nodelist. If numeric\n values are specified they will be mapped to colors using the `cmap` and\n `vmin`, `vmax` parameters. See `matplotlib.hlines` for more details.\n style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)\n Edge line style (default is ``'solid'``). See `matplotlib.hlines` for\n more details.\n alpha : `float` or array of `float`, optional\n The node transparency. 
This can be a single alpha value (default is\n ``'1.0'``), in which case it will be applied to all the nodes of color.\n Otherwise, if it is an array, the elements of alpha will be applied to\n the colors in order (cycling through alpha multiple times if\n necessary).\n cmap : Matplotlib colormap, optional\n Colormap name or Colormap instance for mapping intensities of nodes.\n vmin : `float`, optional\n Minimum for node colormap scaling.\n vmax : `float`, optional\n Maximum for node colormap scaling.\n ax : `matplotlib.axes.Axes`, optional\n Draw the graph in the specified Matplotlib axes.\n label : `str`, optional\n Label for legend.\n\n Returns\n -------\n `matplotlib.collections.LineCollection`\n `LineCollection` of the nodes.\n\n Raises\n ------\n networkx.NetworkXError\n Raised if a node has no position or one with bad value.\n\n Examples\n --------\n >>> import pandas as pd\n >>> from pyrrole import ChemicalSystem\n >>> from pyrrole.drawing import draw_diagram_nodes\n >>> data = pd.DataFrame(\n ... [{\"name\": \"Separated_Reactants\", \"freeenergy\": 0.},\n ... {\"name\": \"mlC1\", \"freeenergy\": -5.4},\n ... {\"name\": \"mlC2\", \"freeenergy\": -15.6},\n ... {\"name\": \"mTS1\", \"freeenergy\": 28.5, \"color\": \"g\"},\n ... {\"name\": \"mCARB1\", \"freeenergy\": -9.7},\n ... {\"name\": \"mCARB2\", \"freeenergy\": -19.8},\n ... {\"name\": \"mCARBX\", \"freeenergy\": 20}]).set_index(\"name\")\n >>> system = ChemicalSystem(\n ... [\"Separated_Reactants -> mlC1 -> mTS1\",\n ... \"Separated_Reactants -> mlC2 -> mTS1\",\n ... \"mCARB2 <- mTS1 -> mCARB1\",\n ... 
\"Separated_Reactants -> mCARBX\"], data)\n >>> digraph = system.to_digraph()\n >>> nodes = draw_diagram_nodes(digraph)\n\n \"\"\"\n if ax is None:\n ax = _plt.gca()\n\n if nodelist is None:\n nodelist = list(graph.nodes())\n\n if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing\n return None\n\n if pos is None:\n pos = diagram_layout(graph)\n\n try:\n xy = _np.asarray([pos[v] for v in nodelist])\n except KeyError as e:\n raise _nx.NetworkXError('Node {} has no position.'.format(e))\n except ValueError:\n raise _nx.NetworkXError('Bad value in node positions.')\n\n if isinstance(alpha, _collections.Iterable):\n node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist, cmap,\n vmin, vmax)\n alpha = None\n\n node_collection = ax.hlines(xy[:, 1],\n xy[:, 0] - node_size/2.,\n xy[:, 0] + node_size/2.,\n colors=node_color,\n linestyles=style,\n label=label,\n cmap=cmap)\n\n node_collection.set_zorder(2)\n return node_collection\n",
"def draw_diagram_edges(graph, pos=None, edgelist=None, width=1.0,\n edge_color='k', style='dashed', alpha=1.0,\n edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None,\n label=None, nodelist=None, node_size=.7):\n \"\"\"\n Draw edges of graph.\n\n This draws only the edges of a graph.\n\n Parameters\n ----------\n graph : `networkx.Graph`\n A NetworkX graph.\n pos : mapping, optional\n A mapping with nodes as keys and positions as values. Positions should\n be sequences of length 2. If not specified (default), a diagram layout\n positioning will be computed. See `networkx.layout` and\n `pyrrole.drawing` for functions that compute node positions.\n edgelist : collection of edge `tuple`\n Draw only specified edges (default is ``graph.edges()``).\n width : `float`, or array of `float`\n Line width of edges (default is ``1.0``).\n edge_color : color `str`, or array of `float`\n Edge color. Can be a single color format `str` (default is ``'r'``),\n or a sequence of colors with the same length as edgelist. If numeric\n values are specified they will be mapped to colors using the\n `edge_cmap` and `edge_vmin`, `edge_vmax` parameters.\n style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)\n Edge line style (default is ``'dashed'``). See `matplotlib.hlines` for\n more details.\n alpha : `float`, optional\n The edge transparency (default is ``1.0``).\n edge_cmap : Matplotlib colormap, optional\n Colormap for mapping intensities of edges.\n edge_vmin : `float`, optional\n Minimum for edge colormap scaling.\n edge_vmax : `float`, optional\n Maximum for edge colormap scaling.\n ax : `matplotlib.axes.Axes`, optional\n Draw the graph in the specified Matplotlib axes.\n label : `str`, optional\n Label for legend.\n nodelist : `list`, optional\n Draw only specified nodes (default is ``graph.nodes()``).\n node_size : scalar or array\n Size of nodes (default is ``.7``). 
If an array is specified it must be\n the same length as nodelist.\n\n Returns\n -------\n `matplotlib.collections.LineCollection`\n `LineCollection` of the edges.\n\n Raises\n ------\n networkx.NetworkXError\n Raised if a node has no position or one with bad value.\n ValueError\n Raised if `edge_color` contains something other than color names (one\n or a list of one per edge) or numbers.\n\n Examples\n --------\n >>> import pandas as pd\n >>> from pyrrole import ChemicalSystem\n >>> from pyrrole.drawing import draw_diagram_edges\n >>> data = pd.DataFrame(\n ... [{\"name\": \"Separated_Reactants\", \"freeenergy\": 0.},\n ... {\"name\": \"mlC1\", \"freeenergy\": -5.4},\n ... {\"name\": \"mlC2\", \"freeenergy\": -15.6},\n ... {\"name\": \"mTS1\", \"freeenergy\": 28.5, \"color\": \"g\"},\n ... {\"name\": \"mCARB1\", \"freeenergy\": -9.7},\n ... {\"name\": \"mCARB2\", \"freeenergy\": -19.8},\n ... {\"name\": \"mCARBX\", \"freeenergy\": 20}]).set_index(\"name\")\n >>> system = ChemicalSystem(\n ... [\"Separated_Reactants -> mlC1 -> mTS1\",\n ... \"Separated_Reactants -> mlC2 -> mTS1\",\n ... \"mCARB2 <- mTS1 -> mCARB1\",\n ... \"Separated_Reactants -> mCARBX\"], data)\n >>> digraph = system.to_digraph()\n >>> edges = draw_diagram_edges(digraph)\n\n \"\"\"\n if ax is None:\n ax = _plt.gca()\n\n if edgelist is None:\n edgelist = list(graph.edges())\n\n if not edgelist or len(edgelist) == 0: # no edges!\n return None\n\n if nodelist is None:\n nodelist = list(graph.nodes())\n\n if pos is None:\n pos = diagram_layout(graph)\n\n try:\n # set edge positions\n edge_pos = _np.asarray([(pos[e[0]] + node_size/2.,\n pos[e[1]] - node_size/2.) 
for e in edgelist])\n except KeyError as e:\n raise _nx.NetworkXError('Node {} has no position.'.format(e))\n except ValueError:\n raise _nx.NetworkXError('Bad value in node positions.')\n\n if not _cb.iterable(width):\n lw = (width,)\n else:\n lw = width\n\n if not isinstance(edge_color, str) \\\n and _cb.iterable(edge_color) \\\n and len(edge_color) == len(edge_pos):\n if _np.alltrue([isinstance(c, str) for c in edge_color]):\n # (should check ALL elements)\n # list of color letters such as ['k','r','k',...]\n edge_colors = tuple([_colorConverter.to_rgba(c, alpha)\n for c in edge_color])\n elif _np.alltrue([not isinstance(c, str) for c in edge_color]):\n # If color specs are given as (rgb) or (rgba) tuples, we're OK\n if _np.alltrue([_cb.iterable(c) and len(c) in (3, 4)\n for c in edge_color]):\n edge_colors = tuple(edge_color)\n else:\n # numbers (which are going to be mapped with a colormap)\n edge_colors = None\n else:\n raise ValueError('edge_color must contain color names or numbers')\n else:\n if isinstance(edge_color, str) or len(edge_color) == 1:\n edge_colors = (_colorConverter.to_rgba(edge_color, alpha), )\n else:\n raise ValueError('edge_color must be a color or list of one color '\n ' per edge')\n\n edge_collection = _LineCollection(edge_pos,\n colors=edge_colors,\n linewidths=lw,\n antialiaseds=(1,),\n linestyle=style,\n transOffset=ax.transData)\n\n edge_collection.set_zorder(1) # edges go behind nodes\n edge_collection.set_label(label)\n ax.add_collection(edge_collection)\n\n if _cb.is_numlike(alpha):\n edge_collection.set_alpha(alpha)\n\n if edge_colors is None:\n if edge_cmap is not None:\n assert(isinstance(edge_cmap, _Colormap))\n edge_collection.set_array(_np.asarray(edge_color))\n edge_collection.set_cmap(edge_cmap)\n if edge_vmin is not None or edge_vmax is not None:\n edge_collection.set_clim(edge_vmin, edge_vmax)\n else:\n edge_collection.autoscale()\n\n ax.autoscale_view()\n return edge_collection\n",
"def draw_diagram_labels(graph, pos=None, labels=None, font_size=12,\n font_color='k', font_family='sans-serif',\n font_weight='normal', alpha=1.0, bbox=None, ax=None,\n offset=None, **kwds):\n \"\"\"\n Draw node labels of graph.\n\n This draws only the node labels of a graph.\n\n Parameters\n ----------\n graph : `networkx.Graph`\n A NetworkX graph.\n pos : mapping, optional\n A mapping with nodes as keys and positions as values. Positions should\n be sequences of length 2. If not specified (default), a diagram layout\n positioning will be computed. See `networkx.layout` and\n `pyrrole.drawing` for functions that compute node positions.\n labels : mapping, optional\n Node labels in a mapping keyed by node of text labels.\n font_size : `int`, optional\n Font size for text labels (default is ``12``).\n font_color : `str`, optional\n Font color `str` (default is ``'k'``, i.e., black).\n font_family : `str`, optional\n Font family (default is ``'sans-serif'``).\n font_weight : `str`, optional\n Font weight (default is ``'normal'``).\n alpha : `float`, optional\n The text transparency (default is ``1.0``).\n ax : `matplotlib.axes.Axes`, optional\n Draw the graph in the specified Matplotlib axes.\n offset : array-like or `str`, optional\n Label positions are summed to this before drawing. Defaults to zero\n vector. If `str`, can be either ``'above'`` (equivalent to\n ``(0, 1.5)``) or ``'below'`` (equivalent to ``(0, -1.5)``).\n\n Returns\n -------\n mapping\n Mapping of labels keyed on the nodes.\n\n Examples\n --------\n >>> import pandas as pd\n >>> from pyrrole import ChemicalSystem\n >>> from pyrrole.drawing import draw_diagram_labels\n >>> data = pd.DataFrame(\n ... [{\"name\": \"Separated_Reactants\", \"freeenergy\": 0.},\n ... {\"name\": \"mlC1\", \"freeenergy\": -5.4},\n ... {\"name\": \"mlC2\", \"freeenergy\": -15.6},\n ... {\"name\": \"mTS1\", \"freeenergy\": 28.5, \"color\": \"g\"},\n ... {\"name\": \"mCARB1\", \"freeenergy\": -9.7},\n ... 
{\"name\": \"mCARB2\", \"freeenergy\": -19.8},\n ... {\"name\": \"mCARBX\", \"freeenergy\": 20}]).set_index(\"name\")\n >>> system = ChemicalSystem(\n ... [\"Separated_Reactants -> mlC1 -> mTS1\",\n ... \"Separated_Reactants -> mlC2 -> mTS1\",\n ... \"mCARB2 <- mTS1 -> mCARB1\",\n ... \"Separated_Reactants -> mCARBX\"], data)\n >>> digraph = system.to_digraph()\n >>> edges = draw_diagram_labels(digraph, font_color='blue',\n ... offset=\"below\")\n >>> labels = {k: \"{:g}\".format(v)\n ... for k, v in digraph.nodes(data='freeenergy')}\n >>> edges = draw_diagram_labels(digraph, labels=labels,\n ... offset=\"above\")\n\n \"\"\"\n if ax is None:\n ax = _plt.gca()\n\n if labels is None:\n labels = dict((n, n) for n in graph.nodes())\n\n if pos is None:\n pos = diagram_layout(graph)\n\n if offset is None:\n offset = _np.array([0., 0.])\n elif offset == \"above\":\n offset = _np.array([0., 1.5])\n elif offset == \"below\":\n offset = _np.array([0., -1.5])\n\n # set optional alignment\n horizontalalignment = kwds.get('horizontalalignment', 'center')\n verticalalignment = kwds.get('verticalalignment', 'center')\n\n text_items = {} # there is no text collection so we'll fake one\n for n, label in labels.items():\n (x, y) = _np.asanyarray(pos[n]) + _np.asanyarray(offset)\n if not isinstance(label, str):\n label = str(label) # this makes \"1\" and 1 labeled the same\n t = ax.text(x, y, label,\n size=font_size,\n color=font_color,\n family=font_family,\n weight=font_weight,\n alpha=alpha,\n horizontalalignment=horizontalalignment,\n verticalalignment=verticalalignment,\n transform=ax.transData,\n bbox=bbox,\n clip_on=True)\n text_items[n] = t\n\n return text_items\n"
] | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""Tools for drawing chemical diagrams."""
# TODO: revisit all examples in this module.
import os
import collections as _collections
import numpy as _np
import networkx as _nx
import matplotlib as _mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
_mpl.use('Agg')
import matplotlib.cbook as _cb # noqa
import matplotlib.pyplot as _plt # noqa
from matplotlib.colors import Colormap as _Colormap # noqa
from matplotlib.colors import colorConverter as _colorConverter # noqa
from matplotlib.collections import LineCollection as _LineCollection # noqa
def _longest_common_subsequence(x, y):
"""
Return the longest common subsequence between two sequences.
Parameters
----------
x, y : sequence
Returns
-------
sequence
Longest common subsequence of x and y.
Examples
--------
>>> _longest_common_subsequence("AGGTAB", "GXTXAYB")
['G', 'T', 'A', 'B']
>>> _longest_common_subsequence(["A", "GA", "G", "T", "A", "B"],
... ["GA", "X", "T", "X", "A", "Y", "B"])
['GA', 'T', 'A', 'B']
"""
m = len(x)
n = len(y)
# L[i, j] will contain the length of the longest common subsequence of
# x[0..i - 1] and y[0..j - 1].
L = _np.zeros((m + 1, n + 1), dtype=int)
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
continue
elif x[i - 1] == y[j - 1]:
L[i, j] = L[i - 1, j - 1] + 1
else:
L[i, j] = max(L[i - 1, j], L[i, j - 1])
ret = []
i, j = m, n
while i > 0 and j > 0:
# If current character in x and y are same, then current character is
# part of the longest common subsequence.
if x[i - 1] == y[j - 1]:
ret.append(x[i - 1])
i, j = i - 1, j - 1
# If not same, then find the larger of two and go in the direction of
# larger value.
elif L[i - 1, j] > L[i, j - 1]:
i -= 1
else:
j -= 1
return ret[::-1]
def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
    """
    Position all nodes of graph stacked on top of each other.

    Every node gets x = 0 and y equal to its `height` attribute.

    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        Node attribute holding the numerical node height (default is
        ``'freeenergy'``). If `None`, all node heights are set to zero.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is
        the origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are
        set to zero in the returned positions.

    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> from pyrrole.drawing import tower_layout
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
    ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
    >>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
    ...            .to_digraph())
    >>> tower_layout(digraph)['AcOH(g)']
    array([ 0. , -228.56450866])
    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)

    num_nodes = len(graph)
    if num_nodes == 0:
        return {}
    if num_nodes == 1:
        return {_nx.utils.arbitrary_element(graph): center}

    # Heights come from the node attribute, or are all zero.
    if height is None:
        heights = _np.zeros(num_nodes)
    else:
        heights = _np.array([value for _, value in graph.nodes(data=height)])

    # x is always zero; pad extra dimensions (dim > 2) with zeros.
    extra_dims = max(0, dim - 2)
    coords = _np.column_stack([_np.zeros((num_nodes, 1)),
                               heights,
                               _np.zeros((num_nodes, extra_dims))])

    if scale is not None:
        coords = _nx.drawing.layout.rescale_layout(coords,
                                                   scale=scale) + center
    # TODO: make test
    return dict(zip(graph, coords))
def diagram_layout(graph, height='freeenergy', sources=None, targets=None,
                   pos=None, scale=None, center=None, dim=2):
    """
    Position nodes such that paths are highlighted, from left to right.

    Each node's x-coordinate becomes the largest index it occupies in
    any simple path from `sources` to `targets`; y-coordinates come
    from `tower_layout` (or from `pos`, when given).

    Parameters
    ----------
    graph : `networkx.Graph` or `list` of nodes
        A position will be assigned to every node in graph.
    height : `str` or `None`, optional
        Node attribute holding the numerical node height (default is
        ``'freeenergy'``). If `None`, all node heights are set to zero.
    sources : `list` of `str`, optional
        All simple paths starting at members of `sources` are
        considered. Defaults to all nodes of graph.
    targets : `list` of `str`, optional
        All simple paths ending at members of `targets` are considered.
        Defaults to all nodes of graph.
    pos : mapping, optional
        Initial positions for nodes keyed by node. If not specified,
        initial positions are computed with `tower_layout`.
    scale : number, optional
        Scale factor for positions.
    center : array-like, optional
        Coordinate pair around which to center the layout. Default is
        the origin.
    dim : `int`
        Dimension of layout. If `dim` > 2, the remaining dimensions are
        set to zero in the returned positions.

    Returns
    -------
    pos : mapping
        A mapping of positions keyed by node.

    Examples
    --------
    >>> import pandas as pd
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.drawing import diagram_layout
    >>> data = pd.DataFrame(
    ...     [{"name": "Separated_Reactants", "freeenergy": 0.},
    ...      {"name": "mlC1", "freeenergy": -5.4},
    ...      {"name": "mlC2", "freeenergy": -15.6},
    ...      {"name": "mTS1", "freeenergy": 28.5, "color": "g"},
    ...      {"name": "mCARB1", "freeenergy": -9.7},
    ...      {"name": "mCARB2", "freeenergy": -19.8},
    ...      {"name": "mCARBX", "freeenergy": 20}]).set_index("name")
    >>> system = ChemicalSystem(
    ...     ["Separated_Reactants -> mlC1 -> mTS1",
    ...      "Separated_Reactants -> mlC2 -> mTS1",
    ...      "mCARB2 <- mTS1 -> mCARB1",
    ...      "Separated_Reactants -> mCARBX"], data)
    >>> diagram_layout(system.to_digraph())['mCARB2']
    array([ 3. , -19.8])
    """
    # TODO: private function of packages should not be used.
    graph, center = _nx.drawing.layout._process_params(graph, center, dim)

    if len(graph) == 0:
        return {}
    if len(graph) == 1:
        return {_nx.utils.arbitrary_element(graph): center}

    if sources is None:
        sources = graph.nodes()
    if targets is None:
        targets = graph.nodes()

    if pos is None:
        pos = tower_layout(graph, height=height, scale=None, center=center,
                           dim=dim)

    # Push each node as far right as the deepest slot it occupies in any
    # simple source -> target path.
    for source in set(sources):
        for target in set(targets):
            for path in _nx.all_simple_paths(graph, source, target):
                for depth, node in enumerate(path):
                    if pos[node][0] < depth:
                        pos[node][0] = depth

    if scale is not None:
        coords = _np.array([pos[node] for node in graph])
        coords = _nx.drawing.layout.rescale_layout(coords,
                                                   scale=scale) + center
        pos = dict(zip(graph, coords))
    # TODO: make test
    return pos
def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7,
                       node_color='k', style='solid', alpha=1.0, cmap=None,
                       vmin=None, vmax=None, ax=None, label=None):
    """
    Draw nodes of graph.

    This draws only the nodes of graph as horizontal lines at each
    ``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where
    ``x = pos[0]``.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and length-2 positions as values.
        If not specified, a diagram layout positioning will be computed
        (see `diagram_layout`).
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Size of nodes (default is ``.7``). If an array is specified it
        must be the same length as nodelist.
    node_color : color `str`, or array of `float`
        Node color. Can be a single color format `str` (default is
        ``'k'``), or a sequence of colors with the same length as
        nodelist. Numeric values are mapped to colors using `cmap` and
        `vmin`/`vmax`. See `matplotlib.axes.Axes.hlines`.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Line style (default is ``'solid'``).
    alpha : `float` or array of `float`, optional
        The node transparency (default is ``1.0``). An array is applied
        to the colors in order (cycling through alpha if necessary).
    cmap : Matplotlib colormap, optional
        Colormap for mapping intensities of nodes.
    vmin, vmax : `float`, optional
        Minimum/maximum for node colormap scaling.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.

    Returns
    -------
    `matplotlib.collections.LineCollection`
        `LineCollection` of the nodes, or `None` if `nodelist` is empty.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.
    """
    if ax is None:
        ax = _plt.gca()

    if nodelist is None:
        nodelist = list(graph.nodes())
    if not nodelist:  # empty nodelist, no drawing
        return None

    if pos is None:
        pos = diagram_layout(graph)

    try:
        xy = _np.asarray([pos[v] for v in nodelist])
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')

    # BUG FIX: the original tested ``isinstance(alpha, _collections.Iterable)``;
    # ``collections.Iterable`` was removed in Python 3.10. ``numpy.iterable``
    # performs the same scalar-vs-sequence test here.
    if _np.iterable(alpha):
        # Per-node alphas: fold them into the colors and clear alpha.
        node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist, cmap,
                                             vmin, vmax)
        alpha = None

    node_collection = ax.hlines(xy[:, 1],
                                xy[:, 0] - node_size/2.,
                                xy[:, 0] + node_size/2.,
                                colors=node_color,
                                linestyles=style,
                                label=label,
                                cmap=cmap)
    node_collection.set_zorder(2)  # nodes go in front of edges
    return node_collection
def draw_diagram_edges(graph, pos=None, edgelist=None, width=1.0,
                       edge_color='k', style='dashed', alpha=1.0,
                       edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None,
                       label=None, nodelist=None, node_size=.7):
    """
    Draw edges of graph.

    This draws only the edges of a graph, as lines connecting the right
    end of the source node to the left end of the target node.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and length-2 positions as values.
        If not specified, a diagram layout positioning will be computed
        (see `diagram_layout`).
    edgelist : collection of edge `tuple`
        Draw only specified edges (default is ``graph.edges()``).
    width : `float`, or array of `float`
        Line width of edges (default is ``1.0``).
    edge_color : color `str`, or array of `float`
        Edge color (default is ``'k'``). Can be a sequence of colors
        with the same length as edgelist; numeric values are mapped to
        colors using `edge_cmap` and `edge_vmin`/`edge_vmax`.
    style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``)
        Edge line style (default is ``'dashed'``).
    alpha : `float`, optional
        The edge transparency (default is ``1.0``).
    edge_cmap : Matplotlib colormap, optional
        Colormap for mapping intensities of edges.
    edge_vmin, edge_vmax : `float`, optional
        Minimum/maximum for edge colormap scaling.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    label : `str`, optional
        Label for legend.
    nodelist : `list`, optional
        Draw only specified nodes (default is ``graph.nodes()``).
    node_size : scalar or array
        Size of nodes (default is ``.7``), used to offset edge
        endpoints horizontally.

    Returns
    -------
    `matplotlib.collections.LineCollection`
        `LineCollection` of the edges, or `None` if there are no edges.

    Raises
    ------
    networkx.NetworkXError
        Raised if a node has no position or one with bad value.
    ValueError
        Raised if `edge_color` contains something other than color names
        (one or a list of one per edge) or numbers.
    """
    if ax is None:
        ax = _plt.gca()

    if edgelist is None:
        edgelist = list(graph.edges())
    if not edgelist:  # no edges!
        return None

    if nodelist is None:
        nodelist = list(graph.nodes())
    if pos is None:
        pos = diagram_layout(graph)

    try:
        # Edge endpoints, offset horizontally by half the node width so
        # edges start/stop at the node's ends rather than its center.
        edge_pos = _np.asarray([(pos[e[0]] + node_size/2.,
                                 pos[e[1]] - node_size/2.)
                                for e in edgelist])
    except KeyError as e:
        raise _nx.NetworkXError('Node {} has no position.'.format(e))
    except ValueError:
        raise _nx.NetworkXError('Bad value in node positions.')

    # BUG FIX: ``matplotlib.cbook.iterable`` was removed in Matplotlib 3.1;
    # ``numpy.iterable`` is the supported equivalent.
    if not _np.iterable(width):
        lw = (width,)
    else:
        lw = width

    if not isinstance(edge_color, str) \
            and _np.iterable(edge_color) \
            and len(edge_color) == len(edge_pos):
        # NOTE: ``np.alltrue`` was removed in NumPy 2.0; the builtin
        # ``all`` is equivalent here.
        if all(isinstance(c, str) for c in edge_color):
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([_colorConverter.to_rgba(c, alpha)
                                 for c in edge_color])
        elif all(not isinstance(c, str) for c in edge_color):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if all(_np.iterable(c) and len(c) in (3, 4)
                   for c in edge_color):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must contain color names or numbers')
    else:
        if isinstance(edge_color, str) or len(edge_color) == 1:
            edge_colors = (_colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a color or list of one color '
                             ' per edge')

    edge_collection = _LineCollection(edge_pos,
                                      colors=edge_colors,
                                      linewidths=lw,
                                      antialiaseds=(1,),
                                      linestyle=style,
                                      transOffset=ax.transData)

    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)

    # BUG FIX: ``matplotlib.cbook.is_numlike`` was removed in Matplotlib
    # 3.1; an explicit numeric-type check is equivalent for floats/ints.
    if isinstance(alpha, (int, float, _np.number)):
        edge_collection.set_alpha(alpha)

    if edge_colors is None:
        # edge_color held numbers: map them through the colormap.
        if edge_cmap is not None:
            assert isinstance(edge_cmap, _Colormap)
        edge_collection.set_array(_np.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()

    ax.autoscale_view()
    return edge_collection
def draw_diagram_labels(graph, pos=None, labels=None, font_size=12,
                        font_color='k', font_family='sans-serif',
                        font_weight='normal', alpha=1.0, bbox=None, ax=None,
                        offset=None, **kwds):
    """
    Draw node labels of graph.

    This draws only the node labels of a graph.

    Parameters
    ----------
    graph : `networkx.Graph`
        A NetworkX graph.
    pos : mapping, optional
        A mapping with nodes as keys and length-2 positions as values.
        If not specified, a diagram layout positioning will be computed
        (see `diagram_layout`).
    labels : mapping, optional
        Node labels in a mapping keyed by node of text labels. Defaults
        to labeling each node with its own name.
    font_size : `int`, optional
        Font size for text labels (default is ``12``).
    font_color : `str`, optional
        Font color `str` (default is ``'k'``, i.e., black).
    font_family : `str`, optional
        Font family (default is ``'sans-serif'``).
    font_weight : `str`, optional
        Font weight (default is ``'normal'``).
    alpha : `float`, optional
        The text transparency (default is ``1.0``).
    bbox : `dict`, optional
        Matplotlib bbox properties forwarded to
        `matplotlib.axes.Axes.text`.
    ax : `matplotlib.axes.Axes`, optional
        Draw the graph in the specified Matplotlib axes.
    offset : array-like or `str`, optional
        Label positions are summed to this before drawing. Defaults to
        the zero vector. If `str`, can be either ``'above'`` (equivalent
        to ``(0, 1.5)``) or ``'below'`` (equivalent to ``(0, -1.5)``).
    **kwds
        ``horizontalalignment`` and ``verticalalignment`` (both default
        to ``'center'``).

    Returns
    -------
    mapping
        Mapping of `matplotlib.text.Text` objects keyed on the nodes.

    Raises
    ------
    ValueError
        If `offset` is a string other than ``'above'`` or ``'below'``.
    """
    if ax is None:
        ax = _plt.gca()

    if labels is None:
        labels = dict((n, n) for n in graph.nodes())
    if pos is None:
        pos = diagram_layout(graph)

    if offset is None:
        offset = _np.array([0., 0.])
    elif isinstance(offset, str):
        # BUG FIX: guard with isinstance before comparing to strings, so
        # array-like offsets are never tested with ``array == "above"``
        # (an ambiguous, warning-prone comparison for numpy arrays).
        if offset == "above":
            offset = _np.array([0., 1.5])
        elif offset == "below":
            offset = _np.array([0., -1.5])
        else:
            raise ValueError(
                "offset must be 'above', 'below' or array-like")

    # set optional alignment
    horizontalalignment = kwds.get('horizontalalignment', 'center')
    verticalalignment = kwds.get('verticalalignment', 'center')

    text_items = {}  # there is no text collection so we'll fake one
    for n, label in labels.items():
        (x, y) = _np.asanyarray(pos[n]) + _np.asanyarray(offset)
        if not isinstance(label, str):
            label = str(label)  # this makes "1" and 1 labeled the same
        t = ax.text(x, y, label,
                    size=font_size,
                    color=font_color,
                    family=font_family,
                    weight=font_weight,
                    alpha=alpha,
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    transform=ax.transData,
                    bbox=bbox,
                    clip_on=True)
        text_items[n] = t

    return text_items
|
sangoma/pysensu | pysensu/api.py | SensuAPI.get_clients | python | def get_clients(self, limit=None, offset=None):
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/clients', data=json.dumps(data))
return result.json() | Returns a list of clients. | train | https://github.com/sangoma/pysensu/blob/dc6799edbf2635247aec61fcf45b04ddec1beb49/pysensu/api.py#L76-L86 | [
"def _request(self, method, path, **kwargs):\n url = '{}{}'.format(self._url_base, path)\n logger.debug('{} -> {} with {}'.format(method, url, kwargs))\n\n if method == 'GET':\n resp = requests.get(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'POST':\n resp = requests.post(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'PUT':\n resp = requests.put(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'DELETE':\n resp = requests.delete(url, auth=self.auth, headers=self._header,\n **kwargs)\n else:\n raise SensuAPIException(\n 'Method {} not implemented'.format(method)\n )\n\n if resp.status_code in self.good_status:\n logger.debug('{}: {}'.format(\n resp.status_code,\n ''.join(resp.text.split('\\n'))[0:80]\n ))\n return resp\n\n elif resp.status_code.startswith('400'):\n logger.error('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API returned \"Bad Request\"')\n\n else:\n logger.warning('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))\n"
] | class SensuAPI(object):
def __init__(self, url_base, username=None, password=None):
self._url_base = url_base
self._header = {
'User-Agent': USER_AGENT
}
self.good_status = (200, 201, 202, 204)
if username and password:
self.auth = HTTPBasicAuth(username, password)
else:
self.auth = None
def _request(self, method, path, **kwargs):
url = '{}{}'.format(self._url_base, path)
logger.debug('{} -> {} with {}'.format(method, url, kwargs))
if method == 'GET':
resp = requests.get(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'POST':
resp = requests.post(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'PUT':
resp = requests.put(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'DELETE':
resp = requests.delete(url, auth=self.auth, headers=self._header,
**kwargs)
else:
raise SensuAPIException(
'Method {} not implemented'.format(method)
)
if resp.status_code in self.good_status:
logger.debug('{}: {}'.format(
resp.status_code,
''.join(resp.text.split('\n'))[0:80]
))
return resp
elif resp.status_code.startswith('400'):
logger.error('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API returned "Bad Request"')
else:
logger.warning('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))
"""
Clients ops
"""
def get_client_data(self, client):
"""
Returns a client.
"""
data = self._request('GET', '/clients/{}'.format(client))
return data.json()
def get_client_history(self, client):
"""
Returns the history for a client.
"""
data = self._request('GET', '/clients/{}/history'.format(client))
return data.json()
def delete_client(self, client):
"""
Removes a client, resolving its current events. (delayed action)
"""
self._request('DELETE', '/clients/{}'.format(client))
return True
"""
Events ops
"""
def get_events(self):
"""
Returns the list of current events.
"""
data = self._request('GET', '/events')
return data.json()
def get_all_client_events(self, client):
"""
Returns the list of current events for a given client.
"""
data = self._request('GET', '/events/{}'.format(client))
return data.json()
def get_event(self, client, check):
"""
Returns an event for a given client & check name.
"""
data = self._request('GET', '/events/{}/{}'.format(client, check))
return data.json()
def delete_event(self, client, check):
"""
Resolves an event for a given check on a given client. (delayed action)
"""
self._request('DELETE', '/events/{}/{}'.format(client, check))
return True
def post_event(self, client, check):
"""
Resolves an event. (delayed action)
"""
self._request('POST', '/resolve',
data=json.dumps({'client': client, 'check': check}))
return True
"""
Checks ops
"""
def get_checks(self):
"""
Returns the list of checks.
"""
data = self._request('GET', '/checks')
return data.json()
def get_check(self, check):
"""
Returns a check.
"""
data = self._request('GET', '/checks/{}'.format(check))
return data.json()
def post_check_request(self, check, subscribers):
"""
Issues a check execution request.
"""
data = {
'check': check,
'subscribers': [subscribers]
}
self._request('POST', '/request', data=json.dumps(data))
return True
"""
Silenced API ops
"""
def get_silenced(self, limit=None, offset=None):
"""
Returns a list of silence entries.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/silenced', data=json.dumps(data))
return result.json()
def post_silence_request(self, kwargs):
"""
Create a silence entry.
"""
self._request('POST', '/silenced', data=json.dumps(kwargs))
return True
def clear_silence(self, kwargs):
"""
Clear a silence entry.
"""
self._request('POST', '/silenced/clear', data=json.dumps(kwargs))
return True
"""
Aggregates ops
"""
def get_aggregates(self):
"""
Returns the list of named aggregates.
"""
data = self._request('GET', '/aggregates')
return data.json()
def get_aggregate_check(self, check, age=None):
"""
Returns the list of aggregates for a given check
"""
data = {}
if age:
data['max_age'] = age
result = self._request('GET', '/aggregates/{}'.format(check),
data=json.dumps(data))
return result.json()
def delete_aggregate(self, check):
"""
Deletes all aggregate data for a named aggregate
"""
self._request('DELETE', '/aggregates/{}'.format(check))
return True
"""
Status ops
"""
def get_info(self):
"""
Returns information on the API.
"""
data = self._request('GET', '/info')
return data.json()
def get_health(self, consumers=2, messages=100):
"""
Returns health information on transport & Redis connections.
"""
data = {'consumers': consumers, 'messages': messages}
try:
self._request('GET', '/health', data=json.dumps(data))
return True
except SensuAPIException:
return False
"""
Results ops
"""
def get_all_client_results(self):
"""
Returns the list of results.
"""
data = self._request('GET', '/results')
return data.json()
def get_results(self, client):
"""
Returns a result.
"""
data = self._request('GET', '/results/{}'.format(client))
return data.json()
def get_result(self, client, check):
"""
Returns an event for a given client & result name.
"""
data = self._request('GET', '/results/{}/{}'.format(client, check))
return data.json()
def delete_result(self, client, check):
"""
Deletes an check result data for a given check on a given client.
"""
self._request('DELETE', '/results/{}/{}'.format(client, check))
return True
def post_result_data(self, client, check, output, status):
"""
Posts check result data.
"""
data = {
'source': client,
'name': check,
'output': output,
'status': status,
}
self._request('POST', '/results', data=json.dumps(data))
return True
"""
Stashes ops
"""
def get_stashes(self):
"""
Returns a list of stashes.
"""
data = self._request('GET', '/stashes')
return data.json()
def create_stash(self, payload, path=None):
"""
Create a stash. (JSON document)
"""
if path:
self._request('POST', '/stashes/{}'.format(path),
json=payload)
else:
self._request('POST', '/stashes', json=payload)
return True
def delete_stash(self, path):
"""
Delete a stash. (JSON document)
"""
self._request('DELETE', '/stashes/{}'.format(path))
return True
"""
Subscriptions ops (not directly in the Sensu API)
"""
def get_subscriptions(self, nodes=[]):
"""
Returns all the channels where (optionally specified) nodes are subscribed
"""
if len(nodes) > 0:
data = [node for node in self.get_clients() if node['name'] in nodes]
else:
data = self.get_clients()
channels = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
for channel in client['subscriptions']:
if channel not in channels:
channels.append(channel)
else:
if client['subscriptions'] not in channels:
channels.append(client['subscriptions'])
return channels
def get_subscriptions_channel(self, search_channel):
"""
Return all the nodes that are subscribed to the specified channel
"""
data = self.get_clients()
clients = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
if search_channel in client['subscriptions']:
clients.append(client['name'])
else:
if search_channel == client['subscriptions']:
clients.append(client['name'])
return clients
|
sangoma/pysensu | pysensu/api.py | SensuAPI.get_client_data | python | def get_client_data(self, client):
data = self._request('GET', '/clients/{}'.format(client))
return data.json() | Returns a client. | train | https://github.com/sangoma/pysensu/blob/dc6799edbf2635247aec61fcf45b04ddec1beb49/pysensu/api.py#L88-L93 | [
"def _request(self, method, path, **kwargs):\n url = '{}{}'.format(self._url_base, path)\n logger.debug('{} -> {} with {}'.format(method, url, kwargs))\n\n if method == 'GET':\n resp = requests.get(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'POST':\n resp = requests.post(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'PUT':\n resp = requests.put(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'DELETE':\n resp = requests.delete(url, auth=self.auth, headers=self._header,\n **kwargs)\n else:\n raise SensuAPIException(\n 'Method {} not implemented'.format(method)\n )\n\n if resp.status_code in self.good_status:\n logger.debug('{}: {}'.format(\n resp.status_code,\n ''.join(resp.text.split('\\n'))[0:80]\n ))\n return resp\n\n elif resp.status_code.startswith('400'):\n logger.error('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API returned \"Bad Request\"')\n\n else:\n logger.warning('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))\n"
] | class SensuAPI(object):
def __init__(self, url_base, username=None, password=None):
self._url_base = url_base
self._header = {
'User-Agent': USER_AGENT
}
self.good_status = (200, 201, 202, 204)
if username and password:
self.auth = HTTPBasicAuth(username, password)
else:
self.auth = None
def _request(self, method, path, **kwargs):
url = '{}{}'.format(self._url_base, path)
logger.debug('{} -> {} with {}'.format(method, url, kwargs))
if method == 'GET':
resp = requests.get(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'POST':
resp = requests.post(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'PUT':
resp = requests.put(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'DELETE':
resp = requests.delete(url, auth=self.auth, headers=self._header,
**kwargs)
else:
raise SensuAPIException(
'Method {} not implemented'.format(method)
)
if resp.status_code in self.good_status:
logger.debug('{}: {}'.format(
resp.status_code,
''.join(resp.text.split('\n'))[0:80]
))
return resp
elif resp.status_code.startswith('400'):
logger.error('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API returned "Bad Request"')
else:
logger.warning('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))
"""
Clients ops
"""
def get_clients(self, limit=None, offset=None):
"""
Returns a list of clients.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/clients', data=json.dumps(data))
return result.json()
def get_client_history(self, client):
"""
Returns the history for a client.
"""
data = self._request('GET', '/clients/{}/history'.format(client))
return data.json()
def delete_client(self, client):
"""
Removes a client, resolving its current events. (delayed action)
"""
self._request('DELETE', '/clients/{}'.format(client))
return True
"""
Events ops
"""
def get_events(self):
"""
Returns the list of current events.
"""
data = self._request('GET', '/events')
return data.json()
def get_all_client_events(self, client):
"""
Returns the list of current events for a given client.
"""
data = self._request('GET', '/events/{}'.format(client))
return data.json()
def get_event(self, client, check):
"""
Returns an event for a given client & check name.
"""
data = self._request('GET', '/events/{}/{}'.format(client, check))
return data.json()
def delete_event(self, client, check):
"""
Resolves an event for a given check on a given client. (delayed action)
"""
self._request('DELETE', '/events/{}/{}'.format(client, check))
return True
def post_event(self, client, check):
"""
Resolves an event. (delayed action)
"""
self._request('POST', '/resolve',
data=json.dumps({'client': client, 'check': check}))
return True
"""
Checks ops
"""
def get_checks(self):
"""
Returns the list of checks.
"""
data = self._request('GET', '/checks')
return data.json()
def get_check(self, check):
"""
Returns a check.
"""
data = self._request('GET', '/checks/{}'.format(check))
return data.json()
def post_check_request(self, check, subscribers):
"""
Issues a check execution request.
"""
data = {
'check': check,
'subscribers': [subscribers]
}
self._request('POST', '/request', data=json.dumps(data))
return True
"""
Silenced API ops
"""
def get_silenced(self, limit=None, offset=None):
"""
Returns a list of silence entries.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/silenced', data=json.dumps(data))
return result.json()
def post_silence_request(self, kwargs):
"""
Create a silence entry.
"""
self._request('POST', '/silenced', data=json.dumps(kwargs))
return True
def clear_silence(self, kwargs):
"""
Clear a silence entry.
"""
self._request('POST', '/silenced/clear', data=json.dumps(kwargs))
return True
"""
Aggregates ops
"""
def get_aggregates(self):
"""
Returns the list of named aggregates.
"""
data = self._request('GET', '/aggregates')
return data.json()
def get_aggregate_check(self, check, age=None):
"""
Returns the list of aggregates for a given check
"""
data = {}
if age:
data['max_age'] = age
result = self._request('GET', '/aggregates/{}'.format(check),
data=json.dumps(data))
return result.json()
def delete_aggregate(self, check):
"""
Deletes all aggregate data for a named aggregate
"""
self._request('DELETE', '/aggregates/{}'.format(check))
return True
"""
Status ops
"""
def get_info(self):
"""
Returns information on the API.
"""
data = self._request('GET', '/info')
return data.json()
def get_health(self, consumers=2, messages=100):
"""
Returns health information on transport & Redis connections.
"""
data = {'consumers': consumers, 'messages': messages}
try:
self._request('GET', '/health', data=json.dumps(data))
return True
except SensuAPIException:
return False
"""
Results ops
"""
def get_all_client_results(self):
"""
Returns the list of results.
"""
data = self._request('GET', '/results')
return data.json()
def get_results(self, client):
"""
Returns a result.
"""
data = self._request('GET', '/results/{}'.format(client))
return data.json()
def get_result(self, client, check):
"""
Returns an event for a given client & result name.
"""
data = self._request('GET', '/results/{}/{}'.format(client, check))
return data.json()
def delete_result(self, client, check):
"""
Deletes an check result data for a given check on a given client.
"""
self._request('DELETE', '/results/{}/{}'.format(client, check))
return True
def post_result_data(self, client, check, output, status):
"""
Posts check result data.
"""
data = {
'source': client,
'name': check,
'output': output,
'status': status,
}
self._request('POST', '/results', data=json.dumps(data))
return True
"""
Stashes ops
"""
def get_stashes(self):
"""
Returns a list of stashes.
"""
data = self._request('GET', '/stashes')
return data.json()
def create_stash(self, payload, path=None):
"""
Create a stash. (JSON document)
"""
if path:
self._request('POST', '/stashes/{}'.format(path),
json=payload)
else:
self._request('POST', '/stashes', json=payload)
return True
def delete_stash(self, path):
"""
Delete a stash. (JSON document)
"""
self._request('DELETE', '/stashes/{}'.format(path))
return True
"""
Subscriptions ops (not directly in the Sensu API)
"""
def get_subscriptions(self, nodes=[]):
"""
Returns all the channels where (optionally specified) nodes are subscribed
"""
if len(nodes) > 0:
data = [node for node in self.get_clients() if node['name'] in nodes]
else:
data = self.get_clients()
channels = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
for channel in client['subscriptions']:
if channel not in channels:
channels.append(channel)
else:
if client['subscriptions'] not in channels:
channels.append(client['subscriptions'])
return channels
def get_subscriptions_channel(self, search_channel):
"""
Return all the nodes that are subscribed to the specified channel
"""
data = self.get_clients()
clients = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
if search_channel in client['subscriptions']:
clients.append(client['name'])
else:
if search_channel == client['subscriptions']:
clients.append(client['name'])
return clients
|
sangoma/pysensu | pysensu/api.py | SensuAPI.get_client_history | python | def get_client_history(self, client):
data = self._request('GET', '/clients/{}/history'.format(client))
return data.json() | Returns the history for a client. | train | https://github.com/sangoma/pysensu/blob/dc6799edbf2635247aec61fcf45b04ddec1beb49/pysensu/api.py#L95-L100 | [
"def _request(self, method, path, **kwargs):\n url = '{}{}'.format(self._url_base, path)\n logger.debug('{} -> {} with {}'.format(method, url, kwargs))\n\n if method == 'GET':\n resp = requests.get(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'POST':\n resp = requests.post(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'PUT':\n resp = requests.put(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'DELETE':\n resp = requests.delete(url, auth=self.auth, headers=self._header,\n **kwargs)\n else:\n raise SensuAPIException(\n 'Method {} not implemented'.format(method)\n )\n\n if resp.status_code in self.good_status:\n logger.debug('{}: {}'.format(\n resp.status_code,\n ''.join(resp.text.split('\\n'))[0:80]\n ))\n return resp\n\n elif resp.status_code.startswith('400'):\n logger.error('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API returned \"Bad Request\"')\n\n else:\n logger.warning('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))\n"
] | class SensuAPI(object):
def __init__(self, url_base, username=None, password=None):
self._url_base = url_base
self._header = {
'User-Agent': USER_AGENT
}
self.good_status = (200, 201, 202, 204)
if username and password:
self.auth = HTTPBasicAuth(username, password)
else:
self.auth = None
def _request(self, method, path, **kwargs):
url = '{}{}'.format(self._url_base, path)
logger.debug('{} -> {} with {}'.format(method, url, kwargs))
if method == 'GET':
resp = requests.get(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'POST':
resp = requests.post(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'PUT':
resp = requests.put(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'DELETE':
resp = requests.delete(url, auth=self.auth, headers=self._header,
**kwargs)
else:
raise SensuAPIException(
'Method {} not implemented'.format(method)
)
if resp.status_code in self.good_status:
logger.debug('{}: {}'.format(
resp.status_code,
''.join(resp.text.split('\n'))[0:80]
))
return resp
elif resp.status_code.startswith('400'):
logger.error('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API returned "Bad Request"')
else:
logger.warning('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))
"""
Clients ops
"""
def get_clients(self, limit=None, offset=None):
"""
Returns a list of clients.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/clients', data=json.dumps(data))
return result.json()
def get_client_data(self, client):
"""
Returns a client.
"""
data = self._request('GET', '/clients/{}'.format(client))
return data.json()
def delete_client(self, client):
"""
Removes a client, resolving its current events. (delayed action)
"""
self._request('DELETE', '/clients/{}'.format(client))
return True
"""
Events ops
"""
def get_events(self):
"""
Returns the list of current events.
"""
data = self._request('GET', '/events')
return data.json()
def get_all_client_events(self, client):
"""
Returns the list of current events for a given client.
"""
data = self._request('GET', '/events/{}'.format(client))
return data.json()
def get_event(self, client, check):
"""
Returns an event for a given client & check name.
"""
data = self._request('GET', '/events/{}/{}'.format(client, check))
return data.json()
def delete_event(self, client, check):
"""
Resolves an event for a given check on a given client. (delayed action)
"""
self._request('DELETE', '/events/{}/{}'.format(client, check))
return True
def post_event(self, client, check):
"""
Resolves an event. (delayed action)
"""
self._request('POST', '/resolve',
data=json.dumps({'client': client, 'check': check}))
return True
"""
Checks ops
"""
def get_checks(self):
"""
Returns the list of checks.
"""
data = self._request('GET', '/checks')
return data.json()
def get_check(self, check):
"""
Returns a check.
"""
data = self._request('GET', '/checks/{}'.format(check))
return data.json()
def post_check_request(self, check, subscribers):
"""
Issues a check execution request.
"""
data = {
'check': check,
'subscribers': [subscribers]
}
self._request('POST', '/request', data=json.dumps(data))
return True
"""
Silenced API ops
"""
def get_silenced(self, limit=None, offset=None):
"""
Returns a list of silence entries.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/silenced', data=json.dumps(data))
return result.json()
def post_silence_request(self, kwargs):
"""
Create a silence entry.
"""
self._request('POST', '/silenced', data=json.dumps(kwargs))
return True
def clear_silence(self, kwargs):
"""
Clear a silence entry.
"""
self._request('POST', '/silenced/clear', data=json.dumps(kwargs))
return True
"""
Aggregates ops
"""
def get_aggregates(self):
"""
Returns the list of named aggregates.
"""
data = self._request('GET', '/aggregates')
return data.json()
def get_aggregate_check(self, check, age=None):
"""
Returns the list of aggregates for a given check
"""
data = {}
if age:
data['max_age'] = age
result = self._request('GET', '/aggregates/{}'.format(check),
data=json.dumps(data))
return result.json()
def delete_aggregate(self, check):
"""
Deletes all aggregate data for a named aggregate
"""
self._request('DELETE', '/aggregates/{}'.format(check))
return True
"""
Status ops
"""
def get_info(self):
"""
Returns information on the API.
"""
data = self._request('GET', '/info')
return data.json()
def get_health(self, consumers=2, messages=100):
"""
Returns health information on transport & Redis connections.
"""
data = {'consumers': consumers, 'messages': messages}
try:
self._request('GET', '/health', data=json.dumps(data))
return True
except SensuAPIException:
return False
"""
Results ops
"""
def get_all_client_results(self):
"""
Returns the list of results.
"""
data = self._request('GET', '/results')
return data.json()
def get_results(self, client):
"""
Returns a result.
"""
data = self._request('GET', '/results/{}'.format(client))
return data.json()
def get_result(self, client, check):
"""
Returns an event for a given client & result name.
"""
data = self._request('GET', '/results/{}/{}'.format(client, check))
return data.json()
def delete_result(self, client, check):
"""
Deletes an check result data for a given check on a given client.
"""
self._request('DELETE', '/results/{}/{}'.format(client, check))
return True
def post_result_data(self, client, check, output, status):
"""
Posts check result data.
"""
data = {
'source': client,
'name': check,
'output': output,
'status': status,
}
self._request('POST', '/results', data=json.dumps(data))
return True
"""
Stashes ops
"""
def get_stashes(self):
"""
Returns a list of stashes.
"""
data = self._request('GET', '/stashes')
return data.json()
def create_stash(self, payload, path=None):
"""
Create a stash. (JSON document)
"""
if path:
self._request('POST', '/stashes/{}'.format(path),
json=payload)
else:
self._request('POST', '/stashes', json=payload)
return True
def delete_stash(self, path):
"""
Delete a stash. (JSON document)
"""
self._request('DELETE', '/stashes/{}'.format(path))
return True
"""
Subscriptions ops (not directly in the Sensu API)
"""
def get_subscriptions(self, nodes=[]):
"""
Returns all the channels where (optionally specified) nodes are subscribed
"""
if len(nodes) > 0:
data = [node for node in self.get_clients() if node['name'] in nodes]
else:
data = self.get_clients()
channels = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
for channel in client['subscriptions']:
if channel not in channels:
channels.append(channel)
else:
if client['subscriptions'] not in channels:
channels.append(client['subscriptions'])
return channels
def get_subscriptions_channel(self, search_channel):
"""
Return all the nodes that are subscribed to the specified channel
"""
data = self.get_clients()
clients = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
if search_channel in client['subscriptions']:
clients.append(client['name'])
else:
if search_channel == client['subscriptions']:
clients.append(client['name'])
return clients
|
sangoma/pysensu | pysensu/api.py | SensuAPI.get_all_client_events | python | def get_all_client_events(self, client):
data = self._request('GET', '/events/{}'.format(client))
return data.json() | Returns the list of current events for a given client. | train | https://github.com/sangoma/pysensu/blob/dc6799edbf2635247aec61fcf45b04ddec1beb49/pysensu/api.py#L119-L124 | [
"def _request(self, method, path, **kwargs):\n url = '{}{}'.format(self._url_base, path)\n logger.debug('{} -> {} with {}'.format(method, url, kwargs))\n\n if method == 'GET':\n resp = requests.get(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'POST':\n resp = requests.post(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'PUT':\n resp = requests.put(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'DELETE':\n resp = requests.delete(url, auth=self.auth, headers=self._header,\n **kwargs)\n else:\n raise SensuAPIException(\n 'Method {} not implemented'.format(method)\n )\n\n if resp.status_code in self.good_status:\n logger.debug('{}: {}'.format(\n resp.status_code,\n ''.join(resp.text.split('\\n'))[0:80]\n ))\n return resp\n\n elif resp.status_code.startswith('400'):\n logger.error('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API returned \"Bad Request\"')\n\n else:\n logger.warning('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))\n"
] | class SensuAPI(object):
def __init__(self, url_base, username=None, password=None):
self._url_base = url_base
self._header = {
'User-Agent': USER_AGENT
}
self.good_status = (200, 201, 202, 204)
if username and password:
self.auth = HTTPBasicAuth(username, password)
else:
self.auth = None
def _request(self, method, path, **kwargs):
url = '{}{}'.format(self._url_base, path)
logger.debug('{} -> {} with {}'.format(method, url, kwargs))
if method == 'GET':
resp = requests.get(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'POST':
resp = requests.post(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'PUT':
resp = requests.put(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'DELETE':
resp = requests.delete(url, auth=self.auth, headers=self._header,
**kwargs)
else:
raise SensuAPIException(
'Method {} not implemented'.format(method)
)
if resp.status_code in self.good_status:
logger.debug('{}: {}'.format(
resp.status_code,
''.join(resp.text.split('\n'))[0:80]
))
return resp
elif resp.status_code.startswith('400'):
logger.error('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API returned "Bad Request"')
else:
logger.warning('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))
"""
Clients ops
"""
def get_clients(self, limit=None, offset=None):
"""
Returns a list of clients.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/clients', data=json.dumps(data))
return result.json()
def get_client_data(self, client):
"""
Returns a client.
"""
data = self._request('GET', '/clients/{}'.format(client))
return data.json()
def get_client_history(self, client):
"""
Returns the history for a client.
"""
data = self._request('GET', '/clients/{}/history'.format(client))
return data.json()
def delete_client(self, client):
"""
Removes a client, resolving its current events. (delayed action)
"""
self._request('DELETE', '/clients/{}'.format(client))
return True
"""
Events ops
"""
def get_events(self):
"""
Returns the list of current events.
"""
data = self._request('GET', '/events')
return data.json()
def get_event(self, client, check):
"""
Returns an event for a given client & check name.
"""
data = self._request('GET', '/events/{}/{}'.format(client, check))
return data.json()
def delete_event(self, client, check):
"""
Resolves an event for a given check on a given client. (delayed action)
"""
self._request('DELETE', '/events/{}/{}'.format(client, check))
return True
def post_event(self, client, check):
"""
Resolves an event. (delayed action)
"""
self._request('POST', '/resolve',
data=json.dumps({'client': client, 'check': check}))
return True
"""
Checks ops
"""
def get_checks(self):
"""
Returns the list of checks.
"""
data = self._request('GET', '/checks')
return data.json()
def get_check(self, check):
"""
Returns a check.
"""
data = self._request('GET', '/checks/{}'.format(check))
return data.json()
def post_check_request(self, check, subscribers):
"""
Issues a check execution request.
"""
data = {
'check': check,
'subscribers': [subscribers]
}
self._request('POST', '/request', data=json.dumps(data))
return True
"""
Silenced API ops
"""
def get_silenced(self, limit=None, offset=None):
"""
Returns a list of silence entries.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/silenced', data=json.dumps(data))
return result.json()
def post_silence_request(self, kwargs):
"""
Create a silence entry.
"""
self._request('POST', '/silenced', data=json.dumps(kwargs))
return True
def clear_silence(self, kwargs):
"""
Clear a silence entry.
"""
self._request('POST', '/silenced/clear', data=json.dumps(kwargs))
return True
"""
Aggregates ops
"""
def get_aggregates(self):
"""
Returns the list of named aggregates.
"""
data = self._request('GET', '/aggregates')
return data.json()
def get_aggregate_check(self, check, age=None):
"""
Returns the list of aggregates for a given check
"""
data = {}
if age:
data['max_age'] = age
result = self._request('GET', '/aggregates/{}'.format(check),
data=json.dumps(data))
return result.json()
def delete_aggregate(self, check):
"""
Deletes all aggregate data for a named aggregate
"""
self._request('DELETE', '/aggregates/{}'.format(check))
return True
"""
Status ops
"""
def get_info(self):
"""
Returns information on the API.
"""
data = self._request('GET', '/info')
return data.json()
def get_health(self, consumers=2, messages=100):
"""
Returns health information on transport & Redis connections.
"""
data = {'consumers': consumers, 'messages': messages}
try:
self._request('GET', '/health', data=json.dumps(data))
return True
except SensuAPIException:
return False
"""
Results ops
"""
def get_all_client_results(self):
"""
Returns the list of results.
"""
data = self._request('GET', '/results')
return data.json()
def get_results(self, client):
"""
Returns a result.
"""
data = self._request('GET', '/results/{}'.format(client))
return data.json()
def get_result(self, client, check):
"""
Returns an event for a given client & result name.
"""
data = self._request('GET', '/results/{}/{}'.format(client, check))
return data.json()
def delete_result(self, client, check):
"""
Deletes an check result data for a given check on a given client.
"""
self._request('DELETE', '/results/{}/{}'.format(client, check))
return True
def post_result_data(self, client, check, output, status):
"""
Posts check result data.
"""
data = {
'source': client,
'name': check,
'output': output,
'status': status,
}
self._request('POST', '/results', data=json.dumps(data))
return True
"""
Stashes ops
"""
def get_stashes(self):
"""
Returns a list of stashes.
"""
data = self._request('GET', '/stashes')
return data.json()
def create_stash(self, payload, path=None):
"""
Create a stash. (JSON document)
"""
if path:
self._request('POST', '/stashes/{}'.format(path),
json=payload)
else:
self._request('POST', '/stashes', json=payload)
return True
def delete_stash(self, path):
"""
Delete a stash. (JSON document)
"""
self._request('DELETE', '/stashes/{}'.format(path))
return True
"""
Subscriptions ops (not directly in the Sensu API)
"""
def get_subscriptions(self, nodes=[]):
"""
Returns all the channels where (optionally specified) nodes are subscribed
"""
if len(nodes) > 0:
data = [node for node in self.get_clients() if node['name'] in nodes]
else:
data = self.get_clients()
channels = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
for channel in client['subscriptions']:
if channel not in channels:
channels.append(channel)
else:
if client['subscriptions'] not in channels:
channels.append(client['subscriptions'])
return channels
def get_subscriptions_channel(self, search_channel):
"""
Return all the nodes that are subscribed to the specified channel
"""
data = self.get_clients()
clients = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
if search_channel in client['subscriptions']:
clients.append(client['name'])
else:
if search_channel == client['subscriptions']:
clients.append(client['name'])
return clients
|
sangoma/pysensu | pysensu/api.py | SensuAPI.get_event | python | def get_event(self, client, check):
data = self._request('GET', '/events/{}/{}'.format(client, check))
return data.json() | Returns an event for a given client & check name. | train | https://github.com/sangoma/pysensu/blob/dc6799edbf2635247aec61fcf45b04ddec1beb49/pysensu/api.py#L126-L131 | [
"def _request(self, method, path, **kwargs):\n url = '{}{}'.format(self._url_base, path)\n logger.debug('{} -> {} with {}'.format(method, url, kwargs))\n\n if method == 'GET':\n resp = requests.get(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'POST':\n resp = requests.post(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'PUT':\n resp = requests.put(url, auth=self.auth, headers=self._header,\n **kwargs)\n\n elif method == 'DELETE':\n resp = requests.delete(url, auth=self.auth, headers=self._header,\n **kwargs)\n else:\n raise SensuAPIException(\n 'Method {} not implemented'.format(method)\n )\n\n if resp.status_code in self.good_status:\n logger.debug('{}: {}'.format(\n resp.status_code,\n ''.join(resp.text.split('\\n'))[0:80]\n ))\n return resp\n\n elif resp.status_code.startswith('400'):\n logger.error('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API returned \"Bad Request\"')\n\n else:\n logger.warning('{}: {}'.format(\n resp.status_code,\n resp.text\n ))\n raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))\n"
] | class SensuAPI(object):
def __init__(self, url_base, username=None, password=None):
self._url_base = url_base
self._header = {
'User-Agent': USER_AGENT
}
self.good_status = (200, 201, 202, 204)
if username and password:
self.auth = HTTPBasicAuth(username, password)
else:
self.auth = None
def _request(self, method, path, **kwargs):
url = '{}{}'.format(self._url_base, path)
logger.debug('{} -> {} with {}'.format(method, url, kwargs))
if method == 'GET':
resp = requests.get(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'POST':
resp = requests.post(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'PUT':
resp = requests.put(url, auth=self.auth, headers=self._header,
**kwargs)
elif method == 'DELETE':
resp = requests.delete(url, auth=self.auth, headers=self._header,
**kwargs)
else:
raise SensuAPIException(
'Method {} not implemented'.format(method)
)
if resp.status_code in self.good_status:
logger.debug('{}: {}'.format(
resp.status_code,
''.join(resp.text.split('\n'))[0:80]
))
return resp
elif resp.status_code.startswith('400'):
logger.error('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API returned "Bad Request"')
else:
logger.warning('{}: {}'.format(
resp.status_code,
resp.text
))
raise SensuAPIException('API bad response {}: {}'.format(resp.status_code, resp.text))
"""
Clients ops
"""
def get_clients(self, limit=None, offset=None):
"""
Returns a list of clients.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/clients', data=json.dumps(data))
return result.json()
def get_client_data(self, client):
"""
Returns a client.
"""
data = self._request('GET', '/clients/{}'.format(client))
return data.json()
def get_client_history(self, client):
"""
Returns the history for a client.
"""
data = self._request('GET', '/clients/{}/history'.format(client))
return data.json()
def delete_client(self, client):
"""
Removes a client, resolving its current events. (delayed action)
"""
self._request('DELETE', '/clients/{}'.format(client))
return True
"""
Events ops
"""
def get_events(self):
"""
Returns the list of current events.
"""
data = self._request('GET', '/events')
return data.json()
def get_all_client_events(self, client):
"""
Returns the list of current events for a given client.
"""
data = self._request('GET', '/events/{}'.format(client))
return data.json()
def delete_event(self, client, check):
"""
Resolves an event for a given check on a given client. (delayed action)
"""
self._request('DELETE', '/events/{}/{}'.format(client, check))
return True
def post_event(self, client, check):
"""
Resolves an event. (delayed action)
"""
self._request('POST', '/resolve',
data=json.dumps({'client': client, 'check': check}))
return True
"""
Checks ops
"""
def get_checks(self):
"""
Returns the list of checks.
"""
data = self._request('GET', '/checks')
return data.json()
def get_check(self, check):
"""
Returns a check.
"""
data = self._request('GET', '/checks/{}'.format(check))
return data.json()
def post_check_request(self, check, subscribers):
"""
Issues a check execution request.
"""
data = {
'check': check,
'subscribers': [subscribers]
}
self._request('POST', '/request', data=json.dumps(data))
return True
"""
Silenced API ops
"""
def get_silenced(self, limit=None, offset=None):
"""
Returns a list of silence entries.
"""
data = {}
if limit:
data['limit'] = limit
if offset:
data['offset'] = offset
result = self._request('GET', '/silenced', data=json.dumps(data))
return result.json()
def post_silence_request(self, kwargs):
"""
Create a silence entry.
"""
self._request('POST', '/silenced', data=json.dumps(kwargs))
return True
def clear_silence(self, kwargs):
"""
Clear a silence entry.
"""
self._request('POST', '/silenced/clear', data=json.dumps(kwargs))
return True
"""
Aggregates ops
"""
def get_aggregates(self):
"""
Returns the list of named aggregates.
"""
data = self._request('GET', '/aggregates')
return data.json()
def get_aggregate_check(self, check, age=None):
"""
Returns the list of aggregates for a given check
"""
data = {}
if age:
data['max_age'] = age
result = self._request('GET', '/aggregates/{}'.format(check),
data=json.dumps(data))
return result.json()
def delete_aggregate(self, check):
"""
Deletes all aggregate data for a named aggregate
"""
self._request('DELETE', '/aggregates/{}'.format(check))
return True
"""
Status ops
"""
def get_info(self):
"""
Returns information on the API.
"""
data = self._request('GET', '/info')
return data.json()
def get_health(self, consumers=2, messages=100):
"""
Returns health information on transport & Redis connections.
"""
data = {'consumers': consumers, 'messages': messages}
try:
self._request('GET', '/health', data=json.dumps(data))
return True
except SensuAPIException:
return False
"""
Results ops
"""
def get_all_client_results(self):
"""
Returns the list of results.
"""
data = self._request('GET', '/results')
return data.json()
def get_results(self, client):
"""
Returns a result.
"""
data = self._request('GET', '/results/{}'.format(client))
return data.json()
def get_result(self, client, check):
"""
Returns an event for a given client & result name.
"""
data = self._request('GET', '/results/{}/{}'.format(client, check))
return data.json()
def delete_result(self, client, check):
"""
Deletes an check result data for a given check on a given client.
"""
self._request('DELETE', '/results/{}/{}'.format(client, check))
return True
def post_result_data(self, client, check, output, status):
"""
Posts check result data.
"""
data = {
'source': client,
'name': check,
'output': output,
'status': status,
}
self._request('POST', '/results', data=json.dumps(data))
return True
"""
Stashes ops
"""
def get_stashes(self):
"""
Returns a list of stashes.
"""
data = self._request('GET', '/stashes')
return data.json()
def create_stash(self, payload, path=None):
"""
Create a stash. (JSON document)
"""
if path:
self._request('POST', '/stashes/{}'.format(path),
json=payload)
else:
self._request('POST', '/stashes', json=payload)
return True
def delete_stash(self, path):
"""
Delete a stash. (JSON document)
"""
self._request('DELETE', '/stashes/{}'.format(path))
return True
"""
Subscriptions ops (not directly in the Sensu API)
"""
def get_subscriptions(self, nodes=[]):
"""
Returns all the channels where (optionally specified) nodes are subscribed
"""
if len(nodes) > 0:
data = [node for node in self.get_clients() if node['name'] in nodes]
else:
data = self.get_clients()
channels = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
for channel in client['subscriptions']:
if channel not in channels:
channels.append(channel)
else:
if client['subscriptions'] not in channels:
channels.append(client['subscriptions'])
return channels
def get_subscriptions_channel(self, search_channel):
"""
Return all the nodes that are subscribed to the specified channel
"""
data = self.get_clients()
clients = []
for client in data:
if 'subscriptions' in client:
if isinstance(client['subscriptions'], list):
if search_channel in client['subscriptions']:
clients.append(client['name'])
else:
if search_channel == client['subscriptions']:
clients.append(client['name'])
return clients
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.