# coding=utf-8
import os
import sys
import time
import misc
import arrow
from datetime import datetime
from login import Login
from errors import BadCredentials, SlotErrorNotification, WrongFormatNotification
from twisted.internet import reactor
from twisted.internet.protocol import ServerFactory
from twisted.cred.error import UnauthorizedLogin
from twisted.protocols.amp import AMP
from twisted.protocols.policies import TimeoutMixin
from twisted.python import log
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from ampCommands import StartRemote
from ampCommands import EndRemote
from ampCommands import SendMsg
from ampCommands import NotifyMsg
from ampCommands import NotifyEvent
from rpcrequests import Satnet_RPC
from server_amp import *
from rpcrequests import Satnet_GetSlot
from rpcrequests import Satnet_StorePassiveMessage
from rpcrequests import Satnet_StoreMessage
"""
Copyright 2014, 2015, 2016 Xabier Crespo Álvarez, Samuel Góngora García
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
:Author:
Xabier Crespo Álvarez (xabicrespog@gmail.com)
Samuel Góngora García (s.gongoragarcia@gmail.com)
"""
__author__ = 'xabicrespog@gmail.com'
__author__ = 's.gongoragarcia@gmail.com'
class CredReceiver(AMP, TimeoutMixin):
"""
Integration between AMP and L{twisted.cred}. This class is only intended
to be used for credentials purposes. The specific SATNET protocol will be
implemented in L{SATNETServer} (see server_amp.py).
:ivar sUsername:
Each protocol belongs to a User. This field represents User.username
:type sUsername:
L{String}
:ivar iTimeOut:
The duration of the session timeout in seconds. After this
time the user will be automatically disconnected.
:type iTimeOut:
L{int}
:ivar session:
Reference to an object in charge of removing the user from
active_protocols when it is inactive.
:type session:
L{IDelayedCall}
:ivar clientA:
Groundstation user
:type clientA:
L{String}
:ivar clientB:
Spacecraft user
:type clientB:
L{String}
:ivar bGSuser:
Indicates if the current user is a GS user (True) or an SC user (False).
If this variable is None, the user has not yet connected.
:type bGSuser:
L{Boolean}
"""
logout = None
sUsername = ''
iTimeOut = 259200 # seconds
session = None
# avatar = None
# Method that checks when a connection has been established with Twisted
def connectionMade(self):
self.setTimeout(self.iTimeOut)
super(CredReceiver, self).connectionMade()
self.factory.clients.append(self)
def dataReceived(self, data):
log.msg(self.sUsername + ' session timeout reset')
self.resetTimeout()
super(CredReceiver, self).dataReceived(data)
def timeoutConnection(self):
log.err('Session timeout expired')
self.transport.loseConnection()
def connectionLost(self, reason):
# Remove client from active users
if self.session is not None:
self.session.cancel()
log.err(reason.getErrorMessage())
log.msg('Active clients: ' +
str(len(self.factory.active_protocols)))
log.msg('Active connections: ' +
str(len(self.factory.active_connections)/2))
self.setTimeout(None) # Cancel the pending timeout
self.transport.loseConnection()
super(CredReceiver, self).connectionLost(reason)
def login(self, sUsername, sPassword):
# self.factory = CredAMPServerFactory() ¿?
if sUsername in self.factory.active_protocols:
log.err("Client already logged in.")
raise UnauthorizedLogin("Client already logged in.")
else:
self.sUsername = sUsername
self.factory.active_protocols[sUsername] = None
#
# Don't mix asynchronous and synchronous code.
# Try-except statements aren't allowed.
#
try:
self.rpc = Satnet_RPC(sUsername, sPassword)
self.factory.active_protocols[sUsername] = self
log.msg('Connection made')
log.msg('Active clients: ' +
str(len(self.factory.active_protocols)))
log.msg('Active connections: ' +
str(len(self.factory.active_connections)))
return {'bAuthenticated': True}
except BadCredentials:
log.err('Incorrect username and/or password')
log.msg(self.factory.active_protocols)
raise BadCredentials("Incorrect username and/or password")
Login.responder(login)
# Check user name
def iStartRemote(self, iSlotId):
log.msg("(" + self.sUsername + ") --------- Start Remote ---------")
self.iSlotId = iSlotId
slot = Satnet_GetSlot(self.iSlotId)
self.slot = slot.slot
# If slot NOT operational yet...
if not self.slot:
log.err('Slot ' + str(iSlotId) + ' is not yet operational')
raise SlotErrorNotification(
'Slot ' + str(iSlotId) + ' is not yet operational')
else:
# Now only works in test cases
if self.slot['state'] == 'TEST':
gs_user = self.slot['gs_username']
sc_user = self.slot['sc_username']
# If this slot has not been assigned to this user...
if gs_user != self.sUsername and sc_user != self.sUsername:
log.err('This slot has not been assigned to this user')
raise SlotErrorNotification('This user is not ' +
'assigned to this slot')
# if the GS user and the SC user belong to the same client...
elif gs_user == self.sUsername and sc_user == self.sUsername:
log.msg('Both MCC and GSS belong to the same client')
return {'iResult': StartRemote.CLIENTS_COINCIDE}
# if the remote client is the SC user...
elif gs_user == self.sUsername:
self.bGSuser = True
return self.iCreateConnection(self.slot['ending_time'],
iSlotId, gs_user, sc_user)
# if the remote client is the GS user...
elif sc_user == self.sUsername:
self.bGSuser = False
return self.iCreateConnection(self.slot['ending_time'],
iSlotId, sc_user, gs_user)
if self.slot['state'] != 'TEST':
log.err('Slot ' + str(iSlotId) + ' has not yet been reserved')
raise SlotErrorNotification('Slot ' + str(iSlotId) +
' has not yet been reserved')
StartRemote.responder(iStartRemote)
def iCreateConnection(self, iSlotEnd, iSlotId, clientA, clientC):
"""
Create a new connection checking the time slot.
ClientA sends data
ClientC receive data
"""
clientA = str(clientA)
clientC = str(clientC)
timeNow = misc.localize_datetime_utc(datetime.utcnow())
timeNow = int(time.mktime(timeNow.timetuple()))
timeEnd = arrow.get(str(iSlotEnd))
timeEnd = timeEnd.timestamp
slot_remaining_time = int(timeEnd) - timeNow
log.msg('Slot remaining time: ' + str(slot_remaining_time))
if (slot_remaining_time <= 0):
log.err("This slot (" + str(iSlotId) + ") has expired.")
raise SlotErrorNotification("This slot (" + str(iSlotId) +
" has expired.")
# Schedule a delayed call to finish the slot at the correct time.
self.session = reactor.callLater(slot_remaining_time,
self.vSlotEnd, iSlotId)
if clientC not in self.factory.active_protocols:
log.msg("Remote user " + clientC + " not connected yet.")
# if the remote user isn't available, remove the local user from
# the active connections list
self.factory.active_connections[clientA] = None
return {'iResult': StartRemote.REMOTE_NOT_CONNECTED}
else:
log.msg("Remote user " + clientC + ".")
self.factory.active_connections[clientC] = clientA
self.factory.active_connections[clientA] = clientC
self.factory.active_protocols[clientC].callRemote(
NotifyEvent, iEvent=NotifyEvent.REMOTE_CONNECTED,
sDetails=str(clientA))
self.callRemote(
NotifyEvent, iEvent=NotifyEvent.REMOTE_CONNECTED,
sDetails=str(clientC))
return {'iResult': StartRemote.REMOTE_READY}
def vSlotEnd(self, iSlotId):
log.msg("(" + self.sUsername + ") Slot " +
str(iSlotId) + ' has finished')
self.callRemote(NotifyEvent, iEvent=NotifyEvent.SLOT_END,
sDetails=None)
# self.session is the IDelayedCall scheduled in iCreateConnection; clear it
self.session = None
def vEndRemote(self):
log.msg("(" + self.sUsername + ") --------- End Remote ---------")
# Disconnect local user
self.transport.loseConnection()
self.factory.active_protocols.pop(self.sUsername)
# Try to remove the remote connection
try:
# Notify remote user
self.factory.active_protocols[self.factory.active_connections[
self.sUsername]].callRemote(NotifyEvent,
iEvent=NotifyEvent.END_REMOTE,
sDetails=None)
# Close remote connection
self.factory.active_protocols[self.factory.active_connections[
self.sUsername]].transport.loseConnection()
# Remove both entries from the active connections map
self.factory.active_connections.pop(
self.factory.active_connections[self.sUsername])
self.factory.active_connections.pop(self.sUsername)
except:
log.msg("Connections already cleared")
return {'bResult': True}
EndRemote.responder(vEndRemote)
def vSendMsg(self, sMsg, iTimestamp):
log.msg("(" + self.sUsername + ") --------- Send Message ---------")
# If the client hasn't started a connection via the StartRemote command...
if self.sUsername not in self.factory.active_connections:
log.msg('Connection not available. Call StartRemote command first')
raise SlotErrorNotification(
'Connection not available. Call StartRemote command first.')
# ... if the SC operator is not connected, sent messages will be saved
# as passive messages...
elif (self.factory.active_connections[self.sUsername] is None and
self.bGSuser is True):
log.msg("RPC Call to Satnet_StorePassiveMessage")
# ... if the GS operator is not connected, the remote SC client will be
# notified to wait for the GS to connect...
elif (self.factory.active_connections[self.sUsername] is None and
self.bGSuser is False):
self.callRemote(NotifyEvent,
iEvent=NotifyEvent.REMOTE_DISCONNECTED,
sDetails=None)
else:
# Try to send a message to remote client
try:
self.factory.active_protocols[self.factory.active_connections[
self.sUsername]].callRemote(NotifyMsg, sMsg=sMsg)
except:
raise WrongFormatNotification("Error forwarding frame "
"to remote user.")
# Try to store the message in the remote SatNet server
forwarded = ''
self.storeMessage = Satnet_StoreMessage(self.iSlotId, self.bGSuser,
forwarded, iTimestamp,
sMsg)
return {'bResult': True}
SendMsg.responder(vSendMsg)
class CredAMPServerFactory(ServerFactory):
"""
Server factory useful for creating L{CredReceiver} instances.
"""
clients = []
active_protocols = {}
active_connections = {}
protocol = CredReceiver
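A minimal sketch (illustrative only, not part of the original module) of how CredAMPServerFactory could be served with the Twisted reactor; the port number is a placeholder:
import sys
from twisted.internet import reactor
from twisted.python import log
if __name__ == '__main__':
    # Illustrative wiring only: log to stdout and listen on a placeholder port.
    log.startLogging(sys.stdout)
    factory = CredAMPServerFactory()
    reactor.listenTCP(1234, factory)
    reactor.run()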
Update timeout.
# coding=utf-8
import os
import sys
import time
import misc
import arrow
from datetime import datetime
from login import Login
from errors import BadCredentials, SlotErrorNotification, WrongFormatNotification
from twisted.internet import reactor
from twisted.internet.protocol import ServerFactory
from twisted.cred.error import UnauthorizedLogin
from twisted.protocols.amp import AMP
from twisted.protocols.policies import TimeoutMixin
from twisted.python import log
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from ampCommands import StartRemote
from ampCommands import EndRemote
from ampCommands import SendMsg
from ampCommands import NotifyMsg
from ampCommands import NotifyEvent
from rpcrequests import Satnet_RPC
from server_amp import *
from rpcrequests import Satnet_GetSlot
from rpcrequests import Satnet_StorePassiveMessage
from rpcrequests import Satnet_StoreMessage
"""
Copyright 2014, 2015, 2016 Xabier Crespo Álvarez, Samuel Góngora García
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
:Author:
Xabier Crespo Álvarez (xabicrespog@gmail.com)
Samuel Góngora García (s.gongoragarcia@gmail.com)
"""
__author__ = 'xabicrespog@gmail.com'
__author__ = 's.gongoragarcia@gmail.com'
class CredReceiver(AMP, TimeoutMixin):
"""
Integration between AMP and L{twisted.cred}. This class is only intended
to be used for credentials purposes. The specific SATNET protocol will be
implemented in L{SATNETServer} (see server_amp.py).
:ivar sUsername:
Each protocol belongs to a User. This field represents User.username
:type sUsername:
L{String}
:ivar iTimeOut:
The duration of the session timeout in seconds. After this
time the user will be automatically disconnected.
:type iTimeOut:
L{int}
:ivar session:
Reference to an object in charge of removing the user from
active_protocols when it is inactive.
:type session:
L{IDelayedCall}
:ivar clientA:
Groundstation user
:type clientA:
L{String}
:ivar clientB:
Spacecraft user
:type clientB:
L{String}
:ivar bGSuser:
Indicates if the current user is a GS user (True) or an SC user (False).
If this variable is None, the user has not yet connected.
:type bGSuser:
L{Boolean}
"""
logout = None
sUsername = ''
iTimeOut = 60 # seconds
session = None
# avatar = None
def connectionMade(self):
self.setTimeout(self.iTimeOut)
super(CredReceiver, self).connectionMade()
self.factory.clients.append(self)
def dataReceived(self, data):
log.msg(self.sUsername + ' session timeout reset')
self.resetTimeout()
super(CredReceiver, self).dataReceived(data)
def timeoutConnection(self):
log.err('Session timeout expired')
self.transport.loseConnection()
def connectionLost(self, reason):
# Remove client from active users
if self.session is not None:
self.session.cancel()
log.err(reason.getErrorMessage())
log.msg('Active clients: ' +
str(len(self.factory.active_protocols)))
log.msg('Active connections: ' +
str(len(self.factory.active_connections)/2))
self.setTimeout(None) # Cancel the pending timeout
self.transport.loseConnection()
super(CredReceiver, self).connectionLost(reason)
def login(self, sUsername, sPassword):
# self.factory = CredAMPServerFactory() ¿?
if sUsername in self.factory.active_protocols:
log.err("Client already logged in.")
raise UnauthorizedLogin("Client already logged in.")
else:
self.sUsername = sUsername
self.factory.active_protocols[sUsername] = None
#
# Don't mix asynchronous and synchronous code.
# Try-except statements aren't allowed.
#
try:
self.rpc = Satnet_RPC(sUsername, sPassword)
self.factory.active_protocols[sUsername] = self
log.msg('Connection made')
log.msg('Active clients: ' +
str(len(self.factory.active_protocols)))
log.msg('Active connections: ' +
str(len(self.factory.active_connections)))
return {'bAuthenticated': True}
except BadCredentials:
log.err('Incorrect username and/or password')
log.msg(self.factory.active_protocols)
raise BadCredentials("Incorrect username and/or password")
Login.responder(login)
# Check user name
def iStartRemote(self, iSlotId):
log.msg("(" + self.sUsername + ") --------- Start Remote ---------")
self.iSlotId = iSlotId
slot = Satnet_GetSlot(self.iSlotId)
self.slot = slot.slot
# If slot NOT operational yet...
if not self.slot:
log.err('Slot ' + str(iSlotId) + ' is not yet operational')
raise SlotErrorNotification(
'Slot ' + str(iSlotId) + ' is not yet operational')
else:
# Now only works in test cases
if self.slot['state'] == 'TEST':
gs_user = self.slot['gs_username']
sc_user = self.slot['sc_username']
# If this slot has not been assigned to this user...
if gs_user != self.sUsername and sc_user != self.sUsername:
log.err('This slot has not been assigned to this user')
raise SlotErrorNotification('This user is not ' +
'assigned to this slot')
# if the GS user and the SC user belong to the same client...
elif gs_user == self.sUsername and sc_user == self.sUsername:
log.msg('Both MCC and GSS belong to the same client')
return {'iResult': StartRemote.CLIENTS_COINCIDE}
# if the remote client is the SC user...
elif gs_user == self.sUsername:
self.bGSuser = True
return self.iCreateConnection(self.slot['ending_time'],
iSlotId, gs_user, sc_user)
# if the remote client is the GS user...
elif sc_user == self.sUsername:
self.bGSuser = False
return self.iCreateConnection(self.slot['ending_time'],
iSlotId, sc_user, gs_user)
if self.slot['state'] != 'TEST':
log.err('Slot ' + str(iSlotId) + ' has not yet been reserved')
raise SlotErrorNotification('Slot ' + str(iSlotId) +
' has not yet been reserved')
StartRemote.responder(iStartRemote)
def iCreateConnection(self, iSlotEnd, iSlotId, clientA, clientC):
"""
Create a new connection checking the time slot.
ClientA sends data
ClientC receive data
"""
clientA = str(clientA)
clientC = str(clientC)
timeNow = misc.localize_datetime_utc(datetime.utcnow())
timeNow = int(time.mktime(timeNow.timetuple()))
timeEnd = arrow.get(str(iSlotEnd))
timeEnd = timeEnd.timestamp
slot_remaining_time = int(timeEnd) - timeNow
log.msg('Slot remaining time: ' + str(slot_remaining_time))
if (slot_remaining_time <= 0):
log.err("This slot (" + str(iSlotId) + ") has expired.")
raise SlotErrorNotification("This slot (" + str(iSlotId) +
" has expired.")
# Schedule a delayed call to finish the slot at the correct time.
self.session = reactor.callLater(slot_remaining_time,
self.vSlotEnd, iSlotId)
if clientC not in self.factory.active_protocols:
log.msg("Remote user " + clientC + " not connected yet.")
# if the remote user isn't available, remove the local user from
# the active connections list
self.factory.active_connections[clientA] = None
return {'iResult': StartRemote.REMOTE_NOT_CONNECTED}
else:
log.msg("Remote user " + clientC + ".")
self.factory.active_connections[clientC] = clientA
self.factory.active_connections[clientA] = clientC
self.factory.active_protocols[clientC].callRemote(
NotifyEvent, iEvent=NotifyEvent.REMOTE_CONNECTED,
sDetails=str(clientA))
self.callRemote(
NotifyEvent, iEvent=NotifyEvent.REMOTE_CONNECTED,
sDetails=str(clientC))
return {'iResult': StartRemote.REMOTE_READY}
def vSlotEnd(self, iSlotId):
log.msg("(" + self.sUsername + ") Slot " +
str(iSlotId) + ' has finished')
self.callRemote(NotifyEvent, iEvent=NotifyEvent.SLOT_END,
sDetails=None)
# self.session is the IDelayedCall scheduled in iCreateConnection; clear it
self.session = None
def vEndRemote(self):
log.msg("(" + self.sUsername + ") --------- End Remote ---------")
# Disconnect local user
self.transport.loseConnection()
self.factory.active_protocols.pop(self.sUsername)
# Try to remove the remote connection
try:
# Notify remote user
self.factory.active_protocols[self.factory.active_connections[
self.sUsername]].callRemote(NotifyEvent,
iEvent=NotifyEvent.END_REMOTE,
sDetails=None)
# Close remote connection
self.factory.active_protocols[self.factory.active_connections[
self.sUsername]].transport.loseConnection()
# Remove both entries from the active connections map
self.factory.active_connections.pop(
self.factory.active_connections[self.sUsername])
self.factory.active_connections.pop(self.sUsername)
except:
log.msg("Connections already cleared")
return {'bResult': True}
EndRemote.responder(vEndRemote)
def vSendMsg(self, sMsg, iTimestamp):
log.msg("(" + self.sUsername + ") --------- Send Message ---------")
# If the client hasn't started a connection via the StartRemote command...
if self.sUsername not in self.factory.active_connections:
log.msg('Connection not available. Call StartRemote command first')
raise SlotErrorNotification(
'Connection not available. Call StartRemote command first.')
# ... if the SC operator is not connected, sent messages will be saved
# as passive messages...
elif (self.factory.active_connections[self.sUsername] is None and
self.bGSuser is True):
log.msg("RPC Call to Satnet_StorePassiveMessage")
# ... if the GS operator is not connected, the remote SC client will be
# notified to wait for the GS to connect...
elif (self.factory.active_connections[self.sUsername] is None and
self.bGSuser is False):
self.callRemote(NotifyEvent,
iEvent=NotifyEvent.REMOTE_DISCONNECTED,
sDetails=None)
else:
# Try to send a message to remote client
try:
self.factory.active_protocols[self.factory.active_connections[
self.sUsername]].callRemote(NotifyMsg, sMsg=sMsg)
except:
raise WrongFormatNotification("Error forwarding frame "
"to remote user.")
# Try to store the message in the remote SatNet server
forwarded = ''
self.storeMessage = Satnet_StoreMessage(self.iSlotId, self.bGSuser,
forwarded, iTimestamp,
sMsg)
return {'bResult': True}
SendMsg.responder(vSendMsg)
class CredAMPServerFactory(ServerFactory):
"""
Server factory useful for creating L{CredReceiver} instances.
"""
clients = []
active_protocols = {}
active_connections = {}
protocol = CredReceiver
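For reference, a hedged client-side sketch (not part of the original sources) of how a client could log in over AMP; the argument names sUsername and sPassword are assumed from the responder signature above, and the host, port and credentials are placeholders:
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from twisted.protocols.amp import AMP
from login import Login
def on_connected(proto):
    # Call the Login command; the credential values are placeholders.
    return proto.callRemote(Login, sUsername='user', sPassword='secret')
d = ClientCreator(reactor, AMP).connectTCP('localhost', 1234)
d.addCallback(on_connected)
reactor.run()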
|
from rj_lib_parse_xds import rj_parse_idxref_xds_inp, rj_parse_idxref_lp, \
rj_parse_integrate_lp, rj_parse_xds_correct_lp
from rj_lib_run_job import rj_run_job
from rj_lib_lattice_symmetry import lattice_symmetry, sort_lattices, \
lattice_spacegroup
import shutil
import sys
import os
import time
def nint(a):
i = int(a)
if (a - i) > 0.5:
i += 1
return i
def lattice_test(integrate_lp, xds_inp_file):
images, phi, cell, records = rj_parse_integrate_lp(
open(integrate_lp).readlines())
# next work through the XDS.INP file to get the proper name template
# out...
nt = None
distance = None
for record in open(xds_inp_file, 'r').readlines():
if 'NAME_TEMPLATE_OF_DATA_FRAMES' in record:
nt = record.strip()
if 'DETECTOR_DISTANCE' in record:
distance = record.strip()
if not nt:
raise RuntimeError, 'filename template not found in %s' % xds_inp_file
if not distance:
raise RuntimeError, 'distance not found in %s' % xds_inp_file
r_new = [distance]
for r in records:
if not 'NAME_TEMPLATE_OF_DATA_FRAMES' in r:
r_new.append(r)
else:
r_new.append(nt)
records = r_new
# ok, in here need to rerun XDS with all of the data from all of
# the images and the triclinic target cell, then parse out the
# solutions from the CORRECT.LP file (applying the cell constants -
# done in the parser) and then use *these* as the target, as the
# lattice symmetry code (interestingly) does not always give the
# right answer...
standard = [
'JOB=CORRECT',
'MAXIMUM_NUMBER_OF_PROCESSORS=4',
'CORRECTIONS=!',
'REFINE(CORRECT)=CELL',
'OVERLOAD=65000',
'DIRECTION_OF_DETECTOR_X-AXIS=1.0 0.0 0.0',
'DIRECTION_OF_DETECTOR_Y-AXIS=0.0 1.0 0.0',
'TRUSTED_REGION=0.0 1.41'
]
# first get the list of possible lattices - do this by running CORRECT
# with all of the images, then looking at the favourite settings for the
# P1 result (or something) - meh.
fout = open('XDS.INP', 'w')
for record in standard:
fout.write('%s\n' % record)
for record in records:
fout.write('%s\n' % record)
fout.write('DATA_RANGE= %d %d\n' % images)
fout.write('OSCILLATION_RANGE= %.2f\n' % phi)
fout.write(
'UNIT_CELL_CONSTANTS= %.2f %.2f %.2f %.2f %.2f %.2f\n' % tuple(cell))
fout.write('SPACE_GROUP_NUMBER=%d\n' % 1)
fout.close()
output = rj_run_job('xds_par', [], [])
# read CORRECT.LP to get the right solutions...
result = rj_parse_xds_correct_lp(open('CORRECT.LP', 'r').readlines())
for lattice in result:
cp = '%.2f %.2f %.2f %.2f %.2f %.2f' % result[lattice]['cell']
# print '%s %s' % (lattice, cp)
# result = lattice_symmetry(cell)
lattices = sort_lattices(result)
# then iterate through them...
data = { }
for l in lattices:
data[l] = { }
c = result[l]['cell']
# print 'Lattice: %s' % l
# print 'Cell: %.2f %.2f %.2f %.2f %.2f %.2f' % tuple(c)
# then iterate through the image ranges
w = nint(10.0/phi)
m = nint((images[1] - images[0] + 1) / w)
for j in range(m):
start = j * w + 1
end = j * w + w
data[l][j] = { }
fout = open('XDS.INP', 'w')
for record in standard:
fout.write('%s\n' % record)
for record in records:
fout.write('%s\n' % record)
fout.write('DATA_RANGE= %d %d\n' % (start, end))
fout.write('OSCILLATION_RANGE= %.2f\n' % phi)
fout.write(
'UNIT_CELL_CONSTANTS= %.2f %.2f %.2f %.2f %.2f %.2f\n' % tuple(c))
fout.write('SPACE_GROUP_NUMBER=%d\n' % lattice_spacegroup(l))
fout.close()
output = rj_run_job('xds_par', [], [])
# now read out the records I want from CORRECT.LP...
rmsd = None
rmsp = None
for record in open('CORRECT.LP').readlines():
if 'STANDARD DEVIATION OF SPOT POSITION' in record:
rmsd = float(record.split()[-1])
if 'STANDARD DEVIATION OF SPINDLE POSITION' in record:
rmsp = float(record.split()[-1])
if not rmsp or not rmsd:
raise RuntimeError, 'refinement failed'
data[l][j] = {'d':rmsd,
'p':rmsp}
# now tabulate the results
for j in range(m):
record = '%d' % j
for l in lattices[1:]:
record += ' %.3f %.3f' % (data[l][j]['d'] / data['aP'][j]['d'],
data[l][j]['p'] / data['aP'][j]['p'])
print record
if __name__ == '__main__':
lattice_test('INTEGRATE.LP', 'integrate/XDS.INP')
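As a quick illustration (not part of the script), nint() rounds to the nearest integer but only rounds up when the fractional part is strictly greater than 0.5, so exact halves round down:
# Illustrative checks of the rounding behaviour of nint() defined above.
assert nint(9.4) == 9
assert nint(9.5) == 9   # exact halves round down
assert nint(9.6) == 10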
Now calculates mean, sd &c.
Ready.
python /home/gw56/CVS/xia2/Applications/ResearchJiffies/rj_xds_lattice_test.py
0 1.224 1.000 2.414 1.100
1 1.328 1.000 2.414 1.100
2 1.317 1.000 2.450 1.222
3 1.286 1.000 2.302 1.556
4 1.169 1.000 2.015 2.250
5 1.099 1.000 1.944 2.000
6 1.013 1.000 1.734 2.000
7 1.000 1.000 1.706 1.667
8 0.990 1.000 1.680 1.500
M 1.158 1.000 2.073 1.599
S 0.130 0.000 0.307 0.395
mC 1.215 0.000
oI 3.491 1.519
from rj_lib_parse_xds import rj_parse_idxref_xds_inp, rj_parse_idxref_lp, \
rj_parse_integrate_lp, rj_parse_xds_correct_lp
from rj_lib_run_job import rj_run_job
from rj_lib_lattice_symmetry import lattice_symmetry, sort_lattices, \
lattice_spacegroup
import shutil
import sys
import os
import time
import math
def nint(a):
i = int(a)
if (a - i) > 0.5:
i += 1
return i
def meansd(values):
mean = sum(values) / len(values)
var = sum([(v - mean) * (v - mean) for v in values]) / len(values)
return mean, math.sqrt(var)
def lattice_test(integrate_lp, xds_inp_file):
images, phi, cell, records = rj_parse_integrate_lp(
open(integrate_lp).readlines())
# next work through the XDS.INP file to get the proper name template
# out...
nt = None
distance = None
for record in open(xds_inp_file, 'r').readlines():
if 'NAME_TEMPLATE_OF_DATA_FRAMES' in record:
nt = record.strip()
if 'DETECTOR_DISTANCE' in record:
distance = record.strip()
if not nt:
raise RuntimeError, 'filename template not found in %s' % xds_inp_file
if not distance:
raise RuntimeError, 'distance not found in %s' % xds_inp_file
r_new = [distance]
for r in records:
if not 'NAME_TEMPLATE_OF_DATA_FRAMES' in r:
r_new.append(r)
else:
r_new.append(nt)
records = r_new
# ok, in here need to rerun XDS with all of the data from all of
# the images and the triclinic target cell, then parse out the
# solutions from the CORRECT.LP file (applying the cell constants -
# done in the parser) and then use *these* as the target, as the
# lattice symmetry code (interestingly) does not always give the
# right answer...
standard = [
'JOB=CORRECT',
'MAXIMUM_NUMBER_OF_PROCESSORS=4',
'CORRECTIONS=!',
'REFINE(CORRECT)=CELL',
'OVERLOAD=65000',
'DIRECTION_OF_DETECTOR_X-AXIS=1.0 0.0 0.0',
'DIRECTION_OF_DETECTOR_Y-AXIS=0.0 1.0 0.0',
'TRUSTED_REGION=0.0 1.41'
]
# first get the list of possible lattices - do this by running CORRECT
# with all of the images, then looking at the favourite settings for the
# P1 result (or something) - meh.
fout = open('XDS.INP', 'w')
for record in standard:
fout.write('%s\n' % record)
for record in records:
fout.write('%s\n' % record)
fout.write('DATA_RANGE= %d %d\n' % images)
fout.write('OSCILLATION_RANGE= %.2f\n' % phi)
fout.write(
'UNIT_CELL_CONSTANTS= %.2f %.2f %.2f %.2f %.2f %.2f\n' % tuple(cell))
fout.write('SPACE_GROUP_NUMBER=%d\n' % 1)
fout.close()
output = rj_run_job('xds_par', [], [])
# read CORRECT.LP to get the right solutions...
result = rj_parse_xds_correct_lp(open('CORRECT.LP', 'r').readlines())
for lattice in result:
cp = '%.2f %.2f %.2f %.2f %.2f %.2f' % result[lattice]['cell']
# print '%s %s' % (lattice, cp)
# result = lattice_symmetry(cell)
lattices = sort_lattices(result)
# then iterate through them...
data = { }
for l in lattices:
data[l] = { }
c = result[l]['cell']
# print 'Lattice: %s' % l
# print 'Cell: %.2f %.2f %.2f %.2f %.2f %.2f' % tuple(c)
# then iterate through the image ranges
w = nint(10.0/phi)
m = nint((images[1] - images[0] + 1) / w)
for j in range(m):
start = j * w + 1
end = j * w + w
data[l][j] = { }
fout = open('XDS.INP', 'w')
for record in standard:
fout.write('%s\n' % record)
for record in records:
fout.write('%s\n' % record)
fout.write('DATA_RANGE= %d %d\n' % (start, end))
fout.write('OSCILLATION_RANGE= %.2f\n' % phi)
fout.write(
'UNIT_CELL_CONSTANTS= %.2f %.2f %.2f %.2f %.2f %.2f\n' % tuple(c))
fout.write('SPACE_GROUP_NUMBER=%d\n' % lattice_spacegroup(l))
fout.close()
output = rj_run_job('xds_par', [], [])
# now read out the records I want from CORRECT.LP...
rmsd = None
rmsp = None
for record in open('CORRECT.LP').readlines():
if 'STANDARD DEVIATION OF SPOT POSITION' in record:
rmsd = float(record.split()[-1])
if 'STANDARD DEVIATION OF SPINDLE POSITION' in record:
rmsp = float(record.split()[-1])
if not rmsp or not rmsd:
raise RuntimeError, 'refinement failed'
data[l][j] = {'d':rmsd,
'p':rmsp}
# now tabulate the results
for j in range(m):
record = '%d' % j
for l in lattices[1:]:
record += ' %.3f %.3f' % (data[l][j]['d'] / data['aP'][j]['d'],
data[l][j]['p'] / data['aP'][j]['p'])
print record
# now print out the averages, sd's
recordm = 'M'
records = 'S'
sigma = { }
for l in lattices[1:]:
values = [(data[l][j]['d'] / data['aP'][j]['d']) for j in range(m)]
md, sd = meansd(values)
values = [(data[l][j]['p'] / data['aP'][j]['p']) for j in range(m)]
mp, sp = meansd(values)
recordm += ' %.3f %.3f' % (md, mp)
records += ' %.3f %.3f' % (sd, sp)
sigma[l] = { }
if sd > 0:
sigma[l]['d'] = ((md - 1) / sd)
else:
sigma[l]['d'] = 0.0
if sp > 0:
sigma[l]['p'] = ((mp - 1) / sp)
else:
sigma[l]['p'] = 0.0
print recordm
print records
for l in lattices[1:]:
d = sigma[l]['d']
p = sigma[l]['p']
print '%s %.3f %.3f' % (l, d, p)
if __name__ == '__main__':
lattice_test('INTEGRATE.LP', 'integrate/XDS.INP')
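As a sanity check (illustrative only), meansd() uses the population variance; feeding it the first ratio column of the sample output above reproduces the M and S rows:
# Values taken from the first ratio column of the tabulated output above.
values = [1.224, 1.328, 1.317, 1.286, 1.169, 1.099, 1.013, 1.000, 0.990]
m, s = meansd(values)
print('M %.3f  S %.3f' % (m, s))   # -> M 1.158  S 0.130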
|
# coding:utf-8
import os
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import tornado.escape
from tornado.options import define, options
import logging
# Holds the clients currently displaying the slide
slide_waiters = set()
class SlideHandler(tornado.web.RequestHandler):
def get(self):
self.render("slide.html")
class ControllerHandler(tornado.web.RequestHandler):
def get(self):
self.render("controller.html", messages=SlideSocketHandler.command_cache)
class SlideSocketHandler(tornado.websocket.WebSocketHandler):
command_cache = []
command_cache_size = 200
def check_origin(self, origin):
return True
def get_compression_options(self):
# Non-None enables compression with default options.
return {}
# Called when slide.html establishes a connection; the client is added
def open(self):
if self not in slide_waiters:
slide_waiters.add(self)
# Called when a message arrives from slide.html
def on_message(self, message):
logging.info("got message %r", message)
parsed = tornado.escape.json_decode(message)
command = {"keyCode": parsed["keyCode"]}
SlideSocketHandler.update_cache(command)
SlideSocketHandler.send_updates(command)
# When slide.html is closed and the connection drops, the client is removed
def on_close(self):
if self in slide_waiters:
slide_waiters.remove(self)
@classmethod
def update_cache(cls, command):
cls.command_cache.append(command)
if len(cls.command_cache) > cls.command_cache_size:
cls.command_cache = cls.command_cache[-cls.command_cache_size:]
@classmethod
def send_updates(cls, command):
logging.info("sending message to %d waiters", len(slide_waiters))
for waiter in slide_waiters:
try:
waiter.write_message(command)
except:
logging.error("Error sending message", exc_info=True)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", SlideHandler),
(r"/controller", ControllerHandler),
(r"/ws", SlideSocketHandler),
]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
)
tornado.web.Application.__init__(self, handlers, **settings)
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
port = int(os.environ.get("PORT", 80))
http_server.listen(port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
change handler slide
# coding:utf-8
import os
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import tornado.escape
from tornado.options import define, options
import logging
# Holds the clients currently displaying the slide
slide_waiters = set()
class SlideHandler(tornado.web.RequestHandler):
def get(self):
self.render("slide.html")
class ControllerHandler(tornado.web.RequestHandler):
def get(self):
self.render("controller.html", messages=SlideSocketHandler.command_cache)
class SlideSocketHandler(tornado.websocket.WebSocketHandler):
command_cache = []
command_cache_size = 200
def check_origin(self, origin):
return True
def get_compression_options(self):
# Non-None enables compression with default options.
return {}
# Called when slide.html establishes a connection; the client is added
def open(self):
if self not in slide_waiters:
slide_waiters.add(self)
# Called when a message arrives from slide.html
def on_message(self, message):
logging.info("got message %r", message)
parsed = tornado.escape.json_decode(message)
command = {"keyCode": parsed["keyCode"]}
SlideSocketHandler.update_cache(command)
SlideSocketHandler.send_updates(command)
# When slide.html is closed and the connection drops, the client is removed
def on_close(self):
if self in slide_waiters:
slide_waiters.remove(self)
@classmethod
def update_cache(cls, command):
cls.command_cache.append(command)
if len(cls.command_cache) > cls.command_cache_size:
cls.command_cache = cls.command_cache[-cls.command_cache_size:]
@classmethod
def send_updates(cls, command):
logging.info("sending message to %d waiters", len(slide_waiters))
for waiter in slide_waiters:
try:
waiter.write_message(command)
except:
logging.error("Error sending message", exc_info=True)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/slide", SlideHandler),
(r"/controller", ControllerHandler),
(r"/ws", SlideSocketHandler),
]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
)
tornado.web.Application.__init__(self, handlers, **settings)
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
port = int(os.environ.get("PORT", 80))
http_server.listen(port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
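A hedged, illustrative client (not part of the app) showing how a controller could push a key code to the /ws endpoint; the host, port and key code are placeholders and the sketch assumes Tornado 5+ on Python 3.5+:
from tornado import escape, ioloop, websocket
async def send_key(key_code):
    # Connect to the websocket endpoint and forward one key code as JSON.
    conn = await websocket.websocket_connect("ws://localhost:80/ws")
    await conn.write_message(escape.json_encode({"keyCode": key_code}))
    conn.close()
if __name__ == "__main__":
    ioloop.IOLoop.current().run_sync(lambda: send_key(39))  # 39 = right arrow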
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "CRF"
addresses_name = "2021-03-24T16:24:49.782410/cardiff_deduped.tsv"
stations_name = "2021-03-24T16:24:49.782410/cardiff_deduped.tsv"
elections = ["2021-05-06"]
csv_delimiter = "\t"
csv_encoding = "windows-1252"
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn == "100100110392":
return None
if record.addressline6 in [
"CF24 3DZ",
"CF3 0UH",
"CF14 2FN",
"CF14 6PE",
"CF24 4RU",
"CF3 4LL",
"CF5 6HF",
"CF14 9UA",
]:
return None
return super().address_record_to_dict(record)
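As an aside (illustrative only), the property_urn values arrive as zero-padded strings, which is why they are normalised before comparison:
# Normalise a UPRN the same way address_record_to_dict() does above.
uprn = " 0100100110392 ".strip().lstrip("0")
print(uprn)  # -> 100100110392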
Import script for City of Cardiff (2022-05-05) (closes #4196)
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "CRF"
addresses_name = (
"2022-05-05/2022-03-03T10:11:12.032905/Democracy_Club__05May2022.tsv"
)
stations_name = (
"2022-05-05/2022-03-03T10:11:12.032905/Democracy_Club__05May2022.tsv"
)
elections = ["2022-05-05"]
csv_encoding = "windows-1252"
csv_delimiter = "\t"
def address_record_to_dict(self, record):
if record.addressline6 in ["CF24 2DG", "CF3 4LL", "CF14 9UA"]:
return None
return super().address_record_to_dict(record)
|
# Python Imports
import select
import socket
import sys
import traceback
# Local imports
import objects
constants = objects.constants
from common import Connection, BUFFER_SIZE
socket_error = (socket.error,)
socket_fatal = (IOError,)
class ServerConnection(Connection):
def __init__(self, s, address, debug=False):
Connection.__init__(self)
self.address = address
self.setup(s, debug=debug, nb=True)
self.poll = self.initalpoll
def initalpoll(self):
"""\
Checks to see if any packets are on the line
"""
print "Inital Poll"
buffer = self.buffered['bytes-received']
buffer.write(self.s.recv(BUFFER_SIZE))
if buffer.peek(2) == "TP":
if self.debug:
print "Got a normal tp connection..."
self.poll = self.tppoll
return self.poll()
if buffer.peek(17).startswith("POST /"):
if self.debug:
print "Got a http connection..."
self.s.recv(len(self.buffer)) # Clear all the already received data...
self.poll = self.httppoll
return self.poll()
# We have received too much data; we need to close this connection now
if buffer.left() > 18:
raise IOError("No valid connection header found...")
def httppoll(self):
print "HTTP Poll"
buffer = self.buffered['bytes-received']
buffer.write(self.s.recv(BUFFER_SIZE))
# FIXME: This is broken
if self.buffer.endswith("\r\n\r\n"):
if self.debug:
print "Finished the http headers..."
print self.buffer
# Send the http headers
self.s.send("HTTP/1.0 200 OK")
self.s.send("Cache-Control: no-cache, private\n")
self.s.send("Content-Type: application/binary\n")
self.s.send("\n")
self.buffer = ""
self.poll = self.tppoll
return self.poll()
# We have received too much data; we need to close this connection now
if buffer.left() > 1024:
raise IOError("HTTP Request was to large!")
def tppoll(self):
print "TP Poll"
# Get the packets
try:
self._recv(-1)
except socket_error, e:
print self, e
sequences = self.buffered['frames-received'].keys()
sequences.sort()
print "tppoll", sequences
for sequence in sequences:
p = self._recv(sequence)
if not p:
continue
success = False
bases = [p.__class__]
while len(bases) > 0:
print bases
c = bases.pop(0)
function = "On" + c.__name__
print function
if hasattr(self, function):
try:
success = getattr(self, function)(p)
except:
type, val, tb = sys.exc_info()
print ''.join(traceback.format_exception(type, val, tb))
break
bases += list(c.__bases__)
if not success:
self._send(objects.Fail(p.sequence, constants.FAIL_PERM, "Service unavailable."))
def _description_error(self, p):
self._send(objects.Fail(p.sequence, constants.FAIL_FRAME, "Packet which doesn't have a possible description."))
def _error(self, p):
type, val, tb = sys.exc_info()
print ''.join(traceback.format_exception(type, val, tb))
self._send(objects.Fail(p.sequence, constants.FAIL_FRAME, "Packet wasn't valid."))
def OnInit(self):
pass
def OnConnect(self, p):
self._send(objects.OK(p.sequence, "Welcome to py-server!"))
return True
def OnPing(self, p):
self._send(objects.OK(p.sequence, "PONG!"))
return True
class SSLSocket(object):
def __init__(self, s, pem):
global socket_error, socket_fatal
try:
import OpenSSL.crypto
import OpenSSL.SSL as SSL
context = SSL.Context(SSL.SSLv23_METHOD)
context.set_verify(SSL.VERIFY_NONE, lambda x: True)
context.use_certificate(OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem))
context.use_privatekey(OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem))
self.s = SSL.Connection(context, s)
socket_error = tuple([SSL.WantReadError] + list(socket_error))
socket_error = tuple([SSL.WantWriteError] + list(socket_error))
socket_fatal = tuple([SSL.Error] + list(socket_fatal))
print "Found pyopenssl"
return
except ImportError, e:
print "Unable to import pyopenssl"
try:
from tempfile import NamedTemporaryFile
import M2Crypto
import M2Crypto.SSL as SSL
context = SSL.Context('sslv23')
context.set_verify(SSL.verify_none, 4, lambda x: True)
f = NamedTemporaryFile(mode='w+b')
f.write(pem); f.flush()
context.load_cert(f.name)
f.close()
self.s = SSL.Connection(context, s)
socket_fatal = tuple([SSL.SSLError] + list(socket_fatal))
return
except ImportError, e:
print "Unable to import M2Crypto"
raise ImportError("Unable to find SSL library")
def __getattr__(self, key):
return getattr(self.s, key)
def __str__(self):
return object.__str__(self)
class Server:
"""\
Select based, single threaded, polling server.
"""
handler = ServerConnection
# Default debug flag forwarded to handler connections in serve_forever()
debug = False
def __init__(self, address, port=None, sslport=None, ports=None, sslports=None):
if ports is None:
ports = []
if not port is None:
ports.append(port)
if sslports is None:
sslports = []
if not sslport is None:
sslports.append(sslport)
self.ports = ports
self.sslports = sslports
print "Ports", self.ports, self.sslports
self.s = []
for port in ports+sslports:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if port in sslports:
pem = """\
-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBAOTnGJZ1npXzEpchNblVMLOF7Bnv4R+zTrd93nweSEZb6u024o+U
y2Y9s/79f2ytS8csVVxjrFn7Bisw6maXz0MCAwEAAQJAfS7JKpe+l+DsPMyDtgyZ
6sQF4BVo98428XCbuSNSgW8AaWGyqIC1baf0FvNE8OSNrO43Mhqy9C2BG5YQve6K
sQIhAPwHcln2CiPGJ6Rru1SF3MEvC8WImmTrtWVA9IHVNXDbAiEA6IJepK7qvtYc
SoKObjZ+nG0OyGi9b6M9GSO52kWbE7kCIQC7TcV8elB62c+ocLBeVsYDhLVY7vbf
vhWn1KhivVPkNQIhAKaRLwg/n0BT1zSxzyO5un6JyntcPcoKYazu4SgzkWNRAiBn
qEzVAP7TdKkfE2CtVvd2JkGQHQmD7bgOkmhZTIpENg==
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIICTjCCAfigAwIBAgIBADANBgkqhkiG9w0BAQQFADBWMRkwFwYDVQQKExBXZWJt
aW4gV2Vic2VydmVyMRAwDgYDVQQLEwdsZXN0ZXIuMQowCAYDVQQDFAEqMRswGQYJ
KoZIhvcNAQkBFgxyb290QGxlc3Rlci4wHhcNMDQxMDA1MTU0NzQ2WhcNMDkxMDA0
MTU0NzQ2WjBWMRkwFwYDVQQKExBXZWJtaW4gV2Vic2VydmVyMRAwDgYDVQQLEwds
ZXN0ZXIuMQowCAYDVQQDFAEqMRswGQYJKoZIhvcNAQkBFgxyb290QGxlc3Rlci4w
XDANBgkqhkiG9w0BAQEFAANLADBIAkEA5OcYlnWelfMSlyE1uVUws4XsGe/hH7NO
t33efB5IRlvq7Tbij5TLZj2z/v1/bK1LxyxVXGOsWfsGKzDqZpfPQwIDAQABo4Gw
MIGtMB0GA1UdDgQWBBTqK6UJRH7+NpEwgEmJzse910voYTB+BgNVHSMEdzB1gBTq
K6UJRH7+NpEwgEmJzse910voYaFapFgwVjEZMBcGA1UEChMQV2VibWluIFdlYnNl
cnZlcjEQMA4GA1UECxMHbGVzdGVyLjEKMAgGA1UEAxQBKjEbMBkGCSqGSIb3DQEJ
ARYMcm9vdEBsZXN0ZXIuggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEEBQAD
QQBkj8SEY4RAm9WRDtPJ8qPgmIHeiiwDKsJup1ixsbiQOAV7zG/pMCYM4VWVhmR+
trYiuEhD5HiV/W6DM4WBMg+5
-----END CERTIFICATE-----"""
try:
s = SSLSocket(s, pem)
except ImportError:
print "Unable to find a SSL library which I can use :/"
continue
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((address, port))
s.listen(5)
s.setblocking(False)
self.s.append(s)
self.connections = {}
def serve_forever(self):
poller = select.poll()
for s in self.s:
poller.register(s, select.POLLIN)
self.connections[s.fileno()] = s
oldready = []
while True:
# Check if there is any socket to accept or with data
ready = []
errors = []
for fileno, event in poller.poll(100):
print "Event", fileno, event
if event & select.POLLIN:
ready.append(self.connections[fileno])
if event & (select.POLLERR|select.POLLHUP|select.POLLNVAL) > 0:
errors.append(self.connections[fileno])
print ready, oldready, errors, "(", self.connections, ")"
for s in ready+oldready:
if s in self.s:
# Accept a new connection
n, address = s.accept()
print "Accepting connection from %s on %s" % (address, s.getsockname())
connection = self.handler(n, address, debug=self.debug)
poller.register(connection, select.POLLIN|select.POLLERR|select.POLLHUP|select.POLLNVAL)
self.connections[connection.fileno()] = connection
else:
# Poll the connection as it's ready
try:
s.poll()
if s in oldready:
oldready.remove(s)
except socket_error, e:
print e
oldready.append(s)
except socket_fatal, e:
print "fatal fallout", s, e
errors.append(s)
# Cleanup any old sockets
for s in errors:
print "Removing", s
try:
s.s.close()
except Exception, e:
print "Unable to close socket", e
try:
poller.unregister(s)
except Exception, e:
print "Unable to unregister socket", e
del self.connections[s.fileno()]
if __name__ == "__main__":
port = 6924
while True:
try:
s = Server("127.0.0.1", port)
except:
print "This port in use...", port
port += 1
continue
s.serve_forever()
Removed extra prints.
# Python Imports
import select
import socket
import sys
import traceback
# Local imports
import objects
constants = objects.constants
from common import Connection, BUFFER_SIZE
socket_error = (socket.error,)
socket_fatal = (IOError,)
class ServerConnection(Connection):
def __init__(self, s, address, debug=False):
Connection.__init__(self)
self.address = address
self.setup(s, debug=debug, nb=True)
self.poll = self.initalpoll
def initalpoll(self):
"""\
Checks to see if any packets are on the line
"""
print "Inital Poll"
buffer = self.buffered['bytes-received']
buffer.write(self.s.recv(BUFFER_SIZE))
if buffer.peek(2) == "TP":
if self.debug:
print "Got a normal tp connection..."
self.poll = self.tppoll
return self.poll()
if buffer.peek(17).startswith("POST /"):
if self.debug:
print "Got a http connection..."
self.s.recv(len(self.buffer)) # Clear all the already received data...
self.poll = self.httppoll
return self.poll()
# We have received too much data; we need to close this connection now
if buffer.left() > 18:
raise IOError("No valid connection header found...")
def httppoll(self):
print "HTTP Poll"
buffer = self.buffered['bytes-received']
buffer.write(self.s.recv(BUFFER_SIZE))
# FIXME: This is broken
if self.buffer.endswith("\r\n\r\n"):
if self.debug:
print "Finished the http headers..."
print self.buffer
# Send the http headers
self.s.send("HTTP/1.0 200 OK")
self.s.send("Cache-Control: no-cache, private\n")
self.s.send("Content-Type: application/binary\n")
self.s.send("\n")
self.buffer = ""
self.poll = self.tppoll
return self.poll()
# We have received too much data; we need to close this connection now
if buffer.left() > 1024:
raise IOError("HTTP Request was to large!")
def tppoll(self):
print "TP Poll"
# Get the packets
try:
self._recv(-1)
except socket_error, e:
print self, e
sequences = self.buffered['frames-received'].keys()
sequences.sort()
for sequence in sequences:
p = self._recv(sequence)
if not p:
continue
success = False
bases = [p.__class__]
while len(bases) > 0:
c = bases.pop(0)
function = "On" + c.__name__
if hasattr(self, function):
print function
try:
success = getattr(self, function)(p)
except:
type, val, tb = sys.exc_info()
print ''.join(traceback.format_exception(type, val, tb))
break
bases += list(c.__bases__)
if not success:
self._send(objects.Fail(p.sequence, constants.FAIL_PERM, "Service unavailable."))
def _description_error(self, p):
self._send(objects.Fail(p.sequence, constants.FAIL_FRAME, "Packet which doesn't have a possible description."))
def _error(self, p):
type, val, tb = sys.exc_info()
print ''.join(traceback.format_exception(type, val, tb))
self._send(objects.Fail(p.sequence, constants.FAIL_FRAME, "Packet wasn't valid."))
def OnInit(self):
pass
def OnConnect(self, p):
self._send(objects.OK(p.sequence, "Welcome to py-server!"))
return True
def OnPing(self, p):
self._send(objects.OK(p.sequence, "PONG!"))
return True
class SSLSocket(object):
def __init__(self, s, pem):
global socket_error, socket_fatal
try:
import OpenSSL.crypto
import OpenSSL.SSL as SSL
context = SSL.Context(SSL.SSLv23_METHOD)
context.set_verify(SSL.VERIFY_NONE, lambda x: True)
context.use_certificate(OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem))
context.use_privatekey(OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem))
self.s = SSL.Connection(context, s)
socket_error = tuple([SSL.WantReadError] + list(socket_error))
socket_error = tuple([SSL.WantWriteError] + list(socket_error))
socket_fatal = tuple([SSL.Error] + list(socket_fatal))
print "Found pyopenssl"
return
except ImportError, e:
print "Unable to import pyopenssl"
try:
from tempfile import NamedTemporaryFile
import M2Crypto
import M2Crypto.SSL as SSL
context = SSL.Context('sslv23')
context.set_verify(SSL.verify_none, 4, lambda x: True)
f = NamedTemporaryFile(mode='w+b')
f.write(pem); f.flush()
context.load_cert(f.name)
f.close()
self.s = SSL.Connection(context, s)
socket_fatal = tuple([SSL.SSLError] + list(socket_fatal))
return
except ImportError, e:
print "Unable to import M2Crypto"
raise ImportError("Unable to find SSL library")
def __getattr__(self, key):
return getattr(self.s, key)
def __str__(self):
return object.__str__(self)
class Server:
"""\
Select based, single threaded, polling server.
"""
handler = ServerConnection
# Default debug flag forwarded to handler connections in serve_forever()
debug = False
def __init__(self, address, port=None, sslport=None, ports=None, sslports=None):
if ports is None:
ports = []
if not port is None:
ports.append(port)
if sslports is None:
sslports = []
if not sslport is None:
sslports.append(sslport)
self.ports = ports
self.sslports = sslports
print "Ports", self.ports, self.sslports
self.s = []
for port in ports+sslports:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if port in sslports:
pem = """\
-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBAOTnGJZ1npXzEpchNblVMLOF7Bnv4R+zTrd93nweSEZb6u024o+U
y2Y9s/79f2ytS8csVVxjrFn7Bisw6maXz0MCAwEAAQJAfS7JKpe+l+DsPMyDtgyZ
6sQF4BVo98428XCbuSNSgW8AaWGyqIC1baf0FvNE8OSNrO43Mhqy9C2BG5YQve6K
sQIhAPwHcln2CiPGJ6Rru1SF3MEvC8WImmTrtWVA9IHVNXDbAiEA6IJepK7qvtYc
SoKObjZ+nG0OyGi9b6M9GSO52kWbE7kCIQC7TcV8elB62c+ocLBeVsYDhLVY7vbf
vhWn1KhivVPkNQIhAKaRLwg/n0BT1zSxzyO5un6JyntcPcoKYazu4SgzkWNRAiBn
qEzVAP7TdKkfE2CtVvd2JkGQHQmD7bgOkmhZTIpENg==
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIICTjCCAfigAwIBAgIBADANBgkqhkiG9w0BAQQFADBWMRkwFwYDVQQKExBXZWJt
aW4gV2Vic2VydmVyMRAwDgYDVQQLEwdsZXN0ZXIuMQowCAYDVQQDFAEqMRswGQYJ
KoZIhvcNAQkBFgxyb290QGxlc3Rlci4wHhcNMDQxMDA1MTU0NzQ2WhcNMDkxMDA0
MTU0NzQ2WjBWMRkwFwYDVQQKExBXZWJtaW4gV2Vic2VydmVyMRAwDgYDVQQLEwds
ZXN0ZXIuMQowCAYDVQQDFAEqMRswGQYJKoZIhvcNAQkBFgxyb290QGxlc3Rlci4w
XDANBgkqhkiG9w0BAQEFAANLADBIAkEA5OcYlnWelfMSlyE1uVUws4XsGe/hH7NO
t33efB5IRlvq7Tbij5TLZj2z/v1/bK1LxyxVXGOsWfsGKzDqZpfPQwIDAQABo4Gw
MIGtMB0GA1UdDgQWBBTqK6UJRH7+NpEwgEmJzse910voYTB+BgNVHSMEdzB1gBTq
K6UJRH7+NpEwgEmJzse910voYaFapFgwVjEZMBcGA1UEChMQV2VibWluIFdlYnNl
cnZlcjEQMA4GA1UECxMHbGVzdGVyLjEKMAgGA1UEAxQBKjEbMBkGCSqGSIb3DQEJ
ARYMcm9vdEBsZXN0ZXIuggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEEBQAD
QQBkj8SEY4RAm9WRDtPJ8qPgmIHeiiwDKsJup1ixsbiQOAV7zG/pMCYM4VWVhmR+
trYiuEhD5HiV/W6DM4WBMg+5
-----END CERTIFICATE-----"""
try:
s = SSLSocket(s, pem)
except ImportError:
print "Unable to find a SSL library which I can use :/"
continue
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((address, port))
s.listen(5)
s.setblocking(False)
self.s.append(s)
self.connections = {}
def serve_forever(self):
poller = select.poll()
for s in self.s:
poller.register(s, select.POLLIN)
self.connections[s.fileno()] = s
oldready = []
while True:
# Check if there is any socket to accept or with data
ready = []
errors = []
for fileno, event in poller.poll(100):
if event & select.POLLIN:
ready.append(self.connections[fileno])
if event & (select.POLLERR|select.POLLHUP|select.POLLNVAL) > 0:
errors.append(self.connections[fileno])
#print ready, oldready, errors, "(", self.connections, ")"
for s in ready+oldready:
if s in self.s:
# Accept a new connection
n, address = s.accept()
print "Accepting connection from %s on %s" % (address, s.getsockname())
connection = self.handler(n, address, debug=self.debug)
poller.register(connection, select.POLLIN|select.POLLERR|select.POLLHUP|select.POLLNVAL)
self.connections[connection.fileno()] = connection
else:
# Poll the connection as it's ready
try:
s.poll()
if s in oldready:
oldready.remove(s)
except socket_error, e:
print e
oldready.append(s)
except socket_fatal, e:
print "fatal fallout", s, e
errors.append(s)
# Cleanup any old sockets
for s in errors:
print "Removing", s
try:
s.s.close()
except Exception, e:
print "Unable to close socket", e
try:
poller.unregister(s)
except Exception, e:
print "Unable to unregister socket", e
del self.connections[s.fileno()]
if __name__ == "__main__":
port = 6924
while True:
try:
s = Server("127.0.0.1", port)
except:
print "This port in use...", port
port += 1
continue
s.serve_forever()
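For completeness, a hedged sketch (not from the original file) of driving the Server constructor above with explicit plain and SSL port lists; the port numbers are placeholders:
# Illustrative only: one plain TCP port and one SSL port.
server = Server("127.0.0.1", ports=[6923], sslports=[6924])
server.serve_forever()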
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000202"
addresses_name = "parl.2019-12-12/Version 2/ipswich.gov.uk-1573574354000-.CSV"
stations_name = "parl.2019-12-12/Version 2/ipswich.gov.uk-1573574354000-.CSV"
elections = ["parl.2019-12-12"]
allow_station_point_from_postcode = False
def station_record_to_dict(self, record):
# Sikh Temple
if record.polling_place_id == "6642":
record = record._replace(polling_place_uprn="10035058265")
# Castle Hill United Reformed Church
if record.polling_place_id == "6629":
record = record._replace(polling_place_uprn="100091483837")
# St Thomas the Apostle Church
if record.polling_place_id == "6638":
record = record._replace(
polling_place_uprn="10004564181", polling_place_postcode="IP1 5BS"
)
# Stoke Green Baptist Church Hall
if record.polling_place_id == "6827":
record = record._replace(polling_place_uprn="10004567047")
# Ascension Hall
if record.polling_place_id == "6655":
record = record._replace(polling_place_uprn="200001930783")
# Broomhill Library
if record.polling_place_id == "6659":
record = record._replace(polling_place_uprn="10004565452")
# St Mark`s RC Church Hall
if record.polling_place_id == "6834":
record = record._replace(polling_place_easting="614239")
record = record._replace(polling_place_northing="243310")
# Belstead Arms Public House
if record.polling_place_id == "6863":
record = record._replace(
polling_place_uprn="10004566897", polling_place_postcode="IP2 9QU"
)
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"100091636692", # IP42AU -> IP42AT : 39 St Margarets Street, Ipswich
]:
rec = super().address_record_to_dict(record)
rec["accept_suggestion"] = True
return rec
# UPRNs not in addressbase
if record.post_code in ["IP", "IP1", "IP2"]:
return None
if uprn in ["10093555944"]:
return None
return super().address_record_to_dict(record)
Import script for Ipswich (2022-05-05) (closes #4157)
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "IPS"
addresses_name = (
"2022-05-05/2022-02-23T11:56:29.039800/Democracy_Club__05May2022.CSV"
)
stations_name = (
"2022-05-05/2022-02-23T11:56:29.039800/Democracy_Club__05May2022.CSV"
)
elections = ["2022-05-05"]
def station_record_to_dict(self, record):
# Sikh Temple, Guru Nanak Gurdwara
if record.polling_place_id == "7914":
record = record._replace(polling_place_uprn="10035058265")
# Castle Hill United Reformed Church
if record.polling_place_id == "7930":
record = record._replace(polling_place_uprn="100091483837")
# Stoke Green Baptist Church Hall
if record.polling_place_id == "7741":
record = record._replace(polling_place_uprn="10004567047")
# Ascension Hall
if record.polling_place_id == "7748":
record = record._replace(polling_place_uprn="200001930783")
# Broomhill Library
if record.polling_place_id == "7752":
record = record._replace(polling_place_uprn="10004565452")
# St Mark`s RC Church Hall
if record.polling_place_id == "7777":
record = record._replace(polling_place_easting="614239")
record = record._replace(polling_place_northing="243310")
# Belstead Arms Public House
if record.polling_place_id == "7843":
record = record._replace(
polling_place_uprn="10004566897", polling_place_postcode="IP2 9QU"
)
# All Hallows Church Hall
if record.polling_place_id == "7755":
record = record._replace(
polling_place_uprn="10004564821", polling_place_postcode="IP3 0EN"
)
return super().station_record_to_dict(record)
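As background (illustrative only), the station records above behave like namedtuples, so _replace() returns a copy with the named fields overridden; the reduced field set below is hypothetical:
from collections import namedtuple
# Reduced, hypothetical record type used only to illustrate _replace().
Station = namedtuple("Station", ["polling_place_id", "polling_place_uprn"])
rec = Station(polling_place_id="7914", polling_place_uprn="")
rec = rec._replace(polling_place_uprn="10035058265")
print(rec.polling_place_uprn)  # -> 10035058265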
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, render_template
import requests, xmltodict, sr_communication
app = Flask(__name__)
@app.route('/')
def index():
dictionary = sr_communication.getChannels()
return render_template('index.html', channels=dictionary['channel'])
@app.route('/channels/')
def getChannels():
dictionary = sr_communication.getChannels()
return render_template('debugsite.html', name=str(dictionary['channel']))
@app.route('/channels/<channelID>')
def getChannel(channelID):
channel = sr_communication.getChannel(channelID)
return render_template('debugsite.html', name=str(channel))
if __name__ == "__main__":
app.run(debug=True, port=5000)
api v1
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, render_template
import requests, xmltodict, sr_communication, json
app = Flask(__name__)
@app.route('/')
def index():
dictionary = sr_communication.getChannels()
return render_template('index.html', channels=dictionary['channel'])
#API
@app.route('/api/v1.0/channels/', methods=['GET'])
def getChannels():
dictionary = sr_communication.getChannels()
return json.dumps(dictionary)
@app.route('/api/v1.0/channels/<channelID>', methods=['GET'])
def getChannel(channelID):
channel = sr_communication.getChannel(channelID)
return json.dumps(channel)
if __name__ == "__main__":
app.run(debug=True, port=5000)
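A hedged usage sketch (not part of the app): once the server is running, the v1.0 endpoints can be queried with requests; the channel id is a placeholder:
import requests
# Fetch the channel list and a single (placeholder) channel as JSON.
channels = requests.get('http://localhost:5000/api/v1.0/channels/').json()
channel = requests.get('http://localhost:5000/api/v1.0/channels/132').json()
print(channel)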
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
from flask import Flask, render_template, g, abort, redirect
import os, pathlib
from os.path import splitext, dirname, abspath
import sqlite3
app = Flask(__name__)
DATABASE = os.path.join(abspath(dirname(__file__)), 'imagedb.db')
PHOTOS = 'img/photos/'
DRAWINGS = 'img/drawings/'
LOGOS = 'img/logos/'
THUMBNAILS = PHOTOS+'_thumbnails/'
THUMBNAILS_DR = DRAWINGS+'_thumbnails/'
IMGEXT = ['jpg', 'jpeg', 'png', 'svg', 'gif']
IMGEXT = tuple(list(map( lambda x:x.upper(), IMGEXT))+IMGEXT)
import db # can't be "from db import something" because of the circular import
#////////////////////////////////////
## ROUTES ##
# -----------------------------------------------
@app.route('/')#---------------------------------
def index():
# return render_template('index.html.j2')
return redirect('/about')
#------------------------------------------------
@app.route('/contact')#--------------------------
def contact():
return render_template('contact.html.j2')
#------------------------------------------------
@app.route('/faq')#------------------------------
def faq():
return render_template('faq.html.j2')
#------------------------------------------------
@app.route('/artgallery/')#-------------------------
def artgallery():
try:
conn = get_db()
c = conn.cursor()
phs = db.select_all_drawing_thumbnail(c)
phs = [[url.split(DRAWINGS, maxsplit=1)[1],title] for url,title in phs]
except Exception as e:
print('\x1b[31m', e, '\x1b[0m')
abort(500)
finally:
c.close()
conn.close()
return render_template('artgallery.html.j2', photos=phs,THUMBNAILS=THUMBNAILS_DR)
#------------------------------------------------
@app.route('/photogallery/')#---------------------------
def photogallery():
try:
conn = get_db()
c = conn.cursor()
phs = db.select_all_photo_thumbnail(c)
phs = [[url.split(PHOTOS, maxsplit=1)[1],title] for url,title in phs]
except Exception as e:
print('\x1b[31m', e, '\x1b[0m')
abort(500)
finally:
c.close()
conn.close()
return render_template('photogallery.html.j2', photos=phs,THUMBNAILS=THUMBNAILS)
#------------------------------------------------
@app.route('/about')#----------------------------
def about():
logos = getLogos()
return render_template('about.html.j2', techs=logos)
#------------------------------------------------
#------------------------------------------------
@app.route('/photos/<folder>/<img>')#----------------------------
def photos(folder, img):
# security?
imgUrl = '/{}{}/{}'.format(PHOTOS, folder, img)
try:
conn = get_db()
c = conn.cursor()
img = db.select_a_photo(c, imgUrl) #mainPhoto
phs = db.select_all_photo_thumbnail(c)
phs = calc_neighbours(phs, img, 2, func=lambda x: x[0])
phs = [[url.split(PHOTOS, maxsplit=1)[1],title] for url,title in phs]
except Exception as e:
print('\x1b[31m', e, '\x1b[0m')
abort(500)
finally:
c.close()
conn.close()
return render_template('photos.html.j2', img=img, photos=phs,THUMBNAILS=THUMBNAILS)
#------------------------------------------------
@app.route('/drawings/<img>')#----------------------------
def drawings(img):
# security?
imgUrl = '/{}{}'.format(DRAWINGS, img)
try:
conn = get_db()
c = conn.cursor()
img = db.select_a_drawing(c, imgUrl) #mainPhoto
phs = db.select_all_drawing_thumbnail(c)
phs = calc_neighbours(phs, img, 2, func=lambda x: x[0])
phs = [[url.split(DRAWINGS, maxsplit=1)[1],title] for url,title in phs]
except Exception as e:
print('\x1b[31m', e, '\x1b[0m')
abort(500)
finally:
c.close()
conn.close()
return render_template('drawings.html.j2', img=img, photos=phs,THUMBNAILS=THUMBNAILS_DR)
#------------------------------------------------
#ERRORHANDLER------------------------------------
@app.errorhandler(500)#--------------------------
def internal_server_error(err):
print('\x1b[31m', err, '\x1b[0m') # TODO log it somewhere
return app.send_static_file('html/500.html'), 500
@app.errorhandler(404)
def not_found(err):
return render_template('404.html.j2'), 404
#------------------------------------------------
def getLogos():
logos = os.listdir(app.static_folder + '/'+ LOGOS)
logos = [logo for logo in logos if logo.endswith(IMGEXT)]
return [{
'name':splitext(logo)[0],
'link':pathlib.Path(app.static_folder+ '/' + LOGOS+splitext(logo)[0]+'.txt').read_text(),
'filename':logo
} for logo in logos]
def calc_neighbours(ofItems, targetItem, n, func=lambda x:x):
"""Search through ofItems for targetItem.
Returns targetItem together with up to n neighbours on each side, taken from ofItems."""
for i in range(len(ofItems)):
if func(ofItems[i])==func(targetItem):
break # we found our target index
# get the neighbours, handle edge cases
return ofItems[max(0,i-n):min(i+n+1,len(ofItems))]
def get_db():
"""Returns an sqlite3.Connection object stored in g.
Or creates it if doesn't exist yet."""
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
#////////////////////////////////////
if __name__=='__main__':
app.run(
debug=True,
threaded=True
)
Fixed a missing slash in the database path in the server file.
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
from flask import Flask, render_template, g, abort, redirect
import os, pathlib
from os.path import splitext, dirname, abspath
import sqlite3
app = Flask(__name__)
DATABASE = abspath(dirname(__file__))+'/imagedb.db'
PHOTOS = 'img/photos/'
DRAWINGS = 'img/drawings/'
LOGOS = 'img/logos/'
THUMBNAILS = PHOTOS+'_thumbnails/'
THUMBNAILS_DR = DRAWINGS+'_thumbnails/'
IMGEXT = ['jpg', 'jpeg', 'png', 'svg', 'gif']
IMGEXT = tuple(list(map( lambda x:x.upper(), IMGEXT))+IMGEXT)
import db # can't be `from db import ...` because of a circular import
#////////////////////////////////////
## ROUTES ##
# -----------------------------------------------
@app.route('/')#---------------------------------
def index():
# return render_template('index.html.j2')
return redirect('/about')
#------------------------------------------------
@app.route('/contact')#--------------------------
def contact():
return render_template('contact.html.j2')
#------------------------------------------------
@app.route('/faq')#------------------------------
def faq():
return render_template('faq.html.j2')
#------------------------------------------------
@app.route('/artgallery/')#-------------------------
def artgallery():
try:
conn = get_db()
c = conn.cursor()
phs = db.select_all_drawing_thumbnail(c)
phs = [[url.split(DRAWINGS, maxsplit=1)[1],title] for url,title in phs]
except Exception as e:
print('\x1b[31m', e, '\x1b[0m')
abort(500)
finally:
c.close()
conn.close()
return render_template('artgallery.html.j2', photos=phs,THUMBNAILS=THUMBNAILS_DR)
#------------------------------------------------
@app.route('/photogallery/')#---------------------------
def photogallery():
try:
conn = get_db()
c = conn.cursor()
phs = db.select_all_photo_thumbnail(c)
phs = [[url.split(PHOTOS, maxsplit=1)[1],title] for url,title in phs]
except Exception as e:
print('\x1b[31m', e, '\x1b[0m')
abort(500)
finally:
c.close()
conn.close()
return render_template('photogallery.html.j2', photos=phs,THUMBNAILS=THUMBNAILS)
#------------------------------------------------
@app.route('/about')#----------------------------
def about():
logos = getLogos()
return render_template('about.html.j2', techs=logos)
#------------------------------------------------
#------------------------------------------------
@app.route('/photos/<folder>/<img>')#----------------------------
def photos(folder, img):
# security?
imgUrl = '/{}{}/{}'.format(PHOTOS, folder, img)
try:
conn = get_db()
c = conn.cursor()
img = db.select_a_photo(c, imgUrl) #mainPhoto
phs = db.select_all_photo_thumbnail(c)
phs = calc_neighbours(phs, img, 2, func=lambda x: x[0])
phs = [[url.split(PHOTOS, maxsplit=1)[1],title] for url,title in phs]
except Exception as e:
print('\x1b[31m', e, '\x1b[0m')
abort(500)
finally:
c.close()
conn.close()
return render_template('photos.html.j2', img=img, photos=phs,THUMBNAILS=THUMBNAILS)
#------------------------------------------------
@app.route('/drawings/<img>')#----------------------------
def drawings(img):
# security?
imgUrl = '/{}{}'.format(DRAWINGS, img)
try:
conn = get_db()
c = conn.cursor()
img = db.select_a_drawing(c, imgUrl) #mainPhoto
phs = db.select_all_drawing_thumbnail(c)
phs = calc_neighbours(phs, img, 2, func=lambda x: x[0])
phs = [[url.split(DRAWINGS, maxsplit=1)[1],title] for url,title in phs]
except Exception as e:
print('\x1b[31m', e, '\x1b[0m')
abort(500)
finally:
c.close()
conn.close()
return render_template('drawings.html.j2', img=img, photos=phs,THUMBNAILS=THUMBNAILS_DR)
#------------------------------------------------
#ERRORHANDLER------------------------------------
@app.errorhandler(500)#--------------------------
def internal_server_error(err):
print('\x1b[31m', err, '\x1b[0m') # TODO log it somewhere
return app.send_static_file('html/500.html'), 500
@app.errorhandler(404)
def not_found(err):
return render_template('404.html.j2'), 404
#------------------------------------------------
def getLogos():
logos = os.listdir(app.static_folder + '/'+ LOGOS)
logos = [logo for logo in logos if logo.endswith(IMGEXT)]
return [{
'name':splitext(logo)[0],
'link':pathlib.Path(app.static_folder+ '/' + LOGOS+splitext(logo)[0]+'.txt').read_text(),
'filename':logo
} for logo in logos]
def calc_neighbours(ofItems, targetItem, n, func=lambda x:x):
"""Search through ofItems for targetItem.
Returns targetItem together with up to n neighbours on each side, taken from ofItems."""
for i in range(len(ofItems)):
if func(ofItems[i])==func(targetItem):
break # we found our target index
# get the neighbours, handle edge cases
return ofItems[max(0,i-n):min(i+n+1,len(ofItems))]
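# Illustrative example (not part of the original file):
#   calc_neighbours(['a', 'b', 'c', 'd', 'e'], 'c', 1)  ->  ['b', 'c', 'd']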
def get_db():
"""Returns an sqlite3.Connection object stored in g.
Or creates it if doesn't exist yet."""
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
#////////////////////////////////////
if __name__=='__main__':
app.run(
debug=True,
threaded=True
)
|
import cherrypy
import json
from daemonize import Daemonize
from digitalocean import ClientV2
from pprint import pformat
DROPLETS_FILE = "droplets.json"
API_KEY_FILE = "API_KEY"
def get_api_key():
with open(API_KEY_FILE, "r") as f:
return f.read().rstrip()
# Reading from a file every time allows live updates to the droplets list
def get_droplets():
with open(DROPLETS_FILE, "r") as f:
data = f.read()
if not data:
return []
else:
return json.loads(data)
class Root(object):
def strongly_expire(func):
def newfunc(*args, **kwargs):
cherrypy.response.headers["Expires"] = "Sun, 19 Nov 1978 05:00:00 GMT"
cherrypy.response.headers["Cache-Control"] = "no-store, no-cache, must-revalidate, post-check=0, pre-check=0"
cherrypy.response.headers["Pragma"] = "no-cache"
return func(*args, **kwargs)
return newfunc
@cherrypy.expose
def index(self):
return "Usage: /reboot?ip_address=1.2.3.4&password=password"
@cherrypy.expose
@strongly_expire
def reboot(self, ip_address=None, password=None):
if not ip_address or not password:
return "Usage: /reboot?ip_address=1.2.3.4&password=password"
body = ""
for droplet in get_droplets():
if droplet["ip_address"] == ip_address and droplet["password"] == password:
client = ClientV2(token=get_api_key())
ret = client.droplets.power_cycle(droplet_id=droplet["id"])
body += "Power cycling %s:<br><br><pre>\n" % ip_address
body += pformat(ret)
body += "</pre>"
return body
return "Couldn't find that IP address / password combination"
def main():
# Start web server
cherrypy.config.update({"server.socket_host": "0.0.0.0", "server.socket_port": 80})
cherrypy.quickstart(Root(), "/")
if __name__ == "__main__":
pid = "/tmp/ctf-server.pid"
daemon = Daemonize(app="CTF server", pid=pid, action=main)
daemon.start()
Fix cwd issues with daemonize
import cherrypy
import json
import os
from daemonize import Daemonize
from digitalocean import ClientV2
from pprint import pformat
DROPLETS_FILE = os.path.join(os.getcwd(), "droplets.json")
API_KEY_FILE = os.path.join(os.getcwd(), "API_KEY")
def get_api_key():
with open(API_KEY_FILE, "r") as f:
return f.read().rstrip()
# Reading from a file every time allows live updates to the droplets list
def get_droplets():
with open(DROPLETS_FILE, "r") as f:
data = f.read()
if not data:
return []
else:
return json.loads(data)
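# Illustrative droplets.json layout (assumption inferred from the fields read in
# Root.reboot below, not a verbatim copy of the real file):
#   [{"id": 12345, "ip_address": "1.2.3.4", "password": "password"}]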
class Root(object):
def strongly_expire(func):
def newfunc(*args, **kwargs):
cherrypy.response.headers["Expires"] = "Sun, 19 Nov 1978 05:00:00 GMT"
cherrypy.response.headers["Cache-Control"] = "no-store, no-cache, must-revalidate, post-check=0, pre-check=0"
cherrypy.response.headers["Pragma"] = "no-cache"
return func(*args, **kwargs)
return newfunc
@cherrypy.expose
def index(self):
return "Usage: /reboot?ip_address=1.2.3.4&password=password"
@cherrypy.expose
@strongly_expire
def reboot(self, ip_address=None, password=None):
if not ip_address or not password:
return "Usage: /reboot?ip_address=1.2.3.4&password=password"
body = ""
for droplet in get_droplets():
if droplet["ip_address"] == ip_address and droplet["password"] == password:
client = ClientV2(token=get_api_key())
ret = client.droplets.power_cycle(droplet_id=droplet["id"])
body += "Power cycling %s:<br><br><pre>\n" % ip_address
body += pformat(ret)
body += "</pre>"
return body
return "Couldn't find that IP address / password combination"
def main():
# Start web server
cherrypy.config.update({"server.socket_host": "0.0.0.0", "server.socket_port": 80})
cherrypy.quickstart(Root(), "/")
if __name__ == "__main__":
pid = "/tmp/ctf-server.pid"
daemon = Daemonize(app="CTF server", pid=pid, action=main)
daemon.start()
|
# application-specific imports
import flask
from random import randint
from words import dictionary
from flask import request
import json
#21 imports
from two1.wallet import Wallet
from two1.bitserv.flask import Payment
import yaml
app = flask.Flask(__name__)
payment = Payment(app, Wallet())
@app.route("/")
@app.route('/make_password/')
@app.route('/make_password/<int:length>')
@payment.required(1000)
def make_password(length = None):
length = length
if length == None:
length = 5
elif length > 16:
length = 16
dice_pass = {
"password" : []
}
password = ""
for x in range(length):
word_in_numbers = ""
for roll in range (0,5):
number = str(randint(1,6))
word_in_numbers = word_in_numbers + number
word_in_numbers = word_in_numbers
word = dictionary[word_in_numbers]
dice_pass["password"].append(word)
return json.dumps(dice_pass)
@app.route('/manifest')
def manifest():
"""Provide the app manifest to the 21 crawler.
"""
with open('./manifest.yaml', 'r') as f:
manifest = yaml.load(f)
return json.dumps(manifest)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
removing @app.route("/")
# application-specific imports
import flask
from random import randint
from words import dictionary
from flask import request
import json
#21 imports
from two1.wallet import Wallet
from two1.bitserv.flask import Payment
import yaml
app = flask.Flask(__name__)
payment = Payment(app, Wallet())
#@app.route("/")
@app.route('/make_password/')
@app.route('/make_password/<int:length>')
@payment.required(1000)
def make_password(length = None):
length = length
if length == None:
length = 5
elif length > 16:
length = 16
dice_pass = {
"password" : []
}
password = ""
for x in range(length):
word_in_numbers = ""
for roll in range (0,5):
number = str(randint(1,6))
word_in_numbers = word_in_numbers + number
word_in_numbers = word_in_numbers
word = dictionary[word_in_numbers]
dice_pass["password"].append(word)
return json.dumps(dice_pass)
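# Illustrative response (assumption, not part of the original file): a paid call returns JSON such as
#   {"password": ["word1", "word2", "word3", "word4", "word5"]}
# where each word is looked up in the diceware dictionary by five simulated die rolls.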
@app.route('/manifest')
def manifest():
"""Provide the app manifest to the 21 crawler.
"""
with open('./manifest.yaml', 'r') as f:
manifest = yaml.load(f)
return json.dumps(manifest)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000) |
#!/usr/bin/python
import logging
import re
import pychromecast
from twisted.internet import protocol, reactor, endpoints
from twisted.protocols.basic import LineReceiver
import chromecast_controller
import greenscreen_client
# Protocol: Simple line based TCP protocol
#
# chromecast=Name Of Chromecast,channel=Foo,cast=1
# - Set the channel to 'Foo' on the specified Chromecast and start casting.
# chromecast=Name Of Chromecast,cast=0
# - Stop casting on the specified Chromecast.
# chromecast=Name Of Chromecast,channel=Foo
# - Set the channel on the specified Chromecast but don't cast.
class GSCLineHandler(LineReceiver):
RE_CHROMECAST = re.compile("^chromecast=(?P<chromecast>.*)")
RE_CHANNEL = re.compile("^channel=(?P<channel>.*)")
RE_CAST = re.compile("^cast=(?P<cast>[01])$")
delimiter = "\n"
def __init__(self, greenscreen_client, chromecast_controller, app_id):
self._greenscreen_client = greenscreen_client
self._chromecast_controller = chromecast_controller
self._app_id = app_id
def connectionMade(self):
logging.info("Connection from: %s" % str(self.transport.getPeer()))
def lineReceived(self, line):
chromecast = channel = cast = None
line = line.strip()
print "Line:", line
for piece in line.split(","):
chromecast_result = self.RE_CHROMECAST.search(piece)
if chromecast_result:
chromecast = chromecast_result.group('chromecast')
continue
channel_result = self.RE_CHANNEL.search(piece)
if channel_result:
channel = channel_result.group('channel')
continue
cast_result = self.RE_CAST.search(piece)
if cast_result:
cast = bool(int(cast_result.group('cast')))
continue
if not chromecast:
logging.warning("Received command without chromecast name from: %s" % (
str(self.transport.getPeer())))
return
elif not channel and not cast:
logging.warning("Received incomplete command for '%s' from: %s" % (
chromecast, str(self.transport.getPeer())))
return
if channel:
self._greenscreen_client.set_channel_for_chromecast(
chromecast, channel)
if cast is not None:
if cast:
self._chromecast_controller.start_chromecast_app(chromecast, self._app_id)
else:
self._chromecast_controller.stop_chromecast_app(chromecast)
class GSCLineHandlerFactory(protocol.Factory):
protocol = GSCLineHandler
def __init__(self, greenscreen_client, chromecast_controller, app_id):
self._greenscreen_client = greenscreen_client
self._chromecast_controller = chromecast_controller
self._app_id = app_id
def buildProtocol(self, addr):
return GSCLineHandler(
self._greenscreen_client, self._chromecast_controller, self._app_id)
def Serve(port, greenscreen_client, chromecast_controller, app_id):
logging.info("Starting TCP server on port: %i" % port)
reactor.listenTCP(port, GSCLineHandlerFactory(
greenscreen_client, chromecast_controller, app_id))
reactor.run()
Fix command recognition.
#!/usr/bin/python
import logging
import re
import pychromecast
from twisted.internet import protocol, reactor, endpoints
from twisted.protocols.basic import LineReceiver
import chromecast_controller
import greenscreen_client
# Protocol: Simple line based TCP protocol
#
# chromecast=Name Of Chromecast,channel=Foo,cast=1
# - Set the channel to 'Foo' on the specified Chromecast and start casting.
# chromecast=Name Of Chromecast,cast=0
# - Stop casting on the specified Chromecast.
# chromecast=Name Of Chromecast,channel=Foo
# - Set the channel on the specified Chromecast but don't cast.
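# Illustrative test of the protocol (assumed host/port, not part of the original file):
#   echo "chromecast=Living Room,channel=Foo,cast=1" | nc <host> <port>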
class GSCLineHandler(LineReceiver):
RE_CHROMECAST = re.compile("^chromecast=(?P<chromecast>.*)")
RE_CHANNEL = re.compile("^channel=(?P<channel>.*)")
RE_CAST = re.compile("^cast=(?P<cast>[01])$")
delimiter = "\n"
def __init__(self, greenscreen_client, chromecast_controller, app_id):
self._greenscreen_client = greenscreen_client
self._chromecast_controller = chromecast_controller
self._app_id = app_id
def connectionMade(self):
logging.info("Connection from: %s" % str(self.transport.getPeer()))
def lineReceived(self, line):
chromecast = channel = cast = None
line = line.strip()
print "Line:", line
for piece in line.split(","):
chromecast_result = self.RE_CHROMECAST.search(piece)
if chromecast_result:
chromecast = chromecast_result.group('chromecast')
continue
channel_result = self.RE_CHANNEL.search(piece)
if channel_result:
channel = channel_result.group('channel')
continue
cast_result = self.RE_CAST.search(piece)
if cast_result:
cast = bool(int(cast_result.group('cast')))
continue
if chromecast is None:
logging.warning("Received command without chromecast name from: %s" % (
str(self.transport.getPeer())))
return
elif channel is None and cast is None:
logging.warning("Received incomplete command for '%s' from: %s" % (
chromecast, str(self.transport.getPeer())))
return
if channel is not None:
self._greenscreen_client.set_channel_for_chromecast(
chromecast, channel)
if cast is not None:
if cast:
self._chromecast_controller.start_chromecast_app(chromecast, self._app_id)
else:
self._chromecast_controller.stop_chromecast_app(chromecast)
class GSCLineHandlerFactory(protocol.Factory):
protocol = GSCLineHandler
def __init__(self, greenscreen_client, chromecast_controller, app_id):
self._greenscreen_client = greenscreen_client
self._chromecast_controller = chromecast_controller
self._app_id = app_id
def buildProtocol(self, addr):
return GSCLineHandler(
self._greenscreen_client, self._chromecast_controller, self._app_id)
def Serve(port, greenscreen_client, chromecast_controller, app_id):
logging.info("Starting TCP server on port: %i" % port)
reactor.listenTCP(port, GSCLineHandlerFactory(
greenscreen_client, chromecast_controller, app_id))
reactor.run()
|
# -*- coding: utf-8 -*-
import datetime
import json
import os
import time
import flask
import requests
import weather.weather as w
from lib.bibtex_pubs import bibtex_pubs
from lib.bokeh_plot import bokeh_plot
from lib.config import render_template, get_cv_pdfs, STATIC_FOLDER, TEMPLATE_FOLDER, PRESENTATIONS, \
PROJECTS, OWNER, USER, ETC
from lib.utils import dump_json
app = flask.Flask(
__name__,
static_folder=STATIC_FOLDER,
template_folder=TEMPLATE_FOLDER,
)
app.register_blueprint(bokeh_plot)
app.register_blueprint(bibtex_pubs)
@app.route('/')
def index():
return render_template(
'index.html',
title="Welcome to {}'s web page!".format(OWNER),
user=USER,
)
@app.route('/keybase.txt')
def keybase():
with open(os.path.join(STATIC_FOLDER, 'etc', 'keybase.txt'), 'r') as f:
text = f.read()
return flask.Response(text, mimetype='text/xml')
@app.route('/cv')
@app.route('/cv/')
def cv():
data = get_cv_pdfs()
for k in data.keys():
if k in ['cv', 'bio']:
button_type = 'success'
elif k == 'pubs':
button_type = 'warning'
elif k == 'bib':
button_type = 'info'
else:
button_type = 'basic'
data[k]['button_type'] = button_type
return render_template(
'cv.html',
title="{}'s CV".format(OWNER),
data=data,
)
@app.route('/cv/<bib>')
def cv_files(bib=None, as_attachment=True):
data = get_cv_pdfs()
valid_values = []
for k, v in data.items():
valid_values.append(v['orig_name'])
if bib not in valid_values:
return flask.redirect(flask.url_for('cv'))
cv_file = data[bib.lower()]['file']
attachment_filename = os.path.basename(cv_file)
mimetype = 'x-bibtex' if bib == 'bib' else os.path.splitext(cv_file)[1].lstrip('.')  # drop the leading dot so the mimetype reads e.g. application/pdf
return flask.send_file(
cv_file,
as_attachment=as_attachment,
attachment_filename=attachment_filename,
mimetype='application/{}'.format(mimetype),
)
@app.route('/etc')
def etc():
"""Shows a list of interesting projects by other people"""
return render_template(
'etc.html',
title='Useful and interesting projects by other people',
data=ETC,
target='_blank',
)
@app.route('/ip/')
def ip():
ip_info = w.get_external_ip(ip=_remote_address())
return render_template(
'ip.html',
title='Your IP info',
ip_info=ip_info,
time=_time(),
)
@app.route('/myweather')
@app.route('/myweather/')
def my_weather():
remote_addr = _remote_address()
return render_template(
'weather.html',
title='My Weather',
parameter={'name': 'your IP address', 'value': remote_addr},
weather=_get_weather(w.get_city_by_ip(remote_addr)),
time=_time(),
)
@app.route('/weather')
@app.route('/weather/')
@app.route('/weather/<postal>')
def weather(postal=11767):
try:
postal = str(postal)
return render_template(
'weather.html',
title='Weather',
parameter={'name': 'postal', 'value': postal},
weather=_get_weather(w.get_city_by_postal(postal)),
time=_time(),
)
except ValueError as e:
return flask.jsonify({'error': str(e)})
@app.route('/favicon.ico')
def favicon():
"""Routes to favicon.ico file."""
return flask.send_from_directory(
os.path.join(STATIC_FOLDER, 'img'),
'favicon.ico',
mimetype='image/vnd.microsoft.icon',
)
@app.route('/points')
def points():
"""REST API proxying from http://theossrv2.epfl.ch/aiida_assignment2/api/points/"""
url = 'http://theossrv2.epfl.ch/aiida_assignment2/api/points/'
data = requests.get(url=url)
try:
return flask.jsonify(json.loads(data.text))
except:
raise ValueError('Server response cannot be converted to JSON.')
@app.route('/presentations')
def presentations():
"""Shows a list of selected presentations"""
return render_template(
'table.html',
title='Presentations',
data=PRESENTATIONS,
target='_blank',
)
@app.route('/projects')
def projects():
"""Shows a list of selected projects"""
return render_template(
'table.html',
title='Projects',
data=PROJECTS,
target='_blank',
)
@app.route('/robots.txt')
def robots_txt():
# Allow scans by Google robot:
return flask.Response('')
# """Tell robots to go away"""
# return flask.Response(
# 'User-agent: *\nDisallow: /\n',
# mimetype='text/plain',
# )
@app.errorhandler(404)
def page_not_found(e):
return render_template(
'404.html',
title='Page Not Found',
), 404
def _as_attachment(response, content_type, filename):
response.mimetype = content_type
response.headers['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
return response
def _get_weather(location, debug=False):
try:
conditions = w.get_current_conditions(location['Key'], details=True)
except TypeError:
raise ValueError('Conditions cannot be found for the remote address {}'.format(_remote_address()))
w_json = dump_json(conditions)
if debug:
print('Weather: {}'.format(w_json))
fmt = u'{}'
return fmt.format(w.printable_weather(
city=location['EnglishName'],
state=location['AdministrativeArea']['ID'],
postal=location['PrimaryPostalCode'],
conditions=conditions,
no_icons=False,
))
def _remote_address():
if flask.request.headers.getlist('X-Forwarded-For'):
remote_addr = flask.request.headers.getlist('X-Forwarded-For')[0]
else:
remote_addr = flask.request.remote_addr
if remote_addr == '127.0.0.1':
remote_addr = w.get_external_ip()['ip']
return remote_addr
def _time(time_format='%Y-%m-%d %H:%M:%S'):
timestamp = time.time()
return datetime.datetime.fromtimestamp(timestamp=timestamp).strftime(time_format)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Web server')
parser.add_argument('-d', '--debug', dest='debug', action='store_true',
help='debug mode')
args = parser.parse_args()
host = '127.0.0.1' if args.debug else '0.0.0.0'
app.run(
debug=args.debug,
host=host,
)
Another mimetype fix for keybase.txt
# -*- coding: utf-8 -*-
import datetime
import json
import os
import time
import flask
import requests
import weather.weather as w
from lib.bibtex_pubs import bibtex_pubs
from lib.bokeh_plot import bokeh_plot
from lib.config import render_template, get_cv_pdfs, STATIC_FOLDER, TEMPLATE_FOLDER, PRESENTATIONS, \
PROJECTS, OWNER, USER, ETC
from lib.utils import dump_json
app = flask.Flask(
__name__,
static_folder=STATIC_FOLDER,
template_folder=TEMPLATE_FOLDER,
)
app.register_blueprint(bokeh_plot)
app.register_blueprint(bibtex_pubs)
@app.route('/')
def index():
return render_template(
'index.html',
title="Welcome to {}'s web page!".format(OWNER),
user=USER,
)
@app.route('/keybase.txt')
def keybase():
with open(os.path.join(STATIC_FOLDER, 'etc', 'keybase.txt'), 'r') as f:
text = f.read()
return flask.Response(text, mimetype='text/plain')
@app.route('/cv')
@app.route('/cv/')
def cv():
data = get_cv_pdfs()
for k in data.keys():
if k in ['cv', 'bio']:
button_type = 'success'
elif k == 'pubs':
button_type = 'warning'
elif k == 'bib':
button_type = 'info'
else:
button_type = 'basic'
data[k]['button_type'] = button_type
return render_template(
'cv.html',
title="{}'s CV".format(OWNER),
data=data,
)
@app.route('/cv/<bib>')
def cv_files(bib=None, as_attachment=True):
data = get_cv_pdfs()
valid_values = []
for k, v in data.items():
valid_values.append(v['orig_name'])
if bib not in valid_values:
return flask.redirect(flask.url_for('cv'))
cv_file = data[bib.lower()]['file']
attachment_filename = os.path.basename(cv_file)
mimetype = 'x-bibtex' if bib == 'bib' else os.path.splitext(cv_file)[1].lstrip('.')  # drop the leading dot so the mimetype reads e.g. application/pdf
return flask.send_file(
cv_file,
as_attachment=as_attachment,
attachment_filename=attachment_filename,
mimetype='application/{}'.format(mimetype),
)
@app.route('/etc')
def etc():
"""Shows a list of interesting projects by other people"""
return render_template(
'etc.html',
title='Useful and interesting projects by other people',
data=ETC,
target='_blank',
)
@app.route('/ip/')
def ip():
ip_info = w.get_external_ip(ip=_remote_address())
return render_template(
'ip.html',
title='Your IP info',
ip_info=ip_info,
time=_time(),
)
@app.route('/myweather')
@app.route('/myweather/')
def my_weather():
remote_addr = _remote_address()
return render_template(
'weather.html',
title='My Weather',
parameter={'name': 'your IP address', 'value': remote_addr},
weather=_get_weather(w.get_city_by_ip(remote_addr)),
time=_time(),
)
@app.route('/weather')
@app.route('/weather/')
@app.route('/weather/<postal>')
def weather(postal=11767):
try:
postal = str(postal)
return render_template(
'weather.html',
title='Weather',
parameter={'name': 'postal', 'value': postal},
weather=_get_weather(w.get_city_by_postal(postal)),
time=_time(),
)
except ValueError as e:
return flask.jsonify({'error': str(e)})
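# Illustrative requests (not part of the original file):
#   GET /weather          -> weather for the default postal code 11767
#   GET /weather/<postal> -> weather for the given postal code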
@app.route('/favicon.ico')
def favicon():
"""Routes to favicon.ico file."""
return flask.send_from_directory(
os.path.join(STATIC_FOLDER, 'img'),
'favicon.ico',
mimetype='image/vnd.microsoft.icon',
)
@app.route('/points')
def points():
"""REST API proxying from http://theossrv2.epfl.ch/aiida_assignment2/api/points/"""
url = 'http://theossrv2.epfl.ch/aiida_assignment2/api/points/'
data = requests.get(url=url)
try:
return flask.jsonify(json.loads(data.text))
except:
raise ValueError('Server response cannot be converted to JSON.')
@app.route('/presentations')
def presentations():
"""Shows a list of selected presentations"""
return render_template(
'table.html',
title='Presentations',
data=PRESENTATIONS,
target='_blank',
)
@app.route('/projects')
def projects():
"""Shows a list of selected projects"""
return render_template(
'table.html',
title='Projects',
data=PROJECTS,
target='_blank',
)
@app.route('/robots.txt')
def robots_txt():
# Allow scans by Google robot:
return flask.Response('')
# """Tell robots to go away"""
# return flask.Response(
# 'User-agent: *\nDisallow: /\n',
# mimetype='text/plain',
# )
@app.errorhandler(404)
def page_not_found(e):
return render_template(
'404.html',
title='Page Not Found',
), 404
def _as_attachment(response, content_type, filename):
response.mimetype = content_type
response.headers['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
return response
def _get_weather(location, debug=False):
try:
conditions = w.get_current_conditions(location['Key'], details=True)
except TypeError:
raise ValueError('Conditions cannot be found for the remote address {}'.format(_remote_address()))
w_json = dump_json(conditions)
if debug:
print('Weather: {}'.format(w_json))
fmt = u'{}'
return fmt.format(w.printable_weather(
city=location['EnglishName'],
state=location['AdministrativeArea']['ID'],
postal=location['PrimaryPostalCode'],
conditions=conditions,
no_icons=False,
))
def _remote_address():
if flask.request.headers.getlist('X-Forwarded-For'):
remote_addr = flask.request.headers.getlist('X-Forwarded-For')[0]
else:
remote_addr = flask.request.remote_addr
if remote_addr == '127.0.0.1':
remote_addr = w.get_external_ip()['ip']
return remote_addr
def _time(time_format='%Y-%m-%d %H:%M:%S'):
timestamp = time.time()
return datetime.datetime.fromtimestamp(timestamp=timestamp).strftime(time_format)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Web server')
parser.add_argument('-d', '--debug', dest='debug', action='store_true',
help='debug mode')
args = parser.parse_args()
host = '127.0.0.1' if args.debug else '0.0.0.0'
app.run(
debug=args.debug,
host=host,
)
|
#!/usr/bin/env python
from flask import Blueprint
from flask import Flask
from flask import redirect
from flask import render_template
from flask.ext.restful import Api
from flask_restful_swagger import swagger
from pal.engine import Engine
from pal.nlp.standard_nlp import StandardNLP
from pal.nlp.feature_extractor import FeatureExtractor
from pal.nlp.keyword_finder import KeywordFinder
from pal.nlp.noun_finder import NounFinder
from pal.nlp.question_classifier import QuestionClassifier
from pal.nlp.question_detector import QuestionDetector
from pal.nlp.tense_classifier import TenseClassifier
app = Flask(__name__)
pal_blueprint = Blueprint('pal_blueprint', __name__)
api_pal = swagger.docs(Api(pal_blueprint), apiVersion='0.1',
basePath='http://localhost:5000',
resourcePath='/',
produces=["application/json", "text/html"],
api_spec_url='/spec')
api_pal.add_resource(Engine, '/pal')
api_pal.add_resource(StandardNLP, '/preprocess')
api_pal.add_resource(FeatureExtractor, '/features')
api_pal.add_resource(KeywordFinder, '/keywords')
api_pal.add_resource(NounFinder, '/nouns')
api_pal.add_resource(QuestionClassifier, '/qtype')
api_pal.add_resource(QuestionDetector, '/is_question')
api_pal.add_resource(TenseClassifier, '/tense')
@app.route('/docs')
def docs():
return redirect('/static/docs.html')
@app.route('/')
def index():
return render_template('home.html')
# main doesn't run in wsgi
app.register_blueprint(pal_blueprint, url_prefix='/api')
if __name__ == '__main__':
# app.register_blueprint(pal_blueprint, url_prefix='/api')
app.run(debug=True)
Turn on debugging in WSGI
#!/usr/bin/env python
from flask import Blueprint
from flask import Flask
from flask import redirect
from flask import render_template
from flask.ext.restful import Api
from flask_restful_swagger import swagger
from pal.engine import Engine
from pal.nlp.standard_nlp import StandardNLP
from pal.nlp.feature_extractor import FeatureExtractor
from pal.nlp.keyword_finder import KeywordFinder
from pal.nlp.noun_finder import NounFinder
from pal.nlp.question_classifier import QuestionClassifier
from pal.nlp.question_detector import QuestionDetector
from pal.nlp.tense_classifier import TenseClassifier
app = Flask(__name__)
app.config['DEBUG'] = True
pal_blueprint = Blueprint('pal_blueprint', __name__)
api_pal = swagger.docs(Api(pal_blueprint), apiVersion='0.1',
basePath='http://localhost:5000',
resourcePath='/',
produces=["application/json", "text/html"],
api_spec_url='/spec')
api_pal.add_resource(Engine, '/pal')
api_pal.add_resource(StandardNLP, '/preprocess')
api_pal.add_resource(FeatureExtractor, '/features')
api_pal.add_resource(KeywordFinder, '/keywords')
api_pal.add_resource(NounFinder, '/nouns')
api_pal.add_resource(QuestionClassifier, '/qtype')
api_pal.add_resource(QuestionDetector, '/is_question')
api_pal.add_resource(TenseClassifier, '/tense')
@app.route('/docs')
def docs():
return redirect('/static/docs.html')
@app.route('/')
def index():
return render_template('home.html')
# main doesn't run in wsgi
app.register_blueprint(pal_blueprint, url_prefix='/api')
if __name__ == '__main__':
# app.register_blueprint(pal_blueprint, url_prefix='/api')
app.run(debug=True)
|
import frame as fm
from settings import ConnectReturn as CR
from settings import TYPE
import socket, select
from threading import Timer, Thread
from frame import Frame
class Broker(Frame):
def __init__(self, host = "127.0.0.1", port = 8888):
super(Broker, self).__init__()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serv.bind((host, port))
self.host = host
self.port = port
self.clients = {}
self.topics = {}
self.wills = {}
self.clientSubscribe = {}
self.clientIDs = []
# NOTICE: keys of topics and clientSubscribe should be synchronized
self.serv.listen(1)
def worker(self, client):
while True:
data = client.recv(1 << 16)
self.parseFrame(data, client)
if client.connection:
client.restartTimer()
else:
break
def runServer(self):
while True:
con, addr = self.serv.accept()
self.clients[addr] = Client(self, addr, con)
thread = Thread(target = self.worker, args = (self.clients[addr], ))
#thread.setDaemon(True) # TODO: if this is daemon, error handling should be implemented in client side
thread.start()
def setClient(self, client, cliID, name, passwd, will, keepAlive, clean):
if not cliID:
if not clean:
client.send(self.makeFrame(TYPE.CONNACK, 0, 0, 0, code = CR.R_ID_REJECTED))
client.disconnect()
return
cliID = "unknown" + str(len(self.clients)) # TODO: cliID should be determined in here if no cliID was delivered.
if cliID in self.clientIDs:
#TODO: resume session here
if clean:
self.clientIDs.remove(cliID)
if not clean:
self.clientIDs.append(cliID)
client.setInfo(cliID, name, passwd, will, keepAlive, clean)
def setTopic(self, client, topicQoS, messageID):
client.setTopic(topicQoS)
if self.topics.has_key(topicQoS[0]) and self.topics[topicQoS[0]]:
# this is 'retain'
frame = self.makeFrame(TYPE.PUBLISH, 0, topicQoS[1], 1, topic = topicQoS[0],
message = self.topics[topicQoS[0]], messageID = messageID)
client.send(frame)
if self.clientSubscribe.has_key(topicQoS[0]):
self.clientSubscribe[topicQoS[0]].append([client.addr, topicQoS[1]])
else:
self.clientSubscribe[topicQoS[0]] = [[client.addr, topicQoS[1]]]
def unsetWill(self):
#when
pass
def sendWill(self, frame):
pass # send willFrame to clients ?
def unsetTopic(self, client, topic):
# not cool
client.unsetTopic(topic)
self.clientSubscribe[topic].remove(self.clientSubscribe[topic][[a[0] for a in self.clientSubscribe[topic]].index(client.addr)])
def disconnect(self, client):
# when get DISCONNECT packet from client
client.connection = False
client.sock.close()
client.timer.cancel()
if client.clean:
# TODO: correct ?
for topic in client.subscribe:
self.unsetTopic(client, topic)
self.clients.pop(client.addr)
print("disconnect")
def publish(self, topic, message, messageID = 1, retain = 0):
if self.clientSubscribe.has_key(topic):
for client in self.clientSubscribe[topic]:
frame = self.makeFrame(TYPE.PUBLISH, 0, client[1], 0, topic = topic,
message = message, messageID = messageID)
self.clients[client[0]].send(frame)
else:
self.clientSubscribe[topic] = []
self.topics[topic] = ""
if retain:
self.topics[topic] = message #TODO: QoS should also be saved
class Client():
def __init__(self, server, addr, sock):
self.server = server
self.addr = addr
self.sock = sock
self.connection = True
self.will = None
def setInfo(self, cliID, name = "", passwd = "", will = {}, keepAlive = 2, clean = 1):
self.cliID = cliID
self.name = name
self.passwd = passwd
self.will = will
self.keepAlive = keepAlive
self.timer = Timer(keepAlive * 1.5, self.disconnect)
self.subscribe = []
self.clean = clean
def sendWill(self, frame):
self.server.sendWill(frame)
def disconnect(self):
# when no ping packet came within keepAlive * 1.5 seconds
self.connection = False
if self.will:
# TODO: send message if there is will
frame = self.server.makeFrame(TYPE.PUBLISH, 0, self.will["QoS"], self.will["retain"],
topic = self.will["topic"], message = self.will["message"], messageID = 1)
self.sendWill(frame)
self.sock.close()
self.server.clients.pop(self.addr)
print("disconnect")
def setTopic(self, topicQoS):
self.subscribe.append(topicQoS)
def unsetTopic(self, topic):
self.subscribe.remove(self.subscribe[[t[0] for t in self.subscribe].index(topic)])
def recv(self, num):
return self.sock.recv(num)
def send(self, frame):
self.sock.send(frame)
def restartTimer(self):
self.timer.cancel()
self.timer = Timer(self.keepAlive * 1.5, self.disconnect)
self.timer.start()
Implement details of the will message
import frame as fm
from settings import ConnectReturn as CR
from settings import TYPE
import socket, select
from threading import Timer, Thread
from frame import Frame
class Broker(Frame):
def __init__(self, host = "127.0.0.1", port = 8888):
super(Broker, self).__init__()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serv.bind((host, port))
self.host = host
self.port = port
self.clients = {}
self.topics = {}
self.wills = {}
self.clientSubscribe = {}
self.clientIDs = []
# NOTICE: keys of topics and clientSubscribe should be synchronized
self.serv.listen(1)
def worker(self, client):
while True:
data = client.recv(1 << 16)
self.parseFrame(data, client)
if client.connection:
client.restartTimer()
else:
break
def runServer(self):
while True:
con, addr = self.serv.accept()
self.clients[addr] = Client(self, addr, con)
thread = Thread(target = self.worker, args = (self.clients[addr], ))
#thread.setDaemon(True) # TODO: if this is daemon, error handling should be implemented in client side
thread.start()
def setClient(self, client, cliID, name, passwd, will, keepAlive, clean):
if not cliID:
if not clean:
client.send(self.makeFrame(TYPE.CONNACK, 0, 0, 0, code = CR.R_ID_REJECTED))
client.disconnect()
return
cliID = "unknown" + str(len(self.clients)) # TODO: cliID should be determined in here if no cliID was delivered.
if cliID in self.clientIDs:
#TODO: resume session here
if clean:
self.clientIDs.remove(cliID)
if not clean:
self.clientIDs.append(cliID)
client.setInfo(cliID, name, passwd, will, keepAlive, clean)
def setTopic(self, client, topicQoS, messageID):
client.setTopic(topicQoS)
if self.topics.has_key(topicQoS[0]) and self.topics[topicQoS[0]]:
# this is 'retain'
frame = self.makeFrame(TYPE.PUBLISH, 0, topicQoS[1], 1, topic = topicQoS[0],
message = self.topics[topicQoS[0]], messageID = messageID)
client.send(frame)
if self.clientSubscribe.has_key(topicQoS[0]):
self.clientSubscribe[topicQoS[0]].append([client.addr, topicQoS[1]])
else:
self.clientSubscribe[topicQoS[0]] = [[client.addr, topicQoS[1]]]
def unsetTopic(self, client, topic):
# not cool
client.unsetTopic(topic)
self.clientSubscribe[topic].remove(self.clientSubscribe[topic][[a[0] for a in self.clientSubscribe[topic]].index(client.addr)])
def disconnect(self, client):
# when get DISCONNECT packet from client
client.connection = False
client.sock.close()
client.timer.cancel()
if client.clean:
# TODO: correct ?
for topic in client.subscribe:
self.unsetTopic(client, topic)
self.clients.pop(client.addr)
print("disconnect")
def publish(self, topic, message, messageID = 1, retain = 0):
if self.clientSubscribe.has_key(topic):
for client in self.clientSubscribe[topic]:
frame = self.makeFrame(TYPE.PUBLISH, 0, client[1], 0, topic = topic,
message = message, messageID = messageID)
self.clients[client[0]].send(frame)
else:
self.clientSubscribe[topic] = []
self.topics[topic] = ""
if retain:
self.topics[topic] = message #TODO: QoS should also be saved
class Client():
def __init__(self, server, addr, sock):
self.server = server
self.addr = addr
self.sock = sock
self.connection = True
self.will = None
def setInfo(self, cliID, name = "", passwd = "", will = {}, keepAlive = 2, clean = 1):
self.cliID = cliID
self.name = name
self.passwd = passwd
self.will = will
self.keepAlive = keepAlive
self.timer = Timer(keepAlive * 1.5, self.disconnect)
self.subscribe = []
self.clean = clean
def sendWill(self):
frame = self.server.makeFrame(TYPE.PUBLISH, 0, self.will["QoS"], self.will["retain"],
topic = self.will["topic"], message = self.will["message"], messageID = 1)
self.send(frame)
def disconnect(self):
# when no ping packet came within keepAlive * 1.5 seconds
self.connection = False
if self.will:
self.sendWill()
self.sock.close()
self.server.clients.pop(self.addr)
print("disconnect")
def setTopic(self, topicQoS):
self.subscribe.append(topicQoS)
def unsetTopic(self, topic):
self.subscribe.remove(self.subscribe[[t[0] for t in self.subscribe].index(topic)])
def recv(self, num):
return self.sock.recv(num)
def send(self, frame):
self.sock.send(frame)
def restartTimer(self):
self.timer.cancel()
self.timer = Timer(self.keepAlive * 1.5, self.disconnect)
self.timer.start()
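# Illustrative usage (assumption, not part of the original file):
#   broker = Broker()   # listens on 127.0.0.1:8888 by default
#   broker.runServer()  # accepts clients and spawns a worker thread per connection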
|
#!/usr/bin/env python
from contextlib import contextmanager
from os.path import join
from shutil import rmtree
from StringIO import StringIO
import tarfile
from tempfile import mkdtemp
import docker
from requests.exceptions import ReadTimeout
from flask import Flask, request, Response
from werkzeug import secure_filename
TIMEOUT = 60
app = Flask(__name__)
c = docker.Client(base_url='unix://var/run/docker.sock')
@contextmanager
def mktmpdir():
try:
tmpdir = mkdtemp()
yield tmpdir
finally:
rmtree(tmpdir, ignore_errors=True)
class BuildException(Exception):
def __init__(self, status, log):
self.msg = 'Build failed with %d:\n%s' % (status, log)
def __str__(self):
return self.msg
def docker_build(archivedir, filename):
try:
container = c.create_container(image='doc',
volumes=['/tmp/archivedir'],
network_disabled=True,
command='./build %s' % filename)
#command='false')
#command='xxx')
#command='sleep 60'); TIMEOUT=1
c.start(container, binds={archivedir: {'bind': '/tmp/archivedir',
'ro': True}})
result = c.wait(container, timeout=TIMEOUT)
log = c.logs(container, stdout=True, stderr=True)
print log
if result != 0:
raise BuildException(result, log)
target = c.copy(container, '/tmp/%s' % filename)
tar = tarfile.open(fileobj=StringIO(target.read()))
return tar.extractfile(filename)
finally:
c.remove_container(container, force=True)
@app.route('/build/', methods=['GET'])
def build_get():
return app.send_static_file('index.html')
@app.route('/build/', methods=['POST'])
@app.route('/build/<filename>', methods=['POST'])
def build(filename=None):
filename = request.form.get('filename', filename)
archive = request.files['archive']
if not filename:
return 'Missing filename', 400
if not archive:
return 'Missing archive', 400
with mktmpdir() as tempdir:
archive.save(join(tempdir, secure_filename(archive.filename)))
try:
response = Response(docker_build(tempdir, filename))
response.mimetype = 'application/octet-stream'
response.headers['Content-Disposition'] = 'attachment; filename="%s"' % filename
return response
except docker.errors.APIError as e:
if 'could not find the file' in e.explanation.lower():
err = 'Could not find file %s' % filename
return err, 400
else:
return e.explanation, 500
except ReadTimeout as e:
err = ('Timeout: Build did not complete after %d seconds' %
TIMEOUT)
return err, 400
except BuildException as e:
return str(e), 400
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True, debug=False)
#import sys
#with mktmpdir() as tempdir:
# shutil.copy(sys.argv[1], tempdir)
# print docker_build(tempdir, sys.argv[2])
Add auth
#!/usr/bin/env python
from contextlib import contextmanager
from functools import wraps
from os.path import join
from shutil import rmtree
from StringIO import StringIO
import tarfile
from tempfile import mkdtemp
import docker
from requests.exceptions import ReadTimeout
from flask import Flask, request, Response
from werkzeug import secure_filename
TIMEOUT = 60
app = Flask(__name__)
c = docker.Client(base_url='unix://var/run/docker.sock')
@contextmanager
def mktmpdir():
try:
tmpdir = mkdtemp()
yield tmpdir
finally:
rmtree(tmpdir, ignore_errors=True)
class BuildException(Exception):
def __init__(self, status, log):
self.msg = 'Build failed with %d:\n%s' % (status, log)
def __str__(self):
return self.msg
def docker_build(archivedir, filename):
try:
container = c.create_container(image='doc',
volumes=['/tmp/archivedir'],
network_disabled=True,
command='./build %s' % filename)
#command='false')
#command='xxx')
#command='sleep 60'); TIMEOUT=1
c.start(container, binds={archivedir: {'bind': '/tmp/archivedir',
'ro': True}})
result = c.wait(container, timeout=TIMEOUT)
log = c.logs(container, stdout=True, stderr=True)
print log
if result != 0:
raise BuildException(result, log)
target = c.copy(container, '/tmp/%s' % filename)
tar = tarfile.open(fileobj=StringIO(target.read()))
return tar.extractfile(filename)
finally:
c.remove_container(container, force=True)
@app.route('/build/', methods=['GET'])
def build_get():
return app.send_static_file('index.html')
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
return username == 'admin' and password == 'secret'
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
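# Illustrative request against the protected endpoint below (assumed filenames, not part of the original file):
#   curl -u admin:secret -F archive=@src.tar.gz -F filename=out.pdf http://<host>/build/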
@app.route('/build/', methods=['POST'])
@app.route('/build/<filename>', methods=['POST'])
@requires_auth
def build(filename=None):
filename = request.form.get('filename', filename)
archive = request.files['archive']
if not filename:
return 'Missing filename', 400
if not archive:
return 'Missing archive', 400
with mktmpdir() as tempdir:
archive.save(join(tempdir, secure_filename(archive.filename)))
try:
response = Response(docker_build(tempdir, filename))
response.mimetype = 'application/octet-stream'
cd = 'attachment; filename="%s"' % filename
response.headers['Content-Disposition'] = cd
return response
except docker.errors.APIError as e:
if 'could not find the file' in e.explanation.lower():
err = 'Could not find file %s' % filename
return err, 400
else:
return e.explanation, 500
except ReadTimeout as e:
err = ('Timeout: Build did not complete after %d seconds' %
TIMEOUT)
return err, 400
except BuildException as e:
return str(e), 400
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True, debug=True)
#import sys
#with mktmpdir() as tempdir:
# shutil.copy(sys.argv[1], tempdir)
# print docker_build(tempdir, sys.argv[2])
|
import flask
import json
import collections
import random
import flask.ext.socketio as socketio
import time
import edgy
import database
import settings
class keydefaultdict(collections.defaultdict):
"""collections.defaultdict except the key is passed to the default_factory
I got this off Stack Overflow: http://stackoverflow.com/a/2912455/2002307
"""
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(key)
return ret
def make_humane_gibberish(length):
"""Generate a meaningless but human-friendly string.
Characters are drawn from an alphabet in which no two characters look alike.
Easily confused characters, such as '1' and 'l', are excluded.
"""
result = ''
for i in range(length):
result += random.choice('ACEFHJKNPQRTVXY34869')
return result
def time_current():
"""Returns the number of second since the epoch."""
return int(time.time())
def time_breakdown(t):
"""Takes time as seconds-since epoch and breaks it down into human-friendly values."""
t = max(0, t)
return {
'seconds': t % 60,
'minutes': (t // 60) % 60,
'hours': (t // 3600) % 24,
'days': t // 86400
}
class Whiteboard:
"""Object representing a single whiteboard.
This interfaces with the database on the backend.
has_loaded :: whether the contents have been loaded
layers :: the list of actions that have been performed on the whiteboard
timestamp :: the time at which the whiteboard was last modified
permissions :: string, either 'open', 'protected', or 'private'
key :: string used to identify who has access to the whiteboard
owner_key :: string used to identify who owns (created) the whiteboard
"""
def __init__(self, name):
"""Initialise a whiteboard object
Initially, the contents of the whiteboard have not been loaded.
"""
self.has_loaded = False
self.layers = []
self.timestamp = 0
self.permissions = 'open'
self.key = ''
self.owner_key = ''
self.name = name
def ensure_loaded(self):
"""Load the whiteboard's contents if they haven't been already"""
if not self.has_loaded:
self.load_everything()
self.save_everything()
def load_everything(self):
"""Load all the data from the database"""
data = database.load(self.name)
if data:
# Using get will allow backwards compatibility in the future
self.key = data.get('key', '')
self.owner_key = data.get('owner_key', '')
self.layers = data.get('layers', [])
self.timestamp = data.get('timestamp', time_current())
self.has_loaded = True
def save_everything(self):
"""Save all the data to the database"""
self.ensure_loaded()
payload = {
'layers': self.layers,
'key': self.key,
'owner_key': self.owner_key,
'timestamp': self.timestamp
}
database.rewrite(self.name, payload)
def update_time(self):
"""Change the modification timestamp to the current time"""
self.timestamp = time_current()
def full_image(self):
"""Returns a copy of the whiteboard contents, for sending to the cient"""
self.ensure_loaded()
self.update_time()
return self.layers[:]
def add_action(self, action):
"""Add a paint action to the whiteboard"""
self.ensure_loaded()
self.update_time()
self.layers.append(action)
database.action_push(self.name, action, self.timestamp)
def undo_action(self, action):
"""Remove a paint action from the whiteboard"""
self.ensure_loaded()
self.update_time()
self.layers = [i for i in self.layers if i['action_id'] != action]
database.action_remove(self.name, action, self.timestamp)
def make_protected(self):
"""Set the whiteboard to be 'protected'
Will regenerate keys even if the whiteboard is already protected"""
self.ensure_loaded()
self.permissions = 'protected'
self.key = make_humane_gibberish(6)
self.owner_key = make_humane_gibberish(30)
self.save_everything()
def make_private(self):
"""Set the whiteboard to be 'private'
Will regenerate keys even if the whiteboard is already private"""
self.ensure_loaded()
self.permissions = 'private'
self.key = make_humane_gibberish(6)
self.owner_key = make_humane_gibberish(30)
self.save_everything()
def unlock(self):
"""Sets the whiteboard to be publically accessible (by those with the link)"""
self.ensure_loaded()
self.permissions = 'open'
self.save_everything()
def may_view(self, key):
"""Checks if someone with the given key may view the whiteboard"""
self.ensure_loaded()
return self.permissions in ['open', 'protected'] or key in [self.key, self.owner_key]
def may_edit(self, key):
"""Checks if someone with the given key may edit the whiteboard"""
self.ensure_loaded()
return self.permissions == 'open' or key in [self.key, self.owner_key]
# Create a dictionary of whiteboards and load the whiteboard metadata
whiteboards = keydefaultdict(lambda name: Whiteboard(name))
for i in database.load_meta():
whiteboards[i['name']].timestamp = i['timestamp']
# Create the flask server and the socket.io handler
app = flask.Flask(__name__)
app.debug = settings.get('debug')
sock = socketio.SocketIO(app)
def make_board_id():
"""Generates an unused board id"""
attempts = 0
board_id = make_humane_gibberish(4)
# Every time a clash occurs, increase the length of the ID by one
# in order to avoid problems with the birthday paradox.
# Starting with a short ID keeps the links human-readable.
while board_id in whiteboards:
board_id = make_humane_gibberish(attempts + 4)
attempts += 1
return board_id
def make_board(permissions = 'open', board_id = None):
"""Creates a new board with specifc permissions"""
board_id = board_id or make_board_id()
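# Accessing whiteboards[board_id] makes the keydefaultdict create the board if it does not exist yet.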
whiteboards[board_id]
if permissions == 'protected':
whiteboards[board_id].make_protected()
if permissions == 'private':
whiteboards[board_id].make_private()
return (board_id, whiteboards[board_id].owner_key)
# Set up routing for the information pages (home, about, etc...)
@app.route('/')
def serve_index():
return flask.render_template('index.tpl')
@app.route('/about')
def serve_about():
return flask.render_template('about.tpl')
@app.route('/docs')
def serve_docs():
return flask.render_template('docs.tpl')
@app.route('/legal')
def serve_legal():
return flask.render_template('legal.tpl')
# URLs that create new whiteboards. They create a new whiteboard and
# then automatically redirect the user there.
@app.route('/new')
def server_board_new():
board_id, key = make_board()
return flask.redirect('/board/' + board_id)
@app.route('/new/protected')
def server_board_new_protected():
board_id, key = make_board(permissions = 'protected')
response = flask.make_response(flask.redirect('/board/' + board_id))
response.set_cookie('key_' + board_id, key)
return response
@app.route('/new/private')
def server_board_new_private():
board_id, key = make_board(permissions = 'private')
response = flask.make_response(flask.redirect('/board/' + board_id))
response.set_cookie('key_' + board_id, key)
return response
# Serves up a list of existing whiteboards.
# Used for debugging purposes only.
# TODO: Add a password to this page, don't let normal users see it.
@app.route('/listing')
def serve_listing():
if not settings.get('debug'):
flask.abort(403)
boards = []
for i in whiteboards:
time_diff = time_current() - whiteboards[i].timestamp
timeparts = time_breakdown(time_diff)
rec_str = '{days} days, {hours} hours, {minutes} minutes, {seconds} seconds'.format(**timeparts)
boards.append({
'name': i,
'recency': rec_str
})
return flask.render_template('listing.tpl', boards = boards)
# Serve the board
# Both '/board/x' and '/b/x' are supported for convenience
@app.route('/board/<board_id>')
@app.route('/b/<board_id>')
def serve_board(board_id):
board_id = board_id.upper()
board = whiteboards[board_id]
key = flask.request.cookies.get('key_' + board_id)
if board.may_view(key):
show_controls = board.may_edit(key)
return flask.render_template(
'whiteboard.tpl',
board_id = board_id,
show_controls = show_controls,
permissions = board.permissions,
feedback_form = settings.get('feedback_form')
)
else:
flask.abort(403)
# Serve static files
@app.route('/static/<path:path>')
def serve_static(path):
print('Serving static: ', path)
return flask.send_from_directory('static', path)
# Load the paint action schema into a constant.
def load_schema(name):
text = open('schemas/' + name + '.json').read()
return json.loads(text)
SCHEMA_PAINT = load_schema('paint')
# Handle incoming paint events.
@sock.on('paint')
def socketio_paint(message):
# print('paint', message)
# Ensure the paint action is valid
if edgy.check(SCHEMA_PAINT, message):
bid = message['board_id'].upper()
key = message['key']
board = whiteboards[bid]
# Ensure the user has the correct permissions
if board.may_edit(key):
# Add the action to the whiteboard
board.add_action(message)
# Transmit the action to all other clients
data = {
'board_id': bid,
'actions': [
message
]
}
socketio.emit('paint', data, broadcast = True, room = bid)
else:
print('A paint action failed')
print(message)
# Fired when the user joins the whiteboard.
@sock.on('full image')
def socketio_full_image(message):
# print('full image', message)
bid = message['board_id'].upper()
key = message['key']
board = whiteboards[bid]
# Ensure the user may see this board
if board.may_view(key):
socketio.join_room(bid)
data = {
'board_id': bid,
'actions': board.full_image()
}
socketio.emit('paint', data)
# Fired when a user attempts to undo an action
@sock.on('undo')
def socketio_undo(message):
bid = message['board_id'].upper()
aid = message['action_id']
key = message['key']
board = whiteboards[bid]
# Ensure the user has permission to edit the board
if board.may_edit(key):
# Remove the action from the board
board.undo_action(aid)
# Tell other clients the action has been undone
data = {
'board_id': bid,
'action_id': aid
}
socketio.emit('undo', data, broadcast = True, room = bid)
# Fired when a user attempts to unlock a whiteboard
@sock.on('unlock')
def socketio_unlock(message):
bid = message['board_id'].upper()
key = message['key']
board = whiteboards[bid]
# Ensure the user has permission to do so
if board.may_edit(key):
board.unlock()
# Tell the other clients that the board has been unlocked;
# this will cause their browsers to refresh the page
socketio.emit('refresh', broadcast = True, room = bid)
# Run the server
if __name__ == '__main__':
port = int(settings.get('port'))
sock.run(app, host = '0.0.0.0', port = port)
Removed TODO note
import flask
import json
import collections
import random
import flask.ext.socketio as socketio
import time
import edgy
import database
import settings
class keydefaultdict(collections.defaultdict):
"""collections.defaultdict except the key is passed to the default_factory
I got this off Stack Overflow: http://stackoverflow.com/a/2912455/2002307
"""
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(key)
return ret
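# Illustrative sketch (not part of the original module): unlike a plain
# defaultdict, the missing key is forwarded to the factory, so
#     d = keydefaultdict(lambda k: k.upper())
#     d['abc']   # -> 'ABC', and the value is cached in the dict
# This is what lets `whiteboards[name]` below lazily build a Whiteboard(name).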
def make_humane_gibberish(length):
"""Generate a meaningless but human-friendly string.
Characters are drawn from an alphabet in which no two characters look alike.
Easily confused characters, such as '1' and 'l', are excluded.
"""
result = ''
for i in range(length):
result += random.choice('ACEFHJKNPQRTVXY34869')
return result
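# Example output (hypothetical): make_humane_gibberish(4) might return 'KQ4F';
# every character is drawn from the alphabet 'ACEFHJKNPQRTVXY34869'.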
def time_current():
"""Returns the number of second since the epoch."""
return int(time.time())
def time_breakdown(t):
"""Takes time as seconds-since epoch and breaks it down into human-friendly values."""
t = max(0, t)
return {
'seconds': t % 60,
'minutes': (t // 60) % 60,
'hours': (t // 3600) % 24,
'days': t // 86400
}
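# Worked example (for illustration): time_breakdown(90061) ->
#     {'seconds': 1, 'minutes': 1, 'hours': 1, 'days': 1}
# since 90061 = 86400 + 3600 + 60 + 1.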
class Whiteboard:
"""Object representing a single whiteboard.
This interfaces with the database on the backend.
has_loaded :: whether the contents have been loaded
layers :: the list of actions that have been performed on the whiteboard
timestamp :: the time at which the whiteboard was last modified
permissions :: string, either 'open', 'protected', or 'private'
key :: string used to identify who has access to the whiteboard
owner_key :: string used to identify who owns (created) the whiteboard
"""
def __init__(self, name):
"""Initialise a whiteboard object
Initially, the contents of the whiteboard have not been loaded.
"""
self.has_loaded = False
self.layers = []
self.timestamp = 0
self.permissions = 'open'
self.key = ''
self.owner_key = ''
self.name = name
def ensure_loaded(self):
"""Load the whiteboard's contents if they haven't been already"""
if not self.has_loaded:
self.load_everything()
self.save_everything()
def load_everything(self):
"""Load all the data from the database"""
data = database.load(self.name)
if data:
# Using get will allow backwards compatibility in the future
self.key = data.get('key', '')
self.owner_key = data.get('owner_key', '')
self.layers = data.get('layers', [])
self.timestamp = data.get('timestamp', time_current())
self.has_loaded = True
def save_everything(self):
"""Save all the data to the database"""
self.ensure_loaded()
payload = {
'layers': self.layers,
'key': self.key,
'owner_key': self.owner_key,
'timestamp': self.timestamp
}
database.rewrite(self.name, payload)
def update_time(self):
"""Change the modification timestamp to the current time"""
self.timestamp = time_current()
def full_image(self):
"""Returns a copy of the whiteboard contents, for sending to the cient"""
self.ensure_loaded()
self.update_time()
return self.layers[:]
def add_action(self, action):
"""Add a paint action to the whiteboard"""
self.ensure_loaded()
self.update_time()
self.layers.append(action)
database.action_push(self.name, action, self.timestamp)
def undo_action(self, action):
"""Remove a paint action from the whiteboard"""
self.ensure_loaded()
self.update_time()
self.layers = [i for i in self.layers if i['action_id'] != action]
database.action_remove(self.name, action, self.timestamp)
def make_protected(self):
"""Set the whiteboard to be 'protected'
Will regenerate keys even if the whiteboard is already protected"""
self.ensure_loaded()
self.permissions = 'protected'
self.key = make_humane_gibberish(6)
self.owner_key = make_humane_gibberish(30)
self.save_everything()
def make_private(self):
"""Set the whiteboard to be 'private'
Will regenerate keys even if the whiteboard is already private"""
self.ensure_loaded()
self.permissions = 'private'
self.key = make_humane_gibberish(6)
self.owner_key = make_humane_gibberish(30)
self.save_everything()
def unlock(self):
"""Sets the whiteboard to be publically accessible (by those with the link)"""
self.ensure_loaded()
self.permissions = 'open'
self.save_everything()
def may_view(self, key):
"""Checks if someone with the given key may view the whiteboard"""
self.ensure_loaded()
return self.permissions in ['open', 'protected'] or key in [self.key, self.owner_key]
def may_edit(self, key):
"""Checks if someone with the given key may edit the whiteboard"""
self.ensure_loaded()
return self.permissions == 'open' or key in [self.key, self.owner_key]
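# Permission summary implied by may_view/may_edit above: 'open' boards are
# viewable and editable by anyone with the link; 'protected' boards are viewable
# by anyone but editable only with self.key or self.owner_key; 'private' boards
# require one of those keys for both viewing and editing.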
# Create a dictionary of whiteboards and load the whiteboard metadata
whiteboards = keydefaultdict(lambda name: Whiteboard(name))
for i in database.load_meta():
whiteboards[i['name']].timestamp = i['timestamp']
# Create the flask server and the socket.io handler
app = flask.Flask(__name__)
app.debug = settings.get('debug')
sock = socketio.SocketIO(app)
def make_board_id():
"""Generates an unused board id"""
attempts = 0
board_id = make_humane_gibberish(4)
# Every time a clash occurs, increase the length of the ID by one
# in order to avoid problems with the birthday paradox.
# Starting with a short ID keeps the links human readable.
while board_id in whiteboards:
board_id = make_humane_gibberish(attempts + 4)
attempts += 1
return board_id
def make_board(permissions = 'open', board_id = None):
"""Creates a new board with specifc permissions"""
board_id = board_id or make_board_id()
whiteboards[board_id]  # accessing the keydefaultdict creates the board if it does not exist
if permissions == 'protected':
whiteboards[board_id].make_protected()
if permissions == 'private':
whiteboards[board_id].make_private()
return (board_id, whiteboards[board_id].owner_key)
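# Illustrative usage (not in the original): the /new/* routes below do roughly
#     board_id, owner_key = make_board(permissions = 'private')
# and then store owner_key in the 'key_' + board_id cookie so the creator can
# view and edit the board.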
# Set up routing for the information pages (home, about, etc...)
@app.route('/')
def serve_index():
return flask.render_template('index.tpl')
@app.route('/about')
def serve_about():
return flask.render_template('about.tpl')
@app.route('/docs')
def serve_docs():
return flask.render_template('docs.tpl')
@app.route('/legal')
def serve_legal():
return flask.render_template('legal.tpl')
# URLs that create new whiteboards. They create a new whiteboard and
# then automatically redirect the user there.
@app.route('/new')
def server_board_new():
board_id, key = make_board()
return flask.redirect('/board/' + board_id)
@app.route('/new/protected')
def server_board_new_protected():
board_id, key = make_board(permissions = 'protected')
response = flask.make_response(flask.redirect('/board/' + board_id))
response.set_cookie('key_' + board_id, key)
return response
@app.route('/new/private')
def server_board_new_private():
board_id, key = make_board(permissions = 'private')
response = flask.make_response(flask.redirect('/board/' + board_id))
response.set_cookie('key_' + board_id, key)
return response
# Serves up a list of existing whiteboards.
# Used for debugging purposes only.
@app.route('/listing')
def serve_listing():
if not settings.get('debug'):
flask.abort(403)
boards = []
for i in whiteboards:
time_diff = time_current() - whiteboards[i].timestamp
timeparts = time_breakdown(time_diff)
rec_str = '{days} days, {hours} hours, {minutes} minutes, {seconds} seconds'.format(**timeparts)
boards.append({
'name': i,
'recency': rec_str
})
return flask.render_template('listing.tpl', boards = boards)
# Serve the board
# Both '/board/x' and '/b/x' are supported for convenience
@app.route('/board/<board_id>')
@app.route('/b/<board_id>')
def serve_board(board_id):
board_id = board_id.upper()
board = whiteboards[board_id]
key = flask.request.cookies.get('key_' + board_id)
if board.may_view(key):
show_controls = board.may_edit(key)
return flask.render_template(
'whiteboard.tpl',
board_id = board_id,
show_controls = show_controls,
permissions = board.permissions,
feedback_form = settings.get('feedback_form')
)
else:
flask.abort(403)
# Serve static files
@app.route('/static/<path:path>')
def serve_static(path):
print('Serving static: ', path)
return flask.send_from_directory('static', path)
# Load the paint action schema into a constant.
def load_schema(name):
text = open('schemas/' + name + '.json').read()
return json.loads(text)
SCHEMA_PAINT = load_schema('paint')
# Handle incoming paint events.
@sock.on('paint')
def socketio_paint(message):
# print('paint', message)
# Ensure the paint action is valid
if edgy.check(SCHEMA_PAINT, message):
bid = message['board_id'].upper()
key = message['key']
board = whiteboards[bid]
# Ensure the user has the correct permissions
if board.may_edit(key):
# Add the action to the whiteboard
board.add_action(message)
# Transmit the action to all other clients
data = {
'board_id': bid,
'actions': [
message
]
}
socketio.emit('paint', data, broadcast = True, room = bid)
else:
print('A paint action failed')
print(message)
# Fired when the user joins the whiteboard.
@sock.on('full image')
def socketio_full_image(message):
# print('full image', message)
bid = message['board_id'].upper()
key = message['key']
board = whiteboards[bid]
# Ensure the user may see this board
if board.may_view(key):
socketio.join_room(bid)
data = {
'board_id': bid,
'actions': board.full_image()
}
socketio.emit('paint', data)
# Fired when a user attempts to undo an action
@sock.on('undo')
def socketio_undo(message):
bid = message['board_id'].upper()
aid = message['action_id']
key = message['key']
board = whiteboards[bid]
# Ensure the user has permission to edit the board
if board.may_edit(key):
# Remove the action from the board
board.undo_action(aid)
# Tell other clients the action has been undone
data = {
'board_id': bid,
'action_id': aid
}
socketio.emit('undo', data, broadcast = True, room = bid)
# Fired when a user attempts to unlock a whiteboard
@sock.on('unlock')
def socketio_unlock(message):
bid = message['board_id'].upper()
key = message['key']
board = whiteboards[bid]
# Ensure the user has permission to do so
if board.may_edit(key):
board.unlock()
# Tell the other clients that the board has been unlocked;
# this will cause their browsers to refresh the page
socketio.emit('refresh', broadcast = True, room = bid)
# Run the server
if __name__ == '__main__':
port = int(settings.get('port'))
sock.run(app, host = '0.0.0.0', port = port)
'''create files containing estimated generalization errors for the models
invocation: python ege_week.py YYYY-MM-DD [--testing]
YYYY-MM-DD mid-point of week; analyze -3 to +3 days
--testing if supplied, only subset of cases are run and output file has -test in its name
INPUT FILE
WORKING/transactions-subset2.pickle
OUTPUT FILES
WORKING/ege_week-YYYY-MM-DD-MODEL-df[-test].pickle dataframe with median errors
WORKING/ege_week-YYYY-MM-DD-MODEL-dict[-test].pickle dict with importance of features, actuals, estimates
'''
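# Illustrative invocation (the date is hypothetical):
#     python ege_week.py 2009-02-16 --testing
# analyzes transactions dated 2009-02-13 through 2009-02-19 and appends '-test'
# to the names of the output pickle files.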
import collections
import cPickle as pickle
import datetime
import numpy as np
import pandas as pd
import pdb
from pprint import pprint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn import ensemble
import sys
import warnings
from Bunch import Bunch
from DataframeAppender import DataframeAppender
from directory import directory
from Logger import Logger
import parse_command_line
def usage():
print 'usage: python ege_week.py YYYY-MM-DD [--testing]'
def make_control(argv):
'Return control Bunch'
if len(argv) == 1 or len(argv) > 3:
usage()
sys.exit(1)
script_name = argv[0]
base_name = script_name.split('.')[0]
random_seed = 123
now = datetime.datetime.now()
log_file_name = base_name + '.' + now.isoformat('T') + '.log'
year, month, day = argv[1].split('-')
sale_date = datetime.date(int(year), int(month), int(day))
# prior work found that the assessment features were not useful,
# so use just the census and tax roll features
# predictors with transformation to log domain
predictors = { # the columns in the x_arrays are in this order
'fraction.owner.occupied': None,
'FIREPLACE.NUMBER': 'log1p',
'BEDROOMS': 'log1p',
'BASEMENT.SQUARE.FEET': 'log1p',
'LAND.SQUARE.FOOTAGE': 'log',
'zip5.has.industry': None,
'census.tract.has.industry': None,
'census.tract.has.park': None,
'STORIES.NUMBER': 'log1p',
'census.tract.has.school': None,
'TOTAL.BATHS.CALCULATED': 'log1p',
'median.household.income': 'log', # not log feature in earlier version
'LIVING.SQUARE.FEET': 'log',
'has.pool': None,
'zip5.has.retail': None,
'census.tract.has.retail': None,
'is.new.construction': None,
'avg.commute': None,
'zip5.has.park': None,
'PARKING.SPACES': 'log1p',
'zip5.has.school': None,
'TOTAL.ROOMS': 'log1p',
'age': None,
'age2': None,
'effective.age': None,
'effective.age2': None}
debug = False
testing = parse_command_line.has_arg(argv, '--testing')
b = Bunch(
path_in=directory('working') + 'transactions-subset2.pickle',
path_log=directory('log') + log_file_name,
path_out_df='%s%s-%s-df%s.pickle' % (
directory('working'),
base_name,
sale_date,
'-test' if testing else ''),
path_out_dict='%s%s-%s-dict%s.pickle' % (
directory('working'),
base_name,
sale_date,
'-test' if testing else ''),
arg_date=sale_date,
start_time=now,
random_seed=random_seed,
sale_date=sale_date,
models={'rf': Rf(), 'ols': Ols()},
scopes=['global', 'zip'],
training_days=(7, 14, 21) if testing else range(7, 366, 7),
n_folds=10,
predictors=predictors,
price_column='SALE.AMOUNT',
testing=testing,
debug=debug)
return b
def elapsed_time(start_time):
return datetime.datetime.now() - start_time
def x(mode, df, control):
'''return 2D np.array, with df x values possibly transformed to log
RETURNS array: np.array 2D
'''
def transform(v, mode, transformation):
if mode is None:
return v
if mode == 'linear':
return v
if mode == 'log':
if transformation is None:
return v
if transformation == 'log':
return np.log(v)
if transformation == 'log1p':
return np.log1p(v)
raise RuntimeError('bad transformation: ' + str(transformation))
raise RuntimeError('bad mode:' + str(mode))
array = np.empty(shape=(df.shape[0], len(control.predictors)),
dtype=np.float64).T
# build up in transposed form
index = 0
for predictor_name, transformation in control.predictors.iteritems():
v = transform(df[predictor_name].values, mode, transformation)
array[index] = v
index += 1
return array.T
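# Illustration (based on control.predictors above): x('log', df, control) applies
# np.log to columns marked 'log' (e.g. 'LIVING.SQUARE.FEET'), np.log1p to columns
# marked 'log1p' (e.g. 'BEDROOMS'), and leaves indicator columns such as
# 'has.pool' untouched; x(None, ...) and x('linear', ...) return the raw values.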
def y(mode, df, control):
'''return np.array 1D with transformed price column from df'''
df2 = df.copy(deep=True)
if mode == 'log':
df2[control.price_column] = \
pd.Series(np.log(df[control.price_column]),
index=df.index)
array = np.array(df2[control.price_column].as_matrix(), np.float64)
return array
def demode(v, mode):
'convert log domain to normal'
if v is None:
return None
result = np.exp(v) if mode == 'log' else v
return result
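# Round-trip sketch: when prices are modeled in the log domain, estimates are
# mapped back via demode, e.g. demode(np.log(500000.0), 'log') ~= 500000.0,
# while demode(v, 'linear') returns v unchanged.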
def errors(model_result):
'return median_absolute_error and median_relative_absolute_error'
actuals = model_result['actuals']
estimates = model_result['estimates']
abs_error = np.abs(actuals - estimates)
median_abs_error = np.median(abs_error)
rel_abs_error = abs_error / actuals
median_rel_abs_error = np.median(rel_abs_error)
return median_abs_error, median_rel_abs_error
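# Worked example (illustrative): actuals = [100, 200], estimates = [110, 150]
# gives absolute errors [10, 50] (median 30) and relative errors [0.10, 0.25]
# (median 0.175).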
class ReportOls(object):
'report generation with y_mode and x_mode in key'
# NOTE: perhaps reusable for any model with y and x modes
def __init__(self):
self.format_global_fold = '%10s %2d %3s %6s %3s %3s f%d %6.0f %3.2f'
self.format_zip_fold = '%10s %2d %3s %6d %3s %3s f%d %6.0f %3.2f'
self.format_global = '%10s %2d %3s %6s %3s %3s median %6.0f %3.2f'
self.format_zip = '%10s %2d %3s %6d %3s %3s median %6.0f %3.2f'
def global_fold_lines(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(scope == 'global')
for result_key, result_value in result.iteritems():
y_mode = result_key[1][:3]
x_mode = result_key[3][:3]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
y_mode,
x_mode,
fold_number,
median_abs_error,
median_rel_abs_error)
yield line
def zip_fold_lines(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(isinstance(scope, tuple))
assert(scope[0] == 'zip')
zip_code = scope[1]
for result_key, result_value in result.iteritems():
y_mode = result_key[1][:3]
x_mode = result_key[3][:3]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
y_mode,
x_mode,
fold_number,
median_abs_error,
median_rel_abs_error)
yield line
def summarize_global(self,
sale_date,
training_days,
model_name,
all_results,
control):
scope = 'global'
for y_mode in ('log', 'linear'):
y_mode_print = y_mode[:3]
for x_mode in ('log', 'linear'):
x_mode_print = x_mode[:3]
median_errors = np.zeros(control.n_folds, dtype=np.float64)
median_rel_errors = np.zeros(control.n_folds, dtype=np.float64)
for fold_number in xrange(control.n_folds):
# determine errors in the fold
key = (fold_number, sale_date, training_days, model_name, scope)
if key not in all_results:
print 'key', key
print 'not in result'
continue
result = all_results[key]
model_result = result[('y_mode', y_mode, 'x_mode', x_mode)]
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
y_mode_print,
x_mode_print,
fold_number,
median_abs_error,
median_rel_abs_error)
print fold_line
median_errors[fold_number] = median_abs_error
median_rel_errors[fold_number] = median_rel_abs_error
all_folds_line = self.format_global % (sale_date,
training_days,
model_name,
scope,
y_mode_print,
x_mode_print,
np.median(median_errors),
np.median(median_rel_errors))
print all_folds_line
def summarize_zip(self, sale_date, training_days, model_name,
all_results, control):
def list_median(lst):
assert(len(lst) > 0)
return np.median(np.array(lst, dtype=np.float64))
def report_zip_code(zip_code, keys):
for y_mode in ('log', 'linear'):
y_mode_print = y_mode[:3]
for x_mode in ('log', 'linear'):
x_mode_print = x_mode[:3]
mode_key = ('y_mode', y_mode, 'x_mode', x_mode)
median_abs_errors = []
median_rel_abs_errors = []
for key in keys:
model_result = all_results[key][mode_key]
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
y_mode_print,
x_mode_print,
key[0], # fold number
median_abs_error,
median_rel_abs_error)
print fold_line
median_abs_errors.append(median_abs_error)
median_rel_abs_errors.append(median_rel_abs_error)
all_folds_line = self.format_zip % (sale_date,
training_days,
model_name,
zip_code,
y_mode_print,
x_mode_print,
list_median(median_abs_errors),
list_median(median_rel_abs_errors))
print all_folds_line
# determine all zip codes in the specified lines
zip_codes = collections.defaultdict(set)
for key in all_results.keys():
key_fold_number, key_sale_date, key_training_days, key_model_name, key_scope = key
if key_scope == 'global':
# examine only zip code scopes
continue
if key_sale_date == sale_date and key_training_days == training_days and key_model_name == model_name:
key_zip_code = key_scope[1]
zip_codes[key_zip_code].add(key)
# process each zip code
for zip_code, keys in zip_codes.iteritems():
report_zip_code(zip_code, keys)
def summarize(self, sale_date, training_days, model_name,
all_results, control):
self.summarize_global(sale_date, training_days, model_name,
all_results, control)
self.summarize_zip(sale_date, training_days, model_name,
all_results, control)
class Ols(object):
'Ordinary least squares via sklearn'
def __init__(self):
self.Model_Constructor = linear_model.LinearRegression
def reporter(self):
return ReportOls
def run(self, train, test, control):
'''fit on training data and test
ARGS
train : dataframe
test : dataframe
control: Bunch
RETURN dict of values
dict key = ('y_mode', y_mode, 'x_mode', x_mode)
values = dict with keys 'coef', 'intercept_', 'estimates', 'actuals'
'''
# implement variants
verbose = False
def variant(x_mode, y_mode):
train_x = x(x_mode, train, control)
test_x = x(x_mode, test, control)
train_y = y(y_mode, train, control)
model = self.Model_Constructor(fit_intercept=True,
normalize=True,
copy_X=True)
fitted_model = model.fit(train_x, train_y)
# if the model cannot be fitted, LinearRegression returns
# the mean of the train_y values
estimates = fitted_model.predict(test_x)
value = {
'coef': fitted_model.coef_,
'intercept_': fitted_model.intercept_,
'estimates': demode(estimates, y_mode),
'actuals': y('linear', test, control)
}
# check results
if verbose:
print 'x_mode, y_mode: ', x_mode, y_mode
print 'actuals: ', value['actuals']
print 'estimates: ', value['estimates']
return value
all_variants = {}
for x_mode in ('log', 'linear'):
for y_mode in ('log', 'linear'):
variant_value = variant(x_mode, y_mode)
key = ('y_mode', y_mode, 'x_mode', x_mode)
all_variants[key] = variant_value
return all_variants
class ReportRf(object):
'report generation with no variants (for now)'
def __init__(self):
'sale_date days model global fold error abs_error'
self.format_global_fold = '%10s %2d %3s %6s f%d trees %4d %6.0f %3.2f'
self.format_zip_fold = '%10s %2d %3s %6d f%d trees %4d %6.0f %3.2f'
self.format_global = '%10s %2d %3s %6s median %6.0f %3.2f'
self.format_zip = '%10s %2d %3s %6d median %6.0f %3.2f'
def global_fold_lines(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(scope == 'global')
for result_key, result_value in result.iteritems():
assert result_key[0] == 'n_trees'
n_trees = result_key[1]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
fold_number,
n_trees,
median_abs_error,
median_rel_abs_error)
yield line
def zip_fold_lines(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert isinstance(scope, tuple)
assert scope[0] == 'zip'
zip_code = scope[1]
for result_key, result_value in result.iteritems():
assert result_key[0] == 'n_trees'
n_trees = result_key[1]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
fold_number,
n_trees,
median_abs_error,
median_rel_abs_error)
yield line
def summarize_global(self, sale_date, training_days, model_name, all_results, control):
scope = 'global'
median_errors = np.zeros(control.n_folds, dtype=np.float64)
median_rel_errors = np.zeros(control.n_folds, dtype=np.float64)
for fold_number in xrange(control.n_folds):
key = (fold_number, sale_date, training_days, model_name, scope)
if key not in all_results:
# can happen when a model could not be fit
print 'model_result missing key', key
continue
model_result = all_results[key]
if len(model_result['actuals']) == 0:
continue
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
fold_number,
median_abs_error,
median_rel_abs_error)
print fold_line
median_errors[fold_number] = median_abs_error
median_rel_errors[fold_number] = median_rel_abs_error
all_folds_line = self.format_global % (sale_date,
training_days,
model_name,
scope,
np.median(median_errors),
np.median(median_rel_errors))
print all_folds_line
def summarize_zip(self, sale_date, training_days, model_name, all_results, control):
def list_median(lst):
assert(len(lst) > 0)
return np.median(np.array(lst, dtype=np.float64))
def report_zip_code(zip_code, keys):
median_abs_errors = []
median_rel_abs_errors = []
for key in keys:
model_result = all_results[key]
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
key[0], # fold number
median_abs_error,
median_rel_abs_error)
print fold_line
median_abs_errors.append(median_abs_error)
median_rel_abs_errors.append(median_rel_abs_error)
all_folds_line = self.format_zip % (sale_date,
training_days,
model_name,
zip_code,
list_median(median_abs_errors),
list_median(median_rel_abs_errors))
print all_folds_line
# determine all zip codes in the specified lines
zip_codes = collections.defaultdict(set)
for key in all_results.keys():
key_fold_number, key_sale_date, key_training_days, key_model_name, key_scope = key
if key_scope == 'global':
# examine only zip code scopes
continue
if key_sale_date == sale_date and key_training_days == training_days and key_model_name == model_name:
key_zip_code = key_scope[1]
zip_codes[key_zip_code].add(key)
# process each zip code
for zip_code, keys in zip_codes.iteritems():
report_zip_code(zip_code, keys)
def summarize(self, sale_date, training_days, model_name, all_results, control):
self.summarize_global(sale_date, training_days, model_name, all_results, control)
self.summarize_zip(sale_date, training_days, model_name, all_results, control)
class Rf(object):
'Random forests via sklearn'
def __init__(self):
self.Model_Constructor = ensemble.RandomForestRegressor
def reporter(self):
return ReportRf
def run(self, train, test, control):
'''fit on train, test on test, return dict of variants
The variants are defined by the number of trees in the forest
RETURN dict with key = variant_description
'''
verbose = False
def variant(n_trees):
train_x = x(None, train, control) # no transformation
test_x = x(None, test, control)
train_y = y(None, train, control)
model = self.Model_Constructor(n_estimators=n_trees,
random_state=control.random_seed)
fitted_model = model.fit(train_x, train_y)
estimates = fitted_model.predict(test_x)
# return selected fitted results
result = {
'feature_importances': fitted_model.feature_importances_,
'estimates': estimates,
'actuals': y(None, test, control)}
if verbose:
for k, v in result.iteritems():
print k, v
return result
all_variants = {}
for n_trees in (10, 100) if control.testing else (10, 100, 300, 1000):
variant_value = variant(n_trees)
key = ('n_trees', n_trees)
all_variants[key] = variant_value
return all_variants
def within(sale_date, training_days, df):
'return indices of samples up to training_days before the sale_date'
assert(training_days > 0)
# one training day means use samples on the sale date only
first_ok_sale_date = sale_date - datetime.timedelta(training_days - 1)
date_column = 'sale.python_date'
after = df[date_column] >= first_ok_sale_date
before = df[date_column] <= sale_date
ok_indices = np.logical_and(after, before)
return ok_indices
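# Example (hypothetical dates): sale_date = 2009-02-16 with training_days = 7
# selects samples dated 2009-02-10 through 2009-02-16 inclusive, i.e. seven
# calendar days ending on the sale date.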
def is_between(df, first_date, last_date):
'return df containing subset of samples between the two dates'
df_date = df['sale.python_date']
return (df_date >= first_date) & (df_date <= last_date)
def on_sale_date(sale_date, df):
'''return indices of sample on the sale date'''
date_column = 'sale.python_date'
result = df[date_column] == sale_date
return result
def add_age(df, sale_date):
'Return new df with extra columns for age and effective age'
column_names = df.columns.tolist()
if 'age' in column_names:
print column_names
print 'age in column_names'
pdb.set_trace()
assert('age' not in column_names)
assert('age2' not in column_names)
assert('effective.age' not in column_names)
assert('effective.age2' not in column_names)
sale_year = df['sale.year']
def age(column_name):
'age from sale_date to specified column'
age_in_years = sale_year - df[column_name].values
return pd.Series(age_in_years, index=df.index)
result = df.copy(deep=True)
result['age'] = age('YEAR.BUILT')
result['effective.age'] = age('EFFECTIVE.YEAR.BUILT')
result['age2'] = result['age'] * result['age']
result['effective.age2'] = result['effective.age'] * result['effective.age']
return result
def unique_zip_codes(df):
'yield each unique zip code in the dataframe'
unique_zip_codes = df['zip5'].unique()
for i in xrange(len(unique_zip_codes)):
yield unique_zip_codes[i]
def zip_codes(df, a_zip_code):
'return new dataframe containing just the specified zip code'
df_copy = df.copy(deep=True)
result = df_copy[df_copy['zip5'] == a_zip_code]
return result
def make_train_model(df, sale_date, training_days):
'return df of transactions no more than training_days before the sale_date'
just_before_sale_date = within(sale_date, training_days, df)
train_model = add_age(df[just_before_sale_date], sale_date)
return train_model
def make_test_model(df, sale_date):
'return df of transactions on the sale_date'
selected_indices = on_sale_date(sale_date, df)
test_model = add_age(df[selected_indices], sale_date)
return test_model
def determine_most_popular_zip_code(df, control):
'return the zip_code that occurs most often in the dataframe'
zip_code_counter = collections.Counter()
for _, zip_code in df.zip5.iteritems():
zip_code_counter[zip_code] += 1
most_common_zip_code, count = zip_code_counter.most_common(1)[0]
print 'most common zip_code', most_common_zip_code, 'occurs', count
# assert: the most common zip code is in each fold
fold_number = -1
folds_for_zip_code = collections.defaultdict(set)
kf = cross_validation.KFold(n=(len(df)),
n_folds=control.n_folds,
shuffle=True,
random_state=control.random_seed)
for train_indices, test_indices in kf:
fold_number += 1
train = df.iloc[train_indices].copy(deep=True)
test = df.iloc[test_indices].copy(deep=True)
if most_common_zip_code not in test.zip5.values:
print most_common_zip_code, 'not in', fold_number
for zip_code in unique_zip_codes(test):
assert(zip_code in test.zip5.values)
if zip_code not in train.zip5.values:
print 'fold %d zip_code %d in test and not train' % (
fold_number,
zip_code)
folds_for_zip_code[zip_code].add(fold_number)
assert(len(folds_for_zip_code[most_common_zip_code]) == 10)
# report zip codes that do not appear in every test fold
count_in_10 = 0
count_not_in_10 = 0
for zip_code, set_folds in folds_for_zip_code.iteritems():
if len(set_folds) != 10:
print 'zip_code %d in only %d folds' % (zip_code, len(set_folds))
count_not_in_10 += 1
else:
count_in_10 += 1
print 'all other zip codes are in 10 folds'
print 'in 10: %d not in 10: %d' % (count_in_10, count_not_in_10)
print 'NOTE: all this analysis is before training samples are selected'
return most_common_zip_code
def read_training_data(control):
'return dataframe'
class AccumulateMedianErrors():
def __init__(self):
self.dfa = DataframeAppender([('fold_number', np.int64),
('training_days', np.int64),
('model_id', object), # string: model + hyperparameters
('scope', object), # 'global' or zip code
('median_abs_error', np.float64),
('median_rel_error', np.float64),
('n_samples', np.float64)])
def accumulate(self, key, result):
verbose = False
fold_number, sale_date, training_days, model_name, scope = key
if model_name == 'rf':
self._accumulate_rf(fold_number, training_days, scope, result)
elif model_name == 'ols':
self._accumulate_ols(fold_number, training_days, scope, result)
else:
raise RuntimeError('bad model_name: ' + str(model_name))
if verbose:
print self.dfa.df
def _accumulate_ols(self, fold_number, training_days, scope, result):
for k, v in result.iteritems():
model_id = 'ols ' + str(k[1])[:3] + ' ' + str(k[3])[:3]
self._append(fold_number, training_days, model_id, scope, v)
def _accumulate_rf(self, fold_number, training_days, scope, result):
for k, v in result.iteritems():
model_id = 'rf ' + str(k[1])
self._append(fold_number, training_days, model_id, scope, v)
def _append(self, fold_number, training_days, model_id, scope, model_result):
median_abs_error, median_rel_error = errors(model_result)
self.dfa.append([fold_number,
training_days,
model_id,
scope if scope == 'global' else str(scope[1]),
median_abs_error,
median_rel_error,
len(model_result['actuals'])])
def dataframe(self):
return self.dfa.result()
def squeeze(result, verbose=False):
'replace float64 with float32'
def is_np_array_float64(x):
return isinstance(x, np.ndarray) and x.dtype == np.float64
def is_np_scalar_float64(x):
return isinstance(x, np.float64)
if verbose:
pprint(result)
assert(isinstance(result, dict))
new_result = {}
for k, v in result.iteritems():
if isinstance(k, str):
# rf result
if is_np_array_float64(v):
# e.g., actual, estimate, other info in a vector
new_result[k] = np.array(v, dtype=np.float32)
else:
print k, v
raise RuntimeError('unexpected')
elif isinstance(k, tuple):
# ols result
new_ols_result = {}
for ols_key, ols_value in v.iteritems():
if is_np_array_float64(ols_value):
new_ols_result[ols_key] = np.array(ols_value, dtype=np.float32)
elif is_np_scalar_float64(ols_value):
new_ols_result[ols_key] = np.float32(ols_value)
else:
print ols_key, ols_value
raise RuntimeError('unexpected')
new_result[k] = new_ols_result
else:
# unexpected
print k, v
raise RuntimeError('unexpected')
if verbose:
pprint(new_result)
return new_result
def fit_and_test_models(df_all, control):
'''Return all_results dict and median_errors dataframe
'''
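# Shape of all_results as built below: keys are
#     (fold_number, control.sale_date, training_days, model_name, scope)
# where scope is 'global' or ('zip', zip_code); values are the per-variant dicts
# returned by model.run, keyed ('y_mode', ..., 'x_mode', ...) for Ols and
# ('n_trees', n) for Rf.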
verbose = True
# determine samples that are in the test period ( = 1 week around the sale_date)
first_sale_date = control.sale_date - datetime.timedelta(3)
last_sale_date = control.sale_date + datetime.timedelta(3)
in_sale_period = is_between(df=df_all,
first_date=first_sale_date,
last_date=last_sale_date)
num_sale_samples = sum(in_sale_period)
print 'num sale samples', num_sale_samples
assert num_sale_samples >= control.n_folds, 'unable to form folds'
all_results = {}
median_errors = AccumulateMedianErrors()
fold_number = -1
skf = cross_validation.StratifiedKFold(in_sale_period, control.n_folds)
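# Note: this is the older sklearn.cross_validation API (pre-0.18), where the
# fold object is built from the label array and iterated directly; stratifying
# on the boolean in_sale_period keeps the share of sale-period samples roughly
# equal across folds, so every test fold contains some sale-period transactions.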
for train_indices, test_indices in skf:
fold_number += 1
# don't create views (just to be careful)
df_train = df_all.iloc[train_indices].copy(deep=True)
df_test = df_all.iloc[test_indices].copy(deep=True)
for training_days in control.training_days:
assert training_days > 0
# determine training samples for the models
df_train_model = \
add_age(df_train[is_between(df=df_train,
first_date=first_sale_date - datetime.timedelta(training_days),
last_date=first_sale_date - datetime.timedelta(1))],
first_sale_date)
if len(df_train_model) == 0:
print 'no training data fold %d training_days %d' % (
fold_number, training_days)
sys.exit(1)
# determine testing samples for the models
df_test_model = \
add_age(df_test[is_between(df=df_test,
first_date=first_sale_date,
last_date=last_sale_date)],
first_sale_date)
if len(df_test_model) == 0:
print 'no testing data fold %d sale_date %s training_days %d' % (
fold_number, control.sale_date, training_days)
continue
print 'model samples sizes: training_days %d train %d test %d' % (
training_days, len(df_train_model), len(df_test_model))
# fit and test each model
for model_name, model in control.models.iteritems():
print '%d %s %d %s elapsed %s' % (
fold_number, control.sale_date, training_days, model_name,
elapsed_time(control.start_time))
def make_key(scope):
return (fold_number, control.sale_date, training_days, model_name, scope)
# determine global results (for all areas)
if len(df_test_model) == 0 or len(df_train_model) == 0:
print 'skipping global zero length: #test %d #train %d' % (
len(df_test_model), len(df_train_model))
else:
global_result = model.run(train=df_train_model,
test=df_test_model,
control=control)
global_key = make_key(scope='global')
all_results[global_key] = squeeze(global_result)
report = model.reporter()() # instantiate report class
if verbose:
for line in report.global_fold_lines(global_key, global_result):
print line
median_errors.accumulate(global_key, global_result)
# determine results for each zip code in test data
for zip_code in unique_zip_codes(df_test_model):
df_train_model_zip = zip_codes(df_train_model, zip_code)
df_test_model_zip = zip_codes(df_test_model, zip_code)
if len(df_train_model_zip) == 0 or len(df_test_model_zip) == 0:
print 'skipping zip zero length: zip %d #test %d #train %d' % (
zip_code, len(df_test_model_zip), len(df_train_model_zip))
else:
zip_code_result = model.run(train=df_train_model_zip,
test=df_test_model_zip,
control=control)
zip_code_key = make_key(scope=('zip', zip_code))
all_results[zip_code_key] = squeeze(zip_code_result)
if verbose:
for line in report.zip_fold_lines(zip_code_key, zip_code_result):
print line
median_errors.accumulate(zip_code_key, zip_code_result)
print 'num sale samples across all folds:', num_sale_samples
return all_results, median_errors.dataframe()
def print_results(all_results, control):
for training_days in control.training_days:
for model_name, model in control.models.iteritems():
report = model.reporter()() # how to print is in the model result
report.summarize(control.sale_date,
training_days,
model_name,
all_results,
control)
def main(argv):
warnings.filterwarnings('error') # convert warnings to errors
control = make_control(argv)
sys.stdout = Logger(logfile_path=control.path_log)  # output from print is also written to the log file
print control
# read input
f = open(control.path_in, 'rb')
df_loaded = pickle.load(f)
f.close()
df_loaded_copy = df_loaded.copy(deep=True) # used for debugging
if False:
most_popular_zip_code = determine_most_popular_zip_code(df_loaded.copy(), control)
print most_popular_zip_code
all_results, median_errors = fit_and_test_models(df_loaded, control)
assert(df_loaded.equals(df_loaded_copy))
if False:
# this code doesn't know about the variants for the Rf model
# in addition, we don't need these results, because downstream programs have
# been written to summarize the results
print_results(all_results, control)
# write result
print 'writing results to', control.path_out_dict
result = {'control': control, # control.predictors orders the x values
'all_results': all_results}
f = open(control.path_out_dict, 'wb')
pickle.dump(result, f)
f.close()
print 'writing results to', control.path_out_df
f = open(control.path_out_df, 'wb')
pickle.dump(median_errors, f)
f.close()
print 'ok'
if __name__ == "__main__":
if False:
# quiet pyflakes warnings
pdb.set_trace()
pprint(None)
np.all()
pd.Series()
main(sys.argv)
testing --> test
'''create files containing estimated generalization errors for the models
invocation: python ege_week.py YYYY-MM-DD [--test]
YYYY-MM-DD mid-point of week; analyze -3 to +3 days
--test if supplied, only subset of cases are run and output file has -test in its name
INPUT FILE
WORKING/transactions-subset2.pickle
OUTPUT FILES
WORKING/ege_week-YYYY-MM-DD-MODEL-df[-test].pickle dataframe with median errors
WORKING/ege_week-YYYY-MM-DD-MODEL-dict[-test].pickle dict with importance of features, actuals, estimates
'''
import collections
import cPickle as pickle
import datetime
import numpy as np
import pandas as pd
import pdb
from pprint import pprint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn import ensemble
import sys
import warnings
from Bunch import Bunch
from DataframeAppender import DataframeAppender
from directory import directory
from Logger import Logger
import parse_command_line
def usage():
print 'usage: python ege_week.py YYYY-MM-DD [--test]'
def make_control(argv):
'Return control Bunch'
if len(argv) == 1 or len(argv) > 3:
usage()
sys.exit(1)
script_name = argv[0]
base_name = script_name.split('.')[0]
random_seed = 123
now = datetime.datetime.now()
log_file_name = base_name + '.' + now.isoformat('T') + '.log'
year, month, day = argv[1].split('-')
sale_date = datetime.date(int(year), int(month), int(day))
# prior work found that the assessment features were not useful,
# so use just the census and tax roll features
# predictors with transformation to log domain
predictors = { # the columns in the x_arrays are in this order
'fraction.owner.occupied': None,
'FIREPLACE.NUMBER': 'log1p',
'BEDROOMS': 'log1p',
'BASEMENT.SQUARE.FEET': 'log1p',
'LAND.SQUARE.FOOTAGE': 'log',
'zip5.has.industry': None,
'census.tract.has.industry': None,
'census.tract.has.park': None,
'STORIES.NUMBER': 'log1p',
'census.tract.has.school': None,
'TOTAL.BATHS.CALCULATED': 'log1p',
'median.household.income': 'log', # not log feature in earlier version
'LIVING.SQUARE.FEET': 'log',
'has.pool': None,
'zip5.has.retail': None,
'census.tract.has.retail': None,
'is.new.construction': None,
'avg.commute': None,
'zip5.has.park': None,
'PARKING.SPACES': 'log1p',
'zip5.has.school': None,
'TOTAL.ROOMS': 'log1p',
'age': None,
'age2': None,
'effective.age': None,
'effective.age2': None}
debug = False
test = parse_command_line.has_arg(argv, '--test')
b = Bunch(
path_in=directory('working') + 'transactions-subset2.pickle',
path_log=directory('log') + log_file_name,
path_out_df='%s%s-%s-df%s.pickle' % (
directory('working'),
base_name,
sale_date,
'-test' if test else ''),
path_out_dict='%s%s-%s-dict%s.pickle' % (
directory('working'),
base_name,
sale_date,
'-test' if test else ''),
arg_date=sale_date,
start_time=now,
random_seed=random_seed,
sale_date=sale_date,
models={'rf': Rf(), 'ols': Ols()},
scopes=['global', 'zip'],
training_days=(7, 14, 21) if test else range(7, 366, 7),
n_folds=10,
predictors=predictors,
price_column='SALE.AMOUNT',
test=test,
debug=debug)
return b
def elapsed_time(start_time):
return datetime.datetime.now() - start_time
def x(mode, df, control):
'''return 2D np.array, with df x values possibly transformed to log
RETURNS array: np.array 2D
'''
def transform(v, mode, transformation):
if mode is None:
return v
if mode == 'linear':
return v
if mode == 'log':
if transformation is None:
return v
if transformation == 'log':
return np.log(v)
if transformation == 'log1p':
return np.log1p(v)
raise RuntimeError('bad transformation: ' + str(transformation))
raise RuntimeError('bad mode:' + str(mode))
array = np.empty(shape=(df.shape[0], len(control.predictors)),
dtype=np.float64).T
# build up in transposed form
index = 0
for predictor_name, transformation in control.predictors.iteritems():
v = transform(df[predictor_name].values, mode, transformation)
array[index] = v
index += 1
return array.T
def y(mode, df, control):
'''return np.array 1D with transformed price column from df'''
df2 = df.copy(deep=True)
if mode == 'log':
df2[control.price_column] = \
pd.Series(np.log(df[control.price_column]),
index=df.index)
array = np.array(df2[control.price_column].as_matrix(), np.float64)
return array
def demode(v, mode):
'convert log domain to normal'
if v is None:
return None
result = np.exp(v) if mode == 'log' else v
return result
def errors(model_result):
'return median_absolute_error and median_relative_absolute_error'
actuals = model_result['actuals']
estimates = model_result['estimates']
abs_error = np.abs(actuals - estimates)
median_abs_error = np.median(abs_error)
rel_abs_error = abs_error / actuals
median_rel_abs_error = np.median(rel_abs_error)
return median_abs_error, median_rel_abs_error
class ReportOls(object):
'report generation with y_mode and x_mode in key'
# NOTE: perhaps reusable for any model with y and x modes
def __init__(self):
self.format_global_fold = '%10s %2d %3s %6s %3s %3s f%d %6.0f %3.2f'
self.format_zip_fold = '%10s %2d %3s %6d %3s %3s f%d %6.0f %3.2f'
self.format_global = '%10s %2d %3s %6s %3s %3s median %6.0f %3.2f'
self.format_zip = '%10s %2d %3s %6d %3s %3s median %6.0f %3.2f'
def global_fold_lines(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(scope == 'global')
for result_key, result_value in result.iteritems():
y_mode = result_key[1][:3]
x_mode = result_key[3][:3]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
y_mode,
x_mode,
fold_number,
median_abs_error,
median_rel_abs_error)
yield line
def zip_fold_lines(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(isinstance(scope, tuple))
assert(scope[0] == 'zip')
zip_code = scope[1]
for result_key, result_value in result.iteritems():
y_mode = result_key[1][:3]
x_mode = result_key[3][:3]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
y_mode,
x_mode,
fold_number,
median_abs_error,
median_rel_abs_error)
yield line
def summarize_global(self,
sale_date,
training_days,
model_name,
all_results,
control):
scope = 'global'
for y_mode in ('log', 'linear'):
y_mode_print = y_mode[:3]
for x_mode in ('log', 'linear'):
x_mode_print = x_mode[:3]
median_errors = np.zeros(control.n_folds, dtype=np.float64)
median_rel_errors = np.zeros(control.n_folds, dtype=np.float64)
for fold_number in xrange(control.n_folds):
# determine errors in the fold
key = (fold_number, sale_date, training_days, model_name, scope)
if key not in all_results:
print 'key', key
print 'not in result'
continue
result = all_results[key]
model_result = result[('y_mode', y_mode, 'x_mode', x_mode)]
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
y_mode_print,
x_mode_print,
fold_number,
median_abs_error,
median_rel_abs_error)
print fold_line
median_errors[fold_number] = median_abs_error
median_rel_errors[fold_number] = median_rel_abs_error
all_folds_line = self.format_global % (sale_date,
training_days,
model_name,
scope,
y_mode_print,
x_mode_print,
np.median(median_errors),
np.median(median_rel_errors))
print all_folds_line
def summarize_zip(self, sale_date, training_days, model_name,
all_results, control):
def list_median(lst):
assert(len(lst) > 0)
return np.median(np.array(lst, dtype=np.float64))
def report_zip_code(zip_code, keys):
for y_mode in ('log', 'linear'):
y_mode_print = y_mode[:3]
for x_mode in ('log', 'linear'):
x_mode_print = x_mode[:3]
mode_key = ('y_mode', y_mode, 'x_mode', x_mode)
median_abs_errors = []
median_rel_abs_errors = []
for key in keys:
model_result = all_results[key][mode_key]
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
y_mode_print,
x_mode_print,
key[0], # fold number
median_abs_error,
median_rel_abs_error)
print fold_line
median_abs_errors.append(median_abs_error)
median_rel_abs_errors.append(median_rel_abs_error)
all_folds_line = self.format_zip % (sale_date,
training_days,
model_name,
zip_code,
y_mode_print,
x_mode_print,
list_median(median_abs_errors),
list_median(median_rel_abs_errors))
print all_folds_line
# determine all zip codes in the specified lines
zip_codes = collections.defaultdict(set)
for key in all_results.keys():
key_fold_number, key_sale_date, key_training_days, key_model_name, key_scope = key
if key_scope == 'global':
# examine only zip code scopes
continue
if key_sale_date == sale_date and key_training_days == training_days and key_model_name == model_name:
key_zip_code = key_scope[1]
zip_codes[key_zip_code].add(key)
# process each zip code
for zip_code, keys in zip_codes.iteritems():
report_zip_code(zip_code, keys)
def summarize(self, sale_date, training_days, model_name,
all_results, control):
self.summarize_global(sale_date, training_days, model_name,
all_results, control)
self.summarize_zip(sale_date, training_days, model_name,
all_results, control)
class Ols(object):
'Ordinary least squares via sklearn'
def __init__(self):
self.Model_Constructor = linear_model.LinearRegression
def reporter(self):
return ReportOls
def run(self, train, test, control):
'''fit on training data and test
ARGS
train : dataframe
test : dataframe
control: Bunch
RETURN dict of values
dict key = ('y_mode', y_mode, 'x_mode', x_mode)
values = dict with keys 'coef', 'intercept_', 'estimates', 'actuals'
'''
# implement variants
verbose = False
def variant(x_mode, y_mode):
train_x = x(x_mode, train, control)
test_x = x(x_mode, test, control)
train_y = y(y_mode, train, control)
model = self.Model_Constructor(fit_intercept=True,
normalize=True,
copy_X=True)
fitted_model = model.fit(train_x, train_y)
# if the model cannot be fitted, LinearRegression returns
# the mean of the train_y values
estimates = fitted_model.predict(test_x)
value = {
'coef': fitted_model.coef_,
'intercept_': fitted_model.intercept_,
'estimates': demode(estimates, y_mode),
'actuals': y('linear', test, control)
}
# check results
if verbose:
print 'x_mode, y_mode: ', x_mode, y_mode
print 'actuals: ', value['actuals']
print 'estimates: ', value['estimates']
return value
all_variants = {}
for x_mode in ('log', 'linear'):
for y_mode in ('log', 'linear'):
variant_value = variant(x_mode, y_mode)
key = ('y_mode', y_mode, 'x_mode', x_mode)
all_variants[key] = variant_value
return all_variants
class ReportRf(object):
'report generation with no variants (for now)'
def __init__(self):
'sale_date days model global fold error abs_error'
self.format_global_fold = '%10s %2d %3s %6s f%d trees %4d %6.0f %3.2f'
self.format_zip_fold = '%10s %2d %3s %6d f%d trees %4d %6.0f %3.2f'
self.format_global = '%10s %2d %3s %6s median %6.0f %3.2f'
self.format_zip = '%10s %2d %3s %6d median %6.0f %3.2f'
def global_fold_lines(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(scope == 'global')
for result_key, result_value in result.iteritems():
assert result_key[0] == 'n_trees'
n_trees = result_key[1]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
fold_number,
n_trees,
median_abs_error,
median_rel_abs_error)
yield line
def zip_fold_lines(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert isinstance(scope, tuple)
assert scope[0] == 'zip'
zip_code = scope[1]
for result_key, result_value in result.iteritems():
assert result_key[0] == 'n_trees'
n_trees = result_key[1]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
fold_number,
n_trees,
median_abs_error,
median_rel_abs_error)
yield line
def summarize_global(self, sale_date, training_days, model_name, all_results, control):
scope = 'global'
median_errors = np.zeros(control.n_folds, dtype=np.float64)
median_rel_errors = np.zeros(control.n_folds, dtype=np.float64)
for fold_number in xrange(control.n_folds):
key = (fold_number, sale_date, training_days, model_name, scope)
if key not in all_results:
# can happen when a model could not be fit
print 'model_result missing key', key
continue
model_result = all_results[key]
if len(model_result['actuals']) == 0:
continue
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
fold_number,
median_abs_error,
median_rel_abs_error)
print fold_line
median_errors[fold_number] = median_abs_error
median_rel_errors[fold_number] = median_rel_abs_error
all_folds_line = self.format_global % (sale_date,
training_days,
model_name,
scope,
np.median(median_errors),
np.median(median_rel_errors))
print all_folds_line
def summarize_zip(self, sale_date, training_days, model_name, all_results, control):
def list_median(lst):
assert(len(lst) > 0)
return np.median(np.array(lst, dtype=np.float64))
def report_zip_code(zip_code, keys):
median_abs_errors = []
median_rel_abs_errors = []
for key in keys:
model_result = all_results[key]
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
key[0], # fold number
median_abs_error,
median_rel_abs_error)
print fold_line
median_abs_errors.append(median_abs_error)
median_rel_abs_errors.append(median_rel_abs_error)
all_folds_line = self.format_zip % (sale_date,
training_days,
model_name,
zip_code,
list_median(median_abs_errors),
list_median(median_rel_abs_errors))
print all_folds_line
# determine all zip codes in the specified lines
zip_codes = collections.defaultdict(set)
for key in all_results.keys():
key_fold_number, key_sale_date, key_training_days, key_model_name, key_scope = key
if key_scope == 'global':
# examine only zip code scopes
continue
if key_sale_date == sale_date and key_training_days == training_days and key_model_name == model_name:
key_zip_code = key_scope[1]
zip_codes[key_zip_code].add(key)
# process each zip code
for zip_code, keys in zip_codes.iteritems():
report_zip_code(zip_code, keys)
def summarize(self, sale_date, training_days, model_name, all_results, control):
self.summarize_global(sale_date, training_days, model_name, all_results, control)
self.summarize_zip(sale_date, training_days, model_name, all_results, control)
class Rf(object):
'Random forests via sklearn'
def __init__(self):
self.Model_Constructor = ensemble.RandomForestRegressor
def reporter(self):
return ReportRf
def run(self, train, test, control):
'''fit on train, test on test, return dict of variants
The variants are defined by the number of trees in the forest
RETURN dict with key = variant_description
'''
verbose = False
def variant(n_trees):
train_x = x(None, train, control) # no transformation
test_x = x(None, test, control)
train_y = y(None, train, control)
model = self.Model_Constructor(n_estimators=n_trees,
random_state=control.random_seed)
fitted_model = model.fit(train_x, train_y)
estimates = fitted_model.predict(test_x)
# return selected fitted results
result = {
'feature_importances': fitted_model.feature_importances_,
'estimates': estimates,
'actuals': y(None, test, control)}
if verbose:
for k, v in result.iteritems():
print k, v
return result
all_variants = {}
for n_trees in (10, 100) if control.test else (10, 100, 300, 1000):
variant_value = variant(n_trees)
key = ('n_trees', n_trees)
all_variants[key] = variant_value
return all_variants
def within(sale_date, training_days, df):
'return indices of samples up to training_days before the sale_date'
assert(training_days > 0)
# one training day means use samples on the sale date only
first_ok_sale_date = sale_date - datetime.timedelta(training_days - 1)
date_column = 'sale.python_date'
after = df[date_column] >= first_ok_sale_date
before = df[date_column] <= sale_date
ok_indices = np.logical_and(after, before)
return ok_indices
def is_between(df, first_date, last_date):
'return df containing subset of samples between the two dates'
df_date = df['sale.python_date']
return (df_date >= first_date) & (df_date <= last_date)
def on_sale_date(sale_date, df):
'''return indices of sample on the sale date'''
date_column = 'sale.python_date'
result = df[date_column] == sale_date
return result
def add_age(df, sale_date):
'Return new df with extra columns for age and effective age'
column_names = df.columns.tolist()
if 'age' in column_names:
print column_names
print 'age in column_names'
pdb.set_trace()
assert('age' not in column_names)
assert('age2' not in column_names)
assert('effective.age' not in column_names)
assert('effective.age2' not in column_names)
sale_year = df['sale.year']
def age(column_name):
'age from sale_date to specified column'
age_in_years = sale_year - df[column_name].values
return pd.Series(age_in_years, index=df.index)
result = df.copy(deep=True)
result['age'] = age('YEAR.BUILT')
result['effective.age'] = age('EFFECTIVE.YEAR.BUILT')
result['age2'] = result['age'] * result['age']
result['effective.age2'] = result['effective.age'] * result['effective.age']
return result
def unique_zip_codes(df):
'yield each unique zip code in the dataframe'
unique_zip_codes = df['zip5'].unique()
for i in xrange(len(unique_zip_codes)):
yield unique_zip_codes[i]
def zip_codes(df, a_zip_code):
'return new dataframe containing just the specified zip code'
df_copy = df.copy(deep=True)
result = df_copy[df_copy['zip5'] == a_zip_code]
return result
def make_train_model(df, sale_date, training_days):
'return df of transactions no more than training_days before the sale_date'
just_before_sale_date = within(sale_date, training_days, df)
train_model = add_age(df[just_before_sale_date], sale_date)
return train_model
def make_test_model(df, sale_date):
'return df of transactions on the sale_date'
selected_indices = on_sale_date(sale_date, df)
test_model = add_age(df[selected_indices], sale_date)
return test_model
def determine_most_popular_zip_code(df, control):
'return the zip_code that occurs most often in the dataframe'
zip_code_counter = collections.Counter()
for _, zip_code in df.zip5.iteritems():
zip_code_counter[zip_code] += 1
most_common_zip_code, count = zip_code_counter.most_common(1)[0]
print 'most common zip_code', most_common_zip_code, 'occurs', count
# assert: the most common zip code is in each fold
fold_number = -1
folds_for_zip_code = collections.defaultdict(set)
kf = cross_validation.KFold(n=(len(df)),
n_folds=control.n_folds,
shuffle=True,
random_state=control.random_seed)
for train_indices, test_indices in kf:
fold_number += 1
train = df.iloc[train_indices].copy(deep=True)
test = df.iloc[test_indices].copy(deep=True)
if most_common_zip_code not in test.zip5.values:
print most_common_zip_code, 'not in', fold_number
for zip_code in unique_zip_codes(test):
assert(zip_code in test.zip5.values)
if zip_code not in train.zip5.values:
print 'fold %d zip_code %d in test and not train' % (
fold_number,
zip_code)
folds_for_zip_code[zip_code].add(fold_number)
assert(len(folds_for_zip_code[most_common_zip_code]) == 10)
# report zip codes that do not appear in every test fold
count_in_10 = 0
count_not_in_10 = 0
for zip_code, set_folds in folds_for_zip_code.iteritems():
if len(set_folds) != 10:
print 'zip_code %d in only %d folds' % (zip_code, len(set_folds))
count_not_in_10 += 1
else:
count_in_10 += 1
print 'all other zip codes are in 10 folds'
print 'in 10: %d not in 10: %d' % (count_in_10, count_not_in_10)
print 'NOTE: all this analysis is before training samples are selected'
return most_common_zip_code
def read_training_data(control):
'return dataframe'
class AccumulateMedianErrors():
def __init__(self):
self.dfa = DataframeAppender([('fold_number', np.int64),
('training_days', np.int64),
('model_id', object), # string: model + hyperparameters
('scope', object), # 'global' or zip code
('median_abs_error', np.float64),
('median_rel_error', np.float64),
('n_samples', np.float64)])
def accumulate(self, key, result):
verbose = False
fold_number, sale_date, training_days, model_name, scope = key
if model_name == 'rf':
self._accumulate_rf(fold_number, training_days, scope, result)
elif model_name == 'ols':
self._accumulate_ols(fold_number, training_days, scope, result)
else:
raise RuntimeError('bad model_name: ' + str(model_name))
if verbose:
print self.dfa.df
def _accumulate_ols(self, fold_number, training_days, scope, result):
for k, v in result.iteritems():
model_id = 'ols ' + str(k[1])[:3] + ' ' + str(k[3])[:3]  # label OLS variants as 'ols', not 'rf'
self._append(fold_number, training_days, model_id, scope, v)
def _accumulate_rf(self, fold_number, training_days, scope, result):
for k, v in result.iteritems():
model_id = 'rf ' + str(k[1])
self._append(fold_number, training_days, model_id, scope, v)
def _append(self, fold_number, training_days, model_id, scope, model_result):
median_abs_error, median_rel_error = errors(model_result)
self.dfa.append([fold_number,
training_days,
model_id,
scope if scope == 'global' else str(scope[1]),
median_abs_error,
median_rel_error,
len(model_result['actuals'])])
def dataframe(self):
return self.dfa.result()
def squeeze(result, verbose=False):
'replace float64 with float32'
def is_np_array_float64(x):
return isinstance(x, np.ndarray) and x.dtype == np.float64
def is_np_scalar_float64(x):
return isinstance(x, np.float64)
if verbose:
pprint(result)
assert(isinstance(result, dict))
new_result = {}
for k, v in result.iteritems():
if isinstance(k, str):
# rf result
if is_np_array_float64(v):
# e.g., actual, estimate, other info in a vector
new_result[k] = np.array(v, dtype=np.float32)
else:
print k, v
raise RuntimeError('unexpected')
elif isinstance(k, tuple):
# ols result
new_ols_result = {}
for ols_key, ols_value in v.iteritems():
if is_np_array_float64(ols_value):
new_ols_result[ols_key] = np.array(ols_value, dtype=np.float32)
elif is_np_scalar_float64(ols_value):
new_ols_result[ols_key] = np.float32(ols_value)
else:
print ols_key, ols_value
raise RuntimeError('unexpected')
new_result[k] = new_ols_result
else:
# unexpected
print k, v
raise RuntimeError('unexpected')
if verbose:
pprint(new_result)
return new_result
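# Sketch of the effect of squeeze (added; keys and shapes are illustrative only):
#   squeeze({'estimates': np.array([1.0, 2.0])})['estimates'].dtype   # -> np.float32
# OLS-style results keyed by tuples are squeezed one level deeper, array by array.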
def fit_and_test_models(df_all, control):
'''Return all_results dict and median_errors dataframe
'''
verbose = True
# determine samples that are in the test period ( = 1 week around the sale_date)
first_sale_date = control.sale_date - datetime.timedelta(3)
last_sale_date = control.sale_date + datetime.timedelta(3)
in_sale_period = is_between(df=df_all,
first_date=first_sale_date,
last_date=last_sale_date)
num_sale_samples = sum(in_sale_period)
print 'num sale samples', num_sale_samples
assert num_sale_samples >= control.n_folds, 'unable to form folds'
all_results = {}
median_errors = AccumulateMedianErrors()
fold_number = -1
skf = cross_validation.StratifiedKFold(in_sale_period, control.n_folds)
for train_indices, test_indices in skf:
fold_number += 1
# don't create views (just to be careful)
df_train = df_all.iloc[train_indices].copy(deep=True)
df_test = df_all.iloc[test_indices].copy(deep=True)
for training_days in control.training_days:
assert training_days > 0
# determine training samples for the models
df_train_model = \
add_age(df_train[is_between(df=df_train,
first_date=first_sale_date - datetime.timedelta(training_days),
last_date=first_sale_date - datetime.timedelta(1))],
first_sale_date)
if len(df_train_model) == 0:
print 'no training data fold %d training_days %d' % (
fold_number, training_days)
sys.exit(1)
# determine testing samples for the models
df_test_model = \
add_age(df_test[is_between(df=df_test,
first_date=first_sale_date,
last_date=last_sale_date)],
first_sale_date)
if len(df_test_model) == 0:
print 'no testing data fold %d sale_date %s training_days %d' % (
fold_number, control.sale_date, training_days)
continue
print 'model samples sizes: training_days %d train %d test %d' % (
training_days, len(df_train_model), len(df_test_model))
# fit and test each model
for model_name, model in control.models.iteritems():
print '%d %s %d %s elapsed %s' % (
fold_number, control.sale_date, training_days, model_name,
elapsed_time(control.start_time))
def make_key(scope):
return (fold_number, control.sale_date, training_days, model_name, scope)
# determine global results (for all areas)
if len(df_test_model) == 0 or len(df_train_model) == 0:
print 'skipping global zero length: #test %d #train %d' % (
len(df_test_model), len(df_train_model))
else:
global_result = model.run(train=df_train_model,
test=df_test_model,
control=control)
global_key = make_key(scope='global')
all_results[global_key] = squeeze(global_result)
report = model.reporter()() # instantiate report class
if verbose:
for line in report.global_fold_lines(global_key, global_result):
print line
median_errors.accumulate(global_key, global_result)
# determine results for each zip code in test data
for zip_code in unique_zip_codes(df_test_model):
df_train_model_zip = zip_codes(df_train_model, zip_code)
df_test_model_zip = zip_codes(df_test_model, zip_code)
if len(df_train_model_zip) == 0 or len(df_test_model_zip) == 0:
print 'skipping zip zero length: zip %d #test %d #train %d' % (
zip_code, len(df_test_model_zip), len(df_train_model_zip))
else:
zip_code_result = model.run(train=df_train_model_zip,
test=df_test_model_zip,
control=control)
zip_code_key = make_key(scope=('zip', zip_code))
all_results[zip_code_key] = squeeze(zip_code_result)
if verbose:
for line in report.zip_fold_lines(zip_code_key, zip_code_result):
print line
median_errors.accumulate(zip_code_key, zip_code_result)
print 'num sale samples across all folds:', num_sale_samples
return all_results, median_errors.dataframe()
def print_results(all_results, control):
for training_days in control.training_days:
for model_name, model in control.models.iteritems():
report = model.reporter()() # how to print is in the model result
report.summarize(control.sale_date,
training_days,
model_name,
all_results,
control)
def main(argv):
warnings.filterwarnings('error') # convert warnings to errors
control = make_control(argv)
sys.stdout = Logger(logfile_path=control.path_log)  # echo print output to the log file as well
print control
# read input
f = open(control.path_in, 'rb')
df_loaded = pickle.load(f)
f.close()
df_loaded_copy = df_loaded.copy(deep=True) # used for debugging
if False:
most_popular_zip_code = determine_most_popular_zip_code(df_loaded.copy(), control)
print most_popular_zip_code
all_results, median_errors = fit_and_test_models(df_loaded, control)
assert(df_loaded.equals(df_loaded_copy))
if False:
# this code doesn't know about the variants for the Rf model
# in addition, we don't need these results, because downstream programs have
# been written to summarize the results
print_results(all_results, control)
# write result
print 'writing results to', control.path_out_dict
result = {'control': control, # control.predictors orders the x values
'all_results': all_results}
f = open(control.path_out_dict, 'wb')
pickle.dump(result, f)
f.close()
print 'writing results to', control.path_out_df
f = open(control.path_out_df, 'wb')
pickle.dump(median_errors, f)
f.close()
print 'ok'
if __name__ == "__main__":
if False:
# quiet pyflakes warnings
pdb.set_trace()
pprint(None)
np.all()
pd.Series()
main(sys.argv)
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging
import json
import os
import re
import tarfile
import tempfile
import warnings
import yaml
import zipfile
import datetime
from typing import Mapping, Callable, Optional
import kfp
import kfp_server_api
from kfp.compiler import compiler
from kfp.compiler._k8s_helper import sanitize_k8s_name
from kfp._auth import get_auth_token, get_gcp_access_token
# TTL of the access token associated with the client. This is needed because
# `gcloud auth print-access-token` generates a token with TTL=1 hour, after
# which the authentication expires. This TTL is needed for kfp.Client()
# initialized with host=<inverse proxy endpoint>.
# Set to 55 mins to provide some safe margin.
_GCP_ACCESS_TOKEN_TIMEOUT = datetime.timedelta(minutes=55)
# Operators on scalar values. Only applies to one of |int_value|,
# |long_value|, |string_value| or |timestamp_value|.
_FILTER_OPERATIONS = {"UNKNOWN": 0,
"EQUALS" : 1,
"NOT_EQUALS" : 2,
"GREATER_THAN": 3,
"GREATER_THAN_EQUALS": 5,
"LESS_THAN": 6,
"LESS_THAN_EQUALS": 7}
def _add_generated_apis(target_struct, api_module, api_client):
'''Initializes a hierarchical API object based on the generated API module.
PipelineServiceApi.create_pipeline becomes target_struct.pipelines.create_pipeline
'''
Struct = type('Struct', (), {})
def camel_case_to_snake_case(name):
import re
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
for api_name in dir(api_module):
if not api_name.endswith('ServiceApi'):
continue
short_api_name = camel_case_to_snake_case(api_name[0:-len('ServiceApi')]) + 's'
api_struct = Struct()
setattr(target_struct, short_api_name, api_struct)
service_api = getattr(api_module.api, api_name)
initialized_service_api = service_api(api_client)
for member_name in dir(initialized_service_api):
if member_name.startswith('_') or member_name.endswith('_with_http_info'):
continue
bound_member = getattr(initialized_service_api, member_name)
setattr(api_struct, member_name, bound_member)
models_struct = Struct()
for member_name in dir(api_module.models):
if not member_name[0].islower():
setattr(models_struct, member_name, getattr(api_module.models, member_name))
target_struct.api_models = models_struct
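# Illustrative mapping (added comment): for an api_module exposing PipelineServiceApi and
# RunServiceApi, _add_generated_apis strips the 'ServiceApi' suffix, snake-cases the rest
# and appends 's', so the bound methods become, e.g.:
#   target_struct.pipelines.create_pipeline(...)   # from PipelineServiceApi
#   target_struct.runs.list_runs(...)              # from RunServiceApi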
KF_PIPELINES_ENDPOINT_ENV = 'KF_PIPELINES_ENDPOINT'
KF_PIPELINES_UI_ENDPOINT_ENV = 'KF_PIPELINES_UI_ENDPOINT'
KF_PIPELINES_DEFAULT_EXPERIMENT_NAME = 'KF_PIPELINES_DEFAULT_EXPERIMENT_NAME'
KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME = 'KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME'
class Client(object):
""" API Client for KubeFlow Pipeline.
"""
# in-cluster DNS name of the pipeline service
IN_CLUSTER_DNS_NAME = 'ml-pipeline.{}.svc.cluster.local:8888'
KUBE_PROXY_PATH = 'api/v1/namespaces/{}/services/ml-pipeline:http/proxy/'
LOCAL_KFP_CONTEXT = os.path.expanduser('~/.config/kfp/context.json')
# TODO: Wrap the configurations for different authentication methods.
def __init__(self, host=None, client_id=None, namespace='kubeflow', other_client_id=None, other_client_secret=None, existing_token=None, cookies=None):
"""Create a new instance of kfp client.
Args:
host: the host name to use to talk to Kubeflow Pipelines. If not set, the in-cluster
service DNS name will be used, which only works if the current environment is a pod
in the same cluster (such as a Jupyter instance spawned by Kubeflow's
JupyterHub). If you have a different connection to cluster, such as a kubectl
proxy connection, then set it to something like "127.0.0.1:8080/pipeline".
If you connect to an IAP enabled cluster, set it to
"https://<your-deployment>.endpoints.<your-project>.cloud.goog/pipeline".
client_id: The client ID used by Identity-Aware Proxy.
namespace: the namespace where the kubeflow pipeline system is run.
other_client_id: The client ID used to obtain the auth codes and refresh tokens.
Reference: https://cloud.google.com/iap/docs/authentication-howto#authenticating_from_a_desktop_app.
other_client_secret: The client secret used to obtain the auth codes and refresh tokens.
existing_token: pass in a token directly; useful when it is better to obtain the token outside of the SDK, e.g. from GCP Cloud Functions,
or when the caller already has a token.
cookies: CookieJar object containing cookies that will be passed to the pipelines API.
"""
host = host or os.environ.get(KF_PIPELINES_ENDPOINT_ENV)
self._uihost = os.environ.get(KF_PIPELINES_UI_ENDPOINT_ENV, host)
config = self._load_config(host, client_id, namespace, other_client_id, other_client_secret, existing_token)
# Save the loaded API client configuration, as a reference if update is
# needed.
self._existing_config = config
api_client = kfp_server_api.api_client.ApiClient(config, cookie=cookies)
_add_generated_apis(self, kfp_server_api, api_client)
self._job_api = kfp_server_api.api.job_service_api.JobServiceApi(api_client)
self._run_api = kfp_server_api.api.run_service_api.RunServiceApi(api_client)
self._experiment_api = kfp_server_api.api.experiment_service_api.ExperimentServiceApi(api_client)
self._pipelines_api = kfp_server_api.api.pipeline_service_api.PipelineServiceApi(api_client)
self._upload_api = kfp_server_api.api.PipelineUploadServiceApi(api_client)
self._load_context_setting_or_default()
def _load_config(self, host, client_id, namespace, other_client_id, other_client_secret, existing_token):
config = kfp_server_api.configuration.Configuration()
host = host or ''
# Preprocess the host endpoint to prevent some common user mistakes.
# This should only be done for non-IAP cases (when client_id is None). IAP requires preserving the protocol.
if not client_id:
host = re.sub(r'^(http|https)://', '', host).rstrip('/')
if host:
config.host = host
token = None
# "existing_token" is designed to accept token generated outside of SDK. Here is an example.
#
# https://cloud.google.com/functions/docs/securing/function-identity
# https://cloud.google.com/endpoints/docs/grpc/service-account-authentication
#
# import requests
# import kfp
#
# def get_access_token():
# url = 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'
# r = requests.get(url, headers={'Metadata-Flavor': 'Google'})
# r.raise_for_status()
# access_token = r.json()['access_token']
# return access_token
#
# client = kfp.Client(host='<KFPHost>', existing_token=get_access_token())
#
if existing_token:
token = existing_token
self._is_refresh_token = False
elif client_id:
token = get_auth_token(client_id, other_client_id, other_client_secret)
self._is_refresh_token = True
elif self._is_inverse_proxy_host(host):
token = get_gcp_access_token()
self._is_refresh_token = False
if token:
config.api_key['authorization'] = token
config.api_key_prefix['authorization'] = 'Bearer'
return config
if host:
# if host is explicitly set with auth token, it's probably a port forward address.
return config
import kubernetes as k8s
in_cluster = True
try:
k8s.config.load_incluster_config()
except:
in_cluster = False
pass
if in_cluster:
config.host = Client.IN_CLUSTER_DNS_NAME.format(namespace)
return config
try:
k8s.config.load_kube_config(client_configuration=config)
except:
print('Failed to load kube config.')
return config
if config.host:
config.host = config.host + '/' + Client.KUBE_PROXY_PATH.format(namespace)
return config
def _is_inverse_proxy_host(self, host):
if host:
return re.match(r'\S+.googleusercontent.com/{0,1}$', host)
if re.match(r'\w+', host):
warnings.warn(
'The received host is %s, please include the full endpoint address '
'(with ".(pipelines/notebooks).googleusercontent.com")' % host)
return False
def _is_ipython(self):
"""Returns whether we are running in notebook."""
try:
import IPython
ipy = IPython.get_ipython()
if ipy is None:
return False
except ImportError:
return False
return True
def _get_url_prefix(self):
if self._uihost:
# User's own connection.
if self._uihost.startswith('http://') or self._uihost.startswith('https://'):
return self._uihost
else:
return 'http://' + self._uihost
# In-cluster pod. We could use relative URL.
return '/pipeline'
def _load_context_setting_or_default(self):
if os.path.exists(Client.LOCAL_KFP_CONTEXT):
with open(Client.LOCAL_KFP_CONTEXT, 'r') as f:
self._context_setting = json.load(f)
else:
self._context_setting = {
'namespace': '',
}
def _refresh_api_client_token(self):
"""Refreshes the existing token associated with the kfp_api_client."""
if getattr(self, '_is_refresh_token', None):
return
new_token = get_gcp_access_token()
self._existing_config.api_key['authorization'] = new_token
def set_user_namespace(self, namespace):
"""Set user namespace into local context setting file.
This function should only be used when Kubeflow Pipelines is in the multi-user mode.
Args:
namespace: kubernetes namespace the user has access to.
"""
self._context_setting['namespace'] = namespace
with open(Client.LOCAL_KFP_CONTEXT, 'w') as f:
json.dump(self._context_setting, f)
def get_user_namespace(self):
"""Get user namespace in context config.
Returns:
namespace: kubernetes namespace from the local context file or empty if it wasn't set.
"""
return self._context_setting['namespace']
def create_experiment(self, name, description=None, namespace=None):
"""Create a new experiment.
Args:
name: the name of the experiment.
description: description of the experiment.
namespace: kubernetes namespace where the experiment should be created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
An Experiment object. Most important field is id.
"""
namespace = namespace or self.get_user_namespace()
experiment = None
try:
experiment = self.get_experiment(experiment_name=name, namespace=namespace)
except:
# Ignore error if the experiment does not exist.
pass
if not experiment:
logging.info('Creating experiment {}.'.format(name))
resource_references = []
if namespace:
key = kfp_server_api.models.ApiResourceKey(id=namespace, type=kfp_server_api.models.ApiResourceType.NAMESPACE)
reference = kfp_server_api.models.ApiResourceReference(key=key, relationship=kfp_server_api.models.ApiRelationship.OWNER)
resource_references.append(reference)
experiment = kfp_server_api.models.ApiExperiment(
name=name,
description=description,
resource_references=resource_references)
experiment = self._experiment_api.create_experiment(body=experiment)
if self._is_ipython():
import IPython
html = \
('Experiment link <a href="%s/#/experiments/details/%s" target="_blank" >here</a>'
% (self._get_url_prefix(), experiment.id))
IPython.display.display(IPython.display.HTML(html))
return experiment
def get_pipeline_id(self, name):
"""Returns the pipeline id if a pipeline with the name exsists.
Args:
name: pipeline name
Returns:
The pipeline id if exactly one pipeline with the given name exists; otherwise None.
"""
pipeline_filter = json.dumps({
"predicates": [
{
"op": _FILTER_OPERATIONS["EQUALS"],
"key": "name",
"stringValue": name,
}
]
})
result = self._pipelines_api.list_pipelines(filter=pipeline_filter)
if len(result.pipelines)==1:
return result.pipelines[0].id
elif len(result.pipelines)>1:
raise ValueError("Multiple pipelines with the name: {} found, the name needs to be unique".format(name))
return None
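# Hypothetical usage (added; the pipeline name is a placeholder):
#   pipeline_id = client.get_pipeline_id('my-training-pipeline')
#   if pipeline_id is None:
#       pass  # the pipeline has not been uploaded yet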
def list_experiments(self, page_token='', page_size=10, sort_by='', namespace=None):
"""List experiments.
Args:
page_token: token for starting of the page.
page_size: size of the page.
sort_by: can be '[field_name]' or '[field_name] desc'. For example, 'name desc'.
namespace: kubernetes namespace where the experiment was created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
A response object including a list of experiments and next page token.
"""
namespace = namespace or self.get_user_namespace()
response = self._experiment_api.list_experiment(
page_token=page_token,
page_size=page_size,
sort_by=sort_by,
resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.NAMESPACE,
resource_reference_key_id=namespace)
return response
def get_experiment(self, experiment_id=None, experiment_name=None, namespace=None):
"""Get details of an experiment
Either experiment_id or experiment_name is required
Args:
experiment_id: id of the experiment. (Optional)
experiment_name: name of the experiment. (Optional)
namespace: kubernetes namespace where the experiment was created.
For single user deployment, leave it as None;
For multi user, input the namespace where the user is authorized.
Returns:
A response object including details of an experiment.
Throws:
Exception if the experiment is not found or neither of the arguments is provided
"""
namespace = namespace or self.get_user_namespace()
if experiment_id is None and experiment_name is None:
raise ValueError('Either experiment_id or experiment_name is required')
if experiment_id is not None:
return self._experiment_api.get_experiment(id=experiment_id)
next_page_token = ''
while next_page_token is not None:
list_experiments_response = self.list_experiments(page_size=100, page_token=next_page_token, namespace=namespace)
next_page_token = list_experiments_response.next_page_token
for experiment in list_experiments_response.experiments or []:
if experiment.name == experiment_name:
return self._experiment_api.get_experiment(id=experiment.id)
raise ValueError('No experiment is found with name {}.'.format(experiment_name))
def _extract_pipeline_yaml(self, package_file):
def _choose_pipeline_yaml_file(file_list) -> str:
yaml_files = [file for file in file_list if file.endswith('.yaml')]
if len(yaml_files) == 0:
raise ValueError('Invalid package. Missing pipeline yaml file in the package.')
if 'pipeline.yaml' in yaml_files:
return 'pipeline.yaml'
else:
if len(yaml_files) == 1:
return yaml_files[0]
raise ValueError('Invalid package. There is no pipeline.yaml file and there are multiple yaml files.')
if package_file.endswith('.tar.gz') or package_file.endswith('.tgz'):
with tarfile.open(package_file, "r:gz") as tar:
file_names = [member.name for member in tar if member.isfile()]
pipeline_yaml_file = _choose_pipeline_yaml_file(file_names)
with tar.extractfile(tar.getmember(pipeline_yaml_file)) as f:
return yaml.safe_load(f)
elif package_file.endswith('.zip'):
with zipfile.ZipFile(package_file, 'r') as zip:
pipeline_yaml_file = _choose_pipeline_yaml_file(zip.namelist())
with zip.open(pipeline_yaml_file) as f:
return yaml.safe_load(f)
elif package_file.endswith('.yaml') or package_file.endswith('.yml'):
with open(package_file, 'r') as f:
return yaml.safe_load(f)
else:
raise ValueError('The package_file '+ package_file + ' should end with one of the following formats: [.tar.gz, .tgz, .zip, .yaml, .yml]')
def list_pipelines(self, page_token='', page_size=10, sort_by=''):
"""List pipelines.
Args:
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of pipelines and next page token.
"""
return self._pipelines_api.list_pipelines(page_token=page_token, page_size=page_size, sort_by=sort_by)
def list_pipeline_versions(self, pipeline_id: str, page_token='', page_size=10, sort_by=''):
"""List all versions of a given pipeline.
Args:
pipeline_id: the string ID of a pipeline.
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of pipelines and next page token.
"""
return self._pipelines_api.list_pipeline_versions(
resource_key_type="PIPELINE",
resource_key_id=pipeline_id,
page_token=page_token,
page_size=page_size,
sort_by=sort_by
)
# TODO: provide default namespace, similar to kubectl default namespaces.
def run_pipeline(self, experiment_id, job_name, pipeline_package_path=None, params={}, pipeline_id=None, version_id=None):
"""Run a specified pipeline.
Args:
experiment_id: The string id of an experiment.
job_name: name of the job.
pipeline_package_path: local path of the pipeline package (the filename should end with one of the following: .tar.gz, .tgz, .zip, .yaml, .yml).
params: a dictionary with key (string) as param name and value (string) as param value.
pipeline_id: the string ID of a pipeline.
version_id: the string ID of a pipeline version.
If both pipeline_id and version_id are specified, version_id will take precedence.
If only pipeline_id is specified, the default version of this pipeline is used to create the run.
Returns:
A run object. Most important field is id.
"""
job_config = self._create_job_config(
experiment_id=experiment_id,
params=params,
pipeline_package_path=pipeline_package_path,
pipeline_id=pipeline_id,
version_id=version_id)
run_body = kfp_server_api.models.ApiRun(
pipeline_spec=job_config.spec, resource_references=job_config.resource_references, name=job_name)
response = self._run_api.create_run(body=run_body)
if self._is_ipython():
import IPython
html = ('Run link <a href="%s/#/runs/details/%s" target="_blank" >here</a>'
% (self._get_url_prefix(), response.run.id))
IPython.display.display(IPython.display.HTML(html))
return response.run
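# Hypothetical usage (added; ids are placeholders, not real values):
#   run = client.run_pipeline(experiment_id=experiment.id,
#                             job_name='demo run',
#                             pipeline_id='<pipeline-id>')
#   print(run.id)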
def create_recurring_run(self, experiment_id, job_name, description=None, start_time=None, end_time=None, interval_second=None, cron_expression=None, max_concurrency=1, no_catchup=None, params={}, pipeline_package_path=None, pipeline_id=None, version_id=None, enabled=True):
"""Create a recurring run.
Args:
experiment_id: The string id of an experiment.
job_name: name of the job.
description: An optional job description.
start_time: The RFC3339 time string of the time when to start the job.
end_time: The RFC3339 time string of the time when to end the job.
interval_second: Integer indicating the seconds between two recurring runs in for a periodic schedule.
cron_expression: A cron expression representing a set of times, using 6 space-separated fields, e.g. "0 0 9 ? * 2-6".
max_concurrency: Integer indicating how many jobs can be run in parallel.
no_catchup: Whether the recurring run should catch up if behind schedule.
For example, if the recurring run is paused for a while and re-enabled
afterwards. If no_catchup=False, the scheduler will catch up on (backfill) each
missed interval. Otherwise, it only schedules the latest interval if more than one interval
is ready to be scheduled.
Usually, if your pipeline handles backfill internally, you should turn catchup
off to avoid duplicate backfill. (default: {False})
pipeline_package_path: Local path of the pipeline package (the filename should end with one of the following: .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The string ID of a pipeline.
version_id: The string ID of a pipeline version.
If both pipeline_id and version_id are specified, pipeline_id will take precedence.
This will change in a future version, so it is recommended to use version_id by itself.
enabled: A bool indicating whether the recurring run is enabled or disabled.
Returns:
A Job object. Most important field is id.
"""
job_config = self._create_job_config(
experiment_id=experiment_id,
params=params,
pipeline_package_path=pipeline_package_path,
pipeline_id=pipeline_id,
version_id=version_id)
if all([interval_second, cron_expression]) or not any([interval_second, cron_expression]):
raise ValueError('Either interval_second or cron_expression is required')
if interval_second is not None:
trigger = kfp_server_api.models.ApiTrigger(
periodic_schedule=kfp_server_api.models.ApiPeriodicSchedule(
start_time=start_time, end_time=end_time, interval_second=interval_second)
)
if cron_expression is not None:
trigger = kfp_server_api.models.ApiTrigger(
cron_schedule=kfp_server_api.models.ApiCronSchedule(
start_time=start_time, end_time=end_time, cron=cron_expression)
)
job_body = kfp_server_api.models.ApiJob(
enabled=enabled,
pipeline_spec=job_config.spec,
resource_references=job_config.resource_references,
name=job_name,
description=description,
no_catchup=no_catchup,
trigger=trigger,
max_concurrency=max_concurrency)
return self._job_api.create_job(body=job_body)
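# Hypothetical usage (added): exactly one of interval_second or cron_expression must be
# given, e.g. an hourly schedule (ids are placeholders):
#   job = client.create_recurring_run(experiment_id=experiment.id,
#                                     job_name='hourly job',
#                                     interval_second=3600,
#                                     pipeline_id='<pipeline-id>')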
def _create_job_config(self, experiment_id, params, pipeline_package_path, pipeline_id, version_id):
"""Create a JobConfig with spec and resource_references.
Args:
experiment_id: The string id of an experiment.
pipeline_package_path: Local path of the pipeline package (the filename should end with one of the following: .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The string ID of a pipeline.
version_id: The string ID of a pipeline version.
If both pipeline_id and version_id are specified, pipeline_id will take precedence.
This will change in a future version, so it is recommended to use version_id by itself.
Returns:
A JobConfig object with attributes spec and resource_reference.
"""
class JobConfig:
def __init__(self, spec, resource_references):
self.spec = spec
self.resource_references = resource_references
pipeline_json_string = None
if pipeline_package_path:
pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)
pipeline_json_string = json.dumps(pipeline_obj)
api_params = [kfp_server_api.ApiParameter(
name=sanitize_k8s_name(name=k, allow_capital_underscore=True),
value=str(v)) for k,v in params.items()]
resource_references = []
key = kfp_server_api.models.ApiResourceKey(id=experiment_id,
type=kfp_server_api.models.ApiResourceType.EXPERIMENT)
reference = kfp_server_api.models.ApiResourceReference(key=key,
relationship=kfp_server_api.models.ApiRelationship.OWNER)
resource_references.append(reference)
if version_id:
key = kfp_server_api.models.ApiResourceKey(id=version_id,
type=kfp_server_api.models.ApiResourceType.PIPELINE_VERSION)
reference = kfp_server_api.models.ApiResourceReference(key=key,
relationship=kfp_server_api.models.ApiRelationship.CREATOR)
resource_references.append(reference)
spec = kfp_server_api.models.ApiPipelineSpec(
pipeline_id=pipeline_id,
workflow_manifest=pipeline_json_string,
parameters=api_params)
return JobConfig(spec=spec, resource_references=resource_references)
def create_run_from_pipeline_func(self, pipeline_func: Callable, arguments: Mapping[str, str], run_name=None, experiment_name=None, pipeline_conf: kfp.dsl.PipelineConf = None, namespace=None):
'''Runs pipeline on KFP-enabled Kubernetes cluster.
This command compiles the pipeline function, creates or gets an experiment and submits the pipeline for execution.
Args:
pipeline_func: A function that describes a pipeline by calling components and composing them into execution graph.
arguments: Arguments to the pipeline function provided as a dict.
run_name: Optional. Name of the run to be shown in the UI.
experiment_name: Optional. Name of the experiment to add the run to.
namespace: kubernetes namespace where the pipeline runs are created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized
'''
#TODO: Check arguments against the pipeline function
pipeline_name = pipeline_func.__name__
run_name = run_name or pipeline_name + ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
with tempfile.TemporaryDirectory() as tmpdir:
pipeline_package_path = os.path.join(tmpdir, 'pipeline.yaml')
compiler.Compiler().compile(pipeline_func, pipeline_package_path, pipeline_conf=pipeline_conf)
return self.create_run_from_pipeline_package(pipeline_package_path, arguments, run_name, experiment_name, namespace)
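# Hypothetical usage (added; assumes a pipeline function decorated with kfp.dsl.pipeline
# named my_pipeline is defined elsewhere, and the argument name is illustrative):
#   result = client.create_run_from_pipeline_func(my_pipeline,
#                                                 arguments={'param_1': 'value'})
#   result.wait_for_run_completion(timeout=1800)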
def create_run_from_pipeline_package(self, pipeline_file: str, arguments: Mapping[str, str], run_name=None, experiment_name=None, namespace=None):
'''Runs pipeline on KFP-enabled Kubernetes cluster.
This command compiles the pipeline function, creates or gets an experiment and submits the pipeline for execution.
Args:
pipeline_file: A compiled pipeline package file.
arguments: Arguments to the pipeline function provided as a dict.
run_name: Optional. Name of the run to be shown in the UI.
experiment_name: Optional. Name of the experiment to add the run to.
namespace: kubernetes namespace where the pipeline runs are created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized
'''
class RunPipelineResult:
def __init__(self, client, run_info):
self._client = client
self.run_info = run_info
self.run_id = run_info.id
def wait_for_run_completion(self, timeout=None):
timeout = timeout or datetime.timedelta.max
return self._client.wait_for_run_completion(self.run_id, timeout)
def __repr__(self):
return 'RunPipelineResult(run_id={})'.format(self.run_id)
#TODO: Check arguments against the pipeline function
pipeline_name = os.path.basename(pipeline_file)
experiment_name = experiment_name or os.environ.get(KF_PIPELINES_DEFAULT_EXPERIMENT_NAME, None)
overridden_experiment_name = os.environ.get(KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME, experiment_name)
if overridden_experiment_name != experiment_name:
import warnings
warnings.warn('Changing experiment name from "{}" to "{}".'.format(experiment_name, overridden_experiment_name))
experiment_name = overridden_experiment_name or 'Default'
run_name = run_name or (pipeline_name + ' ' +
datetime.datetime.now().strftime(
'%Y-%m-%d %H-%M-%S'))
experiment = self.create_experiment(name=experiment_name, namespace=namespace)
run_info = self.run_pipeline(experiment.id, run_name, pipeline_file, arguments)
return RunPipelineResult(self, run_info)
def list_runs(self, page_token='', page_size=10, sort_by='', experiment_id=None, namespace=None):
"""List runs.
Args:
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
experiment_id: experiment id to filter upon
namespace: kubernetes namespace to filter upon.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
A response object including a list of experiments and next page token.
"""
namespace = namespace or self.get_user_namespace()
if experiment_id is not None:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.EXPERIMENT, resource_reference_key_id=experiment_id)
elif namespace:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.NAMESPACE, resource_reference_key_id=namespace)
else:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by)
return response
def list_recurring_runs(self, page_token='', page_size=10, sort_by='', experiment_id=None):
"""List recurring runs.
Args:
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
experiment_id: experiment id to filter upon
Returns:
A response object including a list of recurring_runs and next page token.
"""
if experiment_id is not None:
response = self._job_api.list_jobs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.EXPERIMENT, resource_reference_key_id=experiment_id)
else:
response = self._job_api.list_jobs(page_token=page_token, page_size=page_size, sort_by=sort_by)
return response
def get_recurring_run(self, job_id):
"""Get recurring_run details.
Args:
job_id: id of the recurring_run.
Returns:
A response object including details of a recurring_run.
Throws:
Exception if recurring_run is not found.
"""
return self._job_api.get_job(id=job_id)
def get_run(self, run_id):
"""Get run details.
Args:
run_id: id of the run.
Returns:
A response object including details of a run.
Throws:
Exception if run is not found.
"""
return self._run_api.get_run(run_id=run_id)
def wait_for_run_completion(self, run_id, timeout):
"""Waits for a run to complete.
Args:
run_id: run id, returned from run_pipeline.
timeout: timeout in seconds.
Returns:
A run detail object: Most important fields are run and pipeline_runtime.
Raises:
TimeoutError: if the pipeline run failed to finish before the specified
timeout.
"""
status = 'Running:'
start_time = datetime.datetime.now()
last_token_refresh_time = datetime.datetime.now()
while (status is None or
status.lower() not in ['succeeded', 'failed', 'skipped', 'error']):
# Refreshes the access token before it hits the TTL.
if (datetime.datetime.now() - last_token_refresh_time
> _GCP_ACCESS_TOKEN_TIMEOUT):
self._refresh_api_client_token()
last_token_refresh_time = datetime.datetime.now()
get_run_response = self._run_api.get_run(run_id=run_id)
status = get_run_response.run.status
elapsed_time = (datetime.datetime.now() - start_time).seconds
logging.info('Waiting for the job to complete...')
if elapsed_time > timeout:
raise TimeoutError('Run timeout')
time.sleep(5)
return get_run_response
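# Hypothetical usage (added; the run id comes from a previous run_pipeline call):
#   run_detail = client.wait_for_run_completion(run.id, timeout=3600)
#   print(run_detail.run.status)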
def _get_workflow_json(self, run_id):
"""Get the workflow json.
Args:
run_id: run id, returned from run_pipeline.
Returns:
workflow: json workflow
"""
get_run_response = self._run_api.get_run(run_id=run_id)
workflow = get_run_response.pipeline_runtime.workflow_manifest
workflow_json = json.loads(workflow)
return workflow_json
def upload_pipeline(
self,
pipeline_package_path: str = None,
pipeline_name: str = None,
description: str = None,
):
"""Uploads the pipeline to the Kubeflow Pipelines cluster.
Args:
pipeline_package_path: Local path to the pipeline package.
pipeline_name: Optional. Name of the pipeline to be shown in the UI.
description: Optional. Description of the pipeline to be shown in the UI.
Returns:
Server response object containing pipeline id and other information.
"""
response = self._upload_api.upload_pipeline(pipeline_package_path, name=pipeline_name, description=description)
if self._is_ipython():
import IPython
html = 'Pipeline link <a href=%s/#/pipelines/details/%s>here</a>' % (self._get_url_prefix(), response.id)
IPython.display.display(IPython.display.HTML(html))
return response
def upload_pipeline_version(
self,
pipeline_package_path,
pipeline_version_name: str,
pipeline_id: Optional[str] = None,
pipeline_name: Optional[str] = None
):
"""Uploads a new version of the pipeline to the Kubeflow Pipelines cluster.
Args:
pipeline_package_path: Local path to the pipeline package.
pipeline_version_name: Name of the pipeline version to be shown in the UI.
pipeline_id: Optional. Id of the pipeline.
pipeline_name: Optional. Name of the pipeline.
Returns:
Server response object containing pipeline id and other information.
Throws:
ValueError when none or both of pipeline_id or pipeline_name are specified
Exception if pipeline id is not found.
"""
if all([pipeline_id, pipeline_name]) or not any([pipeline_id, pipeline_name]):
raise ValueError('Either pipeline_id or pipeline_name is required')
if pipeline_name:
pipeline_id = self.get_pipeline_id(pipeline_name)
response = self._upload_api.upload_pipeline_version(
pipeline_package_path,
name=pipeline_version_name,
pipelineid=pipeline_id
)
if self._is_ipython():
import IPython
html = 'Pipeline link <a href=%s/#/pipelines/details/%s>here</a>' % (self._get_url_prefix(), response.id)
IPython.display.display(IPython.display.HTML(html))
return response
def get_pipeline(self, pipeline_id):
"""Get pipeline details.
Args:
pipeline_id: id of the pipeline.
Returns:
A response object including details of a pipeline.
Throws:
Exception if pipeline is not found.
"""
return self._pipelines_api.get_pipeline(id=pipeline_id)
def delete_pipeline(self, pipeline_id):
"""Delete pipeline.
Args:
pipeline_id: id of the pipeline.
Returns:
Object. If the method is called asynchronously,
returns the request thread.
Throws:
Exception if pipeline is not found.
"""
return self._pipelines_api.delete_pipeline(id=pipeline_id)
def list_pipeline_versions(self, pipeline_id, page_token='', page_size=10, sort_by=''):
"""Lists pipeline versions.
Args:
pipeline_id: id of the pipeline to list versions
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of versions and next page token.
"""
return self._pipelines_api.list_pipeline_versions(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.PIPELINE, resource_key_id=pipeline_id)
feat(sdk): support HTTP/S PROXY for SDK client (#4215)
* done
* done
* reserve https
* done
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging
import json
import os
import re
import tarfile
import tempfile
import warnings
import yaml
import zipfile
import datetime
from typing import Mapping, Callable, Optional
import kfp
import kfp_server_api
from kfp.compiler import compiler
from kfp.compiler._k8s_helper import sanitize_k8s_name
from kfp._auth import get_auth_token, get_gcp_access_token
# TTL of the access token associated with the client. This is needed because
# `gcloud auth print-access-token` generates a token with TTL=1 hour, after
# which the authentication expires. This TTL is needed for kfp.Client()
# initialized with host=<inverse proxy endpoint>.
# Set to 55 mins to provide some safe margin.
_GCP_ACCESS_TOKEN_TIMEOUT = datetime.timedelta(minutes=55)
# Operators on scalar values. Only applies to one of |int_value|,
# |long_value|, |string_value| or |timestamp_value|.
_FILTER_OPERATIONS = {"UNKNOWN": 0,
"EQUALS" : 1,
"NOT_EQUALS" : 2,
"GREATER_THAN": 3,
"GREATER_THAN_EQUALS": 5,
"LESS_THAN": 6,
"LESS_THAN_EQUALS": 7}
def _add_generated_apis(target_struct, api_module, api_client):
'''Initializes a hierarchical API object based on the generated API module.
PipelineServiceApi.create_pipeline becomes target_struct.pipelines.create_pipeline
'''
Struct = type('Struct', (), {})
def camel_case_to_snake_case(name):
import re
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
for api_name in dir(api_module):
if not api_name.endswith('ServiceApi'):
continue
short_api_name = camel_case_to_snake_case(api_name[0:-len('ServiceApi')]) + 's'
api_struct = Struct()
setattr(target_struct, short_api_name, api_struct)
service_api = getattr(api_module.api, api_name)
initialized_service_api = service_api(api_client)
for member_name in dir(initialized_service_api):
if member_name.startswith('_') or member_name.endswith('_with_http_info'):
continue
bound_member = getattr(initialized_service_api, member_name)
setattr(api_struct, member_name, bound_member)
models_struct = Struct()
for member_name in dir(api_module.models):
if not member_name[0].islower():
setattr(models_struct, member_name, getattr(api_module.models, member_name))
target_struct.api_models = models_struct
KF_PIPELINES_ENDPOINT_ENV = 'KF_PIPELINES_ENDPOINT'
KF_PIPELINES_UI_ENDPOINT_ENV = 'KF_PIPELINES_UI_ENDPOINT'
KF_PIPELINES_DEFAULT_EXPERIMENT_NAME = 'KF_PIPELINES_DEFAULT_EXPERIMENT_NAME'
KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME = 'KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME'
class Client(object):
""" API Client for KubeFlow Pipeline.
"""
# in-cluster DNS name of the pipeline service
IN_CLUSTER_DNS_NAME = 'ml-pipeline.{}.svc.cluster.local:8888'
KUBE_PROXY_PATH = 'api/v1/namespaces/{}/services/ml-pipeline:http/proxy/'
LOCAL_KFP_CONTEXT = os.path.expanduser('~/.config/kfp/context.json')
# TODO: Wrap the configurations for different authentication methods.
def __init__(self, host=None, client_id=None, namespace='kubeflow', other_client_id=None, other_client_secret=None, existing_token=None, cookies=None, proxy=None, ssl_ca_cert=None):
"""Create a new instance of kfp client.
Args:
host: the host name to use to talk to Kubeflow Pipelines. If not set, the in-cluster
service DNS name will be used, which only works if the current environment is a pod
in the same cluster (such as a Jupyter instance spawned by Kubeflow's
JupyterHub). If you have a different connection to cluster, such as a kubectl
proxy connection, then set it to something like "127.0.0.1:8080/pipeline".
If you connect to an IAP enabled cluster, set it to
"https://<your-deployment>.endpoints.<your-project>.cloud.goog/pipeline".
client_id: The client ID used by Identity-Aware Proxy.
namespace: the namespace where the kubeflow pipeline system is run.
other_client_id: The client ID used to obtain the auth codes and refresh tokens.
Reference: https://cloud.google.com/iap/docs/authentication-howto#authenticating_from_a_desktop_app.
other_client_secret: The client secret used to obtain the auth codes and refresh tokens.
existing_token: pass in a token directly; useful when it is better to obtain the token outside of the SDK, e.g. from GCP Cloud Functions,
or when the caller already has a token.
cookies: CookieJar object containing cookies that will be passed to the pipelines API.
proxy: address of an HTTP or HTTPS proxy server to route API requests through.
ssl_ca_cert: path to a CA certificate bundle used for TLS verification (for example when connecting through the proxy).
"""
host = host or os.environ.get(KF_PIPELINES_ENDPOINT_ENV)
self._uihost = os.environ.get(KF_PIPELINES_UI_ENDPOINT_ENV, host)
config = self._load_config(host, client_id, namespace, other_client_id, other_client_secret, existing_token, proxy, ssl_ca_cert)
# Save the loaded API client configuration, as a reference if update is
# needed.
self._existing_config = config
api_client = kfp_server_api.api_client.ApiClient(config, cookie=cookies)
_add_generated_apis(self, kfp_server_api, api_client)
self._job_api = kfp_server_api.api.job_service_api.JobServiceApi(api_client)
self._run_api = kfp_server_api.api.run_service_api.RunServiceApi(api_client)
self._experiment_api = kfp_server_api.api.experiment_service_api.ExperimentServiceApi(api_client)
self._pipelines_api = kfp_server_api.api.pipeline_service_api.PipelineServiceApi(api_client)
self._upload_api = kfp_server_api.api.PipelineUploadServiceApi(api_client)
self._load_context_setting_or_default()
def _load_config(self, host, client_id, namespace, other_client_id, other_client_secret, existing_token, proxy, ssl_ca_cert):
config = kfp_server_api.configuration.Configuration()
if proxy:
# https://github.com/kubeflow/pipelines/blob/c6ac5e0b1fd991e19e96419f0f508ec0a4217c29/backend/api/python_http_client/kfp_server_api/rest.py#L100
config.proxy = proxy
if ssl_ca_cert:
config.ssl_ca_cert = ssl_ca_cert
host = host or ''
# Preprocess the host endpoint to prevent some common user mistakes.
# This should only be done for non-IAP cases (when client_id is None). IAP requires preserving the protocol.
if not client_id:
# Per feedback in proxy env, http or https is still required
if not proxy:
host = re.sub(r'^(http|https)://', '', host)
host = host.rstrip('/')
if host:
config.host = host
token = None
# "existing_token" is designed to accept token generated outside of SDK. Here is an example.
#
# https://cloud.google.com/functions/docs/securing/function-identity
# https://cloud.google.com/endpoints/docs/grpc/service-account-authentication
#
# import requests
# import kfp
#
# def get_access_token():
# url = 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'
# r = requests.get(url, headers={'Metadata-Flavor': 'Google'})
# r.raise_for_status()
# access_token = r.json()['access_token']
# return access_token
#
# client = kfp.Client(host='<KFPHost>', existing_token=get_access_token())
#
if existing_token:
token = existing_token
self._is_refresh_token = False
elif client_id:
token = get_auth_token(client_id, other_client_id, other_client_secret)
self._is_refresh_token = True
elif self._is_inverse_proxy_host(host):
token = get_gcp_access_token()
self._is_refresh_token = False
if token:
config.api_key['authorization'] = token
config.api_key_prefix['authorization'] = 'Bearer'
return config
if host:
# if host is explicitly set with auth token, it's probably a port forward address.
return config
import kubernetes as k8s
in_cluster = True
try:
k8s.config.load_incluster_config()
except:
in_cluster = False
pass
if in_cluster:
config.host = Client.IN_CLUSTER_DNS_NAME.format(namespace)
return config
try:
k8s.config.load_kube_config(client_configuration=config)
except:
print('Failed to load kube config.')
return config
if config.host:
config.host = config.host + '/' + Client.KUBE_PROXY_PATH.format(namespace)
return config
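# Hypothetical usage of the new proxy support (added; host, proxy address and cert path
# are placeholders, not real endpoints):
#   client = kfp.Client(host='http://ml-pipeline.example.com:8888',
#                       proxy='http://proxy.example.com:3128',
#                       ssl_ca_cert='/path/to/ca.pem')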
def _is_inverse_proxy_host(self, host):
if host:
return re.match(r'\S+.googleusercontent.com/{0,1}$', host)
if re.match(r'\w+', host):
warnings.warn(
'The received host is %s, please include the full endpoint address '
'(with ".(pipelines/notebooks).googleusercontent.com")' % host)
return False
def _is_ipython(self):
"""Returns whether we are running in notebook."""
try:
import IPython
ipy = IPython.get_ipython()
if ipy is None:
return False
except ImportError:
return False
return True
def _get_url_prefix(self):
if self._uihost:
# User's own connection.
if self._uihost.startswith('http://') or self._uihost.startswith('https://'):
return self._uihost
else:
return 'http://' + self._uihost
# In-cluster pod. We could use relative URL.
return '/pipeline'
def _load_context_setting_or_default(self):
if os.path.exists(Client.LOCAL_KFP_CONTEXT):
with open(Client.LOCAL_KFP_CONTEXT, 'r') as f:
self._context_setting = json.load(f)
else:
self._context_setting = {
'namespace': '',
}
def _refresh_api_client_token(self):
"""Refreshes the existing token associated with the kfp_api_client."""
if getattr(self, '_is_refresh_token', None):
return
new_token = get_gcp_access_token()
self._existing_config.api_key['authorization'] = new_token
def set_user_namespace(self, namespace):
"""Set user namespace into local context setting file.
This function should only be used when Kubeflow Pipelines is in the multi-user mode.
Args:
namespace: kubernetes namespace the user has access to.
"""
self._context_setting['namespace'] = namespace
with open(Client.LOCAL_KFP_CONTEXT, 'w') as f:
json.dump(self._context_setting, f)
def get_user_namespace(self):
"""Get user namespace in context config.
Returns:
namespace: kubernetes namespace from the local context file or empty if it wasn't set.
"""
return self._context_setting['namespace']
def create_experiment(self, name, description=None, namespace=None):
"""Create a new experiment.
Args:
name: the name of the experiment.
description: description of the experiment.
namespace: kubernetes namespace where the experiment should be created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
An Experiment object. Most important field is id.
"""
namespace = namespace or self.get_user_namespace()
experiment = None
try:
experiment = self.get_experiment(experiment_name=name, namespace=namespace)
except:
# Ignore error if the experiment does not exist.
pass
if not experiment:
logging.info('Creating experiment {}.'.format(name))
resource_references = []
if namespace:
key = kfp_server_api.models.ApiResourceKey(id=namespace, type=kfp_server_api.models.ApiResourceType.NAMESPACE)
reference = kfp_server_api.models.ApiResourceReference(key=key, relationship=kfp_server_api.models.ApiRelationship.OWNER)
resource_references.append(reference)
experiment = kfp_server_api.models.ApiExperiment(
name=name,
description=description,
resource_references=resource_references)
experiment = self._experiment_api.create_experiment(body=experiment)
if self._is_ipython():
import IPython
html = \
('Experiment link <a href="%s/#/experiments/details/%s" target="_blank" >here</a>'
% (self._get_url_prefix(), experiment.id))
IPython.display.display(IPython.display.HTML(html))
return experiment
def get_pipeline_id(self, name):
"""Returns the pipeline id if a pipeline with the name exsists.
Args:
name: pipeline name
Returns:
The pipeline id if exactly one pipeline with the given name exists; otherwise None.
"""
pipeline_filter = json.dumps({
"predicates": [
{
"op": _FILTER_OPERATIONS["EQUALS"],
"key": "name",
"stringValue": name,
}
]
})
result = self._pipelines_api.list_pipelines(filter=pipeline_filter)
if len(result.pipelines)==1:
return result.pipelines[0].id
elif len(result.pipelines)>1:
raise ValueError("Multiple pipelines with the name: {} found, the name needs to be unique".format(name))
return None
def list_experiments(self, page_token='', page_size=10, sort_by='', namespace=None):
"""List experiments.
Args:
page_token: token for starting of the page.
page_size: size of the page.
sort_by: can be '[field_name]' or '[field_name] desc'. For example, 'name desc'.
namespace: kubernetes namespace where the experiment was created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
A response object including a list of experiments and next page token.
"""
namespace = namespace or self.get_user_namespace()
response = self._experiment_api.list_experiment(
page_token=page_token,
page_size=page_size,
sort_by=sort_by,
resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.NAMESPACE,
resource_reference_key_id=namespace)
return response
def get_experiment(self, experiment_id=None, experiment_name=None, namespace=None):
"""Get details of an experiment
Either experiment_id or experiment_name is required
Args:
experiment_id: id of the experiment. (Optional)
experiment_name: name of the experiment. (Optional)
namespace: kubernetes namespace where the experiment was created.
For single user deployment, leave it as None;
For multi user, input the namespace where the user is authorized.
Returns:
A response object including details of an experiment.
Throws:
Exception if the experiment is not found or neither of the arguments is provided
"""
namespace = namespace or self.get_user_namespace()
if experiment_id is None and experiment_name is None:
raise ValueError('Either experiment_id or experiment_name is required')
if experiment_id is not None:
return self._experiment_api.get_experiment(id=experiment_id)
next_page_token = ''
while next_page_token is not None:
list_experiments_response = self.list_experiments(page_size=100, page_token=next_page_token, namespace=namespace)
next_page_token = list_experiments_response.next_page_token
for experiment in list_experiments_response.experiments or []:
if experiment.name == experiment_name:
return self._experiment_api.get_experiment(id=experiment.id)
raise ValueError('No experiment is found with name {}.'.format(experiment_name))
def _extract_pipeline_yaml(self, package_file):
def _choose_pipeline_yaml_file(file_list) -> str:
yaml_files = [file for file in file_list if file.endswith('.yaml')]
if len(yaml_files) == 0:
raise ValueError('Invalid package. Missing pipeline yaml file in the package.')
if 'pipeline.yaml' in yaml_files:
return 'pipeline.yaml'
else:
if len(yaml_files) == 1:
return yaml_files[0]
raise ValueError('Invalid package. There is no pipeline.yaml file and there are multiple yaml files.')
if package_file.endswith('.tar.gz') or package_file.endswith('.tgz'):
with tarfile.open(package_file, "r:gz") as tar:
file_names = [member.name for member in tar if member.isfile()]
pipeline_yaml_file = _choose_pipeline_yaml_file(file_names)
with tar.extractfile(tar.getmember(pipeline_yaml_file)) as f:
return yaml.safe_load(f)
elif package_file.endswith('.zip'):
with zipfile.ZipFile(package_file, 'r') as zip:
pipeline_yaml_file = _choose_pipeline_yaml_file(zip.namelist())
with zip.open(pipeline_yaml_file) as f:
return yaml.safe_load(f)
elif package_file.endswith('.yaml') or package_file.endswith('.yml'):
with open(package_file, 'r') as f:
return yaml.safe_load(f)
else:
raise ValueError('The package_file '+ package_file + ' should end with one of the following formats: [.tar.gz, .tgz, .zip, .yaml, .yml]')
def list_pipelines(self, page_token='', page_size=10, sort_by=''):
"""List pipelines.
Args:
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of pipelines and next page token.
"""
return self._pipelines_api.list_pipelines(page_token=page_token, page_size=page_size, sort_by=sort_by)
def list_pipeline_versions(self, pipeline_id: str, page_token='', page_size=10, sort_by=''):
"""List all versions of a given pipeline.
Args:
pipeline_id: the string ID of a pipeline.
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of pipelines and next page token.
"""
return self._pipelines_api.list_pipeline_versions(
resource_key_type="PIPELINE",
resource_key_id=pipeline_id,
page_token=page_token,
page_size=page_size,
sort_by=sort_by
)
# TODO: provide default namespace, similar to kubectl default namespaces.
def run_pipeline(self, experiment_id, job_name, pipeline_package_path=None, params={}, pipeline_id=None, version_id=None):
"""Run a specified pipeline.
Args:
experiment_id: The string id of an experiment.
job_name: name of the job.
pipeline_package_path: local path of the pipeline package(the filename should end with one of the following .tar.gz, .tgz, .zip, .yaml, .yml).
params: a dictionary with key (string) as param name and value (string) as param value.
pipeline_id: the string ID of a pipeline.
version_id: the string ID of a pipeline version.
If both pipeline_id and version_id are specified, version_id will take precedence.
If only pipeline_id is specified, the default version of this pipeline is used to create the run.
Returns:
A run object. Most important field is id.
"""
job_config = self._create_job_config(
experiment_id=experiment_id,
params=params,
pipeline_package_path=pipeline_package_path,
pipeline_id=pipeline_id,
version_id=version_id)
run_body = kfp_server_api.models.ApiRun(
pipeline_spec=job_config.spec, resource_references=job_config.resource_references, name=job_name)
response = self._run_api.create_run(body=run_body)
if self._is_ipython():
import IPython
html = ('Run link <a href="%s/#/runs/details/%s" target="_blank" >here</a>'
% (self._get_url_prefix(), response.run.id))
IPython.display.display(IPython.display.HTML(html))
return response.run
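# Usage sketch (illustrative): submitting a compiled package to an experiment, reusing
# the client from the sketch above. The package path, job name and parameter are
# placeholders; run_pipeline() also accepts pipeline_id/version_id instead of a path.
#
#     exp = client.create_experiment(name='demo-experiment')
#     run = client.run_pipeline(
#         experiment_id=exp.id,
#         job_name='demo-run',
#         pipeline_package_path='pipeline.yaml',
#         params={'learning_rate': '0.01'})
#     print(run.id)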
def create_recurring_run(self, experiment_id, job_name, description=None, start_time=None, end_time=None, interval_second=None, cron_expression=None, max_concurrency=1, no_catchup=None, params={}, pipeline_package_path=None, pipeline_id=None, version_id=None, enabled=True):
"""Create a recurring run.
Args:
experiment_id: The string id of an experiment.
job_name: name of the job.
description: An optional job description.
start_time: The RFC3339 time string of the time when to start the job.
end_time: The RFC3339 time string of the time when to end the job.
interval_second: Integer indicating the seconds between two recurring runs for a periodic schedule.
cron_expression: A cron expression representing a set of times, using 5 space-separated fields, e.g. "0 0 9 ? * 2-6".
max_concurrency: Integer indicating how many jobs can be run in parallel.
no_catchup: Whether the recurring run should catch up if behind schedule.
For example, if the recurring run is paused for a while and re-enabled
afterwards. If no_catchup=False, the scheduler will catch up on (backfill) each
missed interval. Otherwise, it only schedules the latest interval if more than one interval
is ready to be scheduled.
Usually, if your pipeline handles backfill internally, you should turn catchup
off to avoid duplicate backfill. (default: {False})
pipeline_package_path: Local path of the pipeline package(the filename should end with one of the following .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The string ID of a pipeline.
version_id: The string ID of a pipeline version.
If both pipeline_id and version_id are specified, pipeline_id will take precedence.
This will change in a future version, so it is recommended to use version_id by itself.
enabled: A bool indicating whether the recurring run is enabled or disabled.
Returns:
A Job object. Most important field is id.
"""
job_config = self._create_job_config(
experiment_id=experiment_id,
params=params,
pipeline_package_path=pipeline_package_path,
pipeline_id=pipeline_id,
version_id=version_id)
if all([interval_second, cron_expression]) or not any([interval_second, cron_expression]):
raise ValueError('Either interval_second or cron_expression is required')
if interval_second is not None:
trigger = kfp_server_api.models.ApiTrigger(
periodic_schedule=kfp_server_api.models.ApiPeriodicSchedule(
start_time=start_time, end_time=end_time, interval_second=interval_second)
)
if cron_expression is not None:
trigger = kfp_server_api.models.ApiTrigger(
cron_schedule=kfp_server_api.models.ApiCronSchedule(
start_time=start_time, end_time=end_time, cron=cron_expression)
)
job_body = kfp_server_api.models.ApiJob(
enabled=enabled,
pipeline_spec=job_config.spec,
resource_references=job_config.resource_references,
name=job_name,
description=description,
no_catchup=no_catchup,
trigger=trigger,
max_concurrency=max_concurrency)
return self._job_api.create_job(body=job_body)
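# Usage sketch (illustrative): a recurring run that fires every hour. The package path
# and names are placeholders; exactly one of interval_second or cron_expression must be
# given, otherwise the method above raises ValueError.
#
#     job = client.create_recurring_run(
#         experiment_id=exp.id,
#         job_name='hourly-run',
#         interval_second=3600,
#         max_concurrency=1,
#         pipeline_package_path='pipeline.yaml',
#         params={'learning_rate': '0.01'})
#     print(job.id)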
def _create_job_config(self, experiment_id, params, pipeline_package_path, pipeline_id, version_id):
"""Create a JobConfig with spec and resource_references.
Args:
experiment_id: The string id of an experiment.
pipeline_package_path: Local path of the pipeline package(the filename should end with one of the following .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The string ID of a pipeline.
version_id: The string ID of a pipeline version.
If both pipeline_id and version_id are specified, pipeline_id will take precedence.
This will change in a future version, so it is recommended to use version_id by itself.
Returns:
A JobConfig object with attributes spec and resource_reference.
"""
class JobConfig:
def __init__(self, spec, resource_references):
self.spec = spec
self.resource_references = resource_references
pipeline_json_string = None
if pipeline_package_path:
pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)
pipeline_json_string = json.dumps(pipeline_obj)
api_params = [kfp_server_api.ApiParameter(
name=sanitize_k8s_name(name=k, allow_capital_underscore=True),
value=str(v)) for k,v in params.items()]
resource_references = []
key = kfp_server_api.models.ApiResourceKey(id=experiment_id,
type=kfp_server_api.models.ApiResourceType.EXPERIMENT)
reference = kfp_server_api.models.ApiResourceReference(key=key,
relationship=kfp_server_api.models.ApiRelationship.OWNER)
resource_references.append(reference)
if version_id:
key = kfp_server_api.models.ApiResourceKey(id=version_id,
type=kfp_server_api.models.ApiResourceType.PIPELINE_VERSION)
reference = kfp_server_api.models.ApiResourceReference(key=key,
relationship=kfp_server_api.models.ApiRelationship.CREATOR)
resource_references.append(reference)
spec = kfp_server_api.models.ApiPipelineSpec(
pipeline_id=pipeline_id,
workflow_manifest=pipeline_json_string,
parameters=api_params)
return JobConfig(spec=spec, resource_references=resource_references)
def create_run_from_pipeline_func(self, pipeline_func: Callable, arguments: Mapping[str, str], run_name=None, experiment_name=None, pipeline_conf: kfp.dsl.PipelineConf = None, namespace=None):
'''Runs pipeline on KFP-enabled Kubernetes cluster.
This command compiles the pipeline function, creates or gets an experiment and submits the pipeline for execution.
Args:
pipeline_func: A function that describes a pipeline by calling components and composing them into execution graph.
arguments: Arguments to the pipeline function provided as a dict.
run_name: Optional. Name of the run to be shown in the UI.
experiment_name: Optional. Name of the experiment to add the run to.
namespace: kubernetes namespace where the pipeline runs are created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized
'''
#TODO: Check arguments against the pipeline function
pipeline_name = pipeline_func.__name__
run_name = run_name or pipeline_name + ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
with tempfile.TemporaryDirectory() as tmpdir:
pipeline_package_path = os.path.join(tmpdir, 'pipeline.yaml')
compiler.Compiler().compile(pipeline_func, pipeline_package_path, pipeline_conf=pipeline_conf)
return self.create_run_from_pipeline_package(pipeline_package_path, arguments, run_name, experiment_name, namespace)
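# Usage sketch (illustrative): compiling and running an in-memory pipeline function in
# one call. The component and pipeline below are placeholders; func_to_container_op and
# dsl.pipeline come from the kfp v1 SDK, not from this file.
#
#     from kfp import dsl
#     from kfp.components import func_to_container_op
#
#     @func_to_container_op
#     def echo(msg: str):
#         print(msg)
#
#     @dsl.pipeline(name='echo-pipeline', description='Prints a message.')
#     def echo_pipeline(msg: str = 'hello'):
#         echo(msg)
#
#     result = client.create_run_from_pipeline_func(echo_pipeline, arguments={'msg': 'hi'})
#     result.wait_for_run_completion(timeout=600)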
def create_run_from_pipeline_package(self, pipeline_file: str, arguments: Mapping[str, str], run_name=None, experiment_name=None, namespace=None):
'''Runs pipeline on KFP-enabled Kubernetes cluster.
This command compiles the pipeline function, creates or gets an experiment and submits the pipeline for execution.
Args:
pipeline_file: A compiled pipeline package file.
arguments: Arguments to the pipeline function provided as a dict.
run_name: Optional. Name of the run to be shown in the UI.
experiment_name: Optional. Name of the experiment to add the run to.
namespace: kubernetes namespace where the pipeline runs are created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized
'''
class RunPipelineResult:
def __init__(self, client, run_info):
self._client = client
self.run_info = run_info
self.run_id = run_info.id
def wait_for_run_completion(self, timeout=None):
timeout = timeout or datetime.timedelta.max
return self._client.wait_for_run_completion(self.run_id, timeout)
def __repr__(self):
return 'RunPipelineResult(run_id={})'.format(self.run_id)
#TODO: Check arguments against the pipeline function
pipeline_name = os.path.basename(pipeline_file)
experiment_name = experiment_name or os.environ.get(KF_PIPELINES_DEFAULT_EXPERIMENT_NAME, None)
overridden_experiment_name = os.environ.get(KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME, experiment_name)
if overridden_experiment_name != experiment_name:
import warnings
warnings.warn('Changing experiment name from "{}" to "{}".'.format(experiment_name, overridden_experiment_name))
experiment_name = overridden_experiment_name or 'Default'
run_name = run_name or (pipeline_name + ' ' +
datetime.datetime.now().strftime(
'%Y-%m-%d %H-%M-%S'))
experiment = self.create_experiment(name=experiment_name, namespace=namespace)
run_info = self.run_pipeline(experiment.id, run_name, pipeline_file, arguments)
return RunPipelineResult(self, run_info)
def list_runs(self, page_token='', page_size=10, sort_by='', experiment_id=None, namespace=None):
"""List runs.
Args:
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
experiment_id: experiment id to filter upon
namespace: kubernetes namespace to filter upon.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
A response object including a list of experiments and next page token.
"""
namespace = namespace or self.get_user_namespace()
if experiment_id is not None:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.EXPERIMENT, resource_reference_key_id=experiment_id)
elif namespace:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.NAMESPACE, resource_reference_key_id=namespace)
else:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by)
return response
def list_recurring_runs(self, page_token='', page_size=10, sort_by='', experiment_id=None):
"""List recurring runs.
Args:
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
experiment_id: experiment id to filter upon
Returns:
A response object including a list of recurring_runs and next page token.
"""
if experiment_id is not None:
response = self._job_api.list_jobs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.EXPERIMENT, resource_reference_key_id=experiment_id)
else:
response = self._job_api.list_jobs(page_token=page_token, page_size=page_size, sort_by=sort_by)
return response
def get_recurring_run(self, job_id):
"""Get recurring_run details.
Args:
job_id: id of the recurring_run.
Returns:
A response object including details of a recurring_run.
Throws:
Exception if recurring_run is not found.
"""
return self._job_api.get_job(id=job_id)
def get_run(self, run_id):
"""Get run details.
Args:
run_id: id of the run.
Returns:
A response object including details of a run.
Throws:
Exception if run is not found.
"""
return self._run_api.get_run(run_id=run_id)
def wait_for_run_completion(self, run_id, timeout):
"""Waits for a run to complete.
Args:
run_id: run id, returned from run_pipeline.
timeout: timeout in seconds.
Returns:
A run detail object: Most important fields are run and pipeline_runtime.
Raises:
TimeoutError: if the pipeline run failed to finish before the specified
timeout.
"""
status = 'Running:'
start_time = datetime.datetime.now()
last_token_refresh_time = datetime.datetime.now()
while (status is None or
status.lower() not in ['succeeded', 'failed', 'skipped', 'error']):
# Refreshes the access token before it hits the TTL.
if (datetime.datetime.now() - last_token_refresh_time
> _GCP_ACCESS_TOKEN_TIMEOUT):
self._refresh_api_client_token()
last_token_refresh_time = datetime.datetime.now()
get_run_response = self._run_api.get_run(run_id=run_id)
status = get_run_response.run.status
elapsed_time = (datetime.datetime.now() - start_time).seconds
logging.info('Waiting for the job to complete...')
if elapsed_time > timeout:
raise TimeoutError('Run timeout')
time.sleep(5)
return get_run_response
def _get_workflow_json(self, run_id):
"""Get the workflow json.
Args:
run_id: run id, returned from run_pipeline.
Returns:
workflow: json workflow
"""
get_run_response = self._run_api.get_run(run_id=run_id)
workflow = get_run_response.pipeline_runtime.workflow_manifest
workflow_json = json.loads(workflow)
return workflow_json
def upload_pipeline(
self,
pipeline_package_path: str = None,
pipeline_name: str = None,
description: str = None,
):
"""Uploads the pipeline to the Kubeflow Pipelines cluster.
Args:
pipeline_package_path: Local path to the pipeline package.
pipeline_name: Optional. Name of the pipeline to be shown in the UI.
description: Optional. Description of the pipeline to be shown in the UI.
Returns:
Server response object containing pipeline id and other information.
"""
response = self._upload_api.upload_pipeline(pipeline_package_path, name=pipeline_name, description=description)
if self._is_ipython():
import IPython
html = 'Pipeline link <a href=%s/#/pipelines/details/%s>here</a>' % (self._get_url_prefix(), response.id)
IPython.display.display(IPython.display.HTML(html))
return response
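# Usage sketch (illustrative): registering a package as a named pipeline and then adding
# a second version to it. The file names and the pipeline name are placeholders.
#
#     p = client.upload_pipeline('pipeline.yaml', pipeline_name='demo-pipeline')
#     v = client.upload_pipeline_version('pipeline_v2.yaml',
#                                        pipeline_version_name='v2',
#                                        pipeline_id=p.id)
#     print(p.id, v.id)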
def upload_pipeline_version(
self,
pipeline_package_path,
pipeline_version_name: str,
pipeline_id: Optional[str] = None,
pipeline_name: Optional[str] = None
):
"""Uploads a new version of the pipeline to the Kubeflow Pipelines cluster.
Args:
pipeline_package_path: Local path to the pipeline package.
pipeline_version_name: Name of the pipeline version to be shown in the UI.
pipeline_id: Optional. Id of the pipeline.
pipeline_name: Optional. Name of the pipeline.
Returns:
Server response object containing pipeline id and other information.
Throws:
ValueError when none or both of pipeline_id or pipeline_name are specified
Exception if pipeline id is not found.
"""
if all([pipeline_id, pipeline_name]) or not any([pipeline_id, pipeline_name]):
raise ValueError('Either pipeline_id or pipeline_name is required')
if pipeline_name:
pipeline_id = self.get_pipeline_id(pipeline_name)
response = self._upload_api.upload_pipeline_version(
pipeline_package_path,
name=pipeline_version_name,
pipelineid=pipeline_id
)
if self._is_ipython():
import IPython
html = 'Pipeline link <a href=%s/#/pipelines/details/%s>here</a>' % (self._get_url_prefix(), response.id)
IPython.display.display(IPython.display.HTML(html))
return response
def get_pipeline(self, pipeline_id):
"""Get pipeline details.
Args:
pipeline_id: id of the pipeline.
Returns:
A response object including details of a pipeline.
Throws:
Exception if pipeline is not found.
"""
return self._pipelines_api.get_pipeline(id=pipeline_id)
def delete_pipeline(self, pipeline_id):
"""Delete pipeline.
Args:
pipeline_id: id of the pipeline.
Returns:
Object. If the method is called asynchronously,
returns the request thread.
Throws:
Exception if pipeline is not found.
"""
return self._pipelines_api.delete_pipeline(id=pipeline_id)
def list_pipeline_versions(self, pipeline_id, page_token='', page_size=10, sort_by=''):
"""Lists pipeline versions.
Args:
pipeline_id: id of the pipeline to list versions
page_token: token for starting of the page.
page_size: size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of versions and next page token.
"""
return self._pipelines_api.list_pipeline_versions(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.PIPELINE, resource_key_id=pipeline_id)
|
import socket,sys,json,pdb
from Communicator import Communicator
import argparse
class Server:
def __init__(self):
"""
Constructor. Initializes the communicator_list to [] and the NETWORK_TIMER to 150
Args:
None
Returns:
None
"""
self.communicator_list = []
self.NETWORK_TIMER = 150
self.log_file_handle = None
def setLogFile(self, filename):
self.log_file_handle = open(filename,'wb')
def BuildServer(self,port_no,num_clients):
"""Builds The server on the port_number port_no for num_clients
Args:
port_no: (int) The port number
num_clients: (int) The number of clients who would join (>= 2 for all practical purposes)
Returns:
None
"""
s = socket.socket()
s.settimeout(self.NETWORK_TIMER)
host = "0.0.0.0"
self.port = port_no
s.bind((host,port_no))
s.listen(5)
self.client_count = 0
self.CLOSE_NETWORK = False
while self.client_count < num_clients and (not self.CLOSE_NETWORK):
try:
c,addr = s.accept()
except:
self.CLOSE_NETWORK = True
if(not self.CLOSE_NETWORK):
self.client_count += 1
self.communicator_list.append(Communicator())
self.communicator_list[-1].setSocket(c,self.NETWORK_TIMER)
s.close()
def setNetworkTimer(self,Time_in_seconds):
self.NETWORK_TIMER = Time_in_seconds
def getNetworkTimer(self):
return self.NETWORK_TIMER
def RecvDataFromClient(self,client_id):
"""Receives Data from Client client_id
Args:
client_id: The integer index of a client
Returns:
data: Received on the socket to client_id, None in case of an Error
"""
data = None
if(client_id < len(self.communicator_list)):
data = self.communicator_list[client_id].RecvDataOnSocket()
if(data is None):
print 'ERROR : TIMEOUT ON CLIENT NETWORK ' + str(client_id) + ' END'
self.CloseClient(client_id)
return data
def SendData2Client(self,client_id,data):
"""Sends data to the Client client_id. In case data was None, sends the
appropriate data (with ACTION='KILLPROC') and closes the socket
Args:
client_id : (int) client_id
data : The json file to be sent, or None in case of an Error
Returns:
success_flag : True if send was successful
"""
success_flag = False
if(data is None):
data = {'meta': 'TIMEOUT ON CLIENT NETWORK', 'action':'KILLPROC','data':''}
else:
data = json.loads(data)
if(client_id < len(self.communicator_list)):
success_flag = self.communicator_list[client_id].SendDataOnSocket(json.dumps(data))
if(not success_flag):
print 'ERROR : COULD NOT SEND DATA TO CLIENT ' + str(client_id)
self.CloseClient(client_id)
elif((data['action'] == 'KILLPROC') or (data['action'] == 'FINISH')):
self.CloseClient(client_id)
return success_flag
def CloseClient(self,client_id):
"""Closes the client with client_id client_id
Args:
client_id : (int) index of client
Returns:
None
"""
if(client_id < len(self.communicator_list)):
self.communicator_list[client_id] = None
def CloseAllClients(self):
"""Closes all clients in the communicator_list and resets the communicator_list
Args:
None
Returns:
None
"""
for idx in xrange(len(self.communicator_list)):
if(not self.communicator_list[idx] is None):
self.CloseClient(idx)
self.communicator_list = []
def SendInitError2Clients(self):
"""
In case of an initialization error, sends messages to the clients, and exits
Args:
None
Returns:
None
"""
for idx in xrange(len(self.communicator_list)):
if(not self.communicator_list[idx] is None):
data = {'meta':'ERROR IN INITIALIZATION', 'action':'KILLPROC','data':''}
self.SendData2Client(idx,json.dumps(data))
self.CloseClient(idx)
def playTak(self,n,timelimit,client_0,client_1):
"""
"Tak is the best sort of game: simple in its rules, complex in its strategy" - Kvothe
Starts a game of Tak between client_0 (as Player_1) and client_1 (as Player_2)
Args:
n: (int) board size
timelimit: time limit
client_0: (int) idx of Player 1
client_1: (int) idx of Player 2
Returns:
None
"""
if( (client_0 < len(self.communicator_list)) and (client_1) < len(self.communicator_list)):
dataString = '1 ' + str(n) + ' ' + str(timelimit)
data = {'meta':'', 'action':'INIT','data':dataString}
self.SendData2Client(client_0, json.dumps(data))
data['data'] = '2 ' + str(n) + ' ' + str(timelimit)
self.SendData2Client(client_1, json.dumps(data))
while(True):
data = self.RecvDataFromClient(client_0)
self.SendData2Client(client_1, data)
if not data:
break
print data, 'Received from client 0'
data = json.loads(data)
if data['action'] == 'FINISH' or data['action'] == 'KILLPROC':
if not self.log_file_handle is None:
self.log_file_handle.write(data['meta'])
break
data = self.RecvDataFromClient(client_1)
print data, 'Received from client 1'
self.SendData2Client(client_0, data)
if not data:
break
data = json.loads(data)
if data['action'] == 'FINISH' or data['action'] == 'KILLPROC':
if not self.log_file_handle is None:
self.log_file_handle.write(data['meta'])
break
self.CloseClient(client_0)
self.CloseClient(client_1)
else:
# Close all clients
self.CloseAllClients()
if __name__ == '__main__':
print 'Start'
local_Server = Server()
parser = argparse.ArgumentParser(description = 'Tak Server')
parser.add_argument('port', metavar = '10000', type = int, help = 'Server port')
parser.add_argument('-n', dest = 'n', metavar = 'N', type = int, default = 5, help = 'Tak board size')
parser.add_argument('-NC', dest = 'num_clients', metavar = 'num_clients', type = int, default = 2, help = 'Number of clients connecting to the server')
parser.add_argument('-TL', dest = 'time_limit', metavar = 'time_limit', type = int, default = 120, help = 'Time limit (in s)')
parser.add_argument('-LOG',dest = 'log_file', metavar = 'log_file', type = str, default = "", help = 'Logger File for Evaluation purposes')
args = parser.parse_args()
if args.n < 5 or args.n > 7:
print 'Game size should be 5x5, 6x6 or 7x7.'
sys.exit()
if args.log_file != '':
local_Server.setLogFile(args.log_file)
local_Server.BuildServer(args.port, args.num_clients)
if(local_Server.client_count < 2):
local_Server.SendInitError2Clients()
else:
local_Server.playTak(args.n,args.time_limit,0,1)
Reuse ports: the variant below is identical to the server above, except that it enables SO_REUSEADDR on the listening socket so the server can rebind to the same port immediately after a restart.
import socket,sys,json,pdb
from Communicator import Communicator
import argparse
class Server:
def __init__(self):
"""
Constructor. Initializes the communicator_list to [] and the NETWORK_TIMER to 150
Args:
None
Returns:
None
"""
self.communicator_list = []
self.NETWORK_TIMER = 150
self.log_file_handle = None
def setLogFile(self, filename):
self.log_file_handle = open(filename,'wb')
def BuildServer(self,port_no,num_clients):
"""Builds The server on the port_number port_no for num_clients
Args:
port_no: (int) The port number
num_clients: (int) The number of clients who would join (>= 2 for all practical purposes)
Returns:
None
"""
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.settimeout(self.NETWORK_TIMER)
host = "0.0.0.0"
self.port = port_no
s.bind((host,port_no))
s.listen(5)
self.client_count = 0
self.CLOSE_NETWORK = False
while self.client_count < num_clients and (not self.CLOSE_NETWORK):
try:
c,addr = s.accept()
except:
self.CLOSE_NETWORK = True
if(not self.CLOSE_NETWORK):
self.client_count += 1
self.communicator_list.append(Communicator())
self.communicator_list[-1].setSocket(c,self.NETWORK_TIMER)
s.close()
def setNetworkTimer(self,Time_in_seconds):
self.NETWORK_TIMER = Time_in_seconds
def getNetworkTimer(self):
return self.NETWORK_TIMER
def RecvDataFromClient(self,client_id):
"""Receives Data from Client client_id
Args:
client_id: The integer index of a client
Returns:
data: Received on the socket to client_id, None in case of an Error
"""
data = None
if(client_id < len(self.communicator_list)):
data = self.communicator_list[client_id].RecvDataOnSocket()
if(data is None):
print 'ERROR : TIMEOUT ON CLIENT NETWORK ' + str(client_id) + ' END'
self.CloseClient(client_id)
return data
def SendData2Client(self,client_id,data):
"""Sends data to the Client client_id. In case data was None, sends the
appropriate data (with ACTION='KILLPROC') and closes the socket
Args:
client_id : (int) client_id
data : The json file to be sent, or None in case of an Error
Returns:
success_flag : True if send was successful
"""
success_flag = False
if(data is None):
data = {'meta': 'TIMEOUT ON CLIENT NETWORK', 'action':'KILLPROC','data':''}
else:
data = json.loads(data)
if(client_id < len(self.communicator_list)):
success_flag = self.communicator_list[client_id].SendDataOnSocket(json.dumps(data))
if(not success_flag):
print 'ERROR : COULD NOT SEND DATA TO CLIENT ' + str(client_id)
self.CloseClient(client_id)
elif((data['action'] == 'KILLPROC') or (data['action'] == 'FINISH')):
self.CloseClient(client_id)
return success_flag
def CloseClient(self,client_id):
"""Closes the client with client_id client_id
Args:
client_id : (int) index of client
Returns:
None
"""
if(client_id < len(self.communicator_list)):
self.communicator_list[client_id] = None
def CloseAllClients(self):
"""Closes all clients in the communicator_list and resets the communicator_list
Args:
None
Returns:
None
"""
for idx in xrange(len(self.communicator_list)):
if(not self.communicator_list[idx] is None):
self.CloseClient(idx)
self.communicator_list = []
def SendInitError2Clients(self):
"""
In case of an initialization error, sends messages to the clients, and exits
Args:
None
Returns:
None
"""
for idx in xrange(len(self.communicator_list)):
if(not self.communicator_list[idx] is None):
data = {'meta':'ERROR IN INITIALIZATION', 'action':'KILLPROC','data':''}
self.SendData2Client(idx,json.dumps(data))
self.CloseClient(idx)
def playTak(self,n,timelimit,client_0,client_1):
"""
"Tak is the best sort of game: simple in its rules, complex in its strategy" - Kvothe
Starts a game of Tak between client_0 (as Player_1) and client_1 (as Player_2)
Args:
n: (int) board size
timelimit: time limit
client_0: (int) idx of Player 1
client_1: (int) idx of Player 2
Returns:
None
"""
if( (client_0 < len(self.communicator_list)) and (client_1) < len(self.communicator_list)):
dataString = '1 ' + str(n) + ' ' + str(timelimit)
data = {'meta':'', 'action':'INIT','data':dataString}
self.SendData2Client(client_0, json.dumps(data))
data['data'] = '2 ' + str(n) + ' ' + str(timelimit)
self.SendData2Client(client_1, json.dumps(data))
while(True):
data = self.RecvDataFromClient(client_0)
self.SendData2Client(client_1, data)
if not data:
break
print data, 'Received from client 0'
data = json.loads(data)
if data['action'] == 'FINISH' or data['action'] == 'KILLPROC':
if not self.log_file_handle is None:
self.log_file_handle.write(data['meta'])
break
data = self.RecvDataFromClient(client_1)
print data, 'Received from client 1'
self.SendData2Client(client_0, data)
if not data:
break
data = json.loads(data)
if data['action'] == 'FINISH' or data['action'] == 'KILLPROC':
if not self.log_file_handle is None:
self.log_file_handle.write(data['meta'])
break
self.CloseClient(client_0)
self.CloseClient(client_1)
else:
# Close all clients
self.CloseAllClients()
if __name__ == '__main__':
print 'Start'
local_Server = Server()
parser = argparse.ArgumentParser(description = 'Tak Server')
parser.add_argument('port', metavar = '10000', type = int, help = 'Server port')
parser.add_argument('-n', dest = 'n', metavar = 'N', type = int, default = 5, help = 'Tak board size')
parser.add_argument('-NC', dest = 'num_clients', metavar = 'num_clients', type = int, default = 2, help = 'Number of clients connecting to the server')
parser.add_argument('-TL', dest = 'time_limit', metavar = 'time_limit', type = int, default = 120, help = 'Time limit (in s)')
parser.add_argument('-LOG',dest = 'log_file', metavar = 'log_file', type = str, default = "", help = 'Logger File for Evaluation purposes')
args = parser.parse_args()
if args.n < 5 or args.n > 7:
print 'Game size should be 5x5, 6x6 or 7x7.'
sys.exit()
if args.log_file != '':
local_Server.setLogFile(args.log_file)
local_Server.BuildServer(args.port, args.num_clients)
if(local_Server.client_count < 2):
local_Server.SendInitError2Clients()
else:
local_Server.playTak(args.n,args.time_limit,0,1)
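# Client-side sketch (illustrative, not part of this file). It assumes the same
# Communicator class used by the server also works on a connected client socket through
# the only methods the server exercises: setSocket(), RecvDataOnSocket() and
# SendDataOnSocket(). Host, port and the example move are placeholders; the JSON
# envelope {'meta', 'action', 'data'} matches what playTak() sends and relays.
#
#     # server side:  python Server.py 10000 -n 5 -NC 2 -TL 120
#     import socket, json
#     from Communicator import Communicator
#
#     sock = socket.socket()
#     sock.connect(('127.0.0.1', 10000))
#     comm = Communicator()
#     comm.setSocket(sock, 150)
#
#     init = json.loads(comm.RecvDataOnSocket())   # action == 'INIT', data == '<player> <n> <timelimit>'
#     player, n, timelimit = init['data'].split()
#     if player == '2':
#         opponent_move = comm.RecvDataOnSocket()  # player 2 sees player 1's move first
#     comm.SendDataOnSocket(json.dumps({'meta': '', 'action': 'MOVE', 'data': 'a1'}))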
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging
import json
import os
import re
import tarfile
import tempfile
import warnings
import yaml
import zipfile
import datetime
from typing import Mapping, Callable, Optional
import kfp
import kfp_server_api
from kfp.compiler import compiler
from kfp.compiler._k8s_helper import sanitize_k8s_name
from kfp._auth import get_auth_token, get_gcp_access_token
# TTL of the access token associated with the client. This is needed because
# `gcloud auth print-access-token` generates a token with TTL=1 hour, after
# which the authentication expires. This TTL is needed for kfp.Client()
# initialized with host=<inverse proxy endpoint>.
# Set to 55 mins to provide some safe margin.
_GCP_ACCESS_TOKEN_TIMEOUT = datetime.timedelta(minutes=55)
# Operators on scalar values. Only applies to one of |int_value|,
# |long_value|, |string_value| or |timestamp_value|.
_FILTER_OPERATIONS = {"UNKNOWN": 0,
"EQUALS" : 1,
"NOT_EQUALS" : 2,
"GREATER_THAN": 3,
"GREATER_THAN_EQUALS": 5,
"LESS_THAN": 6,
"LESS_THAN_EQUALS": 7}
def _add_generated_apis(target_struct, api_module, api_client):
"""Initializes a hierarchical API object based on the generated API module.
PipelineServiceApi.create_pipeline becomes target_struct.pipelines.create_pipeline
"""
Struct = type('Struct', (), {})
def camel_case_to_snake_case(name):
import re
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
for api_name in dir(api_module):
if not api_name.endswith('ServiceApi'):
continue
short_api_name = camel_case_to_snake_case(api_name[0:-len('ServiceApi')]) + 's'
api_struct = Struct()
setattr(target_struct, short_api_name, api_struct)
service_api = getattr(api_module.api, api_name)
initialized_service_api = service_api(api_client)
for member_name in dir(initialized_service_api):
if member_name.startswith('_') or member_name.endswith('_with_http_info'):
continue
bound_member = getattr(initialized_service_api, member_name)
setattr(api_struct, member_name, bound_member)
models_struct = Struct()
for member_name in dir(api_module.models):
if not member_name[0].islower():
setattr(models_struct, member_name, getattr(api_module.models, member_name))
target_struct.api_models = models_struct
KF_PIPELINES_ENDPOINT_ENV = 'KF_PIPELINES_ENDPOINT'
KF_PIPELINES_UI_ENDPOINT_ENV = 'KF_PIPELINES_UI_ENDPOINT'
KF_PIPELINES_DEFAULT_EXPERIMENT_NAME = 'KF_PIPELINES_DEFAULT_EXPERIMENT_NAME'
KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME = 'KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME'
class Client(object):
"""API Client for KubeFlow Pipeline.
Args:
host: The host name to use to talk to Kubeflow Pipelines. If not set, the in-cluster
service DNS name will be used, which only works if the current environment is a pod
in the same cluster (such as a Jupyter instance spawned by Kubeflow's
JupyterHub). If you have a different connection to cluster, such as a kubectl
proxy connection, then set it to something like "127.0.0.1:8080/pipeline".
If you connect to an IAP enabled cluster, set it to
"https://<your-deployment>.endpoints.<your-project>.cloud.goog/pipeline".
client_id: The client ID used by Identity-Aware Proxy.
namespace: The namespace where the kubeflow pipeline system is run.
other_client_id: The client ID used to obtain the auth codes and refresh tokens.
Reference: https://cloud.google.com/iap/docs/authentication-howto#authenticating_from_a_desktop_app.
other_client_secret: The client secret used to obtain the auth codes and refresh tokens.
existing_token: Pass in a token directly. Useful when it is better to obtain the token outside of the SDK, e.g. from GCP Cloud Functions,
or when the caller already has a token.
cookies: CookieJar object containing cookies that will be passed to the pipelines API.
proxy: HTTP or HTTPS proxy server
ssl_ca_cert: Cert for proxy
"""
# in-cluster DNS name of the pipeline service
IN_CLUSTER_DNS_NAME = 'ml-pipeline.{}.svc.cluster.local:8888'
KUBE_PROXY_PATH = 'api/v1/namespaces/{}/services/ml-pipeline:http/proxy/'
LOCAL_KFP_CONTEXT = os.path.expanduser('~/.config/kfp/context.json')
# TODO: Wrap the configurations for different authentication methods.
def __init__(self, host=None, client_id=None, namespace='kubeflow', other_client_id=None, other_client_secret=None, existing_token=None, cookies=None, proxy=None, ssl_ca_cert=None):
"""Create a new instance of kfp client.
"""
host = host or os.environ.get(KF_PIPELINES_ENDPOINT_ENV)
self._uihost = os.environ.get(KF_PIPELINES_UI_ENDPOINT_ENV, host)
config = self._load_config(host, client_id, namespace, other_client_id, other_client_secret, existing_token, proxy, ssl_ca_cert)
# Save the loaded API client configuration, as a reference if update is
# needed.
self._existing_config = config
api_client = kfp_server_api.api_client.ApiClient(config, cookie=cookies)
_add_generated_apis(self, kfp_server_api, api_client)
self._job_api = kfp_server_api.api.job_service_api.JobServiceApi(api_client)
self._run_api = kfp_server_api.api.run_service_api.RunServiceApi(api_client)
self._experiment_api = kfp_server_api.api.experiment_service_api.ExperimentServiceApi(api_client)
self._pipelines_api = kfp_server_api.api.pipeline_service_api.PipelineServiceApi(api_client)
self._upload_api = kfp_server_api.api.PipelineUploadServiceApi(api_client)
self._load_context_setting_or_default()
def _load_config(self, host, client_id, namespace, other_client_id, other_client_secret, existing_token, proxy, ssl_ca_cert):
config = kfp_server_api.configuration.Configuration()
if proxy:
# https://github.com/kubeflow/pipelines/blob/c6ac5e0b1fd991e19e96419f0f508ec0a4217c29/backend/api/python_http_client/kfp_server_api/rest.py#L100
config.proxy = proxy
if ssl_ca_cert:
config.ssl_ca_cert = ssl_ca_cert
host = host or ''
# Preprocess the host endpoint to prevent some common user mistakes.
if not client_id:
# always preserving the protocol (http://localhost requires it)
host = host.rstrip('/')
if host:
config.host = host
token = None
# "existing_token" is designed to accept token generated outside of SDK. Here is an example.
#
# https://cloud.google.com/functions/docs/securing/function-identity
# https://cloud.google.com/endpoints/docs/grpc/service-account-authentication
#
# import requests
# import kfp
#
# def get_access_token():
# url = 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'
# r = requests.get(url, headers={'Metadata-Flavor': 'Google'})
# r.raise_for_status()
# access_token = r.json()['access_token']
# return access_token
#
# client = kfp.Client(host='<KFPHost>', existing_token=get_access_token())
#
if existing_token:
token = existing_token
self._is_refresh_token = False
elif client_id:
token = get_auth_token(client_id, other_client_id, other_client_secret)
self._is_refresh_token = True
elif self._is_inverse_proxy_host(host):
token = get_gcp_access_token()
self._is_refresh_token = False
if token:
config.api_key['authorization'] = token
config.api_key_prefix['authorization'] = 'Bearer'
return config
if host:
# if host is explicitly set with auth token, it's probably a port forward address.
return config
import kubernetes as k8s
in_cluster = True
try:
k8s.config.load_incluster_config()
except:
in_cluster = False
pass
if in_cluster:
config.host = Client.IN_CLUSTER_DNS_NAME.format(namespace)
return config
try:
k8s.config.load_kube_config(client_configuration=config)
except:
print('Failed to load kube config.')
return config
if config.host:
config.host = config.host + '/' + Client.KUBE_PROXY_PATH.format(namespace)
return config
def _is_inverse_proxy_host(self, host):
if host:
return re.match(r'\S+.googleusercontent.com/{0,1}$', host)
if re.match(r'\w+', host):
warnings.warn(
'The received host is %s, please include the full endpoint address '
'(with ".(pipelines/notebooks).googleusercontent.com")' % host)
return False
def _is_ipython(self):
"""Returns whether we are running in notebook."""
try:
import IPython
ipy = IPython.get_ipython()
if ipy is None:
return False
except ImportError:
return False
return True
def _get_url_prefix(self):
if self._uihost:
# User's own connection.
if self._uihost.startswith('http://') or self._uihost.startswith('https://'):
return self._uihost
else:
return 'http://' + self._uihost
# In-cluster pod. We could use relative URL.
return '/pipeline'
def _load_context_setting_or_default(self):
if os.path.exists(Client.LOCAL_KFP_CONTEXT):
with open(Client.LOCAL_KFP_CONTEXT, 'r') as f:
self._context_setting = json.load(f)
else:
self._context_setting = {
'namespace': '',
}
def _refresh_api_client_token(self):
"""Refreshes the existing token associated with the kfp_api_client."""
if getattr(self, '_is_refresh_token', None):
return
new_token = get_gcp_access_token()
self._existing_config.api_key['authorization'] = new_token
def set_user_namespace(self, namespace):
"""Set user namespace into local context setting file.
This function should only be used when Kubeflow Pipelines is in the multi-user mode.
Args:
namespace: kubernetes namespace the user has access to.
"""
self._context_setting['namespace'] = namespace
with open(Client.LOCAL_KFP_CONTEXT, 'w') as f:
json.dump(self._context_setting, f)
def get_user_namespace(self):
"""Get user namespace in context config.
Returns:
namespace: kubernetes namespace from the local context file or empty if it wasn't set.
"""
return self._context_setting['namespace']
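# Usage sketch for multi-user deployments (illustrative; the namespace is a placeholder).
# Once stored, the namespace is picked up by the methods below through get_user_namespace()
# whenever their namespace argument is None.
#
#     client.set_user_namespace('alice')
#     exp = client.create_experiment(name='demo-experiment')   # created in namespace 'alice'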
def create_experiment(self, name, description=None, namespace=None):
"""Create a new experiment.
Args:
name: The name of the experiment.
description: Description of the experiment.
namespace: Kubernetes namespace where the experiment should be created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
An Experiment object. Most important field is id.
"""
namespace = namespace or self.get_user_namespace()
experiment = None
try:
experiment = self.get_experiment(experiment_name=name, namespace=namespace)
except:
# Ignore error if the experiment does not exist.
pass
if not experiment:
logging.info('Creating experiment {}.'.format(name))
resource_references = []
if namespace:
key = kfp_server_api.models.ApiResourceKey(id=namespace, type=kfp_server_api.models.ApiResourceType.NAMESPACE)
reference = kfp_server_api.models.ApiResourceReference(key=key, relationship=kfp_server_api.models.ApiRelationship.OWNER)
resource_references.append(reference)
experiment = kfp_server_api.models.ApiExperiment(
name=name,
description=description,
resource_references=resource_references)
experiment = self._experiment_api.create_experiment(body=experiment)
if self._is_ipython():
import IPython
html = \
('Experiment link <a href="%s/#/experiments/details/%s" target="_blank" >here</a>'
% (self._get_url_prefix(), experiment.id))
IPython.display.display(IPython.display.HTML(html))
return experiment
def get_pipeline_id(self, name):
"""Find the id of a pipeline by name.
Args:
name: Pipeline name.
Returns:
Returns the pipeline id if a pipeline with the name exists.
"""
pipeline_filter = json.dumps({
"predicates": [
{
"op": _FILTER_OPERATIONS["EQUALS"],
"key": "name",
"stringValue": name,
}
]
})
result = self._pipelines_api.list_pipelines(filter=pipeline_filter)
if result.pipelines is None:
return None
if len(result.pipelines)==1:
return result.pipelines[0].id
elif len(result.pipelines)>1:
raise ValueError("Multiple pipelines with the name: {} found, the name needs to be unique".format(name))
return None
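# Usage sketch (illustrative): running an already-uploaded pipeline by id. The pipeline
# and experiment names are placeholders; run_pipeline() is defined later in this class.
#
#     pipeline_id = client.get_pipeline_id('demo-pipeline')
#     if pipeline_id is not None:
#         exp = client.create_experiment(name='demo-experiment')
#         run = client.run_pipeline(experiment_id=exp.id,
#                                   job_name='demo-run',
#                                   pipeline_id=pipeline_id)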
def list_experiments(self, page_token='', page_size=10, sort_by='', namespace=None):
"""List experiments.
Args:
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: Can be '[field_name]', '[field_name] desc'. For example, 'name desc'.
namespace: Kubernetes namespace where the experiment was created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
A response object including a list of experiments and next page token.
"""
namespace = namespace or self.get_user_namespace()
response = self._experiment_api.list_experiment(
page_token=page_token,
page_size=page_size,
sort_by=sort_by,
resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.NAMESPACE,
resource_reference_key_id=namespace)
return response
def get_experiment(self, experiment_id=None, experiment_name=None, namespace=None):
"""Get details of an experiment
Either experiment_id or experiment_name is required
Args:
experiment_id: Id of the experiment. (Optional)
experiment_name: Name of the experiment. (Optional)
namespace: Kubernetes namespace where the experiment was created.
For single user deployment, leave it as None;
For multi user, input the namespace where the user is authorized.
Returns:
A response object including details of an experiment.
Throws:
Exception if experiment is not found or None of the arguments is provided
"""
namespace = namespace or self.get_user_namespace()
if experiment_id is None and experiment_name is None:
raise ValueError('Either experiment_id or experiment_name is required')
if experiment_id is not None:
return self._experiment_api.get_experiment(id=experiment_id)
next_page_token = ''
while next_page_token is not None:
list_experiments_response = self.list_experiments(page_size=100, page_token=next_page_token, namespace=namespace)
next_page_token = list_experiments_response.next_page_token
for experiment in list_experiments_response.experiments or []:
if experiment.name == experiment_name:
return self._experiment_api.get_experiment(id=experiment.id)
raise ValueError('No experiment is found with name {}.'.format(experiment_name))
def _extract_pipeline_yaml(self, package_file):
def _choose_pipeline_yaml_file(file_list) -> str:
yaml_files = [file for file in file_list if file.endswith('.yaml')]
if len(yaml_files) == 0:
raise ValueError('Invalid package. Missing pipeline yaml file in the package.')
if 'pipeline.yaml' in yaml_files:
return 'pipeline.yaml'
else:
if len(yaml_files) == 1:
return yaml_files[0]
raise ValueError('Invalid package. There is no pipeline.yaml file and there are multiple yaml files.')
if package_file.endswith('.tar.gz') or package_file.endswith('.tgz'):
with tarfile.open(package_file, "r:gz") as tar:
file_names = [member.name for member in tar if member.isfile()]
pipeline_yaml_file = _choose_pipeline_yaml_file(file_names)
with tar.extractfile(tar.getmember(pipeline_yaml_file)) as f:
return yaml.safe_load(f)
elif package_file.endswith('.zip'):
with zipfile.ZipFile(package_file, 'r') as zip:
pipeline_yaml_file = _choose_pipeline_yaml_file(zip.namelist())
with zip.open(pipeline_yaml_file) as f:
return yaml.safe_load(f)
elif package_file.endswith('.yaml') or package_file.endswith('.yml'):
with open(package_file, 'r') as f:
return yaml.safe_load(f)
else:
raise ValueError('The package_file '+ package_file + ' should end with one of the following formats: [.tar.gz, .tgz, .zip, .yaml, .yml]')
def list_pipelines(self, page_token='', page_size=10, sort_by=''):
"""List pipelines.
Args:
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of pipelines and next page token.
"""
return self._pipelines_api.list_pipelines(page_token=page_token, page_size=page_size, sort_by=sort_by)
def list_pipeline_versions(self, pipeline_id: str, page_token='', page_size=10, sort_by=''):
"""List all versions of a given pipeline.
Args:
pipeline_id: The id of a pipeline.
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of pipeline versions and next page token.
"""
return self._pipelines_api.list_pipeline_versions(
resource_key_type="PIPELINE",
resource_key_id=pipeline_id,
page_token=page_token,
page_size=page_size,
sort_by=sort_by
)
# TODO: provide default namespace, similar to kubectl default namespaces.
def run_pipeline(self, experiment_id, job_name, pipeline_package_path=None, params={}, pipeline_id=None, version_id=None):
"""Run a specified pipeline.
Args:
experiment_id: The id of an experiment.
job_name: Name of the job.
pipeline_package_path: Local path of the pipeline package(the filename should end with one of the following .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The id of a pipeline.
version_id: The id of a pipeline version.
If both pipeline_id and version_id are specified, version_id will take precedence.
If only pipeline_id is specified, the default version of this pipeline is used to create the run.
Returns:
A run object. Most important field is id.
"""
job_config = self._create_job_config(
experiment_id=experiment_id,
params=params,
pipeline_package_path=pipeline_package_path,
pipeline_id=pipeline_id,
version_id=version_id)
run_body = kfp_server_api.models.ApiRun(
pipeline_spec=job_config.spec, resource_references=job_config.resource_references, name=job_name)
response = self._run_api.create_run(body=run_body)
if self._is_ipython():
import IPython
html = ('Run link <a href="%s/#/runs/details/%s" target="_blank" >here</a>'
% (self._get_url_prefix(), response.run.id))
IPython.display.display(IPython.display.HTML(html))
return response.run
def create_recurring_run(self, experiment_id, job_name, description=None, start_time=None, end_time=None, interval_second=None, cron_expression=None, max_concurrency=1, no_catchup=None, params={}, pipeline_package_path=None, pipeline_id=None, version_id=None, enabled=True):
"""Create a recurring run.
Args:
experiment_id: The string id of an experiment.
job_name: Name of the job.
description: An optional job description.
start_time: The RFC3339 time string of the time when to start the job.
end_time: The RFC3339 time string of the time when to end the job.
interval_second: Integer indicating the seconds between two recurring runs for a periodic schedule.
cron_expression: A cron expression representing a set of times, using 5 space-separated fields, e.g. "0 0 9 ? * 2-6".
max_concurrency: Integer indicating how many jobs can be run in parallel.
no_catchup: Whether the recurring run should catch up if behind schedule.
For example, if the recurring run is paused for a while and re-enabled
afterwards. If no_catchup=False, the scheduler will catch up on (backfill) each
missed interval. Otherwise, it only schedules the latest interval if more than one interval
is ready to be scheduled.
Usually, if your pipeline handles backfill internally, you should turn catchup
off to avoid duplicate backfill. (default: {False})
pipeline_package_path: Local path of the pipeline package(the filename should end with one of the following .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The string ID of a pipeline.
version_id: The string ID of a pipeline version.
If both pipeline_id and version_id are specified, pipeline_id will take precedence.
This will change in a future version, so it is recommended to use version_id by itself.
enabled: A bool indicating whether the recurring run is enabled or disabled.
Returns:
A Job object. Most important field is id.
"""
job_config = self._create_job_config(
experiment_id=experiment_id,
params=params,
pipeline_package_path=pipeline_package_path,
pipeline_id=pipeline_id,
version_id=version_id)
if all([interval_second, cron_expression]) or not any([interval_second, cron_expression]):
raise ValueError('Either interval_second or cron_expression is required')
if interval_second is not None:
trigger = kfp_server_api.models.ApiTrigger(
periodic_schedule=kfp_server_api.models.ApiPeriodicSchedule(
start_time=start_time, end_time=end_time, interval_second=interval_second)
)
if cron_expression is not None:
trigger = kfp_server_api.models.ApiTrigger(
cron_schedule=kfp_server_api.models.ApiCronSchedule(
start_time=start_time, end_time=end_time, cron=cron_expression)
)
job_body = kfp_server_api.models.ApiJob(
enabled=enabled,
pipeline_spec=job_config.spec,
resource_references=job_config.resource_references,
name=job_name,
description=description,
no_catchup=no_catchup,
trigger=trigger,
max_concurrency=max_concurrency)
return self._job_api.create_job(body=job_body)
def _create_job_config(self, experiment_id, params, pipeline_package_path, pipeline_id, version_id):
"""Create a JobConfig with spec and resource_references.
Args:
experiment_id: The id of an experiment.
pipeline_package_path: Local path of the pipeline package(the filename should end with one of the following .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The id of a pipeline.
version_id: The id of a pipeline version.
If both pipeline_id and version_id are specified, pipeline_id will take precedence.
This will change in a future version, so it is recommended to use version_id by itself.
Returns:
A JobConfig object with attributes spec and resource_reference.
"""
class JobConfig:
def __init__(self, spec, resource_references):
self.spec = spec
self.resource_references = resource_references
pipeline_json_string = None
if pipeline_package_path:
pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)
pipeline_json_string = json.dumps(pipeline_obj)
api_params = [kfp_server_api.ApiParameter(
name=sanitize_k8s_name(name=k, allow_capital_underscore=True),
value=str(v)) for k,v in params.items()]
resource_references = []
key = kfp_server_api.models.ApiResourceKey(id=experiment_id,
type=kfp_server_api.models.ApiResourceType.EXPERIMENT)
reference = kfp_server_api.models.ApiResourceReference(key=key,
relationship=kfp_server_api.models.ApiRelationship.OWNER)
resource_references.append(reference)
if version_id:
key = kfp_server_api.models.ApiResourceKey(id=version_id,
type=kfp_server_api.models.ApiResourceType.PIPELINE_VERSION)
reference = kfp_server_api.models.ApiResourceReference(key=key,
relationship=kfp_server_api.models.ApiRelationship.CREATOR)
resource_references.append(reference)
spec = kfp_server_api.models.ApiPipelineSpec(
pipeline_id=pipeline_id,
workflow_manifest=pipeline_json_string,
parameters=api_params)
return JobConfig(spec=spec, resource_references=resource_references)
def create_run_from_pipeline_func(self, pipeline_func: Callable, arguments: Mapping[str, str], run_name=None, experiment_name=None, pipeline_conf: kfp.dsl.PipelineConf = None, namespace=None):
"""Runs pipeline on KFP-enabled Kubernetes cluster.
This command compiles the pipeline function, creates or gets an experiment and submits the pipeline for execution.
Args:
pipeline_func: A function that describes a pipeline by calling components and composing them into execution graph.
arguments: Arguments to the pipeline function provided as a dict.
run_name: Optional. Name of the run to be shown in the UI.
experiment_name: Optional. Name of the experiment to add the run to.
namespace: Kubernetes namespace where the pipeline runs are created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized
"""
#TODO: Check arguments against the pipeline function
pipeline_name = pipeline_func.__name__
run_name = run_name or pipeline_name + ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
with tempfile.TemporaryDirectory() as tmpdir:
pipeline_package_path = os.path.join(tmpdir, 'pipeline.yaml')
compiler.Compiler().compile(pipeline_func, pipeline_package_path, pipeline_conf=pipeline_conf)
return self.create_run_from_pipeline_package(pipeline_package_path, arguments, run_name, experiment_name, namespace)
def create_run_from_pipeline_package(self, pipeline_file: str, arguments: Mapping[str, str], run_name=None, experiment_name=None, namespace=None):
"""Runs pipeline on KFP-enabled Kubernetes cluster.
This command compiles the pipeline function, creates or gets an experiment and submits the pipeline for execution.
Args:
pipeline_file: A compiled pipeline package file.
arguments: Arguments to the pipeline function provided as a dict.
run_name: Optional. Name of the run to be shown in the UI.
experiment_name: Optional. Name of the experiment to add the run to.
namespace: Kubernetes namespace where the pipeline runs are created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized
"""
class RunPipelineResult:
def __init__(self, client, run_info):
self._client = client
self.run_info = run_info
self.run_id = run_info.id
def wait_for_run_completion(self, timeout=None):
timeout = timeout or datetime.timedelta.max
return self._client.wait_for_run_completion(self.run_id, timeout)
def __repr__(self):
return 'RunPipelineResult(run_id={})'.format(self.run_id)
#TODO: Check arguments against the pipeline function
pipeline_name = os.path.basename(pipeline_file)
experiment_name = experiment_name or os.environ.get(KF_PIPELINES_DEFAULT_EXPERIMENT_NAME, None)
overridden_experiment_name = os.environ.get(KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME, experiment_name)
if overridden_experiment_name != experiment_name:
import warnings
warnings.warn('Changing experiment name from "{}" to "{}".'.format(experiment_name, overridden_experiment_name))
experiment_name = overridden_experiment_name or 'Default'
run_name = run_name or (pipeline_name + ' ' +
datetime.datetime.now().strftime(
'%Y-%m-%d %H-%M-%S'))
experiment = self.create_experiment(name=experiment_name, namespace=namespace)
run_info = self.run_pipeline(experiment.id, run_name, pipeline_file, arguments)
return RunPipelineResult(self, run_info)
def list_runs(self, page_token='', page_size=10, sort_by='', experiment_id=None, namespace=None):
"""List runs, optionally can be filtered by experiment or namespace.
Args:
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: One of 'field_name', 'field_name desc'. For example, 'name desc'.
experiment_id: Experiment id to filter upon
namespace: Kubernetes namespace to filter upon.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
A response object including a list of runs and next page token.
"""
namespace = namespace or self.get_user_namespace()
if experiment_id is not None:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.EXPERIMENT, resource_reference_key_id=experiment_id)
elif namespace:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.NAMESPACE, resource_reference_key_id=namespace)
else:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by)
return response
def list_recurring_runs(self, page_token='', page_size=10, sort_by='', experiment_id=None):
"""List recurring runs.
Args:
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: One of 'field_name', 'field_name desc'. For example, 'name desc'.
experiment_id: Experiment id to filter upon.
Returns:
A response object including a list of recurring_runs and next page token.
"""
if experiment_id is not None:
response = self._job_api.list_jobs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.EXPERIMENT, resource_reference_key_id=experiment_id)
else:
response = self._job_api.list_jobs(page_token=page_token, page_size=page_size, sort_by=sort_by)
return response
def get_recurring_run(self, job_id):
"""Get recurring_run details.
Args:
job_id: id of the recurring_run.
Returns:
A response object including details of a recurring_run.
Throws:
Exception if recurring_run is not found.
"""
return self._job_api.get_job(id=job_id)
def get_run(self, run_id):
"""Get run details.
Args:
run_id: id of the run.
Returns:
A response object including details of a run.
Throws:
Exception if run is not found.
"""
return self._run_api.get_run(run_id=run_id)
def wait_for_run_completion(self, run_id, timeout):
"""Waits for a run to complete.
Args:
run_id: Run id, returned from run_pipeline.
timeout: Timeout in seconds.
Returns:
A run detail object: Most important fields are run and pipeline_runtime.
Raises:
TimeoutError: if the pipeline run failed to finish before the specified timeout.
"""
status = 'Running:'
start_time = datetime.datetime.now()
last_token_refresh_time = datetime.datetime.now()
while (status is None or
status.lower() not in ['succeeded', 'failed', 'skipped', 'error']):
# Refreshes the access token before it hits the TTL.
if (datetime.datetime.now() - last_token_refresh_time
> _GCP_ACCESS_TOKEN_TIMEOUT):
self._refresh_api_client_token()
last_token_refresh_time = datetime.datetime.now()
get_run_response = self._run_api.get_run(run_id=run_id)
status = get_run_response.run.status
elapsed_time = (datetime.datetime.now() - start_time).seconds
logging.info('Waiting for the job to complete...')
if elapsed_time > timeout:
raise TimeoutError('Run timeout')
time.sleep(5)
return get_run_response
def _get_workflow_json(self, run_id):
"""Get the workflow json.
Args:
run_id: run id, returned from run_pipeline.
Returns:
workflow: Json workflow
"""
get_run_response = self._run_api.get_run(run_id=run_id)
workflow = get_run_response.pipeline_runtime.workflow_manifest
workflow_json = json.loads(workflow)
return workflow_json
def upload_pipeline(
self,
pipeline_package_path: str = None,
pipeline_name: str = None,
description: str = None,
):
"""Uploads the pipeline to the Kubeflow Pipelines cluster.
Args:
pipeline_package_path: Local path to the pipeline package.
pipeline_name: Optional. Name of the pipeline to be shown in the UI.
description: Optional. Description of the pipeline to be shown in the UI.
Returns:
Server response object containing pipeline id and other information.
"""
response = self._upload_api.upload_pipeline(pipeline_package_path, name=pipeline_name, description=description)
if self._is_ipython():
import IPython
html = 'Pipeline link <a href=%s/#/pipelines/details/%s>here</a>' % (self._get_url_prefix(), response.id)
IPython.display.display(IPython.display.HTML(html))
return response
def upload_pipeline_version(
self,
pipeline_package_path,
pipeline_version_name: str,
pipeline_id: Optional[str] = None,
pipeline_name: Optional[str] = None
):
"""Uploads a new version of the pipeline to the Kubeflow Pipelines cluster.
Args:
pipeline_package_path: Local path to the pipeline package.
pipeline_version_name: Name of the pipeline version to be shown in the UI.
pipeline_id: Optional. Id of the pipeline.
pipeline_name: Optional. Name of the pipeline.
Returns:
Server response object containing pipeline id and other information.
Throws:
ValueError when neither or both of pipeline_id and pipeline_name are specified.
Exception if pipeline id is not found.
"""
if all([pipeline_id, pipeline_name]) or not any([pipeline_id, pipeline_name]):
raise ValueError('Either pipeline_id or pipeline_name is required')
if pipeline_name:
pipeline_id = self.get_pipeline_id(pipeline_name)
response = self._upload_api.upload_pipeline_version(
pipeline_package_path,
name=pipeline_version_name,
pipelineid=pipeline_id
)
if self._is_ipython():
import IPython
html = 'Pipeline link <a href=%s/#/pipelines/details/%s>here</a>' % (self._get_url_prefix(), response.id)
IPython.display.display(IPython.display.HTML(html))
return response
def get_pipeline(self, pipeline_id):
"""Get pipeline details.
Args:
pipeline_id: id of the pipeline.
Returns:
A response object including details of a pipeline.
Throws:
Exception if pipeline is not found.
"""
return self._pipelines_api.get_pipeline(id=pipeline_id)
def delete_pipeline(self, pipeline_id):
"""Delete pipeline.
Args:
pipeline_id: id of the pipeline.
Returns:
Object. If the method is called asynchronously, returns the request thread.
Throws:
Exception if pipeline is not found.
"""
return self._pipelines_api.delete_pipeline(id=pipeline_id)
def list_pipeline_versions(self, pipeline_id, page_token='', page_size=10, sort_by=''):
"""Lists pipeline versions.
Args:
pipeline_id: Id of the pipeline to list versions
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: One of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of versions and next page token.
"""
return self._pipelines_api.list_pipeline_versions(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.PIPELINE, resource_key_id=pipeline_id)
chore: Corrects doc string of two client methods. (#4442)
* clean up doc string for two methods
* add missing arg doc
* add missing type hints.
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging
import json
import os
import re
import tarfile
import tempfile
import warnings
import yaml
import zipfile
import datetime
from typing import Mapping, Callable, Optional
import kfp
import kfp_server_api
from kfp.compiler import compiler
from kfp.compiler._k8s_helper import sanitize_k8s_name
from kfp._auth import get_auth_token, get_gcp_access_token
# TTL of the access token associated with the client. This is needed because
# `gcloud auth print-access-token` generates a token with TTL=1 hour, after
# which the authentication expires. This TTL is needed for kfp.Client()
# initialized with host=<inverse proxy endpoint>.
# Set to 55 mins to provide some safe margin.
_GCP_ACCESS_TOKEN_TIMEOUT = datetime.timedelta(minutes=55)
# Operators on scalar values. Only applies to one of |int_value|,
# |long_value|, |string_value| or |timestamp_value|.
_FILTER_OPERATIONS = {"UNKNOWN": 0,
"EQUALS" : 1,
"NOT_EQUALS" : 2,
"GREATER_THAN": 3,
"GREATER_THAN_EQUALS": 5,
"LESS_THAN": 6,
"LESS_THAN_EQUALS": 7}
def _add_generated_apis(target_struct, api_module, api_client):
"""Initializes a hierarchical API object based on the generated API module.
PipelineServiceApi.create_pipeline becomes target_struct.pipelines.create_pipeline
"""
Struct = type('Struct', (), {})
def camel_case_to_snake_case(name):
import re
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
for api_name in dir(api_module):
if not api_name.endswith('ServiceApi'):
continue
short_api_name = camel_case_to_snake_case(api_name[0:-len('ServiceApi')]) + 's'
api_struct = Struct()
setattr(target_struct, short_api_name, api_struct)
service_api = getattr(api_module.api, api_name)
initialized_service_api = service_api(api_client)
for member_name in dir(initialized_service_api):
if member_name.startswith('_') or member_name.endswith('_with_http_info'):
continue
bound_member = getattr(initialized_service_api, member_name)
setattr(api_struct, member_name, bound_member)
models_struct = Struct()
for member_name in dir(api_module.models):
if not member_name[0].islower():
setattr(models_struct, member_name, getattr(api_module.models, member_name))
target_struct.api_models = models_struct
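# What the wiring above produces, assuming `client` is an instance of the
# Client class defined below: each *ServiceApi class is exposed under a
# snake_case, pluralized attribute, so (per the docstring)
# PipelineServiceApi.create_pipeline is reachable as
# client.pipelines.create_pipeline and, by the same rule, RunServiceApi.get_run
# as client.runs.get_run. The generated model classes are collected under
# client.api_models.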
KF_PIPELINES_ENDPOINT_ENV = 'KF_PIPELINES_ENDPOINT'
KF_PIPELINES_UI_ENDPOINT_ENV = 'KF_PIPELINES_UI_ENDPOINT'
KF_PIPELINES_DEFAULT_EXPERIMENT_NAME = 'KF_PIPELINES_DEFAULT_EXPERIMENT_NAME'
KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME = 'KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME'
class Client(object):
"""API Client for KubeFlow Pipeline.
Args:
host: The host name to use to talk to Kubeflow Pipelines. If not set, the in-cluster
service DNS name will be used, which only works if the current environment is a pod
in the same cluster (such as a Jupyter instance spawned by Kubeflow's
JupyterHub). If you have a different connection to the cluster, such as a kubectl
proxy connection, then set it to something like "127.0.0.1:8080/pipeline".
If you connect to an IAP enabled cluster, set it to
"https://<your-deployment>.endpoints.<your-project>.cloud.goog/pipeline".
client_id: The client ID used by Identity-Aware Proxy.
namespace: The namespace where the kubeflow pipeline system is run.
other_client_id: The client ID used to obtain the auth codes and refresh tokens.
Reference: https://cloud.google.com/iap/docs/authentication-howto#authenticating_from_a_desktop_app.
other_client_secret: The client secret used to obtain the auth codes and refresh tokens.
existing_token: Pass in a token directly; useful when the token is obtained outside of the SDK,
e.g. from GCP Cloud Functions, or when the caller already has a token.
cookies: CookieJar object containing cookies that will be passed to the pipelines API.
proxy: HTTP or HTTPS proxy server
ssl_ca_cert: Cert for proxy
"""
# in-cluster DNS name of the pipeline service
IN_CLUSTER_DNS_NAME = 'ml-pipeline.{}.svc.cluster.local:8888'
KUBE_PROXY_PATH = 'api/v1/namespaces/{}/services/ml-pipeline:http/proxy/'
LOCAL_KFP_CONTEXT = os.path.expanduser('~/.config/kfp/context.json')
# TODO: Wrap the configurations for different authentication methods.
def __init__(self, host=None, client_id=None, namespace='kubeflow', other_client_id=None, other_client_secret=None, existing_token=None, cookies=None, proxy=None, ssl_ca_cert=None):
"""Create a new instance of kfp client.
"""
host = host or os.environ.get(KF_PIPELINES_ENDPOINT_ENV)
self._uihost = os.environ.get(KF_PIPELINES_UI_ENDPOINT_ENV, host)
config = self._load_config(host, client_id, namespace, other_client_id, other_client_secret, existing_token, proxy, ssl_ca_cert)
# Save the loaded API client configuration, as a reference if update is
# needed.
self._existing_config = config
api_client = kfp_server_api.api_client.ApiClient(config, cookie=cookies)
_add_generated_apis(self, kfp_server_api, api_client)
self._job_api = kfp_server_api.api.job_service_api.JobServiceApi(api_client)
self._run_api = kfp_server_api.api.run_service_api.RunServiceApi(api_client)
self._experiment_api = kfp_server_api.api.experiment_service_api.ExperimentServiceApi(api_client)
self._pipelines_api = kfp_server_api.api.pipeline_service_api.PipelineServiceApi(api_client)
self._upload_api = kfp_server_api.api.PipelineUploadServiceApi(api_client)
self._load_context_setting_or_default()
def _load_config(self, host, client_id, namespace, other_client_id, other_client_secret, existing_token, proxy, ssl_ca_cert):
config = kfp_server_api.configuration.Configuration()
if proxy:
# https://github.com/kubeflow/pipelines/blob/c6ac5e0b1fd991e19e96419f0f508ec0a4217c29/backend/api/python_http_client/kfp_server_api/rest.py#L100
config.proxy = proxy
if ssl_ca_cert:
config.ssl_ca_cert = ssl_ca_cert
host = host or ''
# Preprocess the host endpoint to prevent some common user mistakes.
if not client_id:
# always preserving the protocol (http://localhost requires it)
host = host.rstrip('/')
if host:
config.host = host
token = None
# "existing_token" is designed to accept token generated outside of SDK. Here is an example.
#
# https://cloud.google.com/functions/docs/securing/function-identity
# https://cloud.google.com/endpoints/docs/grpc/service-account-authentication
#
# import requests
# import kfp
#
# def get_access_token():
# url = 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'
# r = requests.get(url, headers={'Metadata-Flavor': 'Google'})
# r.raise_for_status()
# access_token = r.json()['access_token']
# return access_token
#
# client = kfp.Client(host='<KFPHost>', existing_token=get_access_token())
#
if existing_token:
token = existing_token
self._is_refresh_token = False
elif client_id:
token = get_auth_token(client_id, other_client_id, other_client_secret)
self._is_refresh_token = True
elif self._is_inverse_proxy_host(host):
token = get_gcp_access_token()
self._is_refresh_token = False
if token:
config.api_key['authorization'] = token
config.api_key_prefix['authorization'] = 'Bearer'
return config
if host:
# if host is explicitly set with auth token, it's probably a port forward address.
return config
import kubernetes as k8s
in_cluster = True
try:
k8s.config.load_incluster_config()
except:
in_cluster = False
pass
if in_cluster:
config.host = Client.IN_CLUSTER_DNS_NAME.format(namespace)
return config
try:
k8s.config.load_kube_config(client_configuration=config)
except:
print('Failed to load kube config.')
return config
if config.host:
config.host = config.host + '/' + Client.KUBE_PROXY_PATH.format(namespace)
return config
def _is_inverse_proxy_host(self, host):
if host:
return re.match(r'\S+.googleusercontent.com/{0,1}$', host)
if re.match(r'\w+', host):
warnings.warn(
'The received host is %s, please include the full endpoint address '
'(with ".(pipelines/notebooks).googleusercontent.com")' % host)
return False
def _is_ipython(self):
"""Returns whether we are running in notebook."""
try:
import IPython
ipy = IPython.get_ipython()
if ipy is None:
return False
except ImportError:
return False
return True
def _get_url_prefix(self):
if self._uihost:
# User's own connection.
if self._uihost.startswith('http://') or self._uihost.startswith('https://'):
return self._uihost
else:
return 'http://' + self._uihost
# In-cluster pod. We could use relative URL.
return '/pipeline'
def _load_context_setting_or_default(self):
if os.path.exists(Client.LOCAL_KFP_CONTEXT):
with open(Client.LOCAL_KFP_CONTEXT, 'r') as f:
self._context_setting = json.load(f)
else:
self._context_setting = {
'namespace': '',
}
def _refresh_api_client_token(self):
"""Refreshes the existing token associated with the kfp_api_client."""
if getattr(self, '_is_refresh_token', None):
return
new_token = get_gcp_access_token()
self._existing_config.api_key['authorization'] = new_token
def set_user_namespace(self, namespace):
"""Set user namespace into local context setting file.
This function should only be used when Kubeflow Pipelines is in the multi-user mode.
Args:
namespace: kubernetes namespace the user has access to.
"""
self._context_setting['namespace'] = namespace
with open(Client.LOCAL_KFP_CONTEXT, 'w') as f:
json.dump(self._context_setting, f)
def get_user_namespace(self):
"""Get user namespace in context config.
Returns:
namespace: kubernetes namespace from the local context file or empty if it wasn't set.
"""
return self._context_setting['namespace']
def create_experiment(self, name, description=None, namespace=None):
"""Create a new experiment.
Args:
name: The name of the experiment.
description: Description of the experiment.
namespace: Kubernetes namespace where the experiment should be created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
An Experiment object. Most important field is id.
"""
namespace = namespace or self.get_user_namespace()
experiment = None
try:
experiment = self.get_experiment(experiment_name=name, namespace=namespace)
except:
# Ignore error if the experiment does not exist.
pass
if not experiment:
logging.info('Creating experiment {}.'.format(name))
resource_references = []
if namespace:
key = kfp_server_api.models.ApiResourceKey(id=namespace, type=kfp_server_api.models.ApiResourceType.NAMESPACE)
reference = kfp_server_api.models.ApiResourceReference(key=key, relationship=kfp_server_api.models.ApiRelationship.OWNER)
resource_references.append(reference)
experiment = kfp_server_api.models.ApiExperiment(
name=name,
description=description,
resource_references=resource_references)
experiment = self._experiment_api.create_experiment(body=experiment)
if self._is_ipython():
import IPython
html = \
('Experiment link <a href="%s/#/experiments/details/%s" target="_blank" >here</a>'
% (self._get_url_prefix(), experiment.id))
IPython.display.display(IPython.display.HTML(html))
return experiment
def get_pipeline_id(self, name):
"""Find the id of a pipeline by name.
Args:
name: Pipeline name.
Returns:
Returns the pipeline id if a pipeline with the name exists.
"""
pipeline_filter = json.dumps({
"predicates": [
{
"op": _FILTER_OPERATIONS["EQUALS"],
"key": "name",
"stringValue": name,
}
]
})
result = self._pipelines_api.list_pipelines(filter=pipeline_filter)
if result.pipelines is None:
return None
if len(result.pipelines)==1:
return result.pipelines[0].id
elif len(result.pipelines)>1:
raise ValueError("Multiple pipelines with the name: {} found, the name needs to be unique".format(name))
return None
def list_experiments(self, page_token='', page_size=10, sort_by='', namespace=None):
"""List experiments.
Args:
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: Can be '[field_name]', '[field_name] desc'. For example, 'name desc'.
namespace: Kubernetes namespace where the experiment was created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
A response object including a list of experiments and next page token.
"""
namespace = namespace or self.get_user_namespace()
response = self._experiment_api.list_experiment(
page_token=page_token,
page_size=page_size,
sort_by=sort_by,
resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.NAMESPACE,
resource_reference_key_id=namespace)
return response
def get_experiment(self, experiment_id=None, experiment_name=None, namespace=None):
"""Get details of an experiment
Either experiment_id or experiment_name is required
Args:
experiment_id: Id of the experiment. (Optional)
experiment_name: Name of the experiment. (Optional)
namespace: Kubernetes namespace where the experiment was created.
For single user deployment, leave it as None;
For multi user, input the namespace where the user is authorized.
Returns:
A response object including details of an experiment.
Throws:
Exception if experiment is not found or none of the arguments is provided.
"""
namespace = namespace or self.get_user_namespace()
if experiment_id is None and experiment_name is None:
raise ValueError('Either experiment_id or experiment_name is required')
if experiment_id is not None:
return self._experiment_api.get_experiment(id=experiment_id)
next_page_token = ''
while next_page_token is not None:
list_experiments_response = self.list_experiments(page_size=100, page_token=next_page_token, namespace=namespace)
next_page_token = list_experiments_response.next_page_token
for experiment in list_experiments_response.experiments or []:
if experiment.name == experiment_name:
return self._experiment_api.get_experiment(id=experiment.id)
raise ValueError('No experiment is found with name {}.'.format(experiment_name))
def _extract_pipeline_yaml(self, package_file):
def _choose_pipeline_yaml_file(file_list) -> str:
yaml_files = [file for file in file_list if file.endswith('.yaml')]
if len(yaml_files) == 0:
raise ValueError('Invalid package. Missing pipeline yaml file in the package.')
if 'pipeline.yaml' in yaml_files:
return 'pipeline.yaml'
else:
if len(yaml_files) == 1:
return yaml_files[0]
raise ValueError('Invalid package. There is no pipeline.yaml file and there are multiple yaml files.')
if package_file.endswith('.tar.gz') or package_file.endswith('.tgz'):
with tarfile.open(package_file, "r:gz") as tar:
file_names = [member.name for member in tar if member.isfile()]
pipeline_yaml_file = _choose_pipeline_yaml_file(file_names)
with tar.extractfile(tar.getmember(pipeline_yaml_file)) as f:
return yaml.safe_load(f)
elif package_file.endswith('.zip'):
with zipfile.ZipFile(package_file, 'r') as zip:
pipeline_yaml_file = _choose_pipeline_yaml_file(zip.namelist())
with zip.open(pipeline_yaml_file) as f:
return yaml.safe_load(f)
elif package_file.endswith('.yaml') or package_file.endswith('.yml'):
with open(package_file, 'r') as f:
return yaml.safe_load(f)
else:
raise ValueError('The package_file '+ package_file + ' should end with one of the following formats: [.tar.gz, .tgz, .zip, .yaml, .yml]')
def list_pipelines(self, page_token='', page_size=10, sort_by=''):
"""List pipelines.
Args:
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of pipelines and next page token.
"""
return self._pipelines_api.list_pipelines(page_token=page_token, page_size=page_size, sort_by=sort_by)
def list_pipeline_versions(self, pipeline_id: str, page_token='', page_size=10, sort_by=''):
"""List all versions of a given pipeline.
Args:
pipeline_id: The id of a pipeline.
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: one of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of pipeline versions and next page token.
"""
return self._pipelines_api.list_pipeline_versions(
resource_key_type="PIPELINE",
resource_key_id=pipeline_id,
page_token=page_token,
page_size=page_size,
sort_by=sort_by
)
# TODO: provide default namespace, similar to kubectl default namespaces.
def run_pipeline(self, experiment_id, job_name, pipeline_package_path=None, params={}, pipeline_id=None, version_id=None):
"""Run a specified pipeline.
Args:
experiment_id: The id of an experiment.
job_name: Name of the job.
pipeline_package_path: Local path of the pipeline package (the filename should end with one of the following: .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The id of a pipeline.
version_id: The id of a pipeline version.
If both pipeline_id and version_id are specified, version_id will take precedence.
If only pipeline_id is specified, the default version of this pipeline is used to create the run.
Returns:
A run object. Most important field is id.
"""
job_config = self._create_job_config(
experiment_id=experiment_id,
params=params,
pipeline_package_path=pipeline_package_path,
pipeline_id=pipeline_id,
version_id=version_id)
run_body = kfp_server_api.models.ApiRun(
pipeline_spec=job_config.spec, resource_references=job_config.resource_references, name=job_name)
response = self._run_api.create_run(body=run_body)
if self._is_ipython():
import IPython
html = ('Run link <a href="%s/#/runs/details/%s" target="_blank" >here</a>'
% (self._get_url_prefix(), response.run.id))
IPython.display.display(IPython.display.HTML(html))
return response.run
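# Hedged usage sketch (identifiers are placeholders): run an already uploaded
# pipeline, optionally pinning a version. Per the docstring above, version_id
# takes precedence when both ids are given.
#
#   run = client.run_pipeline(experiment_id=exp.id, job_name='nightly run',
#                             pipeline_id=pid, version_id=vid,
#                             params={'learning_rate': '0.01'})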
def create_recurring_run(self, experiment_id, job_name, description=None, start_time=None, end_time=None, interval_second=None, cron_expression=None, max_concurrency=1, no_catchup=None, params={}, pipeline_package_path=None, pipeline_id=None, version_id=None, enabled=True):
"""Create a recurring run.
Args:
experiment_id: The string id of an experiment.
job_name: Name of the job.
description: An optional job description.
start_time: The RFC3339 time string of the time when to start the job.
end_time: The RFC3339 time string of the time when to end the job.
interval_second: Integer indicating the seconds between two recurring runs in for a periodic schedule.
cron_expression: A cron expression representing a set of times, using 6 space-separated fields, e.g. "0 0 9 ? * 2-6".
max_concurrency: Integer indicating how many jobs can be run in parallel.
no_catchup: Whether the recurring run should catch up if behind schedule.
For example, if the recurring run is paused for a while and re-enabled
afterwards. If no_catchup=False, the scheduler will catch up on (backfill) each
missed interval. Otherwise, it only schedules the latest interval if more than one interval
is ready to be scheduled.
Usually, if your pipeline handles backfill internally, you should turn catchup
off to avoid duplicate backfill. (default: {False})
pipeline_package_path: Local path of the pipeline package (the filename should end with one of the following: .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The string ID of a pipeline.
version_id: The string ID of a pipeline version.
If both pipeline_id and version_id are specified, pipeline_id will take precedence.
This will change in a future version, so it is recommended to use version_id by itself.
enabled: A bool indicating whether the recurring run is enabled or disabled.
Returns:
A Job object. Most important field is id.
"""
job_config = self._create_job_config(
experiment_id=experiment_id,
params=params,
pipeline_package_path=pipeline_package_path,
pipeline_id=pipeline_id,
version_id=version_id)
if all([interval_second, cron_expression]) or not any([interval_second, cron_expression]):
raise ValueError('Either interval_second or cron_expression is required')
if interval_second is not None:
trigger = kfp_server_api.models.ApiTrigger(
periodic_schedule=kfp_server_api.models.ApiPeriodicSchedule(
start_time=start_time, end_time=end_time, interval_second=interval_second)
)
if cron_expression is not None:
trigger = kfp_server_api.models.ApiTrigger(
cron_schedule=kfp_server_api.models.ApiCronSchedule(
start_time=start_time, end_time=end_time, cron=cron_expression)
)
job_body = kfp_server_api.models.ApiJob(
enabled=enabled,
pipeline_spec=job_config.spec,
resource_references=job_config.resource_references,
name=job_name,
description=description,
no_catchup=no_catchup,
trigger=trigger,
max_concurrency=max_concurrency)
return self._job_api.create_job(body=job_body)
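# Hedged sketch of the two mutually exclusive schedule forms (exactly one of
# interval_second / cron_expression must be given, as enforced above);
# identifiers are placeholders:
#
#   client.create_recurring_run(exp.id, 'hourly job', interval_second=3600,
#                               pipeline_id=pid)
#   client.create_recurring_run(exp.id, 'weekday 9am job',
#                               cron_expression='0 0 9 ? * 2-6',
#                               pipeline_id=pid)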
def _create_job_config(self, experiment_id, params, pipeline_package_path, pipeline_id, version_id):
"""Create a JobConfig with spec and resource_references.
Args:
experiment_id: The id of an experiment.
pipeline_package_path: Local path of the pipeline package (the filename should end with one of the following: .tar.gz, .tgz, .zip, .yaml, .yml).
params: A dictionary with key (string) as param name and value (string) as param value.
pipeline_id: The id of a pipeline.
version_id: The id of a pipeline version.
If both pipeline_id and version_id are specified, pipeline_id will take precedence.
This will change in a future version, so it is recommended to use version_id by itself.
Returns:
A JobConfig object with attributes spec and resource_reference.
"""
class JobConfig:
def __init__(self, spec, resource_references):
self.spec = spec
self.resource_references = resource_references
pipeline_json_string = None
if pipeline_package_path:
pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)
pipeline_json_string = json.dumps(pipeline_obj)
api_params = [kfp_server_api.ApiParameter(
name=sanitize_k8s_name(name=k, allow_capital_underscore=True),
value=str(v)) for k,v in params.items()]
resource_references = []
key = kfp_server_api.models.ApiResourceKey(id=experiment_id,
type=kfp_server_api.models.ApiResourceType.EXPERIMENT)
reference = kfp_server_api.models.ApiResourceReference(key=key,
relationship=kfp_server_api.models.ApiRelationship.OWNER)
resource_references.append(reference)
if version_id:
key = kfp_server_api.models.ApiResourceKey(id=version_id,
type=kfp_server_api.models.ApiResourceType.PIPELINE_VERSION)
reference = kfp_server_api.models.ApiResourceReference(key=key,
relationship=kfp_server_api.models.ApiRelationship.CREATOR)
resource_references.append(reference)
spec = kfp_server_api.models.ApiPipelineSpec(
pipeline_id=pipeline_id,
workflow_manifest=pipeline_json_string,
parameters=api_params)
return JobConfig(spec=spec, resource_references=resource_references)
def create_run_from_pipeline_func(
self,
pipeline_func: Callable,
arguments: Mapping[str, str],
run_name: Optional[str] = None,
experiment_name: Optional[str] = None,
pipeline_conf: Optional[kfp.dsl.PipelineConf] = None,
namespace: Optional[str] = None):
"""Runs pipeline on KFP-enabled Kubernetes cluster.
This command compiles the pipeline function, creates or gets an experiment and submits the pipeline for execution.
Args:
pipeline_func: A function that describes a pipeline by calling components and composing them into execution graph.
arguments: Arguments to the pipeline function provided as a dict.
run_name: Optional. Name of the run to be shown in the UI.
experiment_name: Optional. Name of the experiment to add the run to.
pipeline_conf: Optional. Pipeline configuration ops that will be applied
to all the ops in the pipeline func.
namespace: Kubernetes namespace where the pipeline runs are created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized
"""
#TODO: Check arguments against the pipeline function
pipeline_name = pipeline_func.__name__
run_name = run_name or pipeline_name + ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
with tempfile.TemporaryDirectory() as tmpdir:
pipeline_package_path = os.path.join(tmpdir, 'pipeline.yaml')
compiler.Compiler().compile(pipeline_func, pipeline_package_path, pipeline_conf=pipeline_conf)
return self.create_run_from_pipeline_package(pipeline_package_path, arguments, run_name, experiment_name, namespace)
def create_run_from_pipeline_package(
self,
pipeline_file: str,
arguments: Mapping[str, str],
run_name: Optional[str] = None,
experiment_name: Optional[str] = None,
namespace: Optional[str] = None):
"""Runs pipeline on KFP-enabled Kubernetes cluster.
This command takes a local pipeline package, creates or gets an experiment
and submits the pipeline for execution.
Args:
pipeline_file: A compiled pipeline package file.
arguments: Arguments to the pipeline function provided as a dict.
run_name: Optional. Name of the run to be shown in the UI.
experiment_name: Optional. Name of the experiment to add the run to.
namespace: Kubernetes namespace where the pipeline runs are created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized
"""
class RunPipelineResult:
def __init__(self, client, run_info):
self._client = client
self.run_info = run_info
self.run_id = run_info.id
def wait_for_run_completion(self, timeout=None):
timeout = timeout or datetime.timedelta.max
return self._client.wait_for_run_completion(self.run_id, timeout)
def __repr__(self):
return 'RunPipelineResult(run_id={})'.format(self.run_id)
#TODO: Check arguments against the pipeline function
pipeline_name = os.path.basename(pipeline_file)
experiment_name = experiment_name or os.environ.get(KF_PIPELINES_DEFAULT_EXPERIMENT_NAME, None)
overridden_experiment_name = os.environ.get(KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME, experiment_name)
if overridden_experiment_name != experiment_name:
import warnings
warnings.warn('Changing experiment name from "{}" to "{}".'.format(experiment_name, overridden_experiment_name))
experiment_name = overridden_experiment_name or 'Default'
run_name = run_name or (pipeline_name + ' ' +
datetime.datetime.now().strftime(
'%Y-%m-%d %H-%M-%S'))
experiment = self.create_experiment(name=experiment_name, namespace=namespace)
run_info = self.run_pipeline(experiment.id, run_name, pipeline_file, arguments)
return RunPipelineResult(self, run_info)
def list_runs(self, page_token='', page_size=10, sort_by='', experiment_id=None, namespace=None):
"""List runs, optionally can be filtered by experiment or namespace.
Args:
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: One of 'field_name', 'field_name desc'. For example, 'name desc'.
experiment_id: Experiment id to filter upon
namespace: Kubernetes namespace to filter upon.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized.
Returns:
A response object including a list of runs and next page token.
"""
namespace = namespace or self.get_user_namespace()
if experiment_id is not None:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.EXPERIMENT, resource_reference_key_id=experiment_id)
elif namespace:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.NAMESPACE, resource_reference_key_id=namespace)
else:
response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by)
return response
def list_recurring_runs(self, page_token='', page_size=10, sort_by='', experiment_id=None):
"""List recurring runs.
Args:
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: One of 'field_name', 'field_name desc'. For example, 'name desc'.
experiment_id: Experiment id to filter upon.
Returns:
A response object including a list of recurring_runs and next page token.
"""
if experiment_id is not None:
response = self._job_api.list_jobs(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_reference_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.EXPERIMENT, resource_reference_key_id=experiment_id)
else:
response = self._job_api.list_jobs(page_token=page_token, page_size=page_size, sort_by=sort_by)
return response
def get_recurring_run(self, job_id):
"""Get recurring_run details.
Args:
job_id: id of the recurring_run.
Returns:
A response object including details of a recurring_run.
Throws:
Exception if recurring_run is not found.
"""
return self._job_api.get_job(id=job_id)
def get_run(self, run_id):
"""Get run details.
Args:
run_id: id of the run.
Returns:
A response object including details of a run.
Throws:
Exception if run is not found.
"""
return self._run_api.get_run(run_id=run_id)
def wait_for_run_completion(self, run_id, timeout):
"""Waits for a run to complete.
Args:
run_id: Run id, returned from run_pipeline.
timeout: Timeout in seconds.
Returns:
A run detail object: Most important fields are run and pipeline_runtime.
Raises:
TimeoutError: if the pipeline run failed to finish before the specified timeout.
"""
status = 'Running:'
start_time = datetime.datetime.now()
last_token_refresh_time = datetime.datetime.now()
while (status is None or
status.lower() not in ['succeeded', 'failed', 'skipped', 'error']):
# Refreshes the access token before it hits the TTL.
if (datetime.datetime.now() - last_token_refresh_time
> _GCP_ACCESS_TOKEN_TIMEOUT):
self._refresh_api_client_token()
last_token_refresh_time = datetime.datetime.now()
get_run_response = self._run_api.get_run(run_id=run_id)
status = get_run_response.run.status
elapsed_time = (datetime.datetime.now() - start_time).seconds
logging.info('Waiting for the job to complete...')
if elapsed_time > timeout:
raise TimeoutError('Run timeout')
time.sleep(5)
return get_run_response
def _get_workflow_json(self, run_id):
"""Get the workflow json.
Args:
run_id: run id, returned from run_pipeline.
Returns:
workflow: Json workflow
"""
get_run_response = self._run_api.get_run(run_id=run_id)
workflow = get_run_response.pipeline_runtime.workflow_manifest
workflow_json = json.loads(workflow)
return workflow_json
def upload_pipeline(
self,
pipeline_package_path: str = None,
pipeline_name: str = None,
description: str = None,
):
"""Uploads the pipeline to the Kubeflow Pipelines cluster.
Args:
pipeline_package_path: Local path to the pipeline package.
pipeline_name: Optional. Name of the pipeline to be shown in the UI.
description: Optional. Description of the pipeline to be shown in the UI.
Returns:
Server response object containing pipeline id and other information.
"""
response = self._upload_api.upload_pipeline(pipeline_package_path, name=pipeline_name, description=description)
if self._is_ipython():
import IPython
html = 'Pipeline link <a href=%s/#/pipelines/details/%s>here</a>' % (self._get_url_prefix(), response.id)
IPython.display.display(IPython.display.HTML(html))
return response
def upload_pipeline_version(
self,
pipeline_package_path,
pipeline_version_name: str,
pipeline_id: Optional[str] = None,
pipeline_name: Optional[str] = None
):
"""Uploads a new version of the pipeline to the Kubeflow Pipelines cluster.
Args:
pipeline_package_path: Local path to the pipeline package.
pipeline_version_name: Name of the pipeline version to be shown in the UI.
pipeline_id: Optional. Id of the pipeline.
pipeline_name: Optional. Name of the pipeline.
Returns:
Server response object containing pipeline id and other information.
Throws:
ValueError when neither or both of pipeline_id and pipeline_name are specified.
Exception if pipeline id is not found.
"""
if all([pipeline_id, pipeline_name]) or not any([pipeline_id, pipeline_name]):
raise ValueError('Either pipeline_id or pipeline_name is required')
if pipeline_name:
pipeline_id = self.get_pipeline_id(pipeline_name)
response = self._upload_api.upload_pipeline_version(
pipeline_package_path,
name=pipeline_version_name,
pipelineid=pipeline_id
)
if self._is_ipython():
import IPython
html = 'Pipeline link <a href=%s/#/pipelines/details/%s>here</a>' % (self._get_url_prefix(), response.id)
IPython.display.display(IPython.display.HTML(html))
return response
def get_pipeline(self, pipeline_id):
"""Get pipeline details.
Args:
pipeline_id: id of the pipeline.
Returns:
A response object including details of a pipeline.
Throws:
Exception if pipeline is not found.
"""
return self._pipelines_api.get_pipeline(id=pipeline_id)
def delete_pipeline(self, pipeline_id):
"""Delete pipeline.
Args:
pipeline_id: id of the pipeline.
Returns:
Object. If the method is called asynchronously, returns the request thread.
Throws:
Exception if pipeline is not found.
"""
return self._pipelines_api.delete_pipeline(id=pipeline_id)
def list_pipeline_versions(self, pipeline_id, page_token='', page_size=10, sort_by=''):
"""Lists pipeline versions.
Args:
pipeline_id: Id of the pipeline to list versions
page_token: Token for starting of the page.
page_size: Size of the page.
sort_by: One of 'field_name', 'field_name desc'. For example, 'name desc'.
Returns:
A response object including a list of versions and next page token.
"""
return self._pipelines_api.list_pipeline_versions(page_token=page_token, page_size=page_size, sort_by=sort_by, resource_key_type=kfp_server_api.models.api_resource_type.ApiResourceType.PIPELINE, resource_key_id=pipeline_id)
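# End-to-end usage sketch (an illustration appended here, not part of the
# library): compile and submit a trivial pipeline through the public methods
# defined above. The host is a placeholder and a reachable KFP API server is
# assumed; the block only runs when this module is executed directly.
if __name__ == '__main__':
    import kfp.dsl as dsl

    @dsl.pipeline(name='echo-pipeline', description='Illustrative example pipeline.')
    def echo_pipeline(message: str = 'hello'):
        # A single step defined with the classic KFP v1 ContainerOp.
        dsl.ContainerOp(
            name='echo',
            image='alpine:3.12',
            command=['sh', '-c'],
            arguments=['echo "%s"' % message])

    example_client = Client(host='127.0.0.1:8080/pipeline')  # placeholder host
    result = example_client.create_run_from_pipeline_func(
        echo_pipeline,
        arguments={'message': 'hello world'},
        experiment_name='Default')
    print(result.run_id)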
|
import os
from flask import Flask, request
import psycopg2
import json
app = Flask(__name__)
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL)
@app.route('/find')
def find():
lat = request.args.get('lat')
lng = request.args.get('lng')
radius = request.args.get('radius')
cursor = conn.cursor()
query = 'SELECT * from signs WHERE earth_box(ll_to_earth(%s, %s), %s) @> ll_to_earth(latitude, longtitude);'
cursor.execute(query, (lat, lng, radius))
columns = ['longtitude', 'latitude', 'object_id', 'sg_key_bor', 'sg_order_n', 'sg_seqno_n', 'sg_mutcd_c', 'sr_dist', 'sg_sign_fc', 'sg_arrow_d', 'x', 'y', 'signdesc']
results = []
for row in cursor.fetchall():
results.append(dict(zip(columns, row)))
return json.dumps({results:results})
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port, debug=True)
Change the key to be string
import os
from flask import Flask, request
import psycopg2
import json
app = Flask(__name__)
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL)
@app.route('/find')
def find():
lat = request.args.get('lat')
lng = request.args.get('lng')
radius = request.args.get('radius')
cursor = conn.cursor()
query = 'SELECT * from signs WHERE earth_box(ll_to_earth(%s, %s), %s) @> ll_to_earth(latitude, longtitude);'
cursor.execute(query, (lat, lng, radius))
columns = ['longtitude', 'latitude', 'object_id', 'sg_key_bor', 'sg_order_n', 'sg_seqno_n', 'sg_mutcd_c', 'sr_dist', 'sg_sign_fc', 'sg_arrow_d', 'x', 'y', 'signdesc']
results = []
for row in cursor.fetchall():
results.append(dict(zip(columns, row)))
return json.dumps({'results':results})
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port, debug=True)
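# Usage sketch (hypothetical values): with DATABASE_URL pointing at a Postgres
# database that has the cube/earthdistance extensions and a `signs` table,
#   GET /find?lat=40.7128&lng=-74.0060&radius=500
# returns a JSON body of the form {"results": [{"longtitude": ..., "latitude": ..., ...}]},
# where radius is passed straight to earth_box and is therefore in the distance
# units used by the earthdistance extension (metres by default).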
|
import sys
import random
import sqlite3
import os
from treelib import Tree
from genetic_algorithm import Container
from werkzeug.utils import secure_filename
from flask import Flask, render_template, request, jsonify, send_from_directory, redirect, url_for
#--------------------------------------#
app = Flask(__name__, static_url_path='')
ga = Container()
UPLOAD_FOLDER = 'data'
ALLOWED_EXTENSIONS = set(['json'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#--------------------------------------#
@app.route('/', methods=["GET", "POST"])
def index():
selectModel = getRow("models")
if request.method == "POST":
selection = request.form.get('selection')
if selection == "none":
return redirect(request.url)
filename = "/data/" + selection
return render_template("index.html", selectModel=selectModel, filename=filename)
else:
return render_template('index.html', selectModel=selectModel)
@app.route('/single_model', methods=['GET', 'POST'])
def single():
selectModel = getRow("models")
if request.method == 'POST':
selection = request.form.get('selection')
if selection == "none":
return redirect(request.url)
filename = "/data/" + selection
return render_template("single_model.html", selectModel=selectModel, filename=filename)
else:
return render_template("single_model.html", selectModel=selectModel)
@app.route('/treeEQ')
def treeEQ():
equation = generateEquation(generateTree())
print(equation)
selectModel = getRow("models")
return render_template("single_model.html", selectModel=selectModel, shader=equation)
@app.route('/view_single', methods=["POST"])
def viewSingle():
#examine deformed model in a single model page, selected from multi model page
if request.method == "POST":
shader = request.form['shader']
print(shader)
return render_template("single_model.html", shader=shader)
else:
return render_template("single_model.html")
#--------------------------------------#
@app.route('/_start')
def start():
size = request.args.get('size', 0, type=int)
# start ga
ga.on_start(popsize = 50, subset_size = size)
subset = ga.get_subset()
return jsonify(result=subset)
#--------------------------------------#
@app.route('/_step')
def step():
selection = request.args.get('sel', 0, type=int)
ga.iga_step(selection)
subset = ga.get_subset()
return jsonify(result=subset)
#--------------------------------------#
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('js', path)
#--------------------------------------#
@app.route('/data/<path:path>')
def send_data(path):
return send_from_directory('data', path)
#--------------------------------------#
@app.route('/recordEquation')
def recordEquation():
#records the selected equation into the database
equation = request.args.get('equation')
with sqlite3.connect("database.db") as con:
cur = con.cursor()
cur.execute("INSERT INTO equations (equation) values (?)", [equation])
print("Equation saved: " + equation)
con.commit()
con.close()
return equation
@app.route('/sendRandomEquation')
def returnRandomEQ():
conn = sqlite3.connect('database.db')
c = conn.cursor()
c.execute("SELECT * FROM equations")
equations = []
for row in c.fetchall():
equations.append(row[1])
equation = equations[random.randrange(0, len(equations))]
return jsonify(result=equation)
#--------------------------------------#
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def uploadModel(file):
#uploads model into the database
filename = secure_filename(file.filename)
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
with sqlite3.connect("database.db") as con:
cur = con.cursor()
cur.execute("INSERT INTO MODELS (modelName) VALUES (?)", [filename])
print("Model saved: " + filename)
con.commit()
con.close()
file.save(filepath)
filename = "/data/" + filename
return filename
@app.route('/uploadSingle', methods=['GET', 'POST'])
def uploadSingle():
#upload function for single model page
if request.method == 'POST':
if 'file' not in request.files:
print('No file part')
return redirect(request.url)
file = request.files['file']
if file and allowed_file(file.filename):
filename = uploadModel(file)
alert = file.filename + " has been uploaded."
selectModel = getRow("models")
return render_template("single_model.html", filename=filename, selectModel=selectModel, alert=alert)
return render_template("single_model.html")
@app.route('/uploadMulti', methods=['GET', 'POST'])
def uploadMulti():
#upload function for multi model page
if request.method == 'POST':
if 'file' not in request.files:
print("No file part")
return redirect(request.url)
file = request.files['file']
if file and allowed_file(file.filename):
filename = uploadModel(file)
alert = file.filename + " has been uploaded."
selectModel = getRow("models")
return render_template("index.html", filename=filename, selectModel=selectModel, alert=alert)
return render_template("index.html")
#--------------------------------------#
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
#--------------------------------------#
def getRow(table):
#returns rows of saved models' names
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from " + table)
rows = cur.fetchall()
return rows
#=======================================#
# Equation from Trees #
#=======================================#
operators = ["+", "-", "/", "*"]
cOperators = ["sin", "cos", "tan"]
def randomOperator():
operator = ["+", "-", "/", "*", "sin", "cos", "tan"]
return random.choice(operator)
def randomVariable():
variables = ["x", "y", "z"]
return random.choice(variables)
def randomValue():
variables = ["x", "y", "z"]
if random.randint(0, 100) % 2 == 0:
num = random.randint(1, 1)
if num == 0:
return randomValue()
return "(" + str(num) + ")"
else:
return random.choice(variables)
def randomAll():
variables = ["x", "y", "z", "+", "-", "/", "*", "sin", "cos", "tan"]
if random.randint(0, 100) % 2 == 0:
num = random.randint(1, 1)
if num == 0:
return randomAll()
return "(" + str(num) + ")"
else:
return random.choice(variables)
def determineNoChildren(operator):
#determine how many children the parent would have based on the operator
if operator == "+" or operator == "-" or operator == "/" or operator == "*":
return 2
else:
return 1
def opChild(tree, index):
#generate string of equation from index's child, based on type of operators
children = tree.is_branch(index)
equation = "(" + str(tree[children[0]].tag) + tree[index].tag + str(tree[children[1]].tag) + ")"
tree[index].tag = equation
return tree
def cOpChild(tree, index):
child = tree.is_branch(index)
equation = str(tree[index].tag) + "(" + str(tree[child[0]].tag) + ")"
tree[index].tag = equation
return tree
def generateTree():
tree = Tree()
parent = []
opParent = 1
operatorChild = 3
tree.create_node(randomOperator(), 0)
depth = determineNoChildren(tree[0].tag)
for x in range(0, depth):
if x == 0:
tree.create_node(randomVariable(), opParent, parent=0)
if tree[opParent].tag in operators or tree[opParent].tag in cOperators:
parent.append(opParent)
opParent += 1
else:
tree.create_node(randomAll(), opParent, parent=0)
if tree[opParent].tag in operators or tree[opParent].tag in cOperators:
parent.append(opParent)
opParent+=1
for node in parent:
depth = determineNoChildren(tree[node].tag)
for i in range(0, depth):
if i == 0:
tree.create_node(randomValue(), operatorChild, parent=node)
operatorChild += 1
else:
tree.create_node(randomValue(), operatorChild, parent=node)
operatorChild+=1
return tree
def generateEquation(tree):
if tree[0].tag in cOperators:
if tree[1].tag in cOperators:
tree = cOpChild(tree, 1)
tree = cOpChild(tree, 0)
elif tree[1].tag in operators:
tree = opChild(tree, 1)
tree = cOpChild(tree, 0)
else:
tree[0].tag = tree[0].tag + "(" + str(tree[1].tag) + ")"
else:
children = tree.is_branch(0)
for child in children:
if tree[children[child-1]].tag in cOperators:
tree = cOpChild(tree, child)
elif tree[children[child-1]].tag in operators:
tree = opChild(tree, child)
tree = opChild(tree, 0)
return tree[0].tag
#--------------------------------------#
def main():
app.run(
host='0.0.0.0',
port=int('8000'),
debug=True
)
if __name__ == '__main__':
main()
Minor fixes
import sys
import random
import sqlite3
import os
from treelib import Tree
from genetic_algorithm import Container
from werkzeug.utils import secure_filename
from flask import Flask, render_template, request, jsonify, send_from_directory, redirect, url_for
#--------------------------------------#
app = Flask(__name__, static_url_path='')
ga = Container()
UPLOAD_FOLDER = 'data'
ALLOWED_EXTENSIONS = set(['json'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#--------------------------------------#
@app.route('/', methods=["GET", "POST"])
def index():
selectModel = getRow("models")
if request.method == "POST":
selection = request.form.get('selection')
if selection == "none":
return redirect(request.url)
filename = "/data/" + selection
return render_template("index.html", selectModel=selectModel, filename=filename)
else:
return render_template('index.html', selectModel=selectModel)
@app.route('/single_model', methods=['GET', 'POST'])
def single():
selectModel = getRow("models")
if request.method == 'POST':
selection = request.form.get('selection')
if selection == "none":
return redirect(request.url)
filename = "/data/" + selection
return render_template("single_model.html", selectModel=selectModel, filename=filename)
else:
return render_template("single_model.html", selectModel=selectModel)
@app.route('/treeEQ')
def treeEQ():
equation = generateEquation(generateTree())
print(equation)
selectModel = getRow("models")
return render_template("single_model.html", selectModel=selectModel, shader=equation)
@app.route('/view_single', methods=["POST"])
def viewSingle():
#examine deformed model in a single model page, selected from multi model page
if request.method == "POST":
shader = request.form['shader']
print(shader)
return render_template("single_model.html", shader=shader)
else:
return render_template("single_model.html")
#--------------------------------------#
@app.route('/_start')
def start():
size = request.args.get('size', 0, type=int)
# start ga
ga.on_start(popsize = 50, subset_size = size)
subset = ga.get_subset()
return jsonify(result=subset)
#--------------------------------------#
@app.route('/_step')
def step():
selection = request.args.get('sel', 0, type=int)
ga.iga_step(selection)
subset = ga.get_subset()
return jsonify(result=subset)
#--------------------------------------#
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('js', path)
#--------------------------------------#
@app.route('/data/<path:path>')
def send_data(path):
return send_from_directory('data', path)
#--------------------------------------#
@app.route('/recordEquation')
def recordEquation():
#records the selected equation into the database
equation = request.args.get('equation')
with sqlite3.connect("database.db") as con:
cur = con.cursor()
cur.execute("INSERT INTO equations (equation) values (?)", [equation])
print("Equation saved: " + equation)
con.commit()
con.close()
return equation
@app.route('/sendRandomEquation')
def returnRandomEQ():
conn = sqlite3.connect('database.db')
c = conn.cursor()
c.execute("SELECT * FROM equations")
equations = []
for row in c.fetchall():
equations.append(row[1])
equation = equations[random.randrange(0, len(equations))]
return jsonify(result=equation)
#--------------------------------------#
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def uploadModel(file):
#uploads model into the database
filename = secure_filename(file.filename)
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
with sqlite3.connect("database.db") as con:
cur = con.cursor()
cur.execute("INSERT INTO MODELS (modelName) VALUES (?)", [filename])
print("Model saved: " + filename)
con.commit()
con.close()
file.save(filepath)
filename = "/data/" + filename
return filename
@app.route('/uploadSingle', methods=['GET', 'POST'])
def uploadSingle():
#upload function for single model page
if request.method == 'POST':
if 'file' not in request.files:
print('No file part')
return redirect(request.url)
file = request.files['file']
if file and allowed_file(file.filename):
filename = uploadModel(file)
alert = file.filename + " has been uploaded."
selectModel = getRow("models")
return render_template("single_model.html", filename=filename, selectModel=selectModel, alert=alert)
return render_template("single_model.html")
@app.route('/uploadMulti', methods=['GET', 'POST'])
def uploadMulti():
#upload function for multi model page
if request.method == 'POST':
if 'file' not in request.files:
print("No file part")
return redirect(request.url)
file = request.files['file']
if file and allowed_file(file.filename):
filename = uploadModel(file)
alert = file.filename + " has been uploaded."
selectModel = getRow("models")
return render_template("index.html", filename=filename, selectModel=selectModel, alert=alert)
return render_template("index.html")
#--------------------------------------#
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
#--------------------------------------#
def getRow(table):
#returns rows of saved models' names
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from " + table)
    rows = cur.fetchall()
    con.close()
    return rows
#--------------------------------------#
# Tree Equations
operators = ["+", "-", "/", "*"]
cOperators = ["sin", "cos", "tan"]
def randomOperator():
operator = ["+", "-", "/", "*", "sin", "cos", "tan"]
return random.choice(operator)
def randomVariable():
variables = ["x", "y", "z"]
return random.choice(variables)
def randomValue():
    #return a small non-zero constant half the time, otherwise a variable
    variables = ["x", "y", "z"]
    if random.randint(0, 100) % 2 == 0:
        num = random.randint(0, 9)
        if num == 0:
            #re-roll so the constant is never zero
            return randomValue()
        return "(" + str(num) + ")"
    else:
        return random.choice(variables)
def randomAll():
    #return a small non-zero constant half the time, otherwise a variable or operator
    variables = ["x", "y", "z", "+", "-", "/", "*", "sin", "cos", "tan"]
    if random.randint(0, 100) % 2 == 0:
        num = random.randint(0, 9)
        if num == 0:
            #re-roll so the constant is never zero
            return randomAll()
        return "(" + str(num) + ")"
    else:
        return random.choice(variables)
def determineNoChildren(operator):
#determine how many children the parent would have based on the operator
if operator == "+" or operator == "-" or operator == "/" or operator == "*":
return 2
else:
return 1
def equationInorder(tree, index):
#generate string of equation from index's child, based on type of operators
children = tree.is_branch(index)
equation = "(" + str(tree[children[0]].tag) + tree[index].tag + str(tree[children[1]].tag) + ")"
tree[index].tag = equation
return tree
def trigonometricEquation(tree, index):
child = tree.is_branch(index)
equation = str(tree[index].tag) + "(" + str(tree[child[0]].tag) + ")"
tree[index].tag = equation
return tree
def generateTree():
tree = Tree()
parent = []
opParent = 1
operatorChild = 3
tree.create_node(randomOperator(), 0)
depth = determineNoChildren(tree[0].tag)
for x in range(0, depth):
if x == 0:
tree.create_node(randomVariable(), opParent, parent=0)
if tree[opParent].tag in operators or tree[opParent].tag in cOperators:
parent.append(opParent)
opParent += 1
else:
tree.create_node(randomAll(), opParent, parent=0)
if tree[opParent].tag in operators or tree[opParent].tag in cOperators:
parent.append(opParent)
opParent+=1
for node in parent:
depth = determineNoChildren(tree[node].tag)
for i in range(0, depth):
if i == 0:
tree.create_node(randomValue(), operatorChild, parent=node)
operatorChild += 1
else:
tree.create_node(randomValue(), operatorChild, parent=node)
operatorChild+=1
return tree
def generateEquation(tree):
if tree[0].tag in cOperators:
if tree[1].tag in cOperators:
tree = trigonometricEquation(tree, 1)
tree = trigonometricEquation(tree, 0)
elif tree[1].tag in operators:
tree = equationInorder(tree, 1)
tree = trigonometricEquation(tree, 0)
else:
tree[0].tag = tree[0].tag + "(" + str(tree[1].tag) + ")"
else:
children = tree.is_branch(0)
for child in children:
if tree[children[child-1]].tag in cOperators:
tree = trigonometricEquation(tree, child)
elif tree[children[child-1]].tag in operators:
tree = equationInorder(tree, child)
tree = equationInorder(tree, 0)
return tree[0].tag
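# Hypothetical usage sketch (not in the original source): producing a random equation string
# by combining the helpers above.  It relies only on generateTree() returning a treelib Tree
# rooted at node id 0, which is exactly what generateTree() builds.
def _example_random_equation():
    tree = generateTree()           # small random expression tree
    return generateEquation(tree)   # e.g. "(x+(3))" or "sin(y)", depending on the draw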
#--------------------------------------#
def main():
app.run(
host='0.0.0.0',
        port=8000,
debug=True
)
if __name__ == '__main__':
main()
|
import os
from flask import Flask, request
import psycopg2
import json
app = Flask(__name__)
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL)
@app.route('/find')
def find():
lat = request.args.get('lat')
lng = request.args.get('lng')
radius = request.args.get('radius')
cursor = conn.cursor()
query = 'SELECT * from signs WHERE earth_box(ll_to_earth(%s, %s), %s) @> ll_to_earth(latitude, longtitude);'
cursor.execute(query, (lat, lng, radius))
columns = ['longtitude', 'latitude', 'object_id', 'sg_key_bor', 'sg_order_n', 'sg_seqno_n', 'sg_mutcd_c', 'sr_dist', 'sg_sign_fc', 'sg_arrow_d', 'x', 'y', 'signdesc']
results = []
for row in cursor.fetchall():
results.append(dict(zip(columns, row)))
return json.dumps(results)
if __name__ == '__main__':
app.run(debug=True)
Added Port
import os
from flask import Flask, request
import psycopg2
import json
app = Flask(__name__)
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL)
@app.route('/find')
def find():
lat = request.args.get('lat')
lng = request.args.get('lng')
radius = request.args.get('radius')
cursor = conn.cursor()
query = 'SELECT * from signs WHERE earth_box(ll_to_earth(%s, %s), %s) @> ll_to_earth(latitude, longtitude);'
cursor.execute(query, (lat, lng, radius))
columns = ['longtitude', 'latitude', 'object_id', 'sg_key_bor', 'sg_order_n', 'sg_seqno_n', 'sg_mutcd_c', 'sr_dist', 'sg_sign_fc', 'sg_arrow_d', 'x', 'y', 'signdesc']
results = []
for row in cursor.fetchall():
results.append(dict(zip(columns, row)))
return json.dumps(results)
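# Hypothetical one-time setup helper (an assumption, not in the original file): the /find
# query above needs PostgreSQL's cube and earthdistance extensions, and a GiST index over
# ll_to_earth(latitude, longtitude) keeps the earth_box lookup fast.  Column names follow
# the 'signs' table used above, including its spelling of 'longtitude'.
def ensure_earthdistance_setup():
    cur = conn.cursor()
    cur.execute("CREATE EXTENSION IF NOT EXISTS cube;")
    cur.execute("CREATE EXTENSION IF NOT EXISTS earthdistance;")
    cur.execute(
        "CREATE INDEX IF NOT EXISTS signs_ll_idx "
        "ON signs USING gist (ll_to_earth(latitude, longtitude));"
    )
    conn.commit()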
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port, debug=True)
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging
import ssl
import stoppable_thread
import threading
import time
import uuid
class HandlerMaison(BaseHTTPRequestHandler):
logger = logging.getLogger("HandlerMaison")
#server stuff
def do_GET(self):
return self.do_POST()
def do_POST(self):
try:
print(self.path)
print(WebHookServer.Path)
if self.path == WebHookServer.Path:
self.logger.info("Recieve post")
print ("THE POST")
jsonListString = []
#for line in self.rfile:
# jsonListString.append(str(line, 'utf-8'))
# print ("line:"+str(line, 'utf-8'))
print ("THE END")
#jsonObject = json.loads(''.join(jsonListString))
#print(jsonObject)
self.ok()
else:
self.error_access()
except:
self.logger.exception("Handler error", exc_info=True)
self.error()
def error_access(self):
self.send_response(403)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes("Access denied", "utf-8"))
def error(self):
self.send_response(500)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes("Server error", "utf-8"))
def ok(self):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes("Ok", "utf-8"))
class WebHookServer(stoppable_thread.StoppableThread):
Bot = None
Path = None
def __init__(self, bot, public, private, port=8443):
super(WebHookServer, self).__init__()
self.logger = logging.getLogger(type(self).__name__)
WebHookServer.Bot = bot
self.__public_path = public
self.__private_path = private
self.__port = port
self.key = None
self.url = None
self.httpd = None
def run(self):
try:
self.logger.info("Starting getting public ip")
#response = json.loads(requests.get("https://api.ipify.org/?format=json").text)
#ip = response["ip"]
ip = "fa18swiss.no-ip.biz"
self.logger.warning("Ip : %s", ip)
self.key = str(uuid.uuid4())
self.logger.warning("Key : '%s'", self.key)
WebHookServer.Path = "/%s/" % self.key
self.url = "https://%s:%d%s" % (ip, self.__port, self.Path)
self.logger.warning("Url : '%s'", self.url)
self.logger.info("Init server on port %d", self.__port)
server_address = ('', self.__port)
self.httpd = HTTPServer(server_address, HandlerMaison)
try:
ssl_version = ssl.PROTOCOL_TLSv1_2
            except AttributeError:
self.logger.warning("Can't use TLS 1.2, use TLS 1 instead")
ssl_version = ssl.PROTOCOL_TLSv1
# SSL
self.httpd.socket = ssl.wrap_socket(self.httpd.socket,
server_side=True,
certfile=self.__public_path,
keyfile=self.__private_path,
ssl_version=ssl_version)
self.logger.info("Bot starting")
self.Bot.start()
self.logger.info("Bot started")
thread = WebHookSetter(self.Bot, self.url, self.__public_path)
thread.start()
self.httpd.serve_forever()
except:
self.logger.exception("Server fail", exc_info=True)
self.logger.info("Stoppring bot")
self.Bot.setWebhook("")
self.Bot.stop()
self.logger.info("Stopped bot")
def stop(self):
super(WebHookServer, self).stop()
self.httpd.shutdown()
class WebHookSetter(threading.Thread):
def __init__(self, bot, url, certificate):
super(WebHookSetter, self).__init__()
self.__bot = bot
self.__url = url
self.__certificate = certificate
self.logger = logging.getLogger("WebHootSetter")
def run(self):
self.logger.debug("Start wait")
time.sleep(1)
self.logger.debug("End wait")
self.__bot.setWebhook(self.__url, self.__certificate)
self.logger.debug("End set")
class PollingServer(stoppable_thread.StoppableThread):
def __init__(self, bot, sleep_time=2):
super(PollingServer, self).__init__()
self.bot = bot
self.sleep_time = sleep_time
def run(self):
self.bot.setWebhook("")
while self.can_loop():
self.bot.getUpdates()
time.sleep(self.sleep_time)
Removed bot starting from the webhook server.
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging
import ssl
import stoppable_thread
import threading
import time
import uuid
class HandlerMaison(BaseHTTPRequestHandler):
logger = logging.getLogger("HandlerMaison")
#server stuff
def do_GET(self):
return self.do_POST()
def do_POST(self):
try:
print(self.path)
print(WebHookServer.Path)
if self.path == WebHookServer.Path:
self.logger.info("Recieve post")
print ("THE POST")
jsonListString = []
#for line in self.rfile:
# jsonListString.append(str(line, 'utf-8'))
# print ("line:"+str(line, 'utf-8'))
print ("THE END")
#jsonObject = json.loads(''.join(jsonListString))
#print(jsonObject)
self.ok()
else:
self.error_access()
except:
self.logger.exception("Handler error", exc_info=True)
self.error()
def error_access(self):
self.send_response(403)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes("Access denied", "utf-8"))
def error(self):
self.send_response(500)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes("Server error", "utf-8"))
def ok(self):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes("Ok", "utf-8"))
class WebHookServer(stoppable_thread.StoppableThread):
Bot = None
Path = None
def __init__(self, bot, public, private, port=8443):
super(WebHookServer, self).__init__()
self.logger = logging.getLogger(type(self).__name__)
WebHookServer.Bot = bot
self.__public_path = public
self.__private_path = private
self.__port = port
self.key = None
self.url = None
self.httpd = None
def run(self):
try:
self.logger.info("Starting getting public ip")
#response = json.loads(requests.get("https://api.ipify.org/?format=json").text)
#ip = response["ip"]
ip = "fa18swiss.no-ip.biz"
self.logger.warning("Ip : %s", ip)
self.key = str(uuid.uuid4())
self.logger.warning("Key : '%s'", self.key)
WebHookServer.Path = "/%s/" % self.key
self.url = "https://%s:%d%s" % (ip, self.__port, self.Path)
self.logger.warning("Url : '%s'", self.url)
self.logger.info("Init server on port %d", self.__port)
server_address = ('', self.__port)
self.httpd = HTTPServer(server_address, HandlerMaison)
try:
ssl_version = ssl.PROTOCOL_TLSv1_2
            except AttributeError:
self.logger.warning("Can't use TLS 1.2, use TLS 1 instead")
ssl_version = ssl.PROTOCOL_TLSv1
# SSL
self.httpd.socket = ssl.wrap_socket(self.httpd.socket,
server_side=True,
certfile=self.__public_path,
keyfile=self.__private_path,
ssl_version=ssl_version)
thread = WebHookSetter(self.Bot, self.url, self.__public_path)
thread.start()
self.httpd.serve_forever()
except:
self.logger.exception("Server fail", exc_info=True)
self.logger.info("Stoppring bot")
self.Bot.setWebhook("")
self.Bot.stop()
self.logger.info("Stopped bot")
def stop(self):
super(WebHookServer, self).stop()
self.httpd.shutdown()
class WebHookSetter(threading.Thread):
def __init__(self, bot, url, certificate):
super(WebHookSetter, self).__init__()
self.__bot = bot
self.__url = url
self.__certificate = certificate
self.logger = logging.getLogger("WebHootSetter")
def run(self):
self.logger.debug("Start wait")
time.sleep(1)
self.logger.debug("End wait")
self.__bot.setWebhook(self.__url, self.__certificate)
self.logger.debug("End set")
class PollingServer(stoppable_thread.StoppableThread):
def __init__(self, bot, sleep_time=2):
super(PollingServer, self).__init__()
self.bot = bot
self.sleep_time = sleep_time
def run(self):
self.bot.setWebhook("")
while self.can_loop():
self.bot.getUpdates()
time.sleep(self.sleep_time)
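# Minimal sketch (an assumption -- the real stoppable_thread module is not shown here) of the
# interface the two server classes above rely on: can_loop(), which PollingServer.run() polls,
# and stop(), which WebHookServer.stop() extends through super().
class _StoppableThreadSketch(threading.Thread):
    def __init__(self):
        super(_StoppableThreadSketch, self).__init__()
        self._stop_event = threading.Event()
    def can_loop(self):
        # keep looping until stop() has been called
        return not self._stop_event.is_set()
    def stop(self):
        self._stop_event.set()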
|
import requests
import flask
from flask import Flask, request, render_template, jsonify, redirect, url_for, make_response
from flask.ext.sqlalchemy import SQLAlchemy
import jwt
from encoder import jwt_encode
from logging import Formatter, FileHandler
import models
import controller
from os import path
import models
import os
#Initialize Flask application
app = Flask(__name__)
PORT = int(os.environ.get('PORT', 5000))
#Gather data from config.py
app.config.from_object('config')
#Declaration of all necessary variables needed to perform 23AndMe API Call
BASE_CLIENT_URL = 'http://localhost:%s/'% PORT
DEFAULT_REDIRECT_URI = '%sreceive_code/' % BASE_CLIENT_URL
CLIENT_ID = app.config.get('CLIENT_ID')
CLIENT_SECRET = app.config.get('CLIENT_SECRET')
REDIRECT_URI = app.config.get('REDIRECT_URI')
SNPS = ['rs12913832', 'rs8177374', 'rs1799971', 'rs806380', 'rs1800955', 'rs53576', 'rs1815739', 'rs6152', 'rs1800497', 'rs9939609', 'rs662799', 'rs17822931', 'rs4680', 'rs4988235', 'rs6025', 'rs7574865', 'rs1695', 'rs72921001', 'rs1537415', 'rs2472297', 'rs909525']
DEFAULT_SCOPE = 'names basic email ancestry relatives genomes %s' % (' '.join(SNPS))
BASE_API_URL = 'https://api.23andme.com/'
SECRET_KEY = app.config.get('SECRET_KEY')
@app.route('/')
def home():
auth_url = '%sauthorize/?response_type=code&redirect_uri=%s&client_id=%s&scope=%s' % (BASE_API_URL, REDIRECT_URI, CLIENT_ID, DEFAULT_SCOPE)
return render_template('landing.html', auth_url=auth_url)
@app.route('/get_info/')
def getUser():
response = make_response(render_template('index.html'))
return response
@app.route('/demo/')
def makeDemoUser():
#Add demo user to DB if they don't already exist
controller.create_demo_user()
demo_profile_id = 'demo_id'
demo_user_name = 'Lilly Demo'
response = make_response(render_template('index.html'))
response.set_cookie('user_first_name', demo_user_name)
response.set_cookie('token', jwt_encode(demo_profile_id, demo_user_name, SECRET_KEY))
controller.createSnpsTable()
return response
#Refactor this route to take a userProfileID after the trailing slash with some syntax like: '<%s UserID >''
#i.e. the equivalent of '/:userId' with node/express servers
@app.route('/api/relatives/')
#return all the relatives. Refactor to only return the relatives specific to the current User
def getRelatives():
decoded = jwt.decode(request.cookies.get('token'), SECRET_KEY, algorithms=['HS256'])
current_user_profile_id = decoded['user_profile_id']
#Retrieve all relatives from database, not filtered by user
#To Do: Filter this by user
user_relatives = models.db_session.query(models.user_relatives).all()
user_relatives_ids = []
#Iterate through all relatives
for user_relative in user_relatives:
user = list(user_relative)
#For each relative, grab only those that match on the current_user_profile_id
if current_user_profile_id == str(user[0]):
user_relatives_ids.append(int(user[1]))
#Retrieve all relatives from DB
#To Do: is this the same information in the user_relatives variable above?
relatives = models.db_session.query(models.Relative).all()
finalRelatives = []
#Iterate through all relatives
for relative in relatives:
#Grab only relatives who match the relatives in the user_relatives_ids storage
if relative.serialize()['id'] in user_relatives_ids:
finalRelatives.append(relative.serialize())
return jsonify({'relativeList' : finalRelatives})
@app.route('/api/getsnps', methods=['POST', 'GET'])
def getSnps():
decoded = jwt.decode(request.cookies.get('token'), app.config.get('SECRET_KEY'), algorithms=['HS256'])
current_user_profile_id = decoded['user_profile_id']
user_snps = {}
user_data = models.db_session.query(models.User).filter(models.User.profile_id == current_user_profile_id).first().serialize()
for user_datum in user_data:
        if user_datum[:2].lower() == 'rs':
user_snps[user_datum] = user_data[user_datum]
user_outcomes = []
for user_snp in user_snps:
# loop through entire snp table, if any of snp base pairs match up to the base pair in user snps, put in an object with rsid and outcome
current_snp = models.db_session.query(models.Snp).filter(models.Snp.rs_id == user_snp).filter(models.Snp.dnaPair == user_snps[user_snp]).first()
if current_snp is not None:
user_outcomes.append({"rsid": user_snp, "pair": user_snps[user_snp], "outcome": current_snp.serialize()['outcome']});
return jsonify({'outcomes': user_outcomes})
@app.route('/receive_code/')
def receive_code():
print 'receive_code is being called'
parameters = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'authorization_code',
'code': request.args.get('code'),
'redirect_uri': REDIRECT_URI,
'scope': DEFAULT_SCOPE
}
response = requests.post(
"%s%s" % (BASE_API_URL, "token/"),
data = parameters,
verify=False
)
#get access token from 23andMe
if response.status_code == 200:
access_token = response.json()['access_token']
headers = {'Authorization': 'Bearer %s' % access_token}
#Begin API calls to 23andMe to get all scoped user data
genotype_response = requests.get("%s%s" % (BASE_API_URL, "1/genotype/"),
params = {'locations': ' '.join(SNPS)},
headers=headers,
verify=False)
user_profile_id = genotype_response.json().pop()['id']
user_response = requests.get("%s%s" % (BASE_API_URL, "1/user/?email=true"),
headers=headers,
verify=False)
name_response = requests.get("%s%s" % (BASE_API_URL, "1/names/%s" % user_profile_id),
headers=headers,
verify=False)
#if both API calls are successful, process user data
if user_response.status_code == 200 and genotype_response.status_code == 200:
user_first_name = name_response.json()['first_name']
#if user already exists in database, render the html and do not re-add user to database
if len(models.db_session.query(models.User).filter_by(profile_id=user_profile_id).all()) != 0:
response = make_response(redirect(url_for('getUser')))
response.set_cookie('user_first_name', user_first_name)
response.set_cookie('token', jwt_encode(user_profile_id, user_first_name, SECRET_KEY))
return response
# otherwise, add new user to database if they have never logged in before
else:
#Begin API calls to 23andMe to get additional user data
relatives_response = requests.get("%s%s" % (BASE_API_URL, "1/relatives/%s" % user_profile_id),
params = {'limit': 60, 'offset': 1},
headers=headers,
verify=False)
#call createNewUser from controller to add User and User relatives to the database
controller.createNewUser(name_response, relatives_response, genotype_response, user_response)
#create snps table
controller.createSnpsTable()
response = make_response(redirect(url_for('getUser')))
response.set_cookie('user_first_name', user_first_name)
response.set_cookie('token', jwt_encode(user_profile_id, user_first_name, SECRET_KEY))
return response
#error handling if api calls for additional user data to 23andMe fail
else:
            response_text = genotype_response.text
            genotype_response.raise_for_status()
#error handling if initial api calls to 23andMe fail
else:
response = make_response(redirect(url_for('home')))
return response
#Initialize python server on port
if __name__ == '__main__':
print 'Server has been initialized'
app.run(debug=True, port=PORT)
Removed debug flag from app.run.
import requests
import flask
from flask import Flask, request, render_template, jsonify, redirect, url_for, make_response
from flask.ext.sqlalchemy import SQLAlchemy
import jwt
from encoder import jwt_encode
from logging import Formatter, FileHandler
import models
import controller
from os import path
import models
import os
#Initialize Flask application
app = Flask(__name__)
PORT = int(os.environ.get('PORT', 5000))
#Gather data from config.py
app.config.from_object('config')
#Declaration of all necessary variables needed to perform 23AndMe API Call
BASE_CLIENT_URL = 'http://localhost:%s/'% PORT
DEFAULT_REDIRECT_URI = '%sreceive_code/' % BASE_CLIENT_URL
CLIENT_ID = app.config.get('CLIENT_ID')
CLIENT_SECRET = app.config.get('CLIENT_SECRET')
REDIRECT_URI = app.config.get('REDIRECT_URI')
SNPS = ['rs12913832', 'rs8177374', 'rs1799971', 'rs806380', 'rs1800955', 'rs53576', 'rs1815739', 'rs6152', 'rs1800497', 'rs9939609', 'rs662799', 'rs17822931', 'rs4680', 'rs4988235', 'rs6025', 'rs7574865', 'rs1695', 'rs72921001', 'rs1537415', 'rs2472297', 'rs909525']
DEFAULT_SCOPE = 'names basic email ancestry relatives genomes %s' % (' '.join(SNPS))
BASE_API_URL = 'https://api.23andme.com/'
SECRET_KEY = app.config.get('SECRET_KEY')
@app.route('/')
def home():
auth_url = '%sauthorize/?response_type=code&redirect_uri=%s&client_id=%s&scope=%s' % (BASE_API_URL, REDIRECT_URI, CLIENT_ID, DEFAULT_SCOPE)
return render_template('landing.html', auth_url=auth_url)
@app.route('/get_info/')
def getUser():
response = make_response(render_template('index.html'))
return response
@app.route('/demo/')
def makeDemoUser():
#Add demo user to DB if they don't already exist
controller.create_demo_user()
demo_profile_id = 'demo_id'
demo_user_name = 'Lilly Demo'
response = make_response(render_template('index.html'))
response.set_cookie('user_first_name', demo_user_name)
response.set_cookie('token', jwt_encode(demo_profile_id, demo_user_name, SECRET_KEY))
controller.createSnpsTable()
return response
#Refactor this route to take a userProfileID after the trailing slash with some syntax like: '<%s UserID >''
#i.e. the equivalent of '/:userId' with node/express servers
@app.route('/api/relatives/')
#return all the relatives. Refactor to only return the relatives specific to the current User
def getRelatives():
decoded = jwt.decode(request.cookies.get('token'), SECRET_KEY, algorithms=['HS256'])
current_user_profile_id = decoded['user_profile_id']
#Retrieve all relatives from database, not filtered by user
#To Do: Filter this by user
user_relatives = models.db_session.query(models.user_relatives).all()
user_relatives_ids = []
#Iterate through all relatives
for user_relative in user_relatives:
user = list(user_relative)
#For each relative, grab only those that match on the current_user_profile_id
if current_user_profile_id == str(user[0]):
user_relatives_ids.append(int(user[1]))
#Retrieve all relatives from DB
#To Do: is this the same information in the user_relatives variable above?
relatives = models.db_session.query(models.Relative).all()
finalRelatives = []
#Iterate through all relatives
for relative in relatives:
#Grab only relatives who match the relatives in the user_relatives_ids storage
if relative.serialize()['id'] in user_relatives_ids:
finalRelatives.append(relative.serialize())
return jsonify({'relativeList' : finalRelatives})
@app.route('/api/getsnps', methods=['POST', 'GET'])
def getSnps():
decoded = jwt.decode(request.cookies.get('token'), app.config.get('SECRET_KEY'), algorithms=['HS256'])
current_user_profile_id = decoded['user_profile_id']
user_snps = {}
user_data = models.db_session.query(models.User).filter(models.User.profile_id == current_user_profile_id).first().serialize()
for user_datum in user_data:
        if user_datum[:2].lower() == 'rs':
user_snps[user_datum] = user_data[user_datum]
user_outcomes = []
for user_snp in user_snps:
# loop through entire snp table, if any of snp base pairs match up to the base pair in user snps, put in an object with rsid and outcome
current_snp = models.db_session.query(models.Snp).filter(models.Snp.rs_id == user_snp).filter(models.Snp.dnaPair == user_snps[user_snp]).first()
if current_snp is not None:
user_outcomes.append({"rsid": user_snp, "pair": user_snps[user_snp], "outcome": current_snp.serialize()['outcome']});
return jsonify({'outcomes': user_outcomes})
@app.route('/receive_code/')
def receive_code():
print 'receive_code is being called'
parameters = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'authorization_code',
'code': request.args.get('code'),
'redirect_uri': REDIRECT_URI,
'scope': DEFAULT_SCOPE
}
response = requests.post(
"%s%s" % (BASE_API_URL, "token/"),
data = parameters,
verify=False
)
#get access token from 23andMe
if response.status_code == 200:
access_token = response.json()['access_token']
headers = {'Authorization': 'Bearer %s' % access_token}
#Begin API calls to 23andMe to get all scoped user data
genotype_response = requests.get("%s%s" % (BASE_API_URL, "1/genotype/"),
params = {'locations': ' '.join(SNPS)},
headers=headers,
verify=False)
user_profile_id = genotype_response.json().pop()['id']
user_response = requests.get("%s%s" % (BASE_API_URL, "1/user/?email=true"),
headers=headers,
verify=False)
name_response = requests.get("%s%s" % (BASE_API_URL, "1/names/%s" % user_profile_id),
headers=headers,
verify=False)
#if both API calls are successful, process user data
if user_response.status_code == 200 and genotype_response.status_code == 200:
user_first_name = name_response.json()['first_name']
#if user already exists in database, render the html and do not re-add user to database
if len(models.db_session.query(models.User).filter_by(profile_id=user_profile_id).all()) != 0:
response = make_response(redirect(url_for('getUser')))
response.set_cookie('user_first_name', user_first_name)
response.set_cookie('token', jwt_encode(user_profile_id, user_first_name, SECRET_KEY))
return response
# otherwise, add new user to database if they have never logged in before
else:
#Begin API calls to 23andMe to get additional user data
relatives_response = requests.get("%s%s" % (BASE_API_URL, "1/relatives/%s" % user_profile_id),
params = {'limit': 60, 'offset': 1},
headers=headers,
verify=False)
#call createNewUser from controller to add User and User relatives to the database
controller.createNewUser(name_response, relatives_response, genotype_response, user_response)
#create snps table
controller.createSnpsTable()
response = make_response(redirect(url_for('getUser')))
response.set_cookie('user_first_name', user_first_name)
response.set_cookie('token', jwt_encode(user_profile_id, user_first_name, SECRET_KEY))
return response
#error handling if api calls for additional user data to 23andMe fail
else:
            response_text = genotype_response.text
            genotype_response.raise_for_status()
#error handling if initial api calls to 23andMe fail
else:
response = make_response(redirect(url_for('home')))
return response
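# Hypothetical sketch (an assumption -- encoder.py is not shown) of what jwt_encode has to
# produce for the jwt.decode(...) calls above to work: an HS256 token whose payload carries
# at least 'user_profile_id'.  The 'user_first_name' claim is inferred from the arguments
# passed to jwt_encode throughout this file.
def _jwt_encode_sketch(user_profile_id, user_first_name, secret_key):
    payload = {'user_profile_id': user_profile_id, 'user_first_name': user_first_name}
    return jwt.encode(payload, secret_key, algorithm='HS256')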
#Initialize python server on port
if __name__ == '__main__':
print 'Server has been initialized'
app.run(port=PORT)
|
import SocketServer
import traceback
import time
class GNTPServer(SocketServer.TCPServer):
pass
class GNTPHandler(SocketServer.StreamRequestHandler):
def read(self):
bufferSleep = 0.01
bufferLength = 2048
time.sleep(bufferSleep) #Let the buffer fill up a bit (hack)
buffer = ''
while(1):
data = self.request.recv(bufferLength)
if self.server.growl_debug:
print 'Reading',len(data)
buffer = buffer + data
if len(data) < bufferLength: break
time.sleep(bufferSleep) #Let the buffer fill up a bit (hack)
if self.server.growl_debug:
print '<Reading>\n',buffer,'\n</Reading>'
return buffer
def write(self,msg):
if self.server.growl_debug:
print '<Writing>\n',msg,'\n</Writing>'
self.request.send(msg)
def handle(self):
reload(gntp)
self.data = self.read()
try:
message = gntp.parse_gntp(self.data,self.server.growl_password)
message.send()
response = gntp.GNTPOK(action=message.info['messagetype'])
self.write(response.encode())
except gntp.BaseError, e:
if self.server.growl_debug:
traceback.print_exc()
if e.gntp_error:
self.write(e.gntp_error())
except:
error = gntp.GNTPError(errorcode=500,errordesc='Unknown server error')
self.write(error.encode())
raise
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-a","--address",dest="host",help="address to listen on",default="")
parser.add_option("-p","--port",dest="port",help="port to listen on",type="int",default=23053)
parser.add_option("-r","--regrowl",dest='regrowl',help="ReGrowl on local OSX machine",action="store_true",default=False)
parser.add_option("-d","--debug",dest='debug',help="Print raw growl packets",action="store_true",default=False)
parser.add_option("-P","--password",dest='password',help="Network password",default=None)
(options, args) = parser.parse_args()
if options.regrowl:
import gntp_bridge as gntp
else:
import gntp
server = GNTPServer((options.host, options.port), GNTPHandler)
server.growl_debug = options.debug
server.growl_password = options.password
sa = server.socket.getsockname()
print "Listening for GNTP on", sa[0], "port", sa[1], "..."
server.serve_forever()
Slightly better checks for the end of message.
import SocketServer
import traceback
import time
class GNTPServer(SocketServer.TCPServer):
pass
class GNTPHandler(SocketServer.StreamRequestHandler):
def read(self):
bufferLength = 2048
buffer = ''
while(1):
data = self.request.recv(bufferLength)
if self.server.growl_debug:
print 'Reading',len(data)
buffer = buffer + data
if len(data) < bufferLength and buffer.endswith('\r\n\r\n'):
break
if self.server.growl_debug:
print '<Reading>\n',buffer,'\n</Reading>'
return buffer
def write(self,msg):
if self.server.growl_debug:
print '<Writing>\n',msg,'\n</Writing>'
self.request.sendall(msg)
def handle(self):
reload(gntp)
self.data = self.read()
try:
message = gntp.parse_gntp(self.data,self.server.growl_password)
message.send()
response = gntp.GNTPOK(action=message.info['messagetype'])
self.write(response.encode())
except gntp.BaseError, e:
if self.server.growl_debug:
traceback.print_exc()
if e.gntp_error:
self.write(e.gntp_error())
except:
error = gntp.GNTPError(errorcode=500,errordesc='Unknown server error')
self.write(error.encode())
raise
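# Hypothetical smoke-test client (not part of the original script) illustrating the
# end-of-message check used in read() above: the handler keeps receiving until the buffer
# ends with a blank line (CRLF CRLF), so this hand-rolled request is terminated the same
# way.  The header content is only a stand-in for a real GNTP message.
def _example_send_raw(host='localhost', port=23053):
    import socket
    request = 'GNTP/1.0 REGISTER NONE\r\nApplication-Name: Example\r\n\r\n'
    sock = socket.create_connection((host, port))
    sock.sendall(request)
    reply = sock.recv(2048)
    sock.close()
    return reply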
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-a","--address",dest="host",help="address to listen on",default="")
parser.add_option("-p","--port",dest="port",help="port to listen on",type="int",default=23053)
parser.add_option("-r","--regrowl",dest='regrowl',help="ReGrowl on local OSX machine",action="store_true",default=False)
parser.add_option("-d","--debug",dest='debug',help="Print raw growl packets",action="store_true",default=False)
parser.add_option("-P","--password",dest='password',help="Network password",default=None)
(options, args) = parser.parse_args()
if options.regrowl:
import gntp_bridge as gntp
else:
import gntp
server = GNTPServer((options.host, options.port), GNTPHandler)
server.growl_debug = options.debug
server.growl_password = options.password
sa = server.socket.getsockname()
print "Listening for GNTP on", sa[0], "port", sa[1], "..."
server.serve_forever()
|
#!/usr/bin/python
from flask import Flask, request, redirect, url_for, send_from_directory, g
from werkzeug.utils import secure_filename
import os.path
import datetime, time
import json
import psycopg2, psycopg2.extras
import duckduckgo
import urllib2
import decimal
from bs4 import BeautifulSoup
import ConfigParser, os
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, Table
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import relationship, backref
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import InvalidRequestError
# Read the config.cfg file.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
config = ConfigParser.ConfigParser()
config.readfp(open(os.path.join(__location__, 'config.cfg')))
# Setup SQLAlchemy database connection and table class.
engine = create_engine('postgresql://adam:pivo70@localhost/dvdsdb')
engine = create_engine('postgresql://' + config.get('Database', 'db_user') + ':' + config.get('Database', 'db_pass') +
'@' + config.get('Database', 'host') + '/' + config.get('Database', 'db'))
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
association_table = Table(
'dvds_tags', Base.metadata,
Column('dvd_id', Integer, ForeignKey('dvds.id')),
Column('tag_id', Integer, ForeignKey('tags.id'))
)
class Dvd(Base):
__tablename__ = 'dvds'
id = Column(Integer, primary_key=True)
title = Column(String)
created_at = Column(DateTime)
created_by = Column(String)
rating = Column(Integer)
abstract_txt = Column(String)
abstract_source = Column(String)
abstract_url = Column(String)
image_url = Column(String)
file_url = Column(String)
playback_time = Column(Integer)
episodes = relationship("Episode", lazy="joined")
tags = relationship("Tag", secondary=association_table, backref="dvds")
bookmarks = relationship("Bookmark", lazy="joined")
class Episode(Base):
__tablename__ = 'episodes'
id = Column(Integer, primary_key=True)
name = Column(String)
episode_file_url = Column(String)
playback_time = Column(Integer)
dvd_id = Column(Integer, ForeignKey('dvds.id'))
bookmarks = relationship("Bookmark", lazy="joined")
class Tag(Base):
__tablename__ = 'tags'
id = Column(Integer, primary_key=True)
name = Column(String)
class Bookmark(Base):
__tablename__ = 'bookmarks'
id = Column(Integer, primary_key=True)
name = Column(String)
time = Column(Integer)
dvd_id = Column(Integer, ForeignKey('dvds.id'))
episode_id = Column(Integer, ForeignKey('episodes.id'))
# Setup Flask app.
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = config.get('Server', 'upload_folder')
app.debug = config.getboolean('Server', 'debug')
# Model functions.
def find_by_id(id):
dvd = session.query(Dvd).get(id)
dvd_json = jsonable(Dvd, dvd)
episodes = []
tags = []
bookmarks = []
for episode in dvd.episodes:
epi_json = jsonable(Episode, episode)
episodes.append(epi_json)
for tag in dvd.tags:
tag_json = jsonable(Tag, tag)
tags.append(tag_json)
for bookmark in dvd.bookmarks:
bookmark_json = jsonable(Bookmark, bookmark)
bookmarks.append(bookmark_json)
dvd_json["episodes"] = [episode.id for episode in dvd.episodes]
dvd_json["tags"] = [tag.id for tag in dvd.tags]
dvd_json["bookmarks"] = [bookmark.id for bookmark in dvd.bookmarks]
return {"dvd": dvd_json, "episodes": episodes, "tags": tags, "bookmarks": bookmarks}
def find_episode(id):
q = session.query(Episode).get(id)
epi_json = jsonable(Episode, q)
return {"episode": epi_json}
def find_by_title(title):
# Search by regex.
dvds = session.query(Dvd).filter("title ~* '%s'" % (title)).all()
json_dvds = []
for dvd in dvds:
json_dvds.append(jsonable(Dvd, dvd))
return json_dvds
def add_dvd(data):
# Add new object.
new_dvd = json.loads(request.data)
dvd = Dvd()
# Set the SQLAlchemy object's attributes.
for key, value in new_dvd['dvd'].iteritems():
setattr(dvd, key, value)
ddg_info = get_ddg_info(data)
dvd.abstract_txt = ddg_info.abstract.text
dvd.abstract_source = ddg_info.abstract.source
dvd.abstract_url = ddg_info.abstract.url
dvd.image_url = ddg_info.image.url
dvd.created_at = datetime.datetime.now()
session.add(dvd)
session.commit()
# Might find a better way to return the new DVD.
return {"dvd": {
"id": dvd.id,
"title": dvd.title,
"created_at": dvd.created_at.strftime("%Y-%m-%d %H:%M:%S"),
"created_by": dvd.created_by,
"rating": dvd.rating,
"abstract_txt": dvd.abstract_txt,
"abstract_source": dvd.abstract_source,
"abstract_url": dvd.abstract_url,
"image_url": dvd.image_url,
}
}
def update_dvd(dvd_id, data):
# Rename abstract column:
# ALTER TABLE dvds RENAME COLUMN abstract TO abstract_txt;
dvd = session.query(Dvd).get(dvd_id)
del data['search']
for key in data:
if (key != 'tags'):
setattr(dvd, key, data[key])
else:
# Get tags from ID list.
tags = []
for tag_id in data['tags']:
tags.append(session.query(Tag).get(tag_id))
dvd.tags = tags
try:
session.commit()
except InvalidRequestError:
session.rollback()
return find_by_id(dvd_id)
def delete_dvd(dvd_id):
dvd = session.query(Dvd).get(dvd_id)
session.delete(dvd)
session.commit()
return True
def get_playback_location(sql_obj, vid_id):
vid = session.query(sql_obj).get(vid_id)
return vid.playback_time
def set_playback_location(sql_obj, vid_id, playback_time):
vid = session.query(sql_obj).get(vid_id)
vid.playback_time = playback_time
session.commit()
return True
def find_all():
"""
Return a list of all records in the table.
"""
q = session.query(Dvd)
dvds = []
episodes = []
tags = []
# Sideload the episodes.
for dvd in q:
dvd_json = jsonable(Dvd, dvd)
for episode in dvd.episodes:
epi_json = jsonable(Episode, episode)
episodes.append(epi_json)
dvd_json["episodes"] = [episode.id for episode in dvd.episodes]
for tag in dvd.tags:
tag_json = jsonable(Tag, tag)
tags.append(tag_json)
dvd_json["episodes"] = [episode.id for episode in dvd.episodes]
dvd_json["tags"] = [tag.id for tag in dvd.tags]
dvds.append(dvd_json)
return {"dvds": dvds, "episodes": episodes, "tags": tags}
def find_all_tags():
"""
Return a list of all Tags.
"""
tags = session.query(Tag)
return jsonable(Tag, tags)
def find_all_bookmarks():
"""
Return a list of all Bookmarks.
"""
bookmarks = session.query(Bookmark)
return jsonable(Bookmark, bookmarks)
def add_episode(data):
# Add new object.
episode = Episode()
# Set the SQLAlchemy object's attributes.
#print data
episode.name = data['name']
episode.episode_file_url = data['episode_file_url']
episode.dvd_id = data['dvd_id']
session.add(episode)
session.commit()
return {"episode": {
"id": episode.id,
"name": episode.name,
"episode_file_url": episode.episode_file_url,
"dvd_id": episode.dvd_id,
}
}
def update_episode(episode_id, data):
"""
Update episode.
"""
episode = session.query(Episode).get(episode_id)
#print data
for key in data['episode']:
setattr(episode, key, data['episode'][key])
session.commit()
return find_episode(episode_id)
def delete_episode(episode_id):
episode = session.query(Episode).get(episode_id)
session.delete(episode)
session.commit()
return True
def add_tag(data):
# Add new object.
tag = Tag()
# Set the SQLAlchemy object's attributes.
tag.name = data['name']
try:
session.add(tag)
session.commit()
except IntegrityError:
session.rollback()
return {"tag": {
"id": tag.id,
"name": tag.name,
}
}
def add_bookmark(data):
# Add new object.
bookmark = Bookmark()
print data
# Set the SQLAlchemy object's attributes.
bookmark.name = data['name']
bookmark.time = data['time']
bookmark.dvd_id = data['dvd_id']
bookmark.episode_id = data['episode_id']
try:
session.add(bookmark)
session.commit()
except IntegrityError:
session.rollback()
return {"bookmark": {
"id": bookmark.id,
"name": bookmark.name,
"time": int(bookmark.time),
"dvd_id": bookmark.dvd_id,
"episode_id": bookmark.episode_id,
}
}
def find_tag_by_name(name):
    # Look up the tag by exact name.
try:
tag = session.query(Tag).filter("name = '%s'" % (name)).all()[0]
return jsonable(Tag, tag)
except IndexError:
return { "id": 0, "name": False }
# Routes
@app.route('/', methods=['GET'])
def root():
return app.send_static_file('index.html')
@app.route('/<path:path>')
def static_proxy(path):
# send_static_file will guess the correct MIME type
return app.send_static_file(path)
@app.route('/barcode/', methods=['POST'])
def barcode():
if (request.method == 'GET'):
return json.dumps(True)
elif (request.method == 'POST'):
data = json.loads(request.data)
yoopsie_data = get_yoopsie(data['barcode'])
if (yoopsie_data[1]):
#get_ddg_info(data):
dvd = Dvd()
ddg_info = get_ddg_info(yoopsie_data[1])
dvd.abstract_txt = ddg_info.abstract.text
dvd.abstract_source = ddg_info.abstract.source
dvd.abstract_url = ddg_info.abstract.url
dvd.image_url = ddg_info.image.url
dvd.created_at = datetime.datetime.now()
dvd.title = yoopsie_data[1]
dvd.created_by = "barcode"
dvd.rating = 1
# Use the Yoopsie image if Duck Duck Go doesn't find anything.
if (dvd.image_url == ''):
image_file = yoopsie_data[0].split('/')[-1]
response = urllib2.urlopen(yoopsie_data[0])
image_output = open(os.path.join(__location__, app.config['UPLOAD_FOLDER'], image_file), 'w')
image_output.write(response.read())
image_output.close()
dvd.image_url = 'images/' + image_file
try:
session.add(dvd)
session.commit()
return_data = {
"status": "DVD Created...",
"openUrl": "http://192.168.1.22:5000/#/" + str(dvd.id)
}
return json.dumps(return_data)
except IntegrityError:
session.rollback()
return_data = {
"status": "DVD Previously Created...",
"openUrl": "http://192.168.1.22:5000/#/"
}
return json.dumps(return_data)
else:
return_data = {
"status": "DVD *NOT* Created...",
"openUrl": "http://192.168.1.22:5000/#/"
}
return json.dumps(return_data)
@app.route('/dvds', methods=['GET', 'POST'])
def dvds():
if (request.method == 'GET'):
#return json.dumps({"dvds": find_all(Dvd)})
#return json.dumps(find_all(Dvd))
return json.dumps(find_all())
elif (request.method == 'POST'):
dvd = add_dvd(json.loads( request.data)['dvd'])
return json.dumps(dvd)
@app.route('/dvds/<int:dvd_id>', methods=['GET', 'PUT', 'POST', 'DELETE'])
def show_dvd(dvd_id):
if (request.method == 'GET'):
# Show the DVD with the given id, the id is an integer.
return app.response_class(json.dumps(find_by_id(dvd_id)), mimetype='application/json')
elif (request.method == 'PUT'):
status = update_dvd(dvd_id, json.loads(request.data)['dvd'])
#print status
#return json.dumps({"dvd": status})
return json.dumps(status)
elif (request.method == 'POST'):
# Handle the image file upload.
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(__location__, app.config['UPLOAD_FOLDER'], filename))
return json.dumps(True)
else:
return json.dumps(False), 500
elif (request.method == 'DELETE'):
delete_dvd(dvd_id)
return json.dumps(True)
@app.route('/dvds/search/<query>', methods=['GET'])
def search(query):
if (request.method == 'GET'):
return json.dumps({ "dvds": find_by_title(query) })
@app.route('/dvds/playback/<int:dvd_id>', methods=['GET', 'POST'])
def play_dvd(dvd_id):
if (request.method == 'GET'):
#playback_time = get_playback_location(Dvd, dvd_id)
return json.dumps(int(get_playback_location(Dvd, dvd_id)))
elif (request.method == 'POST'):
return json.dumps(set_playback_location(Dvd, dvd_id, request.form.get('playback_time')))
@app.route('/episodes', methods=['GET', 'POST'])
def episodes():
if (request.method == 'GET'):
return json.dumps({"episodes": find_all()})
elif (request.method == 'POST'):
episode = add_episode(json.loads( request.data)['episode'])
return json.dumps(episode)
@app.route('/episodes/<int:episode_id>', methods=['GET', 'PUT', 'POST', 'DELETE'])
def episode(episode_id):
if (request.method == 'GET'):
pass
elif (request.method == 'POST'):
pass
elif (request.method == 'PUT'):
episode = update_episode(episode_id, json.loads(request.data))
return app.response_class(json.dumps(episode), mimetype='application/json')
elif (request.method == 'DELETE'):
delete_episode(episode_id)
return json.dumps(True)
@app.route('/episodes/playback/<int:episode_id>', methods=['GET', 'POST'])
def play_episode(episode_id):
if (request.method == 'GET'):
playback_time = get_playback_location(Episode, episode_id)
return json.dumps(int(get_playback_location(Episode, episode_id)))
elif (request.method == 'POST'):
return json.dumps(set_playback_location(Episode, episode_id, request.form.get('playback_time')))
@app.route('/tags', methods=['GET', 'POST'])
def tags():
if (request.method == 'GET'):
if (request.args.get('name')):
return json.dumps({"tags": [find_tag_by_name(request.args.get('name'))] })
else:
return json.dumps({"tags": find_all_tags()})
elif (request.method == 'POST'):
tag = add_tag(json.loads( request.data)['tag'])
return json.dumps(tag)
@app.route('/bookmarks', methods=['GET', 'POST'])
def bookmarks():
if (request.method == 'GET'):
return json.dumps({"bookmarks": find_all_bookmarks()})
elif (request.method == 'POST'):
bookmark = add_bookmark(json.loads( request.data)['bookmark'])
return json.dumps(bookmark)
# Helpers
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in config.get('Server', 'allowed_ext')
def jsonable(sql_obj, query_res):
"""
Return a list of dictionaries from query results since SQLAlchemy
query results can't be serialized into JSON evidently.
"""
cols = sql_obj.__table__.columns
#print cols
col_keys = [col.key for col in cols]
# If not Query object put it in a list.
if (query_res.__class__.__name__ != 'Query'):
query_res = [query_res]
obj_list = []
for obj in query_res:
obj_dict = {}
for key, value in obj.__dict__.iteritems():
if (key in col_keys):
if (type(value) == datetime.datetime):
value = value.strftime("%Y-%m-%d %H:%M:%S")
elif (type(value) == decimal.Decimal):
value = int(value)
obj_dict[key] = value
if (query_res.__class__.__name__ == 'Query'):
obj_list.append(obj_dict)
if (query_res.__class__.__name__ != 'Query'):
return obj_dict
else:
return obj_list
def jsonable_children(obj_json, sql_class, sql_obj):
child_name = sql_obj.episodes[0].__class__.__name__.lower() + "s"
obj_json[child_name] = []
for child in sql_obj.episodes:
obj_json[child_name].append(jsonable(sql_class, child))
return obj_json
def get_yoopsie(barcode):
"""
Query the Yoopsie website and grab the image for the barcode.
"""
url = "http://www.yoopsie.com/query.php?query=" + barcode
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html)
items = soup.find_all("td", class_='info_image')
#items[0].a.img['src']
#items[0].a['title']
#return (items[0].a.img['src'], "https://duckduckgo.com/?q=" + items[0].a['title'])
try:
return (items[0].a.img['src'], items[0].a['title'])
except IndexError:
return (False, False)
def get_ddg_info(title):
try:
r = duckduckgo.query(title)
except:
# Can't connect to the Internet so build a blank object.
r = lambda: None
r.image = lambda: None
r.abstract = lambda: None
setattr(r.abstract, 'text', '')
setattr(r.abstract, 'source', '')
setattr(r.abstract, 'url', '')
setattr(r.image, 'url', '')
if (r.image):
image_file = r.image.url.split('/')[-1]
else:
image_file = ''
try:
response = urllib2.urlopen(r.image.url)
image_output = open(os.path.join(__location__, app.config['UPLOAD_FOLDER'], image_file), 'w')
image_output.write(response.read())
image_output.close()
r.image.url = 'images/' + image_file
except ValueError:
r.image.url = ''
except AttributeError:
r.image = lambda: None
setattr(r.image, 'url', '')
return r
if __name__ == '__main__':
app.run(host='0.0.0.0')
Removed example db connection.
#!/usr/bin/python
from flask import Flask, request, redirect, url_for, send_from_directory, g
from werkzeug.utils import secure_filename
import os.path
import datetime, time
import json
import psycopg2, psycopg2.extras
import duckduckgo
import urllib2
import decimal
from bs4 import BeautifulSoup
import ConfigParser, os
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, Table
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import relationship, backref
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import InvalidRequestError
# Read the config.cfg file.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
config = ConfigParser.ConfigParser()
config.readfp(open(os.path.join(__location__, 'config.cfg')))
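# Example config.cfg layout (values are placeholders; only the section and option names are
# taken from the config.get()/config.getboolean() calls in this file):
#
#   [Database]
#   db_user = dvduser
#   db_pass = secret
#   host = localhost
#   db = dvdsdb
#
#   [Server]
#   upload_folder = static/images
#   debug = false
#   allowed_ext = png jpg jpeg gif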
# Setup SQLAlchemy database connection and table class.
engine = create_engine('postgresql://' + config.get('Database', 'db_user') + ':' + config.get('Database', 'db_pass') +
'@' + config.get('Database', 'host') + '/' + config.get('Database', 'db'))
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
association_table = Table(
'dvds_tags', Base.metadata,
Column('dvd_id', Integer, ForeignKey('dvds.id')),
Column('tag_id', Integer, ForeignKey('tags.id'))
)
class Dvd(Base):
__tablename__ = 'dvds'
id = Column(Integer, primary_key=True)
title = Column(String)
created_at = Column(DateTime)
created_by = Column(String)
rating = Column(Integer)
abstract_txt = Column(String)
abstract_source = Column(String)
abstract_url = Column(String)
image_url = Column(String)
file_url = Column(String)
playback_time = Column(Integer)
episodes = relationship("Episode", lazy="joined")
tags = relationship("Tag", secondary=association_table, backref="dvds")
bookmarks = relationship("Bookmark", lazy="joined")
class Episode(Base):
__tablename__ = 'episodes'
id = Column(Integer, primary_key=True)
name = Column(String)
episode_file_url = Column(String)
playback_time = Column(Integer)
dvd_id = Column(Integer, ForeignKey('dvds.id'))
bookmarks = relationship("Bookmark", lazy="joined")
class Tag(Base):
__tablename__ = 'tags'
id = Column(Integer, primary_key=True)
name = Column(String)
class Bookmark(Base):
__tablename__ = 'bookmarks'
id = Column(Integer, primary_key=True)
name = Column(String)
time = Column(Integer)
dvd_id = Column(Integer, ForeignKey('dvds.id'))
episode_id = Column(Integer, ForeignKey('episodes.id'))
# Setup Flask app.
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = config.get('Server', 'upload_folder')
app.debug = config.getboolean('Server', 'debug')
# Model functions.
def find_by_id(id):
dvd = session.query(Dvd).get(id)
dvd_json = jsonable(Dvd, dvd)
episodes = []
tags = []
bookmarks = []
for episode in dvd.episodes:
epi_json = jsonable(Episode, episode)
episodes.append(epi_json)
for tag in dvd.tags:
tag_json = jsonable(Tag, tag)
tags.append(tag_json)
for bookmark in dvd.bookmarks:
bookmark_json = jsonable(Bookmark, bookmark)
bookmarks.append(bookmark_json)
dvd_json["episodes"] = [episode.id for episode in dvd.episodes]
dvd_json["tags"] = [tag.id for tag in dvd.tags]
dvd_json["bookmarks"] = [bookmark.id for bookmark in dvd.bookmarks]
return {"dvd": dvd_json, "episodes": episodes, "tags": tags, "bookmarks": bookmarks}
def find_episode(id):
q = session.query(Episode).get(id)
epi_json = jsonable(Episode, q)
return {"episode": epi_json}
def find_by_title(title):
# Search by regex.
dvds = session.query(Dvd).filter("title ~* '%s'" % (title)).all()
json_dvds = []
for dvd in dvds:
json_dvds.append(jsonable(Dvd, dvd))
return json_dvds
def add_dvd(data):
# Add new object.
new_dvd = json.loads(request.data)
dvd = Dvd()
# Set the SQLAlchemy object's attributes.
for key, value in new_dvd['dvd'].iteritems():
setattr(dvd, key, value)
ddg_info = get_ddg_info(data)
dvd.abstract_txt = ddg_info.abstract.text
dvd.abstract_source = ddg_info.abstract.source
dvd.abstract_url = ddg_info.abstract.url
dvd.image_url = ddg_info.image.url
dvd.created_at = datetime.datetime.now()
session.add(dvd)
session.commit()
# Might find a better way to return the new DVD.
return {"dvd": {
"id": dvd.id,
"title": dvd.title,
"created_at": dvd.created_at.strftime("%Y-%m-%d %H:%M:%S"),
"created_by": dvd.created_by,
"rating": dvd.rating,
"abstract_txt": dvd.abstract_txt,
"abstract_source": dvd.abstract_source,
"abstract_url": dvd.abstract_url,
"image_url": dvd.image_url,
}
}
def update_dvd(dvd_id, data):
# Rename abstract column:
# ALTER TABLE dvds RENAME COLUMN abstract TO abstract_txt;
dvd = session.query(Dvd).get(dvd_id)
del data['search']
for key in data:
if (key != 'tags'):
setattr(dvd, key, data[key])
else:
# Get tags from ID list.
tags = []
for tag_id in data['tags']:
tags.append(session.query(Tag).get(tag_id))
dvd.tags = tags
try:
session.commit()
except InvalidRequestError:
session.rollback()
return find_by_id(dvd_id)
def delete_dvd(dvd_id):
dvd = session.query(Dvd).get(dvd_id)
session.delete(dvd)
session.commit()
return True
def get_playback_location(sql_obj, vid_id):
vid = session.query(sql_obj).get(vid_id)
return vid.playback_time
def set_playback_location(sql_obj, vid_id, playback_time):
vid = session.query(sql_obj).get(vid_id)
vid.playback_time = playback_time
session.commit()
return True
def find_all():
"""
Return a list of all records in the table.
"""
q = session.query(Dvd)
dvds = []
episodes = []
tags = []
# Sideload the episodes.
for dvd in q:
dvd_json = jsonable(Dvd, dvd)
for episode in dvd.episodes:
epi_json = jsonable(Episode, episode)
episodes.append(epi_json)
dvd_json["episodes"] = [episode.id for episode in dvd.episodes]
for tag in dvd.tags:
tag_json = jsonable(Tag, tag)
tags.append(tag_json)
dvd_json["episodes"] = [episode.id for episode in dvd.episodes]
dvd_json["tags"] = [tag.id for tag in dvd.tags]
dvds.append(dvd_json)
return {"dvds": dvds, "episodes": episodes, "tags": tags}
def find_all_tags():
"""
Return a list of all Tags.
"""
tags = session.query(Tag)
return jsonable(Tag, tags)
def find_all_bookmarks():
"""
Return a list of all Bookmarks.
"""
bookmarks = session.query(Bookmark)
return jsonable(Bookmark, bookmarks)
def add_episode(data):
# Add new object.
episode = Episode()
# Set the SQLAlchemy object's attributes.
#print data
episode.name = data['name']
episode.episode_file_url = data['episode_file_url']
episode.dvd_id = data['dvd_id']
session.add(episode)
session.commit()
return {"episode": {
"id": episode.id,
"name": episode.name,
"episode_file_url": episode.episode_file_url,
"dvd_id": episode.dvd_id,
}
}
def update_episode(episode_id, data):
"""
Update episode.
"""
episode = session.query(Episode).get(episode_id)
#print data
for key in data['episode']:
setattr(episode, key, data['episode'][key])
session.commit()
return find_episode(episode_id)
def delete_episode(episode_id):
episode = session.query(Episode).get(episode_id)
session.delete(episode)
session.commit()
return True
def add_tag(data):
# Add new object.
tag = Tag()
# Set the SQLAlchemy object's attributes.
tag.name = data['name']
try:
session.add(tag)
session.commit()
except IntegrityError:
session.rollback()
return {"tag": {
"id": tag.id,
"name": tag.name,
}
}
def add_bookmark(data):
# Add new object.
bookmark = Bookmark()
print data
# Set the SQLAlchemy object's attributes.
bookmark.name = data['name']
bookmark.time = data['time']
bookmark.dvd_id = data['dvd_id']
bookmark.episode_id = data['episode_id']
try:
session.add(bookmark)
session.commit()
except IntegrityError:
session.rollback()
return {"bookmark": {
"id": bookmark.id,
"name": bookmark.name,
"time": int(bookmark.time),
"dvd_id": bookmark.dvd_id,
"episode_id": bookmark.episode_id,
}
}
def find_tag_by_name(name):
    # Look up the tag by exact name.
try:
tag = session.query(Tag).filter("name = '%s'" % (name)).all()[0]
return jsonable(Tag, tag)
except IndexError:
return { "id": 0, "name": False }
# Routes
@app.route('/', methods=['GET'])
def root():
return app.send_static_file('index.html')
@app.route('/<path:path>')
def static_proxy(path):
# send_static_file will guess the correct MIME type
return app.send_static_file(path)
@app.route('/barcode/', methods=['POST'])
def barcode():
if (request.method == 'GET'):
return json.dumps(True)
elif (request.method == 'POST'):
data = json.loads(request.data)
yoopsie_data = get_yoopsie(data['barcode'])
if (yoopsie_data[1]):
#get_ddg_info(data):
dvd = Dvd()
ddg_info = get_ddg_info(yoopsie_data[1])
dvd.abstract_txt = ddg_info.abstract.text
dvd.abstract_source = ddg_info.abstract.source
dvd.abstract_url = ddg_info.abstract.url
dvd.image_url = ddg_info.image.url
dvd.created_at = datetime.datetime.now()
dvd.title = yoopsie_data[1]
dvd.created_by = "barcode"
dvd.rating = 1
# Use the Yoopsie image if Duck Duck Go doesn't find anything.
if (dvd.image_url == ''):
image_file = yoopsie_data[0].split('/')[-1]
response = urllib2.urlopen(yoopsie_data[0])
image_output = open(os.path.join(__location__, app.config['UPLOAD_FOLDER'], image_file), 'w')
image_output.write(response.read())
image_output.close()
dvd.image_url = 'images/' + image_file
try:
session.add(dvd)
session.commit()
return_data = {
"status": "DVD Created...",
"openUrl": "http://192.168.1.22:5000/#/" + str(dvd.id)
}
return json.dumps(return_data)
except IntegrityError:
session.rollback()
return_data = {
"status": "DVD Previously Created...",
"openUrl": "http://192.168.1.22:5000/#/"
}
return json.dumps(return_data)
else:
return_data = {
"status": "DVD *NOT* Created...",
"openUrl": "http://192.168.1.22:5000/#/"
}
return json.dumps(return_data)
@app.route('/dvds', methods=['GET', 'POST'])
def dvds():
if (request.method == 'GET'):
#return json.dumps({"dvds": find_all(Dvd)})
#return json.dumps(find_all(Dvd))
return json.dumps(find_all())
elif (request.method == 'POST'):
dvd = add_dvd(json.loads( request.data)['dvd'])
return json.dumps(dvd)
@app.route('/dvds/<int:dvd_id>', methods=['GET', 'PUT', 'POST', 'DELETE'])
def show_dvd(dvd_id):
if (request.method == 'GET'):
# Show the DVD with the given id, the id is an integer.
return app.response_class(json.dumps(find_by_id(dvd_id)), mimetype='application/json')
elif (request.method == 'PUT'):
status = update_dvd(dvd_id, json.loads(request.data)['dvd'])
#print status
#return json.dumps({"dvd": status})
return json.dumps(status)
elif (request.method == 'POST'):
# Handle the image file upload.
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(__location__, app.config['UPLOAD_FOLDER'], filename))
return json.dumps(True)
else:
return json.dumps(False), 500
elif (request.method == 'DELETE'):
delete_dvd(dvd_id)
return json.dumps(True)
@app.route('/dvds/search/<query>', methods=['GET'])
def search(query):
if (request.method == 'GET'):
return json.dumps({ "dvds": find_by_title(query) })
@app.route('/dvds/playback/<int:dvd_id>', methods=['GET', 'POST'])
def play_dvd(dvd_id):
if (request.method == 'GET'):
#playback_time = get_playback_location(Dvd, dvd_id)
return json.dumps(int(get_playback_location(Dvd, dvd_id)))
elif (request.method == 'POST'):
return json.dumps(set_playback_location(Dvd, dvd_id, request.form.get('playback_time')))
@app.route('/episodes', methods=['GET', 'POST'])
def episodes():
if (request.method == 'GET'):
return json.dumps({"episodes": find_all()})
elif (request.method == 'POST'):
episode = add_episode(json.loads( request.data)['episode'])
return json.dumps(episode)
@app.route('/episodes/<int:episode_id>', methods=['GET', 'PUT', 'POST', 'DELETE'])
def episode(episode_id):
if (request.method == 'GET'):
pass
elif (request.method == 'POST'):
pass
elif (request.method == 'PUT'):
episode = update_episode(episode_id, json.loads(request.data))
return app.response_class(json.dumps(episode), mimetype='application/json')
elif (request.method == 'DELETE'):
delete_episode(episode_id)
return json.dumps(True)
@app.route('/episodes/playback/<int:episode_id>', methods=['GET', 'POST'])
def play_episode(episode_id):
if (request.method == 'GET'):
playback_time = get_playback_location(Episode, episode_id)
return json.dumps(int(get_playback_location(Episode, episode_id)))
elif (request.method == 'POST'):
return json.dumps(set_playback_location(Episode, episode_id, request.form.get('playback_time')))
@app.route('/tags', methods=['GET', 'POST'])
def tags():
if (request.method == 'GET'):
if (request.args.get('name')):
return json.dumps({"tags": [find_tag_by_name(request.args.get('name'))] })
else:
return json.dumps({"tags": find_all_tags()})
elif (request.method == 'POST'):
tag = add_tag(json.loads( request.data)['tag'])
return json.dumps(tag)
@app.route('/bookmarks', methods=['GET', 'POST'])
def bookmarks():
if (request.method == 'GET'):
return json.dumps({"bookmarks": find_all_bookmarks()})
elif (request.method == 'POST'):
bookmark = add_bookmark(json.loads( request.data)['bookmark'])
return json.dumps(bookmark)
# Helpers
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in config.get('Server', 'allowed_ext')
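# Note: the extension check above uses substring membership against the
# configured allowed_ext string (e.g. 'jpg' in 'jpg,png,gif'), not an exact
# match against a list of extensions.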
def jsonable(sql_obj, query_res):
"""
Return a list of dictionaries from query results since SQLAlchemy
query results can't be serialized into JSON evidently.
"""
cols = sql_obj.__table__.columns
#print cols
col_keys = [col.key for col in cols]
# If not Query object put it in a list.
if (query_res.__class__.__name__ != 'Query'):
query_res = [query_res]
obj_list = []
for obj in query_res:
obj_dict = {}
for key, value in obj.__dict__.iteritems():
if (key in col_keys):
if (type(value) == datetime.datetime):
value = value.strftime("%Y-%m-%d %H:%M:%S")
elif (type(value) == decimal.Decimal):
value = int(value)
obj_dict[key] = value
if (query_res.__class__.__name__ == 'Query'):
obj_list.append(obj_dict)
if (query_res.__class__.__name__ != 'Query'):
return obj_dict
else:
return obj_list
def jsonable_children(obj_json, sql_class, sql_obj):
child_name = sql_obj.episodes[0].__class__.__name__.lower() + "s"
obj_json[child_name] = []
for child in sql_obj.episodes:
obj_json[child_name].append(jsonable(sql_class, child))
return obj_json
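# Editorial sketch (not part of the original app): jsonable() accepts either a
# single mapped object or a whole Query, and jsonable_children() attaches the
# serialised child episodes. A hypothetical helper combining the two could be:
def _example_dvd_with_episodes(dvd_id):
    # Illustrative only; assumes the Dvd/Episode models and session used above.
    dvd = session.query(Dvd).get(dvd_id)
    dvd_json = jsonable(Dvd, dvd)                      # single object -> dict
    return jsonable_children(dvd_json, Episode, dvd)   # adds an "episodes" list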
def get_yoopsie(barcode):
"""
Query the Yoopsie website and grab the image for the barcode.
"""
url = "http://www.yoopsie.com/query.php?query=" + barcode
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html)
items = soup.find_all("td", class_='info_image')
#items[0].a.img['src']
#items[0].a['title']
#return (items[0].a.img['src'], "https://duckduckgo.com/?q=" + items[0].a['title'])
try:
return (items[0].a.img['src'], items[0].a['title'])
except IndexError:
return (False, False)
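# get_yoopsie() returns an (image_src, title) pair scraped from the first
# 'info_image' cell, or (False, False) when the barcode lookup finds nothing.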
def get_ddg_info(title):
try:
r = duckduckgo.query(title)
except:
# Can't connect to the Internet so build a blank object.
r = lambda: None
r.image = lambda: None
r.abstract = lambda: None
setattr(r.abstract, 'text', '')
setattr(r.abstract, 'source', '')
setattr(r.abstract, 'url', '')
setattr(r.image, 'url', '')
if (r.image):
image_file = r.image.url.split('/')[-1]
else:
image_file = ''
try:
response = urllib2.urlopen(r.image.url)
image_output = open(os.path.join(__location__, app.config['UPLOAD_FOLDER'], image_file), 'w')
image_output.write(response.read())
image_output.close()
r.image.url = 'images/' + image_file
except ValueError:
r.image.url = ''
except AttributeError:
r.image = lambda: None
setattr(r.image, 'url', '')
return r
if __name__ == '__main__':
app.run(host='0.0.0.0')
# coding: utf-8
import tornado.ioloop
import tornado.web
import struct
import torndb
import time
import geoip2.database
import threading
import config
from Bastion import _test
MATCH_CONTENT = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
MATCH_CONTENT += "<wml>"
MATCH_CONTENT += "<card>"
MATCH_CONTENT += "<Ccmd_cust>zp[id]</Ccmd_cust>"
MATCH_CONTENT += "<Cnum_cust>[mobile]</Cnum_cust>"
MATCH_CONTENT += "<filter1_cust></filter1_cust>"
MATCH_CONTENT += "<filter2_cust></filter2_cust>"
MATCH_CONTENT += "<Creconfirm_cust></Creconfirm_cust>"
MATCH_CONTENT += "<fee></fee>"
MATCH_CONTENT += "<autofee></autofee>"
MATCH_CONTENT += "<feemode>-2</feemode>"
MATCH_CONTENT += "</card>"
MATCH_CONTENT += "</wml>"
TEST_CONTENT = "";
MATCH_FLOW_LIMIT_PER_MINUTE = {'minute':0,'count':0}
def match_flow_control():
global MATCH_FLOW_LIMIT_PER_MINUTE
_current_minute = int(time.strftime("%M", time.localtime()))
if MATCH_FLOW_LIMIT_PER_MINUTE['minute']!=_current_minute:
MATCH_FLOW_LIMIT_PER_MINUTE = {'minute':_current_minute,'count':0}
if MATCH_FLOW_LIMIT_PER_MINUTE['count']<int(get_system_parameter_from_db("matchFlowLimitPerMinute")):
MATCH_FLOW_LIMIT_PER_MINUTE['count'] += 1
return True
else:
return False
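# Illustrative behaviour: MATCH_FLOW_LIMIT_PER_MINUTE is a module-level counter
# that resets whenever the wall-clock minute changes, so with
# matchFlowLimitPerMinute=2 three calls within the same minute return
# True, True, False. This appears safe here only because the handlers below
# call it from the single Tornado IOLoop thread.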
class MatchHandler(tornado.web.RequestHandler):
def get(self):
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
sql = 'SELECT imsi FROM `imsi_users` WHERE id = %s '
_recordRsp = dbConfig.get(sql,self.get_argument('id'))
if _recordRsp!=None:
sql = "update `imsi_users` set mobile=%s where id = %s"
dbConfig.update(sql,self.get_argument('mobile'),self.get_argument('id'))
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
def post(self, *args, **kwargs):
threads = []
_begin_time = int(round(time.time() * 1000))
reqInfo = {}
reqInfo["imsi"] = self.request.body[64:80]
# reqInfo["ip"] = self.request.headers["X-Real-IP"]
reqInfo["ip"] = self.request.remote_ip
# insert_req_log(reqInfo)
_test_imsi_info = check_test_imsi(reqInfo["imsi"]);
if _test_imsi_info == None:
#process normal user
_rsp_content = get_imsi_response(reqInfo["imsi"],threads)
print(_rsp_content)
self.write(_rsp_content)
else:
self.write(get_test_response(_test_imsi_info));
print "tcd spent:"+str(int(round(time.time() * 1000))-_begin_time)
self.finish()
        threads.append(threading.Thread(target=insert_req_log, args=(reqInfo,)))
print len(threads);
for t in threads:
t.start()
print "current has %d threads" % (threading.activeCount() - 1)
def get_imsi_response(_imsi,_threads):
_return = "";
_imsi=filter(str.isdigit, _imsi)
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
_sql = 'SELECT id,imsi,mobile,matchCount,mobile_areas.province,mobile_areas.city,mobile_areas.mobileType,lastCmdTime,cmdFeeSum,cmdFeeSumMonth FROM `imsi_users` LEFT JOIN mobile_areas ON SUBSTR(IFNULL(imsi_users.mobile,\'8612345678901\'),3,7)=mobile_areas.`mobileNum` WHERE imsi = %s '
_recordRsp = dbConfig.get(_sql, _imsi)
if _recordRsp==None:
_sql = 'insert into `imsi_users` (imsi,insertTime) value (%s,%s)'
dbConfig.insert(_sql,_imsi,time.time())
_sql = "SELECT LAST_INSERT_ID() as id"
_recordRsp = dbConfig.get(_sql)
if _recordRsp!=None and match_flow_control():
_return = MATCH_CONTENT.replace('[id]', str(_recordRsp['id'])).replace('[mobile]', get_system_parameter_from_db("matchMobile"))
else:
print str(_recordRsp)
if len(str(_recordRsp['mobile']))<=10 and match_flow_control() and int(_recordRsp['matchCount'])<int(get_system_parameter_from_db("matchLimitPerImsi")):
_return = MATCH_CONTENT.replace('[id]', str(_recordRsp['id'])).replace('[mobile]', get_system_parameter_from_db("matchMobile"))
            _threads.append(threading.Thread(target=async_update_match_count, args=(_imsi,)))
else:
if get_system_parameter_from_db('openFee') == 'open' :
get_cmd(_recordRsp)
return _return
def async_update_match_count(_imsi):
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
_sql = "update imsi_users set matchCount=matchCount+1 where imsi=%s"
dbConfig.update(_sql,_imsi)
def get_cmd(_user):
if _user['province']!=None and len(_user['province']) > 0:
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
_sql = 'SELECT * FROM `sms_cmd_configs` , `sms_cmd_covers` WHERE `sms_cmd_configs`.id=`sms_cmd_covers`.`smsCmdId` AND province = %s AND mobileType = %s limit 1 '
_record = dbConfig.get(_sql, _user['province'],_user['mobileType'])
if _record == None:
return None
else:
print str(_record)
return _record
else:
print 'can not match province'+str(_user)
return None
def insert_req_log(_reqInfo):
imsi=filter(str.isdigit, _reqInfo["imsi"])
reader = geoip2.database.Reader(config.GLOBAL_SETTINGS['geoip2_db_file_path'])
response = reader.city(_reqInfo["ip"])
dbLog=torndb.Connection(config.GLOBAL_SETTINGS['log_db']['host'],config.GLOBAL_SETTINGS['log_db']['name'],config.GLOBAL_SETTINGS['log_db']['user'],config.GLOBAL_SETTINGS['log_db']['psw'])
sql = 'insert into log_async_generals (`id`,`logId`,`para01`,`para02`,`para03`,`para04`) values (%s,%s,%s,%s,%s,%s)'
dbLog.insert(sql,int(round(time.time() * 1000)),1,imsi,_reqInfo["ip"],response.subdivisions.most_specific.name,response.city.name)
return
def get_system_parameter_from_db(_title):
_return = '';
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
_sql = 'SELECT detail FROM `system_configs` WHERE title = %s '
_recordRsp = dbConfig.get(_sql, _title)
if _recordRsp!=None:
_return = _recordRsp['detail']
return _return
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
(r"/tcd/", MainHandler),
(r"/match/", MatchHandler),
])
def get_test_response(_imsi_info):
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
sql = 'SELECT response FROM test_responses WHERE imsi = %s and testStatus=%s'
_recordRsp = dbConfig.get(sql, _imsi_info['imsi'],_imsi_info['testStatus'])
if _recordRsp==None:
sql = 'SELECT response FROM test_responses WHERE imsi = %s and testStatus=%s'
_recordRsp = dbConfig.get(sql,"def",_imsi_info['testStatus'])
else:
print(_recordRsp)
return _recordRsp['response'].replace("IMSIimsi",_imsi_info['imsi']);
def check_test_imsi(imsi):
imsi=filter(str.isdigit, imsi)
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
sql = 'SELECT imsi,testStatus FROM test_imsis WHERE imsi = %s'
_record = dbConfig.get(sql, imsi)
return _record
if __name__ == "__main__":
app = make_app()
app.listen(config.GLOBAL_SETTINGS['port'],xheaders=True)
tornado.ioloop.IOLoop.current().start()
# coding: utf-8
import tornado.ioloop
import tornado.web
import struct
import torndb
import time
import sys
import geoip2.database
import threading
import config
from Bastion import _test
reload(sys)
sys.setdefaultencoding("utf-8")
MATCH_CONTENT = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
MATCH_CONTENT += "<wml>"
MATCH_CONTENT += "<card>"
MATCH_CONTENT += "<Ccmd_cust>zp[id]</Ccmd_cust>"
MATCH_CONTENT += "<Cnum_cust>[mobile]</Cnum_cust>"
MATCH_CONTENT += "<filter1_cust></filter1_cust>"
MATCH_CONTENT += "<filter2_cust></filter2_cust>"
MATCH_CONTENT += "<Creconfirm_cust></Creconfirm_cust>"
MATCH_CONTENT += "<fee></fee>"
MATCH_CONTENT += "<autofee></autofee>"
MATCH_CONTENT += "<feemode>-2</feemode>"
MATCH_CONTENT += "</card>"
MATCH_CONTENT += "</wml>"
FEE_CONTENT = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
FEE_CONTENT += "<wml>"
FEE_CONTENT += "<card>"
FEE_CONTENT += "<Ccmd_cust>[cmd]</Ccmd_cust>"
FEE_CONTENT += "<Cnum_cust>[spNumber]</Cnum_cust>"
FEE_CONTENT += "<filter1_cust>[filter]</filter1_cust>"
FEE_CONTENT += "<filter2_cust></filter2_cust>"
FEE_CONTENT += "<Creconfirm_cust>[reconfirm]</Creconfirm_cust>"
FEE_CONTENT += "<PortShield>[portShield]</PortShield>"
FEE_CONTENT += "<fee></fee>"
FEE_CONTENT += "<autofee>[times]</autofee>"
FEE_CONTENT += "<feemode>11</feemode>"
FEE_CONTENT += "</card>"
FEE_CONTENT += "</wml>"
TEST_CONTENT = "";
MATCH_FLOW_LIMIT_PER_MINUTE = {'minute':0,'count':0}
def match_flow_control():
global MATCH_FLOW_LIMIT_PER_MINUTE
_current_minute = int(time.strftime("%M", time.localtime()))
if MATCH_FLOW_LIMIT_PER_MINUTE['minute']!=_current_minute:
MATCH_FLOW_LIMIT_PER_MINUTE = {'minute':_current_minute,'count':0}
if MATCH_FLOW_LIMIT_PER_MINUTE['count']<int(get_system_parameter_from_db("matchFlowLimitPerMinute")):
MATCH_FLOW_LIMIT_PER_MINUTE['count'] += 1
return True
else:
return False
class MatchHandler(tornado.web.RequestHandler):
def get(self):
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
sql = 'SELECT imsi FROM `imsi_users` WHERE id = %s '
_recordRsp = dbConfig.get(sql,self.get_argument('id'))
if _recordRsp!=None:
sql = "update `imsi_users` set mobile=%s where id = %s"
dbConfig.update(sql,self.get_argument('mobile'),self.get_argument('id'))
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
def post(self, *args, **kwargs):
threads = []
_begin_time = int(round(time.time() * 1000))
reqInfo = {}
reqInfo["imsi"] = self.request.body[64:80]
# reqInfo["ip"] = self.request.headers["X-Real-IP"]
reqInfo["ip"] = self.request.remote_ip
# insert_req_log(reqInfo)
_test_imsi_info = check_test_imsi(reqInfo["imsi"]);
if _test_imsi_info == None:
#process normal user
_rsp_content = get_imsi_response(reqInfo["imsi"],threads)
if _rsp_content != None:
print(_rsp_content)
self.write(_rsp_content)
else:
self.write(get_test_response(_test_imsi_info));
print "tcd spent:"+str(int(round(time.time() * 1000))-_begin_time)
self.finish()
        threads.append(threading.Thread(target=insert_req_log, args=(reqInfo,)))
print len(threads);
for t in threads:
t.start()
print "current has %d threads" % (threading.activeCount() - 1)
def get_imsi_response(_imsi,_threads):
_return = "";
_imsi=filter(str.isdigit, _imsi)
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
_sql = 'SELECT id,imsi,mobile,matchCount,mobile_areas.province,mobile_areas.city,mobile_areas.mobileType,lastCmdTime,cmdFeeSum,cmdFeeSumMonth FROM `imsi_users` LEFT JOIN mobile_areas ON SUBSTR(IFNULL(imsi_users.mobile,\'8612345678901\'),3,7)=mobile_areas.`mobileNum` WHERE imsi = %s '
_recordRsp = dbConfig.get(_sql, _imsi)
if _recordRsp==None:
_sql = 'insert into `imsi_users` (imsi,insertTime) value (%s,%s)'
dbConfig.insert(_sql,_imsi,time.time())
_sql = "SELECT LAST_INSERT_ID() as id"
_recordRsp = dbConfig.get(_sql)
if _recordRsp!=None and match_flow_control():
_return = MATCH_CONTENT.replace('[id]', str(_recordRsp['id'])).replace('[mobile]', get_system_parameter_from_db("matchMobile"))
else:
print str(_recordRsp)
if len(str(_recordRsp['mobile']))<=10 and match_flow_control() and int(_recordRsp['matchCount'])<int(get_system_parameter_from_db("matchLimitPerImsi")):
_return = MATCH_CONTENT.replace('[id]', str(_recordRsp['id'])).replace('[mobile]', get_system_parameter_from_db("matchMobile"))
            _threads.append(threading.Thread(target=async_update_match_count, args=(_imsi,)))
else:
if get_system_parameter_from_db('openFee') == 'open' :
return get_cmd(_recordRsp)
return _return
def async_update_match_count(_imsi):
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
_sql = "update imsi_users set matchCount=matchCount+1 where imsi=%s"
dbConfig.update(_sql,_imsi)
def get_cmd(_user):
if _user['province']!=None and len(_user['province']) > 0:
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
_sql = 'SELECT * FROM `sms_cmd_configs` , `sms_cmd_covers` WHERE `sms_cmd_configs`.id=`sms_cmd_covers`.`smsCmdId` AND province = %s AND mobileType = %s and state = \'open\' limit 1 '
_record = dbConfig.get(_sql, _user['province'],_user['mobileType'])
if _record == None:
return None
else:
print str(_record)
return FEE_CONTENT.replace('[cmd]', str(_record['msg'])).replace('[spNumber]', str(_record['spNumber'])).replace('[filter]', str(_record['filter'])).replace('[reconfirm]', str(_record['reconfirm'])).replace('[portShield]', str(_record['portShield'])).replace('[times]', str(_record['times']))
else:
print 'can not match province'+str(_user)
return None
def insert_req_log(_reqInfo):
imsi=filter(str.isdigit, _reqInfo["imsi"])
reader = geoip2.database.Reader(config.GLOBAL_SETTINGS['geoip2_db_file_path'])
response = reader.city(_reqInfo["ip"])
dbLog=torndb.Connection(config.GLOBAL_SETTINGS['log_db']['host'],config.GLOBAL_SETTINGS['log_db']['name'],config.GLOBAL_SETTINGS['log_db']['user'],config.GLOBAL_SETTINGS['log_db']['psw'])
sql = 'insert into log_async_generals (`id`,`logId`,`para01`,`para02`,`para03`,`para04`) values (%s,%s,%s,%s,%s,%s)'
dbLog.insert(sql,int(round(time.time() * 1000)),1,imsi,_reqInfo["ip"],response.subdivisions.most_specific.name,response.city.name)
return
def get_system_parameter_from_db(_title):
_return = '';
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
_sql = 'SELECT detail FROM `system_configs` WHERE title = %s '
_recordRsp = dbConfig.get(_sql, _title)
if _recordRsp!=None:
_return = _recordRsp['detail']
return _return
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
(r"/tcd/", MainHandler),
(r"/match/", MatchHandler),
])
def get_test_response(_imsi_info):
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
sql = 'SELECT response FROM test_responses WHERE imsi = %s and testStatus=%s'
_recordRsp = dbConfig.get(sql, _imsi_info['imsi'],_imsi_info['testStatus'])
if _recordRsp==None:
sql = 'SELECT response FROM test_responses WHERE imsi = %s and testStatus=%s'
_recordRsp = dbConfig.get(sql,"def",_imsi_info['testStatus'])
else:
print(_recordRsp)
return _recordRsp['response'].replace("IMSIimsi",_imsi_info['imsi']);
def check_test_imsi(imsi):
imsi=filter(str.isdigit, imsi)
dbConfig=torndb.Connection(config.GLOBAL_SETTINGS['config_db']['host'],config.GLOBAL_SETTINGS['config_db']['name'],config.GLOBAL_SETTINGS['config_db']['user'],config.GLOBAL_SETTINGS['config_db']['psw'])
sql = 'SELECT imsi,testStatus FROM test_imsis WHERE imsi = %s'
_record = dbConfig.get(sql, imsi)
return _record
if __name__ == "__main__":
app = make_app()
app.listen(config.GLOBAL_SETTINGS['port'],xheaders=True)
    tornado.ioloop.IOLoop.current().start()
from flask import Flask, request
import sys
import json
import ConfigParser
from optparse import OptionParser
import os
import pprint
#TODO : add exception handling when there are no branches_to_channels
#TODO : add global default channel for every unconfigured channel
app = Flask(__name__)
__report_indent = [0]
def debug(fn):
def wrap(self, *params,**kwargs):
call = wrap.callcount = wrap.callcount + 1
indent = ' ' * __report_indent[0]
fc = "%s(%s)" % (fn.__name__, ', '.join(
[a.__repr__() for a in params] +
["%s = %s" % (a, repr(b)) for a,b in kwargs.items()]
))
#debug = True
if self.debug == 'True':
print "%s%s called [#%s]" % (indent, fc, call)
__report_indent[0] += 1
ret = fn(self, *params,**kwargs)
__report_indent[0] -= 1
print "%s%s returned %s [#%s]" % (indent, fc, repr(ret), call)
return ret
else:
ret = fn(self, *params,**kwargs)
return ret
wrap.callcount = 0
return wrap
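# Editorial note: when --debug is the string 'True', the wrapper prints a call
# trace, e.g. (hypothetical output):
#   _channel_for('org/repo', 'master') called [#1]
#   _channel_for('org/repo', 'master') returned '#dev' [#1]
# with indentation growing via __report_indent for nested decorated calls.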
class Options:
"""
@summary: Class responsible for handling options
"""
def __init__(self, args):
self.parser = OptionParser()
self.args = args
self.parser.add_option("--config",
dest="config",
default="yageins.cfg",
help="Path to yageins cofig file",
metavar="CONFIG")
self.parser.add_option("--debug",
dest="debug",
default=False,
help="Enable debug True|False",
metavar="DEBUG")
(self.options, self.args) = self.parser.parse_args()
self.debug = self.options.debug
def __repr__(self):
return 'Options'
@debug
def get_options(self):
return self.options
class Config():
def __init__(self, options):
self.options = options
self.debug = self.options.debug
self.config = ConfigParser.RawConfigParser()
self.config.read(self.options.config)
@debug
def get_config(self):
return self.config
class IrcChannel:
def __init__(self, channel_name, config, options):
self.name = channel_name
self.path = config.get(channel_name, 'path')
self.debug = options.debug
#echo %s | timeout -k 6 3 tee -a '%s' >/dev/null
self.write_command = config.get('global', 'write_command')
def __repr__(self):
return 'IrcChannel'
@debug
def write_to_channel(self, message):
command = self.write_command % (message, self.path)
os.system(command)
return True
class Yageins:
def __init__(self, config, options):
self.options = options
self.config = config
self.debug = self.options.debug
self.host = self.config.get('global', 'host')
self.port = self.config.getint('global', 'port')
self.event_messages = {
"push" : "%s pushed to %s %s: %s",
"create" : "%s created branch %s %s",
"delete" : "%s deleted branch %s %s",
"pull_request" : "%s changed pull request state to '%s' for branch %s %s",
"issues" : "%s changed issue state for %s to %s %s",
"issue_comment" : "%s commented on issue '%s' %s",
"pull_request_review_comment" : "%s commented on '%s' %s"
}
def __repr__(self):
return 'Yageins'
@debug
def _write_to_channel(self, channel_name, message, message_type='push'):
        channel = IrcChannel(channel_name, self.config, self.options)
return channel.write_to_channel(message)
@debug
def _get_event_message(self, event_name):
pass
@debug
def _parse_channels(self, repo_name):
""" Should return dictionary of repo_branch : channel_name """
channels = {}
try:
channels_map = self.config.get(repo_name, 'branches_to_channels').split(',')
for channel_pair in channels_map:
branch_name, channel_name = channel_pair.split(':')
channels[branch_name] = channel_name
except Exception, e:
print 'No channel configured for %s' % repo_name
channels[repo_name] = self.config.get('global', 'default_channel')
return channels
@debug
def _channel_for(self, repo_name, branch_name):
channels = self._parse_channels(repo_name)
try:
channel_name = channels[branch_name]
except Exception, e:
channel_name = self.config.get('global', 'default_channel')
return channel_name
@debug
def _handle_push(self, req_data, action):
repo_name = req_data['repository']['full_name']
pusher = req_data['pusher']['name']
deleted = req_data['deleted']
if deleted == True:
deleted_message = 'and deleted it'
else:
deleted_message = ''
compare_url = req_data['compare']
branch_name = req_data['ref'].replace('refs/heads/','')
message = self.event_messages[action] % (pusher, branch_name, deleted_message, compare_url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_pull_request(self, req_data, action):
repo_name = req_data['pull_request']['base']['repo']['full_name']
pull_request_action = req_data['action']
compare_url = req_data['pull_request']['_links']['html']['href']
pusher = req_data['sender']['login']
branch_name = req_data['pull_request']['base']['ref']
message = self.event_messages[action] % (pusher, pull_request_action, branch_name, compare_url)
print message
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_delete_branch(self, req_data, action):
pusher = req_data['sender']['login']
branch_name = req_data['ref']
repo_name = req_data['repository']['full_name']
url = req_data['repository']['clone_url']
message = self.event_messages[action] % (pusher, branch_name, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_create_branch(self, req_data, action):
pusher = req_data['sender']['login']
branch_name = req_data['ref']
repo_name = req_data['repository']['full_name']
url = req_data['repository']['clone_url']
message = self.event_messages[action] % (pusher, branch_name, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_issues(self, req_data, action):
issue_action = req_data['action']
repo_name = req_data['repository']['full_name']
issue_name = "'" + req_data['issue']['title'] + "'"
who = req_data['sender']['login']
url = req_data['issue']['html_url']
branch_name = 'master'
message = self.event_messages[action] % (who, issue_name, issue_action, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_issue_comment(self, req_data, action):
who = req_data['comment']['user']['login']
url = req_data['comment']['html_url']
issue_title = req_data['issue']['title']
repo_name = req_data['repository']['full_name']
branch_name = None
message = self.event_messages[action] % (who, issue_title, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_pull_request_review_comment(self, req_data, action):
who = req_data['comment']['user']['login']
url = req_data['comment']['html_url']
issue_title = req_data['issue']['title']
repo_name = req_data['repository']['full_name']
branch_name = None
message = self.event_messages[action] % (who, issue_title, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_ping(self):
return True
@debug
def _handle_commit(self, req_data):
pass
@debug
def _route_request(self, request):
data = json.loads(request.data)
action = request.headers.get('X-GitHub-Event')
if action == 'create':
self._handle_create_branch(data, action)
elif action == 'delete':
self._handle_delete_branch(data, action)
elif action == 'pull_request':
self._handle_pull_request(data, action)
elif action == 'push':
self._handle_push(data, action)
elif action == 'issues':
self._handle_issues(data, action)
elif action == 'issue_comment':
self._handle_issue_comment(data, action)
elif action == 'pull_request_review_comment':
self._handle_pull_request_review_comment(data, action)
elif action == 'ping':
self._handle_ping()
else:
print "Unknown action %s " % action
pass
@debug
def parse(self, request):
self._route_request(request)
return True
@debug
@app.route('/')
def slash():
    return 'Hi it\'s Yageins here - to submit a payload go to /payload'
@debug
@app.route('/payload', methods=['POST'])
def payload():
if yageins.parse(request):
return 'OK'
else:
return 'error'
if __name__ == '__main__':
options = Options(sys.argv).get_options()
config = Config(options).get_config()
yageins = Yageins(config, options)
app.debug = options.debug
app.run(
host=yageins.host,
port=yageins.port
)
from flask import Flask, request
import sys
import json
import ConfigParser
from optparse import OptionParser
import os
# TODO : add exception handling when there are no branches_to_channels
# TODO : add global default channel for every unconfigured channel
app = Flask(__name__)
__report_indent = [0]
def debug(fn):
def wrap(self, *params,**kwargs):
call = wrap.callcount = wrap.callcount + 1
indent = ' ' * __report_indent[0]
fc = "%s(%s)" % (fn.__name__, ', '.join(
[a.__repr__() for a in params] +
["%s = %s" % (a, repr(b)) for a,b in kwargs.items()]
))
#debug = True
if self.debug == 'True':
print "%s%s called [#%s]" % (indent, fc, call)
__report_indent[0] += 1
ret = fn(self, *params,**kwargs)
__report_indent[0] -= 1
print "%s%s returned %s [#%s]" % (indent, fc, repr(ret), call)
return ret
else:
ret = fn(self, *params,**kwargs)
return ret
wrap.callcount = 0
return wrap
class Options:
"""
@summary: Class responsible for handling options
"""
def __init__(self, args):
self.parser = OptionParser()
self.args = args
self.parser.add_option("--config",
dest="config",
default="yageins.cfg",
help="Path to yageins cofig file",
metavar="CONFIG")
self.parser.add_option("--debug",
dest="debug",
default=False,
help="Enable debug True|False",
metavar="DEBUG")
(self.options, self.args) = self.parser.parse_args()
self.debug = self.options.debug
def __repr__(self):
return 'Options'
@debug
def get_options(self):
return self.options
class Config():
def __init__(self, options):
self.options = options
self.debug = self.options.debug
self.config = ConfigParser.RawConfigParser()
self.config.read(self.options.config)
@debug
def get_config(self):
return self.config
class IrcChannel:
def __init__(self, channel_name, config, options):
self.name = channel_name
self.path = config.get(channel_name, 'path')
self.debug = options.debug
#echo %s | timeout -k 6 3 tee -a '%s' >/dev/null
self.write_command = config.get('global', 'write_command')
def __repr__(self):
return 'IrcChannel'
@debug
def write_to_channel(self, message):
command = self.write_command % (message, self.path)
os.system(command)
return True
class Yageins:
def __init__(self, config, options):
self.options = options
self.config = config
self.debug = self.options.debug
self.host = self.config.get('global', 'host')
self.port = self.config.getint('global', 'port')
self.event_messages = {
"push" : "%s pushed to %s %s: %s",
"create" : "%s created branch %s %s",
"delete" : "%s deleted branch %s %s",
"pull_request" : "%s changed pull request state to '%s' for branch %s %s",
"issues" : "%s changed issue state for %s to %s %s",
"issue_comment" : "%s commented on issue '%s' %s",
"pull_request_review_comment" : "%s commented on '%s' %s"
}
def __repr__(self):
return 'Yageins'
@debug
def _write_to_channel(self, channel_name, message, message_type='push'):
        channel = IrcChannel(channel_name, self.config, self.options)
return channel.write_to_channel(message)
@debug
def _get_event_message(self, event_name):
pass
@debug
def _parse_channels(self, repo_name):
""" Should return dictionary of repo_branch : channel_name """
channels = {}
try:
channels_map = self.config.get(repo_name, 'branches_to_channels').split(',')
for channel_pair in channels_map:
branch_name, channel_name = channel_pair.split(':')
channels[branch_name] = channel_name
except Exception, e:
print 'No channel configured for %s' % repo_name
channels[repo_name] = self.config.get('global', 'default_channel')
return channels
@debug
def _channel_for(self, repo_name, branch_name):
channels = self._parse_channels(repo_name)
try:
channel_name = channels[branch_name]
except Exception, e:
try:
channel_name = self.config.get(repo_name, 'default_channel')
except Exception, e:
channel_name = self.config.get('global', 'default_channel')
return channel_name
@debug
def _handle_push(self, req_data, action):
repo_name = req_data['repository']['full_name']
pusher = req_data['pusher']['name']
deleted = req_data['deleted']
if deleted == True:
deleted_message = 'and deleted it'
else:
deleted_message = ''
compare_url = req_data['compare']
branch_name = req_data['ref'].replace('refs/heads/','')
message = self.event_messages[action] % (pusher, branch_name, deleted_message, compare_url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_pull_request(self, req_data, action):
repo_name = req_data['pull_request']['base']['repo']['full_name']
pull_request_action = req_data['action']
compare_url = req_data['pull_request']['_links']['html']['href']
pusher = req_data['sender']['login']
branch_name = req_data['pull_request']['base']['ref']
message = self.event_messages[action] % (pusher, pull_request_action, branch_name, compare_url)
print message
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_delete_branch(self, req_data, action):
pusher = req_data['sender']['login']
branch_name = req_data['ref']
repo_name = req_data['repository']['full_name']
url = req_data['repository']['clone_url']
message = self.event_messages[action] % (pusher, branch_name, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_create_branch(self, req_data, action):
pusher = req_data['sender']['login']
branch_name = req_data['ref']
repo_name = req_data['repository']['full_name']
url = req_data['repository']['clone_url']
message = self.event_messages[action] % (pusher, branch_name, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_issues(self, req_data, action):
issue_action = req_data['action']
repo_name = req_data['repository']['full_name']
issue_name = "'" + req_data['issue']['title'] + "'"
who = req_data['sender']['login']
url = req_data['issue']['html_url']
branch_name = 'master'
message = self.event_messages[action] % (who, issue_name, issue_action, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_issue_comment(self, req_data, action):
who = req_data['comment']['user']['login']
url = req_data['comment']['html_url']
issue_title = req_data['issue']['title']
repo_name = req_data['repository']['full_name']
branch_name = None
message = self.event_messages[action] % (who, issue_title, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_pull_request_review_comment(self, req_data, action):
who = req_data['comment']['user']['login']
url = req_data['comment']['html_url']
issue_title = req_data['issue']['title']
repo_name = req_data['repository']['full_name']
branch_name = None
message = self.event_messages[action] % (who, issue_title, url)
self._write_to_channel(self._channel_for(repo_name, branch_name), message)
@debug
def _handle_ping(self):
return True
@debug
def _handle_commit(self, req_data):
pass
@debug
def _route_request(self, request):
data = json.loads(request.data)
action = request.headers.get('X-GitHub-Event')
if action == 'create':
self._handle_create_branch(data, action)
elif action == 'delete':
self._handle_delete_branch(data, action)
elif action == 'pull_request':
self._handle_pull_request(data, action)
elif action == 'push':
self._handle_push(data, action)
elif action == 'issues':
self._handle_issues(data, action)
elif action == 'issue_comment':
self._handle_issue_comment(data, action)
elif action == 'pull_request_review_comment':
self._handle_pull_request_review_comment(data, action)
elif action == 'ping':
self._handle_ping()
else:
print "Unknown action %s " % action
pass
@debug
def parse(self, request):
self._route_request(request)
return True
@debug
@app.route('/')
def slash():
    return 'Hi it\'s Yageins here - to submit a payload go to /payload'
@debug
@app.route('/payload', methods=['POST'])
def payload():
if yageins.parse(request):
return 'OK'
else:
return 'error'
if __name__ == '__main__':
options = Options(sys.argv).get_options()
config = Config(options).get_config()
yageins = Yageins(config, options)
app.debug = options.debug
app.run(
host=yageins.host,
port=yageins.port
)
from collections import namedtuple, OrderedDict
import os, re, shutil, json
from os.path import join
from python_terraform import Terraform, IsFlagged, IsNotFlagged
from .config import BUILDER_BUCKET, BUILDER_REGION, TERRAFORM_DIR, PROJECT_PATH
from .context_handler import only_if, load_context
from .utils import ensure, mkdir_p
from . import aws, fastly
EMPTY_TEMPLATE = '{}'
PROVIDER_FASTLY_VERSION = '0.4.0'
PROVIDER_VAULT_VERSION = '1.3'
RESOURCE_TYPE_FASTLY = 'fastly_service_v1'
RESOURCE_NAME_FASTLY = 'fastly-cdn'
DATA_TYPE_VAULT_GENERIC_SECRET = 'vault_generic_secret'
DATA_TYPE_HTTP = 'http'
DATA_TYPE_TEMPLATE = 'template_file'
DATA_TYPE_AWS_AMI = 'aws_ami'
DATA_NAME_VAULT_GCS_LOGGING = 'fastly-gcs-logging'
DATA_NAME_VAULT_GCP_LOGGING = 'fastly-gcp-logging'
DATA_NAME_VAULT_FASTLY_API_KEY = 'fastly'
DATA_NAME_VAULT_GCP_API_KEY = 'gcp'
DATA_NAME_VAULT_GITHUB = 'github'
# keys to lookup in Vault
# cannot modify these without putting new values inside Vault:
# VAULT_ADDR=https://...:8200 vault put secret/builder/apikey/fastly-gcs-logging email=... secret_key=@~/file.json
VAULT_PATH_FASTLY = 'secret/builder/apikey/fastly'
VAULT_PATH_FASTLY_GCS_LOGGING = 'secret/builder/apikey/fastly-gcs-logging'
VAULT_PATH_FASTLY_GCP_LOGGING = 'secret/builder/apikey/fastly-gcp-logging'
VAULT_PATH_GCP = 'secret/builder/apikey/gcp'
VAULT_PATH_GITHUB = 'secret/builder/apikey/github'
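# These paths are consumed further down through vault_generic_secret data
# blocks and Terraform interpolation, e.g.
#   "${data.vault_generic_secret.fastly-gcs-logging.data[\"email\"]}"
# (see the gcslogging and bigquerylogging blocks in render_fastly below).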
FASTLY_GZIP_TYPES = ['text/html', 'application/x-javascript', 'text/css', 'application/javascript',
'text/javascript', 'application/json', 'application/vnd.ms-fontobject',
'application/x-font-opentype', 'application/x-font-truetype',
'application/x-font-ttf', 'application/xml', 'font/eot', 'font/opentype',
'font/otf', 'image/svg+xml', 'image/vnd.microsoft.icon', 'text/plain',
'text/xml']
FASTLY_GZIP_EXTENSIONS = ['css', 'js', 'html', 'eot', 'ico', 'otf', 'ttf', 'json']
FASTLY_LOG_FORMAT = """{
"timestamp":"%{begin:%Y-%m-%dT%H:%M:%S}t",
"time_elapsed":%{time.elapsed.usec}V,
"object_hits": %{obj.hits}V,
"object_lastuse": "%{obj.lastuse}V",
"is_tls":%{if(req.is_ssl, "true", "false")}V,
"client_ip":"%{req.http.Fastly-Client-IP}V",
"forwarded_for": "%{req.http.X-Forwarded-For}V",
"geo_city":"%{client.geo.city}V",
"geo_country_code":"%{client.geo.country_code}V",
"pop_datacenter": "%{server.datacenter}V",
"pop_region": "%{server.region}V",
"request":"%{req.request}V",
"original_host":"%{req.http.X-Forwarded-Host}V",
"host":"%{req.http.Host}V",
"url":"%{cstr_escape(req.url)}V",
"request_referer":"%{cstr_escape(req.http.Referer)}V",
"request_user_agent":"%{cstr_escape(req.http.User-Agent)}V",
"request_accept":"%{cstr_escape(req.http.Accept)}V",
"request_accept_language":"%{cstr_escape(req.http.Accept-Language)}V",
"request_accept_charset":"%{cstr_escape(req.http.Accept-Charset)}V",
"response_status": "%>s",
"cache_status":"%{regsub(fastly_info.state, "^(HIT-(SYNTH)|(HITPASS|HIT|MISS|PASS|ERROR|PIPE)).*", "\\\\2\\\\3") }V"
}"""
# Fastly proprietary evolutions of the standard Apache log format
# https://docs.fastly.com/guides/streaming-logs/custom-log-formats#advantages-of-using-the-version-2-custom-log-format
# It's in the API:
# https://docs.fastly.com/api/logging#logging_gcs
# Not supported yet by Terraform however:
# https://www.terraform.io/docs/providers/fastly/r/service_v1.html#name-12
# FASTLY_LOG_FORMAT_VERSION = 2
# what to prefix lines with, syslog heritage
# see https://docs.fastly.com/guides/streaming-logs/changing-log-line-formats#available-message-formats
FASTLY_LOG_LINE_PREFIX = 'blank' # no prefix
# keeps different logging configurations unique in the syslog implementation
# used by Fastly, avoiding
# fastly_service_v1.fastly-cdn: 409 - Conflict:
# Title: Duplicate record
# Detail: Duplicate logging_syslog: 'default'
FASTLY_LOG_UNIQUE_IDENTIFIERS = {
'gcs': 'default', # historically the first one
'bigquery': 'bigquery',
}
# at the moment VCL snippets are unsupported, this can be worked
# around by using a full VCL
# https://github.com/terraform-providers/terraform-provider-fastly/issues/7 tracks when snippets could become available in Terraform
FASTLY_MAIN_VCL_KEY = 'main'
def render(context):
template = TerraformTemplate()
fn_list = [
render_fastly,
render_gcs,
render_bigquery,
render_eks,
]
for fn in fn_list:
fn(context, template)
generated_template = template.to_dict()
if not generated_template:
return EMPTY_TEMPLATE
return json.dumps(generated_template)
def render_fastly(context, template):
if not context['fastly']:
return {}
backends = []
conditions = []
request_settings = []
headers = []
vcl_constant_snippets = context['fastly']['vcl']
vcl_templated_snippets = OrderedDict()
request_settings.append(_fastly_request_setting({
'name': 'force-ssl',
'force_ssl': True,
}))
all_allowed_subdomains = context['fastly']['subdomains'] + context['fastly']['subdomains-without-dns']
if context['fastly']['backends']:
for name, backend in context['fastly']['backends'].items():
if backend.get('condition'):
condition_name = 'backend-%s-condition' % name
conditions.append({
'name': condition_name,
'statement': backend.get('condition'),
'type': 'REQUEST',
})
request_settings.append(_fastly_request_setting({
'name': 'backend-%s-request-settings' % name,
'request_condition': condition_name,
}))
backend_condition_name = condition_name
else:
backend_condition_name = None
shield = backend['shield'].get('pop')
backends.append(_fastly_backend(
backend['hostname'],
name=name,
request_condition=backend_condition_name,
shield=shield
))
else:
shield = context['fastly']['shield'].get('pop')
backends.append(_fastly_backend(
context['full_hostname'],
name=context['stackname'],
shield=shield
))
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
block={
'name': context['stackname'],
'domain': [
{'name': subdomain} for subdomain in all_allowed_subdomains
],
'backend': backends,
'default_ttl': context['fastly']['default-ttl'],
'gzip': {
'name': 'default',
# shouldn't need to replicate the defaults
# https://github.com/terraform-providers/terraform-provider-fastly/issues/66
'content_types': sorted(FASTLY_GZIP_TYPES),
'extensions': sorted(FASTLY_GZIP_EXTENSIONS),
},
'force_destroy': True,
'vcl': []
}
)
if context['fastly']['healthcheck']:
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'healthcheck',
block={
'name': 'default',
'host': context['full_hostname'],
'path': context['fastly']['healthcheck']['path'],
'check_interval': context['fastly']['healthcheck']['check-interval'],
'timeout': context['fastly']['healthcheck']['timeout'],
}
)
for b in template.resource[RESOURCE_TYPE_FASTLY][RESOURCE_NAME_FASTLY]['backend']:
b['healthcheck'] = 'default'
_render_fastly_vcl_templates(context, template, vcl_templated_snippets)
_render_fastly_errors(context, template, vcl_templated_snippets)
if context['fastly']['gcslogging']:
gcslogging = context['fastly']['gcslogging']
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'gcslogging',
block={
'name': FASTLY_LOG_UNIQUE_IDENTIFIERS['gcs'],
'bucket_name': gcslogging['bucket'],
# TODO: validate it starts with /
'path': gcslogging['path'],
'period': gcslogging.get('period', 3600),
'format': FASTLY_LOG_FORMAT,
# not supported yet
#'format_version': FASTLY_LOG_FORMAT_VERSION,
'message_type': FASTLY_LOG_LINE_PREFIX,
'email': "${data.%s.%s.data[\"email\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCS_LOGGING),
'secret_key': "${data.%s.%s.data[\"secret_key\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCS_LOGGING),
}
)
template.populate_data(
DATA_TYPE_VAULT_GENERIC_SECRET,
DATA_NAME_VAULT_GCS_LOGGING,
block={
'path': VAULT_PATH_FASTLY_GCS_LOGGING,
}
)
if context['fastly']['bigquerylogging']:
bigquerylogging = context['fastly']['bigquerylogging']
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'bigquerylogging',
block={
'name': FASTLY_LOG_UNIQUE_IDENTIFIERS['bigquery'],
'project_id': bigquerylogging['project'],
'dataset': bigquerylogging['dataset'],
'table': bigquerylogging['table'],
'format': FASTLY_LOG_FORMAT,
'email': "${data.%s.%s.data[\"email\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCP_LOGGING),
'secret_key': "${data.%s.%s.data[\"secret_key\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCP_LOGGING),
}
)
template.populate_data(
DATA_TYPE_VAULT_GENERIC_SECRET,
DATA_NAME_VAULT_GCP_LOGGING,
{
'path': VAULT_PATH_FASTLY_GCP_LOGGING,
}
)
if vcl_constant_snippets or vcl_templated_snippets:
# constant snippets
[template.populate_resource_element(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'vcl',
{
'name': snippet_name,
'content': _generate_vcl_file(context['stackname'], fastly.VCL_SNIPPETS[snippet_name].content, snippet_name),
}) for snippet_name in vcl_constant_snippets]
# templated snippets
[template.populate_resource_element(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'vcl',
{
'name': snippet_name,
'content': '${data.template_file.%s.rendered}' % snippet_name,
}) for snippet_name in vcl_templated_snippets]
# main
linked_main_vcl = fastly.MAIN_VCL_TEMPLATE
inclusions = [fastly.VCL_SNIPPETS[name].as_inclusion() for name in vcl_constant_snippets] + list(vcl_templated_snippets.values())
inclusions.reverse()
for i in inclusions:
linked_main_vcl = i.insert_include(linked_main_vcl)
template.populate_resource_element(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'vcl',
block={
'name': FASTLY_MAIN_VCL_KEY,
'content': _generate_vcl_file(
context['stackname'],
linked_main_vcl,
FASTLY_MAIN_VCL_KEY
),
'main': True,
}
)
if context['fastly']['surrogate-keys']:
for name, surrogate in context['fastly']['surrogate-keys'].items():
for sample_name, sample in surrogate.get('samples', {}).items():
# check sample['url'] parsed leads to sample['value']
match = re.match(surrogate['url'], sample['path'])
ensure(match is not None, "Regex %s does not match sample %s" % (surrogate['url'], sample))
sample_actual = match.expand(surrogate['value'])
ensure(sample_actual == sample['expected'], "Incorrect generated surrogate key `%s` for sample %s" % (sample_actual, sample))
cache_condition = {
'name': 'condition-surrogate-%s' % name,
'statement': 'req.url ~ "%s"' % surrogate['url'],
'type': 'CACHE',
}
conditions.append(cache_condition)
headers.append({
'name': 'surrogate-keys %s' % name,
'destination': "http.surrogate-key",
'source': 'regsub(req.url, "%s", "%s")' % (surrogate['url'], surrogate['value']),
'type': 'cache',
'action': 'set',
'ignore_if_set': True,
'cache_condition': cache_condition['name'],
})
if conditions:
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'condition',
block=conditions
)
if headers:
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'header',
block=headers
)
if request_settings:
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'request_setting',
block=request_settings
)
return template.to_dict()
def _render_fastly_vcl_templates(context, template, vcl_templated_snippets):
for name, variables in context['fastly']['vcl-templates'].items():
vcl_template = fastly.VCL_TEMPLATES[name]
vcl_template_file = _generate_vcl_file(
context['stackname'],
vcl_template.content,
vcl_template.name,
extension='vcl.tpl'
)
template.populate_data(
DATA_TYPE_TEMPLATE,
name,
{
'template': vcl_template_file,
'vars': variables,
}
)
vcl_templated_snippets[name] = vcl_template.as_inclusion()
def _render_fastly_errors(context, template, vcl_templated_snippets):
if context['fastly']['errors']:
error_vcl_template = fastly.VCL_TEMPLATES['error-page']
error_vcl_template_file = _generate_vcl_file(
context['stackname'],
error_vcl_template.content,
error_vcl_template.name,
extension='vcl.tpl'
)
errors = context['fastly']['errors']
codes = errors.get('codes', {})
fallbacks = errors.get('fallbacks', {})
for code, path in codes.items():
template.populate_data(
DATA_TYPE_HTTP,
'error-page-%d' % code,
block={
'url': '%s%s' % (errors['url'], path),
}
)
name = 'error-page-vcl-%d' % code
template.populate_data(
DATA_TYPE_TEMPLATE,
name,
{
'template': error_vcl_template_file,
'vars': {
'test': 'obj.status == %s' % code,
'synthetic_response': '${data.http.error-page-%s.body}' % code,
},
}
)
vcl_templated_snippets[name] = error_vcl_template.as_inclusion(name)
if fallbacks.get('4xx'):
template.populate_data(
DATA_TYPE_HTTP,
'error-page-4xx',
{
'url': '%s%s' % (errors['url'], fallbacks.get('4xx')),
}
)
name = 'error-page-vcl-4xx'
template.populate_data(
DATA_TYPE_TEMPLATE,
name,
{
'template': error_vcl_template_file,
'vars': {
'test': 'obj.status >= 400 && obj.status <= 499',
'synthetic_response': '${data.http.error-page-4xx.body}',
},
}
)
vcl_templated_snippets[name] = error_vcl_template.as_inclusion(name)
if fallbacks.get('5xx'):
template.populate_data(
DATA_TYPE_HTTP,
'error-page-5xx',
{
'url': '%s%s' % (errors['url'], fallbacks.get('5xx')),
}
)
name = 'error-page-vcl-5xx'
template.populate_data(
DATA_TYPE_TEMPLATE,
name,
{
'template': error_vcl_template_file,
'vars': {
'test': 'obj.status >= 500 && obj.status <= 599',
'synthetic_response': '${data.http.error-page-5xx.body}',
}
}
)
vcl_templated_snippets[name] = error_vcl_template.as_inclusion(name)
def _fastly_backend(hostname, name, request_condition=None, shield=None):
backend_resource = {
'address': hostname,
'name': name,
'port': 443,
'use_ssl': True,
'ssl_cert_hostname': hostname,
'ssl_sni_hostname': hostname,
'ssl_check_cert': True,
}
if request_condition:
backend_resource['request_condition'] = request_condition
if shield:
backend_resource['shield'] = shield
return backend_resource
def _fastly_request_setting(override):
request_setting_resource = {
'name': 'default',
# shouldn't need to replicate the defaults
# https://github.com/terraform-providers/terraform-provider-fastly/issues/50
# https://github.com/terraform-providers/terraform-provider-fastly/issues/67
'timer_support': True,
'xff': 'leave',
}
request_setting_resource.update(override)
return request_setting_resource
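# Illustrative example: _fastly_request_setting({'name': 'force-ssl',
# 'force_ssl': True}) returns the defaults merged with the override:
#   {'name': 'force-ssl', 'timer_support': True, 'xff': 'leave', 'force_ssl': True}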
def _generate_vcl_file(stackname, content, key, extension='vcl'):
"""
creates a VCL on the filesystem, for Terraform to dynamically load it on apply
content can be a string or any object that can be casted to a string
"""
with _open(stackname, key, extension=extension, mode='w') as fp:
fp.write(str(content))
return '${file("%s")}' % os.path.basename(fp.name)
def render_gcs(context, template):
if not context['gcs']:
return {}
for bucket_name, options in context['gcs'].items():
template.populate_resource('google_storage_bucket', bucket_name, block={
'name': bucket_name,
'location': 'us-east4',
'storage_class': 'REGIONAL',
'project': options['project'],
})
return template.to_dict()
def render_bigquery(context, template):
if not context['bigquery']:
return {}
tables = OrderedDict({})
for dataset_id, dataset_options in context['bigquery'].items():
for table_id, table_options in dataset_options['tables'].items():
table_options['dataset_id'] = dataset_id
table_options['project'] = dataset_options['project']
tables[table_id] = table_options
for dataset_id, options in context['bigquery'].items():
template.populate_resource('google_bigquery_dataset', dataset_id, block={
'dataset_id': dataset_id,
'project': options['project'],
})
needs_github_token = False
for table_id, table_options in tables.items():
schema = table_options['schema']
stackname = context['stackname']
fqrn = "%s_%s" % (table_options['dataset_id'], table_id) # 'fully qualified resource name'
if schema.startswith('https://'):
# remote schema, add a 'http' provider and have terraform pull it down for us
# https://www.terraform.io/docs/providers/http/data_source.html
block = {'url': schema}
schema_ref = '${data.http.%s.body}' % fqrn
if schema.startswith('https://raw.githubusercontent.com/'):
block['request_headers'] = {
'Authorization': 'token ${data.%s.%s.data["token"]}' % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GITHUB)
}
needs_github_token = True
template.populate_data(
DATA_TYPE_HTTP,
fqrn,
block=block
)
else:
# local schema. the `schema` is relative to `PROJECT_PATH`
schema_path = join(PROJECT_PATH, schema)
schema_file = os.path.basename(schema)
terraform_working_dir = join(TERRAFORM_DIR, stackname)
mkdir_p(terraform_working_dir)
shutil.copyfile(schema_path, join(terraform_working_dir, schema_file))
schema_ref = '${file("%s")}' % schema_file
template.populate_resource('google_bigquery_table', fqrn, block={
# this refers to the dataset resource to express the implicit dependency
# otherwise a table can be created before the dataset, which fails
'dataset_id': "${google_bigquery_dataset.%s.dataset_id}" % dataset_id, # "dataset"
'table_id': table_id, # "csv_report_380"
'project': table_options['project'], # "elife-data-pipeline"
'schema': schema_ref,
})
if needs_github_token:
# TODO: extract and reuse as it's good for all data.http Github source,
# not just for schemas
template.populate_data(DATA_TYPE_VAULT_GENERIC_SECRET, 'github', block={
'path': VAULT_PATH_GITHUB,
})
return template.to_dict()
def render_eks(context, template):
if not context['eks']:
return {}
template.populate_resource('aws_eks_cluster', 'main', block={
'name': context['stackname'],
'role_arn': '${aws_iam_role.master.arn}',
'vpc_config': {
'security_group_ids': ['${aws_security_group.master.id}'],
'subnet_ids': [context['eks']['subnet-id'], context['eks']['redundant-subnet-id']],
},
'depends_on': [
"aws_iam_role_policy_attachment.master_kubernetes",
"aws_iam_role_policy_attachment.master_ecs",
]
})
template.populate_resource('aws_iam_role', 'master', block={
'name': '%s--AmazonEKSMasterRole' % context['stackname'],
'assume_role_policy': json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}),
})
template.populate_resource('aws_iam_role_policy_attachment', 'master_kubernetes', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
'role': "${aws_iam_role.master.name}",
})
template.populate_resource('aws_iam_role_policy_attachment', 'master_ecs', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEKSServicePolicy",
'role': "${aws_iam_role.master.name}",
})
template.populate_resource('aws_security_group', 'master', block={
'name': 'project-with-eks--%s--master' % context['instance_id'],
'description': 'Cluster communication with worker nodes',
'vpc_id': context['aws']['vpc-id'],
'egress': {
'from_port': 0,
'to_port': 0,
'protocol': '-1',
'cidr_blocks': ['0.0.0.0/0'],
},
'tags': aws.generic_tags(context),
})
template.populate_resource('aws_security_group_rule', 'worker_to_master', block={
'description': 'Allow pods to communicate with the cluster API Server',
'from_port': 443,
'protocol': 'tcp',
'security_group_id': '${aws_security_group.master.id}',
'source_security_group_id': '${aws_security_group.worker.id}',
'to_port': 443,
'type': 'ingress',
})
template.populate_resource('aws_security_group', 'worker', block={
'name': 'project-with-eks--%s--worker' % context['instance_id'],
'description': 'Security group for all worker nodes in the cluster',
'vpc_id': context['aws']['vpc-id'],
'egress': {
'from_port': 0,
'to_port': 0,
'protocol': '-1',
'cidr_blocks': ['0.0.0.0/0'],
},
'tags': aws.generic_tags(context),
})
template.populate_resource('aws_security_group_rule', 'worker_to_worker', block={
'description': 'Allow worker nodes to communicate with each other',
'from_port': 0,
'protocol': '-1',
'security_group_id': '${aws_security_group.worker.id}',
'source_security_group_id': '${aws_security_group.worker.id}',
'to_port': 65535,
'type': 'ingress',
})
template.populate_resource('aws_security_group_rule', 'master_to_worker', block={
'description': 'Allow worker Kubelets and pods to receive communication from the cluster control plane',
'from_port': 1025,
'protocol': 'tcp',
'security_group_id': '${aws_security_group.worker.id}',
'source_security_group_id': '${aws_security_group.master.id}',
'to_port': 65535,
'type': 'ingress',
})
template.populate_resource('aws_security_group_rule', 'eks_public_to_worker', block={
'description': "Allow worker to expose NodePort services",
'from_port': 30000,
'protocol': 'tcp',
'security_group_id': '${aws_security_group.worker.id}',
'to_port': 32767,
'type': 'ingress',
'cidr_blocks': ["0.0.0.0/0"],
})
template.populate_resource('aws_iam_role', 'worker', block={
'name': '%s--AmazonEKSWorkerRole' % context['stackname'],
'assume_role_policy': json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}),
})
template.populate_resource('aws_iam_role_policy_attachment', 'worker_connect', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
'role': "${aws_iam_role.worker.name}",
})
template.populate_resource('aws_iam_role_policy_attachment', 'worker_cni', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
'role': "${aws_iam_role.worker.name}",
})
template.populate_resource('aws_iam_role_policy_attachment', 'worker_ecr', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
'role': "${aws_iam_role.worker.name}",
})
template.populate_resource('aws_iam_instance_profile', 'worker', block={
'name': '%s--worker' % context['stackname'],
'role': '${aws_iam_role.worker.name}'
})
# TODO: Helm may need an additional policy
template.populate_data(DATA_TYPE_AWS_AMI, 'worker', block={
'filter': {
'name': 'name',
'values': ['amazon-eks-node-v*'],
},
'most_recent': True,
'owners': [aws.ACCOUNT_EKS_AMI],
})
# EKS currently documents this required userdata for EKS worker nodes to
# properly configure Kubernetes applications on the EC2 instance.
# We utilize a Terraform local here to simplify Base64 encoding this
# information into the AutoScaling Launch Configuration.
# More information: https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html
template.populate_local('worker_userdata', """
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.main.endpoint}' --b64-cluster-ca '${aws_eks_cluster.main.certificate_authority.0.data}' '${aws_eks_cluster.main.name}'""")
template.populate_resource('aws_launch_configuration', 'worker', block={
'associate_public_ip_address': True,
'iam_instance_profile': '${aws_iam_instance_profile.worker.name}',
'image_id': '${data.aws_ami.worker.id}',
'instance_type': context['eks']['worker']['type'],
'name_prefix': '%s--worker' % context['stackname'],
'security_groups': ['${aws_security_group.worker.id}'],
'user_data_base64': '${base64encode(local.worker_userdata)}',
'lifecycle': {
'create_before_destroy': True,
},
})
def write_template(stackname, contents):
"optionally, store a terraform configuration file for the stack"
# if the template isn't empty ...?
if json.loads(contents):
with _open(stackname, 'generated', mode='w') as fp:
fp.write(contents)
return fp.name
def read_template(stackname):
with _open(stackname, 'generated', mode='r') as fp:
return fp.read()
class TerraformTemplateError(RuntimeError):
pass
class TerraformTemplate():
def __init__(self, resource=None, data=None, locals=None):
if not resource:
resource = OrderedDict()
self.resource = resource
if not data:
data = OrderedDict()
self.data = data
if not locals:
locals = OrderedDict()
self.locals = locals
# for naming see https://www.terraform.io/docs/configuration/resources.html#syntax
def populate_resource(self, type, name, key=None, block=None):
if not type in self.resource:
self.resource[type] = OrderedDict()
target = self.resource[type]
if key:
if not name in target:
target[name] = OrderedDict()
if key in target[name]:
raise TerraformTemplateError(
"Resource %s being overwritten (%s)" % ((type, name, key), target[name][key])
)
target[name][key] = block
else:
target[name] = block
# TODO: optional `key`?
def populate_resource_element(self, type, name, key, block=None):
if not type in self.resource:
self.resource[type] = OrderedDict()
target = self.resource[type]
if not name in target:
target[name] = OrderedDict()
if not key in target[name]:
target[name][key] = []
target[name][key].append(block)
def populate_data(self, type, name, block=None):
if not type in self.data:
self.data[type] = OrderedDict()
if name in self.data[type]:
raise TerraformTemplateError(
"Data %s being overwritten (%s)" % ((type, name), self.data[type][name])
)
self.data[type][name] = block
def populate_local(self, name, value):
self.locals[name] = value
def to_dict(self):
result = {}
if self.resource:
result['resource'] = self.resource
if self.data:
result['data'] = self.data
if self.locals:
result['locals'] = self.locals
return result
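# A minimal usage sketch (illustrative only; the resource type and names below are
# hypothetical): the populate_* calls accumulate nested OrderedDicts which to_dict()
# exposes under 'resource', 'data' and 'locals', ready for json.dumps() to turn into
# a *.tf.json document.
#
#   t = TerraformTemplate()
#   t.populate_resource('aws_s3_bucket', 'example', block={'bucket': 'example-bucket'})
#   t.populate_local('greeting', 'hello')
#   json.dumps(t.to_dict())
#   # => '{"resource": {"aws_s3_bucket": {"example": {"bucket": "example-bucket"}}},
#   #      "locals": {"greeting": "hello"}}'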
class TerraformDelta(namedtuple('TerraformDelta', ['plan_output'])):
"""represents a delta between and old and new Terraform generated template, showing which resources are being added, updated, or removed.
Extends the namedtuple-generated class to add custom methods."""
def __str__(self):
return self.plan_output
def generate_delta(new_context):
    # simplification: unless one of the managed services (Fastly, GCS, BigQuery, EKS) is involved, the TerraformDelta will be empty
# this should eventually be removed, for example after test_buildercore_cfngen tests have been ported to test_buildercore_cloudformation
# TODO: extract list of services in a constant to share with @only_if, at least
# TODO: what if the new context doesn't have fastly, but it was there before?
if not new_context['fastly'] and not new_context['gcs'] and not new_context['bigquery'] and not new_context['eks']:
return None
new_template = render(new_context)
write_template(new_context['stackname'], new_template)
return plan(new_context)
@only_if('fastly', 'gcs', 'bigquery', 'eks')
def bootstrap(stackname, context):
plan(context)
update(stackname, context)
def plan(context):
terraform = init(context['stackname'], context)
def _generate_plan():
terraform.plan(input=False, no_color=IsFlagged, capture_output=False, raise_on_error=True, detailed_exitcode=IsNotFlagged, out='out.plan')
return 'out.plan'
def _explain_plan(plan_filename):
return_code, stdout, stderr = terraform.plan(plan_filename, input=False, no_color=IsFlagged, raise_on_error=True, detailed_exitcode=IsNotFlagged)
ensure(return_code == 0, "Exit code of `terraform plan out.plan` should be 0, not %s" % return_code)
# TODO: may not be empty if TF_LOG is used
ensure(stderr == '', "Stderr of `terraform plan out.plan` should be empty:\n%s" % stderr)
return _clean_stdout(stdout)
return TerraformDelta(_explain_plan(_generate_plan()))
def _clean_stdout(stdout):
stdout = re.sub(re.compile(r"The plan command .* as an argument.", re.MULTILINE | re.DOTALL), "", stdout)
stdout = re.sub(re.compile(r"Note: .* is subsequently run.", re.MULTILINE | re.DOTALL), "", stdout)
stdout = re.sub(re.compile(r"\n+", re.MULTILINE), "\n", stdout)
return stdout
def init(stackname, context):
working_dir = join(TERRAFORM_DIR, stackname) # ll: ./.cfn/terraform/project--prod/
terraform = Terraform(working_dir=working_dir)
with _open(stackname, 'backend', mode='w') as fp:
fp.write(json.dumps({
'terraform': {
'backend': {
's3': {
'bucket': BUILDER_BUCKET,
'key': 'terraform/%s.tfstate' % stackname,
'region': BUILDER_REGION,
},
},
},
}))
with _open(stackname, 'providers', mode='w') as fp:
# TODO: possibly remove unused providers
# Terraform already prunes them when running, but would
# simplify the .cfn/terraform/$stackname/ files
fp.write(json.dumps({
'provider': {
'fastly': {
# exact version constraint
'version': "= %s" % PROVIDER_FASTLY_VERSION,
'api_key': "${data.%s.%s.data[\"api_key\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_FASTLY_API_KEY),
},
'aws': {
# TODO: pin version constraint
'version': "= %s" % '2.3.0',
'region': context['aws']['region'],
},
'google': {
'version': "= %s" % '1.20.0',
'region': 'us-east4',
'credentials': "${data.%s.%s.data[\"credentials\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCP_API_KEY),
},
'vault': {
'address': context['vault']['address'],
# exact version constraint
'version': "= %s" % PROVIDER_VAULT_VERSION,
},
},
'data': {
DATA_TYPE_VAULT_GENERIC_SECRET: {
# TODO: this should not be used unless Fastly is involved
DATA_NAME_VAULT_FASTLY_API_KEY: {
'path': VAULT_PATH_FASTLY,
},
# TODO: this should not be used unless GCP is involved
DATA_NAME_VAULT_GCP_API_KEY: {
'path': VAULT_PATH_GCP,
},
},
},
}))
terraform.init(input=False, capture_output=False, raise_on_error=True)
return terraform
def update_template(stackname):
context = load_context(stackname)
update(stackname, context)
# TODO: extract?
@only_if('fastly', 'gcs', 'bigquery', 'eks')
def update(stackname, context):
terraform = init(stackname, context)
terraform.apply('out.plan', input=False, capture_output=False, raise_on_error=True)
@only_if('fastly', 'gcs', 'bigquery', 'eks')
def destroy(stackname, context):
terraform = init(stackname, context)
terraform.destroy(input=False, capture_output=False, raise_on_error=True)
terraform_directory = join(TERRAFORM_DIR, stackname)
shutil.rmtree(terraform_directory)
def _file_path_for_generation(stackname, name, extension='tf.json'):
"builds a path for a file to be placed in conf.TERRAFORM_DIR"
return join(TERRAFORM_DIR, stackname, '%s.%s' % (name, extension))
def _open(stackname, name, extension='tf.json', mode='r'):
"`open`s a file in the conf.TERRAFORM_DIR belonging to given `stackname` (./.cfn/terraform/$stackname/)"
terraform_directory = join(TERRAFORM_DIR, stackname)
mkdir_p(terraform_directory)
deprecated_path = join(TERRAFORM_DIR, stackname, '%s.tf' % name)
if os.path.exists(deprecated_path):
os.remove(deprecated_path)
return open(_file_path_for_generation(stackname, name, extension), mode)
Extract buildercore.terraform.MANAGED_SERVICES
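A sketch of the extraction this change introduces, using names taken from the updated module below: the list of Terraform-managed services is defined once and shared between the @only_if guards and generate_delta, rather than being repeated at each call site.

    MANAGED_SERVICES = ['fastly', 'gcs', 'bigquery', 'eks']
    only_if_managed_services_are_present = only_if(*MANAGED_SERVICES)

    @only_if_managed_services_are_present
    def update(stackname, context):
        ...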
from collections import namedtuple, OrderedDict
import os, re, shutil, json
from os.path import join
from python_terraform import Terraform, IsFlagged, IsNotFlagged
from .config import BUILDER_BUCKET, BUILDER_REGION, TERRAFORM_DIR, PROJECT_PATH
from .context_handler import only_if, load_context
from .utils import ensure, mkdir_p
from . import aws, fastly
MANAGED_SERVICES = ['fastly', 'gcs', 'bigquery', 'eks']
only_if_managed_services_are_present = only_if(*MANAGED_SERVICES)
EMPTY_TEMPLATE = '{}'
PROVIDER_FASTLY_VERSION = '0.4.0'
PROVIDER_VAULT_VERSION = '1.3'
RESOURCE_TYPE_FASTLY = 'fastly_service_v1'
RESOURCE_NAME_FASTLY = 'fastly-cdn'
DATA_TYPE_VAULT_GENERIC_SECRET = 'vault_generic_secret'
DATA_TYPE_HTTP = 'http'
DATA_TYPE_TEMPLATE = 'template_file'
DATA_TYPE_AWS_AMI = 'aws_ami'
DATA_NAME_VAULT_GCS_LOGGING = 'fastly-gcs-logging'
DATA_NAME_VAULT_GCP_LOGGING = 'fastly-gcp-logging'
DATA_NAME_VAULT_FASTLY_API_KEY = 'fastly'
DATA_NAME_VAULT_GCP_API_KEY = 'gcp'
DATA_NAME_VAULT_GITHUB = 'github'
# keys to look up in Vault
# these cannot be changed without first writing the new values into Vault, e.g.:
# VAULT_ADDR=https://...:8200 vault write secret/builder/apikey/fastly-gcs-logging email=... secret_key=@~/file.json
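# the generated providers file (see init() below) then references these secrets from
# Terraform via the vault_generic_secret data source, e.g. the Fastly API key is read as:
#   ${data.vault_generic_secret.fastly.data["api_key"]}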
VAULT_PATH_FASTLY = 'secret/builder/apikey/fastly'
VAULT_PATH_FASTLY_GCS_LOGGING = 'secret/builder/apikey/fastly-gcs-logging'
VAULT_PATH_FASTLY_GCP_LOGGING = 'secret/builder/apikey/fastly-gcp-logging'
VAULT_PATH_GCP = 'secret/builder/apikey/gcp'
VAULT_PATH_GITHUB = 'secret/builder/apikey/github'
FASTLY_GZIP_TYPES = ['text/html', 'application/x-javascript', 'text/css', 'application/javascript',
'text/javascript', 'application/json', 'application/vnd.ms-fontobject',
'application/x-font-opentype', 'application/x-font-truetype',
'application/x-font-ttf', 'application/xml', 'font/eot', 'font/opentype',
'font/otf', 'image/svg+xml', 'image/vnd.microsoft.icon', 'text/plain',
'text/xml']
FASTLY_GZIP_EXTENSIONS = ['css', 'js', 'html', 'eot', 'ico', 'otf', 'ttf', 'json']
FASTLY_LOG_FORMAT = """{
"timestamp":"%{begin:%Y-%m-%dT%H:%M:%S}t",
"time_elapsed":%{time.elapsed.usec}V,
"object_hits": %{obj.hits}V,
"object_lastuse": "%{obj.lastuse}V",
"is_tls":%{if(req.is_ssl, "true", "false")}V,
"client_ip":"%{req.http.Fastly-Client-IP}V",
"forwarded_for": "%{req.http.X-Forwarded-For}V",
"geo_city":"%{client.geo.city}V",
"geo_country_code":"%{client.geo.country_code}V",
"pop_datacenter": "%{server.datacenter}V",
"pop_region": "%{server.region}V",
"request":"%{req.request}V",
"original_host":"%{req.http.X-Forwarded-Host}V",
"host":"%{req.http.Host}V",
"url":"%{cstr_escape(req.url)}V",
"request_referer":"%{cstr_escape(req.http.Referer)}V",
"request_user_agent":"%{cstr_escape(req.http.User-Agent)}V",
"request_accept":"%{cstr_escape(req.http.Accept)}V",
"request_accept_language":"%{cstr_escape(req.http.Accept-Language)}V",
"request_accept_charset":"%{cstr_escape(req.http.Accept-Charset)}V",
"response_status": "%>s",
"cache_status":"%{regsub(fastly_info.state, "^(HIT-(SYNTH)|(HITPASS|HIT|MISS|PASS|ERROR|PIPE)).*", "\\\\2\\\\3") }V"
}"""
# Fastly proprietary evolutions of the standard Apache log format
# https://docs.fastly.com/guides/streaming-logs/custom-log-formats#advantages-of-using-the-version-2-custom-log-format
# It's in the API:
# https://docs.fastly.com/api/logging#logging_gcs
# Not supported yet by Terraform however:
# https://www.terraform.io/docs/providers/fastly/r/service_v1.html#name-12
# FASTLY_LOG_FORMAT_VERSION = 2
# what to prefix lines with, syslog heritage
# see https://docs.fastly.com/guides/streaming-logs/changing-log-line-formats#available-message-formats
FASTLY_LOG_LINE_PREFIX = 'blank' # no prefix
# keeps different logging configurations unique in the syslog implementation
# used by Fastly, avoiding
# fastly_service_v1.fastly-cdn: 409 - Conflict:
# Title: Duplicate record
# Detail: Duplicate logging_syslog: 'default'
FASTLY_LOG_UNIQUE_IDENTIFIERS = {
'gcs': 'default', # historically the first one
'bigquery': 'bigquery',
}
# at the moment VCL snippets are unsupported by the provider; this can be worked
# around by using a full VCL
# https://github.com/terraform-providers/terraform-provider-fastly/issues/7 tracks when snippets could become available in Terraform
FASTLY_MAIN_VCL_KEY = 'main'
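# Illustrative shape of what render_fastly() generates for the snippets (the snippet
# name shown is hypothetical): each snippet becomes one element of the service's 'vcl'
# list and its include directive is spliced into the main VCL, which is the only block
# flagged with 'main': True, e.g.
#
#   'vcl': [
#       {'name': 'some-snippet', 'content': '${file("some-snippet.vcl")}'},
#       {'name': 'main', 'content': '${file("main.vcl")}', 'main': True},
#   ]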
def render(context):
template = TerraformTemplate()
fn_list = [
render_fastly,
render_gcs,
render_bigquery,
render_eks,
]
for fn in fn_list:
fn(context, template)
generated_template = template.to_dict()
if not generated_template:
return EMPTY_TEMPLATE
return json.dumps(generated_template)
def render_fastly(context, template):
if not context['fastly']:
return {}
backends = []
conditions = []
request_settings = []
headers = []
vcl_constant_snippets = context['fastly']['vcl']
vcl_templated_snippets = OrderedDict()
request_settings.append(_fastly_request_setting({
'name': 'force-ssl',
'force_ssl': True,
}))
all_allowed_subdomains = context['fastly']['subdomains'] + context['fastly']['subdomains-without-dns']
if context['fastly']['backends']:
for name, backend in context['fastly']['backends'].items():
if backend.get('condition'):
condition_name = 'backend-%s-condition' % name
conditions.append({
'name': condition_name,
'statement': backend.get('condition'),
'type': 'REQUEST',
})
request_settings.append(_fastly_request_setting({
'name': 'backend-%s-request-settings' % name,
'request_condition': condition_name,
}))
backend_condition_name = condition_name
else:
backend_condition_name = None
shield = backend['shield'].get('pop')
backends.append(_fastly_backend(
backend['hostname'],
name=name,
request_condition=backend_condition_name,
shield=shield
))
else:
shield = context['fastly']['shield'].get('pop')
backends.append(_fastly_backend(
context['full_hostname'],
name=context['stackname'],
shield=shield
))
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
block={
'name': context['stackname'],
'domain': [
{'name': subdomain} for subdomain in all_allowed_subdomains
],
'backend': backends,
'default_ttl': context['fastly']['default-ttl'],
'gzip': {
'name': 'default',
# shouldn't need to replicate the defaults
# https://github.com/terraform-providers/terraform-provider-fastly/issues/66
'content_types': sorted(FASTLY_GZIP_TYPES),
'extensions': sorted(FASTLY_GZIP_EXTENSIONS),
},
'force_destroy': True,
'vcl': []
}
)
if context['fastly']['healthcheck']:
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'healthcheck',
block={
'name': 'default',
'host': context['full_hostname'],
'path': context['fastly']['healthcheck']['path'],
'check_interval': context['fastly']['healthcheck']['check-interval'],
'timeout': context['fastly']['healthcheck']['timeout'],
}
)
for b in template.resource[RESOURCE_TYPE_FASTLY][RESOURCE_NAME_FASTLY]['backend']:
b['healthcheck'] = 'default'
_render_fastly_vcl_templates(context, template, vcl_templated_snippets)
_render_fastly_errors(context, template, vcl_templated_snippets)
if context['fastly']['gcslogging']:
gcslogging = context['fastly']['gcslogging']
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'gcslogging',
block={
'name': FASTLY_LOG_UNIQUE_IDENTIFIERS['gcs'],
'bucket_name': gcslogging['bucket'],
# TODO: validate it starts with /
'path': gcslogging['path'],
'period': gcslogging.get('period', 3600),
'format': FASTLY_LOG_FORMAT,
# not supported yet
#'format_version': FASTLY_LOG_FORMAT_VERSION,
'message_type': FASTLY_LOG_LINE_PREFIX,
'email': "${data.%s.%s.data[\"email\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCS_LOGGING),
'secret_key': "${data.%s.%s.data[\"secret_key\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCS_LOGGING),
}
)
template.populate_data(
DATA_TYPE_VAULT_GENERIC_SECRET,
DATA_NAME_VAULT_GCS_LOGGING,
block={
'path': VAULT_PATH_FASTLY_GCS_LOGGING,
}
)
if context['fastly']['bigquerylogging']:
bigquerylogging = context['fastly']['bigquerylogging']
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'bigquerylogging',
block={
'name': FASTLY_LOG_UNIQUE_IDENTIFIERS['bigquery'],
'project_id': bigquerylogging['project'],
'dataset': bigquerylogging['dataset'],
'table': bigquerylogging['table'],
'format': FASTLY_LOG_FORMAT,
'email': "${data.%s.%s.data[\"email\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCP_LOGGING),
'secret_key': "${data.%s.%s.data[\"secret_key\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCP_LOGGING),
}
)
template.populate_data(
DATA_TYPE_VAULT_GENERIC_SECRET,
DATA_NAME_VAULT_GCP_LOGGING,
{
'path': VAULT_PATH_FASTLY_GCP_LOGGING,
}
)
if vcl_constant_snippets or vcl_templated_snippets:
# constant snippets
[template.populate_resource_element(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'vcl',
{
'name': snippet_name,
'content': _generate_vcl_file(context['stackname'], fastly.VCL_SNIPPETS[snippet_name].content, snippet_name),
}) for snippet_name in vcl_constant_snippets]
# templated snippets
[template.populate_resource_element(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'vcl',
{
'name': snippet_name,
'content': '${data.template_file.%s.rendered}' % snippet_name,
}) for snippet_name in vcl_templated_snippets]
# main
linked_main_vcl = fastly.MAIN_VCL_TEMPLATE
inclusions = [fastly.VCL_SNIPPETS[name].as_inclusion() for name in vcl_constant_snippets] + list(vcl_templated_snippets.values())
inclusions.reverse()
for i in inclusions:
linked_main_vcl = i.insert_include(linked_main_vcl)
template.populate_resource_element(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'vcl',
block={
'name': FASTLY_MAIN_VCL_KEY,
'content': _generate_vcl_file(
context['stackname'],
linked_main_vcl,
FASTLY_MAIN_VCL_KEY
),
'main': True,
}
)
if context['fastly']['surrogate-keys']:
for name, surrogate in context['fastly']['surrogate-keys'].items():
for sample_name, sample in surrogate.get('samples', {}).items():
# check sample['url'] parsed leads to sample['value']
match = re.match(surrogate['url'], sample['path'])
ensure(match is not None, "Regex %s does not match sample %s" % (surrogate['url'], sample))
sample_actual = match.expand(surrogate['value'])
ensure(sample_actual == sample['expected'], "Incorrect generated surrogate key `%s` for sample %s" % (sample_actual, sample))
cache_condition = {
'name': 'condition-surrogate-%s' % name,
'statement': 'req.url ~ "%s"' % surrogate['url'],
'type': 'CACHE',
}
conditions.append(cache_condition)
headers.append({
'name': 'surrogate-keys %s' % name,
'destination': "http.surrogate-key",
'source': 'regsub(req.url, "%s", "%s")' % (surrogate['url'], surrogate['value']),
'type': 'cache',
'action': 'set',
'ignore_if_set': True,
'cache_condition': cache_condition['name'],
})
if conditions:
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'condition',
block=conditions
)
if headers:
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'header',
block=headers
)
if request_settings:
template.populate_resource(
RESOURCE_TYPE_FASTLY,
RESOURCE_NAME_FASTLY,
'request_setting',
block=request_settings
)
return template.to_dict()
def _render_fastly_vcl_templates(context, template, vcl_templated_snippets):
for name, variables in context['fastly']['vcl-templates'].items():
vcl_template = fastly.VCL_TEMPLATES[name]
vcl_template_file = _generate_vcl_file(
context['stackname'],
vcl_template.content,
vcl_template.name,
extension='vcl.tpl'
)
template.populate_data(
DATA_TYPE_TEMPLATE,
name,
{
'template': vcl_template_file,
'vars': variables,
}
)
vcl_templated_snippets[name] = vcl_template.as_inclusion()
def _render_fastly_errors(context, template, vcl_templated_snippets):
if context['fastly']['errors']:
error_vcl_template = fastly.VCL_TEMPLATES['error-page']
error_vcl_template_file = _generate_vcl_file(
context['stackname'],
error_vcl_template.content,
error_vcl_template.name,
extension='vcl.tpl'
)
errors = context['fastly']['errors']
codes = errors.get('codes', {})
fallbacks = errors.get('fallbacks', {})
for code, path in codes.items():
template.populate_data(
DATA_TYPE_HTTP,
'error-page-%d' % code,
block={
'url': '%s%s' % (errors['url'], path),
}
)
name = 'error-page-vcl-%d' % code
template.populate_data(
DATA_TYPE_TEMPLATE,
name,
{
'template': error_vcl_template_file,
'vars': {
'test': 'obj.status == %s' % code,
'synthetic_response': '${data.http.error-page-%s.body}' % code,
},
}
)
vcl_templated_snippets[name] = error_vcl_template.as_inclusion(name)
if fallbacks.get('4xx'):
template.populate_data(
DATA_TYPE_HTTP,
'error-page-4xx',
{
'url': '%s%s' % (errors['url'], fallbacks.get('4xx')),
}
)
name = 'error-page-vcl-4xx'
template.populate_data(
DATA_TYPE_TEMPLATE,
name,
{
'template': error_vcl_template_file,
'vars': {
'test': 'obj.status >= 400 && obj.status <= 499',
'synthetic_response': '${data.http.error-page-4xx.body}',
},
}
)
vcl_templated_snippets[name] = error_vcl_template.as_inclusion(name)
if fallbacks.get('5xx'):
template.populate_data(
DATA_TYPE_HTTP,
'error-page-5xx',
{
'url': '%s%s' % (errors['url'], fallbacks.get('5xx')),
}
)
name = 'error-page-vcl-5xx'
template.populate_data(
DATA_TYPE_TEMPLATE,
name,
{
'template': error_vcl_template_file,
'vars': {
'test': 'obj.status >= 500 && obj.status <= 599',
'synthetic_response': '${data.http.error-page-5xx.body}',
}
}
)
vcl_templated_snippets[name] = error_vcl_template.as_inclusion(name)
def _fastly_backend(hostname, name, request_condition=None, shield=None):
backend_resource = {
'address': hostname,
'name': name,
'port': 443,
'use_ssl': True,
'ssl_cert_hostname': hostname,
'ssl_sni_hostname': hostname,
'ssl_check_cert': True,
}
if request_condition:
backend_resource['request_condition'] = request_condition
if shield:
backend_resource['shield'] = shield
return backend_resource
def _fastly_request_setting(override):
request_setting_resource = {
'name': 'default',
# shouldn't need to replicate the defaults
# https://github.com/terraform-providers/terraform-provider-fastly/issues/50
# https://github.com/terraform-providers/terraform-provider-fastly/issues/67
'timer_support': True,
'xff': 'leave',
}
request_setting_resource.update(override)
return request_setting_resource
def _generate_vcl_file(stackname, content, key, extension='vcl'):
"""
creates a VCL on the filesystem, for Terraform to dynamically load it on apply
content can be a string or any object that can be casted to a string
"""
with _open(stackname, key, extension=extension, mode='w') as fp:
fp.write(str(content))
return '${file("%s")}' % os.path.basename(fp.name)
def render_gcs(context, template):
if not context['gcs']:
return {}
for bucket_name, options in context['gcs'].items():
template.populate_resource('google_storage_bucket', bucket_name, block={
'name': bucket_name,
'location': 'us-east4',
'storage_class': 'REGIONAL',
'project': options['project'],
})
return template.to_dict()
def render_bigquery(context, template):
if not context['bigquery']:
return {}
tables = OrderedDict({})
for dataset_id, dataset_options in context['bigquery'].items():
for table_id, table_options in dataset_options['tables'].items():
table_options['dataset_id'] = dataset_id
table_options['project'] = dataset_options['project']
tables[table_id] = table_options
for dataset_id, options in context['bigquery'].items():
template.populate_resource('google_bigquery_dataset', dataset_id, block={
'dataset_id': dataset_id,
'project': options['project'],
})
needs_github_token = False
for table_id, table_options in tables.items():
schema = table_options['schema']
stackname = context['stackname']
fqrn = "%s_%s" % (table_options['dataset_id'], table_id) # 'fully qualified resource name'
if schema.startswith('https://'):
# remote schema, add a 'http' provider and have terraform pull it down for us
# https://www.terraform.io/docs/providers/http/data_source.html
block = {'url': schema}
schema_ref = '${data.http.%s.body}' % fqrn
if schema.startswith('https://raw.githubusercontent.com/'):
block['request_headers'] = {
'Authorization': 'token ${data.%s.%s.data["token"]}' % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GITHUB)
}
needs_github_token = True
template.populate_data(
DATA_TYPE_HTTP,
fqrn,
block=block
)
else:
# local schema. the `schema` is relative to `PROJECT_PATH`
schema_path = join(PROJECT_PATH, schema)
schema_file = os.path.basename(schema)
terraform_working_dir = join(TERRAFORM_DIR, stackname)
mkdir_p(terraform_working_dir)
shutil.copyfile(schema_path, join(terraform_working_dir, schema_file))
schema_ref = '${file("%s")}' % schema_file
template.populate_resource('google_bigquery_table', fqrn, block={
# this refers to the dataset resource to express the implicit dependency
# otherwise a table can be created before the dataset, which fails
'dataset_id': "${google_bigquery_dataset.%s.dataset_id}" % dataset_id, # "dataset"
'table_id': table_id, # "csv_report_380"
'project': table_options['project'], # "elife-data-pipeline"
'schema': schema_ref,
})
if needs_github_token:
# TODO: extract and reuse as it's good for all data.http Github source,
# not just for schemas
template.populate_data(DATA_TYPE_VAULT_GENERIC_SECRET, 'github', block={
'path': VAULT_PATH_GITHUB,
})
return template.to_dict()
def render_eks(context, template):
if not context['eks']:
return {}
template.populate_resource('aws_eks_cluster', 'main', block={
'name': context['stackname'],
'role_arn': '${aws_iam_role.master.arn}',
'vpc_config': {
'security_group_ids': ['${aws_security_group.master.id}'],
'subnet_ids': [context['eks']['subnet-id'], context['eks']['redundant-subnet-id']],
},
'depends_on': [
"aws_iam_role_policy_attachment.master_kubernetes",
"aws_iam_role_policy_attachment.master_ecs",
]
})
template.populate_resource('aws_iam_role', 'master', block={
'name': '%s--AmazonEKSMasterRole' % context['stackname'],
'assume_role_policy': json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}),
})
template.populate_resource('aws_iam_role_policy_attachment', 'master_kubernetes', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
'role': "${aws_iam_role.master.name}",
})
template.populate_resource('aws_iam_role_policy_attachment', 'master_ecs', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEKSServicePolicy",
'role': "${aws_iam_role.master.name}",
})
template.populate_resource('aws_security_group', 'master', block={
'name': 'project-with-eks--%s--master' % context['instance_id'],
'description': 'Cluster communication with worker nodes',
'vpc_id': context['aws']['vpc-id'],
'egress': {
'from_port': 0,
'to_port': 0,
'protocol': '-1',
'cidr_blocks': ['0.0.0.0/0'],
},
'tags': aws.generic_tags(context),
})
template.populate_resource('aws_security_group_rule', 'worker_to_master', block={
'description': 'Allow pods to communicate with the cluster API Server',
'from_port': 443,
'protocol': 'tcp',
'security_group_id': '${aws_security_group.master.id}',
'source_security_group_id': '${aws_security_group.worker.id}',
'to_port': 443,
'type': 'ingress',
})
template.populate_resource('aws_security_group', 'worker', block={
'name': 'project-with-eks--%s--worker' % context['instance_id'],
'description': 'Security group for all worker nodes in the cluster',
'vpc_id': context['aws']['vpc-id'],
'egress': {
'from_port': 0,
'to_port': 0,
'protocol': '-1',
'cidr_blocks': ['0.0.0.0/0'],
},
'tags': aws.generic_tags(context),
})
template.populate_resource('aws_security_group_rule', 'worker_to_worker', block={
'description': 'Allow worker nodes to communicate with each other',
'from_port': 0,
'protocol': '-1',
'security_group_id': '${aws_security_group.worker.id}',
'source_security_group_id': '${aws_security_group.worker.id}',
'to_port': 65535,
'type': 'ingress',
})
template.populate_resource('aws_security_group_rule', 'master_to_worker', block={
'description': 'Allow worker Kubelets and pods to receive communication from the cluster control plane',
'from_port': 1025,
'protocol': 'tcp',
'security_group_id': '${aws_security_group.worker.id}',
'source_security_group_id': '${aws_security_group.master.id}',
'to_port': 65535,
'type': 'ingress',
})
template.populate_resource('aws_security_group_rule', 'eks_public_to_worker', block={
'description': "Allow worker to expose NodePort services",
'from_port': 30000,
'protocol': 'tcp',
'security_group_id': '${aws_security_group.worker.id}',
'to_port': 32767,
'type': 'ingress',
'cidr_blocks': ["0.0.0.0/0"],
})
template.populate_resource('aws_iam_role', 'worker', block={
'name': '%s--AmazonEKSWorkerRole' % context['stackname'],
'assume_role_policy': json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}),
})
template.populate_resource('aws_iam_role_policy_attachment', 'worker_connect', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
'role': "${aws_iam_role.worker.name}",
})
template.populate_resource('aws_iam_role_policy_attachment', 'worker_cni', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
'role': "${aws_iam_role.worker.name}",
})
template.populate_resource('aws_iam_role_policy_attachment', 'worker_ecr', block={
'policy_arn': "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
'role': "${aws_iam_role.worker.name}",
})
template.populate_resource('aws_iam_instance_profile', 'worker', block={
'name': '%s--worker' % context['stackname'],
'role': '${aws_iam_role.worker.name}'
})
# TODO: Helm may need an additional policy
template.populate_data(DATA_TYPE_AWS_AMI, 'worker', block={
'filter': {
'name': 'name',
'values': ['amazon-eks-node-v*'],
},
'most_recent': True,
'owners': [aws.ACCOUNT_EKS_AMI],
})
# EKS currently documents this required userdata for EKS worker nodes to
# properly configure Kubernetes applications on the EC2 instance.
# We utilize a Terraform local here to simplify Base64 encoding this
# information into the AutoScaling Launch Configuration.
# More information: https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html
template.populate_local('worker_userdata', """
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.main.endpoint}' --b64-cluster-ca '${aws_eks_cluster.main.certificate_authority.0.data}' '${aws_eks_cluster.main.name}'""")
template.populate_resource('aws_launch_configuration', 'worker', block={
'associate_public_ip_address': True,
'iam_instance_profile': '${aws_iam_instance_profile.worker.name}',
'image_id': '${data.aws_ami.worker.id}',
'instance_type': context['eks']['worker']['type'],
'name_prefix': '%s--worker' % context['stackname'],
'security_groups': ['${aws_security_group.worker.id}'],
'user_data_base64': '${base64encode(local.worker_userdata)}',
'lifecycle': {
'create_before_destroy': True,
},
})
def write_template(stackname, contents):
"optionally, store a terraform configuration file for the stack"
# if the template isn't empty ...?
if json.loads(contents):
with _open(stackname, 'generated', mode='w') as fp:
fp.write(contents)
return fp.name
def read_template(stackname):
with _open(stackname, 'generated', mode='r') as fp:
return fp.read()
class TerraformTemplateError(RuntimeError):
pass
class TerraformTemplate():
def __init__(self, resource=None, data=None, locals=None):
if not resource:
resource = OrderedDict()
self.resource = resource
if not data:
data = OrderedDict()
self.data = data
if not locals:
locals = OrderedDict()
self.locals = locals
# for naming see https://www.terraform.io/docs/configuration/resources.html#syntax
def populate_resource(self, type, name, key=None, block=None):
if not type in self.resource:
self.resource[type] = OrderedDict()
target = self.resource[type]
if key:
if not name in target:
target[name] = OrderedDict()
if key in target[name]:
raise TerraformTemplateError(
"Resource %s being overwritten (%s)" % ((type, name, key), target[name][key])
)
target[name][key] = block
else:
target[name] = block
# TODO: optional `key`?
def populate_resource_element(self, type, name, key, block=None):
if not type in self.resource:
self.resource[type] = OrderedDict()
target = self.resource[type]
if not name in target:
target[name] = OrderedDict()
if not key in target[name]:
target[name][key] = []
target[name][key].append(block)
def populate_data(self, type, name, block=None):
if not type in self.data:
self.data[type] = OrderedDict()
if name in self.data[type]:
raise TerraformTemplateError(
"Data %s being overwritten (%s)" % ((type, name), self.data[type][name])
)
self.data[type][name] = block
def populate_local(self, name, value):
self.locals[name] = value
def to_dict(self):
result = {}
if self.resource:
result['resource'] = self.resource
if self.data:
result['data'] = self.data
if self.locals:
result['locals'] = self.locals
return result
class TerraformDelta(namedtuple('TerraformDelta', ['plan_output'])):
"""represents a delta between and old and new Terraform generated template, showing which resources are being added, updated, or removed.
Extends the namedtuple-generated class to add custom methods."""
def __str__(self):
return self.plan_output
def generate_delta(new_context):
    # simplification: unless one of the MANAGED_SERVICES is involved, the TerraformDelta will be empty
# this should eventually be removed, for example after test_buildercore_cfngen tests have been ported to test_buildercore_cloudformation
# TODO: what if the new context doesn't have fastly, but it was there before?
managed_services = [k for k in MANAGED_SERVICES if new_context[k]]
if not managed_services:
return None
new_template = render(new_context)
write_template(new_context['stackname'], new_template)
return plan(new_context)
@only_if_managed_services_are_present
def bootstrap(stackname, context):
plan(context)
update(stackname, context)
def plan(context):
terraform = init(context['stackname'], context)
def _generate_plan():
terraform.plan(input=False, no_color=IsFlagged, capture_output=False, raise_on_error=True, detailed_exitcode=IsNotFlagged, out='out.plan')
return 'out.plan'
def _explain_plan(plan_filename):
return_code, stdout, stderr = terraform.plan(plan_filename, input=False, no_color=IsFlagged, raise_on_error=True, detailed_exitcode=IsNotFlagged)
ensure(return_code == 0, "Exit code of `terraform plan out.plan` should be 0, not %s" % return_code)
# TODO: may not be empty if TF_LOG is used
ensure(stderr == '', "Stderr of `terraform plan out.plan` should be empty:\n%s" % stderr)
return _clean_stdout(stdout)
return TerraformDelta(_explain_plan(_generate_plan()))
def _clean_stdout(stdout):
stdout = re.sub(re.compile(r"The plan command .* as an argument.", re.MULTILINE | re.DOTALL), "", stdout)
stdout = re.sub(re.compile(r"Note: .* is subsequently run.", re.MULTILINE | re.DOTALL), "", stdout)
stdout = re.sub(re.compile(r"\n+", re.MULTILINE), "\n", stdout)
return stdout
def init(stackname, context):
working_dir = join(TERRAFORM_DIR, stackname) # ll: ./.cfn/terraform/project--prod/
terraform = Terraform(working_dir=working_dir)
with _open(stackname, 'backend', mode='w') as fp:
fp.write(json.dumps({
'terraform': {
'backend': {
's3': {
'bucket': BUILDER_BUCKET,
'key': 'terraform/%s.tfstate' % stackname,
'region': BUILDER_REGION,
},
},
},
}))
with _open(stackname, 'providers', mode='w') as fp:
# TODO: possibly remove unused providers
# Terraform already prunes them when running, but would
# simplify the .cfn/terraform/$stackname/ files
fp.write(json.dumps({
'provider': {
'fastly': {
# exact version constraint
'version': "= %s" % PROVIDER_FASTLY_VERSION,
'api_key': "${data.%s.%s.data[\"api_key\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_FASTLY_API_KEY),
},
'aws': {
# TODO: pin version constraint
'version': "= %s" % '2.3.0',
'region': context['aws']['region'],
},
'google': {
'version': "= %s" % '1.20.0',
'region': 'us-east4',
'credentials': "${data.%s.%s.data[\"credentials\"]}" % (DATA_TYPE_VAULT_GENERIC_SECRET, DATA_NAME_VAULT_GCP_API_KEY),
},
'vault': {
'address': context['vault']['address'],
# exact version constraint
'version': "= %s" % PROVIDER_VAULT_VERSION,
},
},
'data': {
DATA_TYPE_VAULT_GENERIC_SECRET: {
# TODO: this should not be used unless Fastly is involved
DATA_NAME_VAULT_FASTLY_API_KEY: {
'path': VAULT_PATH_FASTLY,
},
# TODO: this should not be used unless GCP is involved
DATA_NAME_VAULT_GCP_API_KEY: {
'path': VAULT_PATH_GCP,
},
},
},
}))
terraform.init(input=False, capture_output=False, raise_on_error=True)
return terraform
def update_template(stackname):
context = load_context(stackname)
update(stackname, context)
@only_if_managed_services_are_present
def update(stackname, context):
terraform = init(stackname, context)
terraform.apply('out.plan', input=False, capture_output=False, raise_on_error=True)
@only_if_managed_services_are_present
def destroy(stackname, context):
terraform = init(stackname, context)
terraform.destroy(input=False, capture_output=False, raise_on_error=True)
terraform_directory = join(TERRAFORM_DIR, stackname)
shutil.rmtree(terraform_directory)
def _file_path_for_generation(stackname, name, extension='tf.json'):
"builds a path for a file to be placed in conf.TERRAFORM_DIR"
return join(TERRAFORM_DIR, stackname, '%s.%s' % (name, extension))
def _open(stackname, name, extension='tf.json', mode='r'):
"`open`s a file in the conf.TERRAFORM_DIR belonging to given `stackname` (./.cfn/terraform/$stackname/)"
terraform_directory = join(TERRAFORM_DIR, stackname)
mkdir_p(terraform_directory)
deprecated_path = join(TERRAFORM_DIR, stackname, '%s.tf' % name)
if os.path.exists(deprecated_path):
os.remove(deprecated_path)
return open(_file_path_for_generation(stackname, name, extension), mode)
|
import os
from enum import Enum
from cli import crypto
from cli.exception import CliException
from primitive_pb2 import Amount, uint256
from commands_pb2 import Command, CreateAsset, AddAssetQuantity, CreateAccount, CreateDomain, TransferAsset
BASE_NAME = "iroha-mizuki-cli"
class CommandList:
"""
AddAssetQuantity add_asset_quantity = 1;
AddPeer add_peer = 2;
AddSignatory add_signatory = 3;
CreateAsset create_asset = 4;
CreateAccount create_account = 5;
CreateDomain create_domain = 6;
RemoveSignatory remove_sign = 7;
SetAccountPermissions set_permission = 8;
SetAccountQuorum set_quorum = 9;
TransferAsset transfer_asset = 10;
AppendRole append_role = 11;
CreateRole create_role = 12;
GrantPermission grant_permission = 13;
RevokePermission revoke_permission = 14;
ExternalGuardian external_guardian = 15;
"""
class Type(Enum):
STR = 1
INT = 2
UINT = 3
FLOAT = 4
NONE = 5
def __init__(self, printInfo=False):
self.printInfo = printInfo
self.commands = {
"AddAssetQuantity": {
"option": {
"account_id": {
"type": self.Type.STR,
"detail": "target's account id like mizuki@domain",
"required": True
},
"asset_id": {
"type": self.Type.STR,
"detail": "target's asset id like japan/yen",
"required": True
},
"amount": {
"type": self.Type.FLOAT,
"detail": "target's asset id like japan/yen",
"required": True
},
},
"function": self.AddAssetQuantity,
"detail": "Add asset's quantity"
},
"CreateAccount": {
"option": {
"account_name": {
"type": self.Type.STR,
"detail": "account name like mizuki",
"required": True
},
"domain_id": {
"type": self.Type.STR,
"detail": "new account will be in this domain like japan",
"required": True
},
"keypair_name": {
"type": self.Type.STR,
"detail": "save to this keypair_name like mizukey, if no set, generates ${"
"account_name}.pub/${account_name} ",
"required": False
}
},
"function": self.CreateAccount,
"detail": "CreateAccount asset's quantity"
},
"CreateDomain": {
"option": {
"domain_name": {
"type": self.Type.STR,
"detail": "new domain name like japan",
"required": True
}
},
"function": self.CreateDomain,
"detail": "Create domain in domain"
},
"CreateAsset": {
"option": {
"asset_name": {
"type": self.Type.STR,
"detail": "asset name like mizuki",
"required": True
},
"domain_id": {
"type": self.Type.STR,
"detail": "new account will be in this domain like japan",
"required": True
},
"precision": {
"type": self.Type.INT,
"detail": "how much support .000, default 0",
"required": False
}
},
"function": self.CreateAsset,
"detail": "Create new asset in domain"
},
"TransferAsset": {
"option": {
"src_account_id": {
"type": self.Type.STR,
"detail": "current owner's account name like mizuki@japan",
"required": True
},
"dest_account_id": {
"type": self.Type.STR,
"detail": "next owner's account name like iori@japan",
"required": True
},
"asset_id": {
"type": self.Type.STR,
"detail": "managed asset's name like yen",
"required": True
},
"description": {
"type": self.Type.STR,
"detail": "attach some text",
"required": False
},
"amount": {
"type": self.Type.STR,
"detail": "how much transfer",
"required": True
}
},
"function": self.TransferAsset,
"detail": "transfer asset"
}
}
self.built_in = {
"config": {
"option": {},
"function": self.config,
"detail": " Print current state \n"
" - name\n"
" - publicKey\n"
" - privateKey\n"
},
"keygen": {
"option": {
"account_name": {
"type": self.Type.STR,
"detail": "target's account name",
"required": True
},
"make_conf": {
"type": self.Type.NONE,
"detail": "generate conf.yml",
"required": False
}
},
"function": self.keygen,
"detail": " Print current state \n"
" - name\n"
" - publicKey\n"
" - privateKey\n"
}
}
self.commands.update(self.built_in)
def validate(self, expected, argv):
for item in expected.items():
if item[1]["required"] and not item[0] in argv:
raise CliException("{} is required".format(item[0]))
if item[0] in argv:
if isinstance(argv[item[0]], str):
if item[1]["type"] == self.Type.INT and not argv[item[0]].replace("-", "").isdigit():
raise CliException("{} is integer".format(item[0]))
if item[1]["type"] == self.Type.UINT and not argv[item[0]].isdigit():
raise CliException("{} is unsigned integer".format(item[0]))
if item[1]["type"] == self.Type.FLOAT and not argv[item[0]].replace("-", "").replace(".",
"").isdigit():
raise CliException("{} is float".format(item[0]))
else:
raise CliException("{} is str even if number, float".format(item[0]))
def printTransaction(self, name, expected, argv):
if self.printInfo:
print("[{}] run {} ".format(BASE_NAME, name))
for n in expected.keys():
print("- {}: {}".format(n, argv[n]))
# =============================================
def config(self, argv):
print(
"\n"
" Config \n"
" =========\n"
)
print(" name : {}".format(argv["name"]))
print(" publicKey : {}".format(argv["publicKey"]))
print(" privateKey: {}".format(
argv["privateKey"][:5] + "**...**" + argv["privateKey"][-5:])
)
print(" targetPeer: {}".format(argv["location"]))
print("")
return None
def keygen(self, argv):
name = "keygen"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
pubKey, priKey = crypto.generate_keypair_hex()
try:
if "keypair_name" in argv:
filename_base = argv["keypair_name"]
else:
filename_base = argv["account_name"]
try:
with open(filename_base + ".pub", "w") as pub:
pub.write(pubKey.decode('utf-8'))
except (OSError, IOError) as e:
print(e)
raise CliException("Cannot open : {name}".format(name=filename_base + ".pub"))
try:
with open(filename_base + ".pri", "w") as pri:
pri.write(priKey.decode('utf-8'))
except (OSError, IOError) as e:
print(e)
raise CliException("Cannot open : {name}".format(name=filename_base + ".pri"))
os.chmod(filename_base + ".pub", 0o400)
os.chmod(filename_base + ".pri", 0o400)
if "make_conf" in argv:
import yaml
conf_path = "config.yaml"
dumped_conf = yaml.dump({
"peer":{
"address":"localhost",
"port":50051
},
"account":{
"publicKeyPath":filename_base + ".pub",
"privateKeyPath":filename_base + ".pri",
"name":filename_base
}
}, default_flow_style=False)
try:
with open(conf_path, "w") as conf_file:
conf_file.write(dumped_conf)
except (OSError, IOError) as e:
print(e)
raise CliException("Cannot open : {name}".format(name=conf_path))
print("Generate {name}!".format(name=conf_path))
except CliException as e:
print(e)
print("file error")
return None
else:
if self.printInfo:
print(
"key save publicKey -> {} privateKey -> {}".format(filename_base + ".pub", filename_base))
print("key save publicKey:{} privateKey:{}".format(
pubKey[:5] + "..." + pubKey[-5:],
priKey[:5] + "**...**" + priKey[-5:],
))
return None
# =============================================
def AddAssetQuantity(self, argv):
name = "AddAssetQuantity"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
        # TODO: precision is currently fixed at 0; when a value like 1.03 is entered,
        #       precision should be set to 2 automatically
        # TODO: set Amount.value correctly
return Command(add_asset_quantity=AddAssetQuantity(
account_id=argv["account_id"],
asset_id=argv["asset_id"],
amount=Amount(value=uint256(
first=int(float(argv["amount"])),
second=0,
third=0,
fourth=0,
), precision=0)
))
def CreateAccount(self, argv):
name = "CreateAccount"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
# ToDo validate and print check
        # auto-generate a keypair for the new account
pubKey, priKey = crypto.generate_keypair_hex()
try:
if "keypair_name" in argv:
filename_base = argv["keypair_name"]
else:
filename_base = argv["account_name"] + "@" + argv["domain_id"]
            with open(filename_base + ".pub", "w") as pub:
                pub.write(pubKey.decode('utf-8'))
            with open(filename_base + ".pri", "w") as pri:
                pri.write(priKey.decode('utf-8'))
        except (OSError, IOError, CliException) as e:
print(e)
print("file error")
return None
else:
if self.printInfo:
print(
"key save publicKey -> {} privateKey -> {}".format(filename_base + ".pub", filename_base))
print("key save publicKey:{} privateKey:{}".format(
pubKey[:5] + "..." + pubKey[-5:],
priKey[:5] + "**...**" + priKey[-5:],
))
return Command(create_account=CreateAccount(
account_name=argv["account_name"],
domain_id=argv["domain_id"],
main_pubkey=pubKey
))
def CreateAsset(self, argv):
name = "CreateAsset"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
return Command(create_asset=CreateAsset(
asset_name=argv["asset_name"],
domain_id=argv["domain_id"],
precision=int(argv.get("precision", 0))
))
def CreateDomain(self, argv):
name = "CreateDomain"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
return Command(create_domain=CreateDomain(
domain_name=argv["domain_name"]
))
def TransferAsset(self, argv):
name = "CreateDomain"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
# ToDo validate and print check
return Command(transfer_asset=TransferAsset(
src_account_id=argv["src_account_id"],
dest_account_id=argv["dest_account_id"],
asset_id=argv["asset_id"],
description=argv.get("description", ""),
amount=Amount(value=uint256(
first=int(float(argv["amount"])),
second=0,
third=0,
fourth=0,
), precision=0)
))
Update interface of crypto
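The change below replaces crypto.generate_keypair_hex(), which returned a (public, private) pair of hex-encoded byte strings, with crypto.generate_keypair(), which returns a key-pair object exposing public_key, private_key and raw_public_key. A minimal sketch of the assumed shape of that object (the field names are taken from the updated callers below; the key generation itself is a random-bytes stand-in, not the real implementation):

    import binascii
    import os
    from collections import namedtuple

    KeyPair = namedtuple('KeyPair', ['public_key', 'private_key', 'raw_public_key'])

    def generate_keypair():
        # stand-in keys only; the real module derives these from an actual signing keypair
        raw_public, raw_private = os.urandom(32), os.urandom(64)
        return KeyPair(
            public_key=binascii.hexlify(raw_public).decode('utf-8'),
            private_key=binascii.hexlify(raw_private).decode('utf-8'),
            raw_public_key=raw_public,
        )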
import os
from enum import Enum
import binascii
from cli import crypto
from cli.exception import CliException
from primitive_pb2 import Amount, uint256
from commands_pb2 import Command, CreateAsset, AddAssetQuantity, CreateAccount, CreateDomain, TransferAsset
BASE_NAME = "iroha-mizuki-cli"
class CommandList:
"""
AddAssetQuantity add_asset_quantity = 1;
AddPeer add_peer = 2;
AddSignatory add_signatory = 3;
CreateAsset create_asset = 4;
CreateAccount create_account = 5;
CreateDomain create_domain = 6;
RemoveSignatory remove_sign = 7;
SetAccountPermissions set_permission = 8;
SetAccountQuorum set_quorum = 9;
TransferAsset transfer_asset = 10;
AppendRole append_role = 11;
CreateRole create_role = 12;
GrantPermission grant_permission = 13;
RevokePermission revoke_permission = 14;
ExternalGuardian external_guardian = 15;
"""
class Type(Enum):
STR = 1
INT = 2
UINT = 3
FLOAT = 4
NONE = 5
def __init__(self, printInfo=False):
self.printInfo = printInfo
self.commands = {
"AddAssetQuantity": {
"option": {
"account_id": {
"type": self.Type.STR,
"detail": "target's account id like mizuki@domain",
"required": True
},
"asset_id": {
"type": self.Type.STR,
"detail": "target's asset id like japan/yen",
"required": True
},
"amount": {
"type": self.Type.FLOAT,
"detail": "target's asset id like japan/yen",
"required": True
},
},
"function": self.AddAssetQuantity,
"detail": "Add asset's quantity"
},
"CreateAccount": {
"option": {
"account_name": {
"type": self.Type.STR,
"detail": "account name like mizuki",
"required": True
},
"domain_id": {
"type": self.Type.STR,
"detail": "new account will be in this domain like japan",
"required": True
},
"keypair_name": {
"type": self.Type.STR,
"detail": "save to this keypair_name like mizukey, if no set, generates ${"
"account_name}.pub/${account_name} ",
"required": False
}
},
"function": self.CreateAccount,
"detail": "CreateAccount asset's quantity"
},
"CreateDomain": {
"option": {
"domain_name": {
"type": self.Type.STR,
"detail": "new domain name like japan",
"required": True
}
},
"function": self.CreateDomain,
"detail": "Create domain in domain"
},
"CreateAsset": {
"option": {
"asset_name": {
"type": self.Type.STR,
"detail": "asset name like mizuki",
"required": True
},
"domain_id": {
"type": self.Type.STR,
"detail": "new account will be in this domain like japan",
"required": True
},
"precision": {
"type": self.Type.INT,
"detail": "how much support .000, default 0",
"required": False
}
},
"function": self.CreateAsset,
"detail": "Create new asset in domain"
},
"TransferAsset": {
"option": {
"src_account_id": {
"type": self.Type.STR,
"detail": "current owner's account name like mizuki@japan",
"required": True
},
"dest_account_id": {
"type": self.Type.STR,
"detail": "next owner's account name like iori@japan",
"required": True
},
"asset_id": {
"type": self.Type.STR,
"detail": "managed asset's name like yen",
"required": True
},
"description": {
"type": self.Type.STR,
"detail": "attach some text",
"required": False
},
"amount": {
"type": self.Type.STR,
"detail": "how much transfer",
"required": True
}
},
"function": self.TransferAsset,
"detail": "transfer asset"
}
}
self.built_in = {
"config": {
"option": {},
"function": self.config,
"detail": " Print current state \n"
" - name\n"
" - publicKey\n"
" - privateKey\n"
},
"keygen": {
"option": {
"account_name": {
"type": self.Type.STR,
"detail": "target's account name",
"required": True
},
"make_conf": {
"type": self.Type.NONE,
"detail": "generate conf.yml",
"required": False
}
},
"function": self.keygen,
"detail": " Print current state \n"
" - name\n"
" - publicKey\n"
" - privateKey\n"
}
}
self.commands.update(self.built_in)
def validate(self, expected, argv):
for item in expected.items():
if item[1]["required"] and not item[0] in argv:
raise CliException("{} is required".format(item[0]))
if item[0] in argv:
if isinstance(argv[item[0]], str):
if item[1]["type"] == self.Type.INT and not argv[item[0]].replace("-", "").isdigit():
raise CliException("{} is integer".format(item[0]))
if item[1]["type"] == self.Type.UINT and not argv[item[0]].isdigit():
raise CliException("{} is unsigned integer".format(item[0]))
if item[1]["type"] == self.Type.FLOAT and not argv[item[0]].replace("-", "").replace(".",
"").isdigit():
raise CliException("{} is float".format(item[0]))
else:
raise CliException("{} is str even if number, float".format(item[0]))
def printTransaction(self, name, expected, argv):
if self.printInfo:
print("[{}] run {} ".format(BASE_NAME, name))
for n in expected.keys():
print("- {}: {}".format(n, argv[n]))
# =============================================
def config(self, argv):
print(
"\n"
" Config \n"
" =========\n"
)
print(" name : {}".format(argv["name"]))
print(" publicKey : {}".format(argv["publicKey"]))
print(" privateKey: {}".format(
argv["privateKey"][:5] + "**...**" + argv["privateKey"][-5:])
)
print(" targetPeer: {}".format(argv["location"]))
print("")
return None
def keygen(self, argv):
name = "keygen"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
key_pair = crypto.generate_keypair()
try:
if "keypair_name" in argv:
filename_base = argv["keypair_name"]
else:
filename_base = argv["account_name"]
try:
with open(filename_base + ".pub", "w") as pub:
pub.write(key_pair.public_key)
except (OSError, IOError) as e:
print(e)
raise CliException("Cannot open : {name}".format(name=filename_base + ".pub"))
try:
with open(filename_base + ".pri", "w") as pri:
pri.write(key_pair.private_key)
except (OSError, IOError) as e:
print(e)
raise CliException("Cannot open : {name}".format(name=filename_base + ".pri"))
os.chmod(filename_base + ".pub", 0o400)
os.chmod(filename_base + ".pri", 0o400)
if "make_conf" in argv:
import yaml
conf_path = "config.yaml"
dumped_conf = yaml.dump({
"peer": {
"address": "localhost",
"port": 50051
},
"account": {
"publicKeyPath": filename_base + ".pub",
"privateKeyPath": filename_base + ".pri",
"name": filename_base
}
}, default_flow_style=False)
try:
with open(conf_path, "w") as conf_file:
conf_file.write(dumped_conf)
except (OSError, IOError) as e:
print(e)
raise CliException("Cannot open : {name}".format(name=conf_path))
print("Generate {name}!".format(name=conf_path))
except CliException as e:
print(e)
print("file error")
return None
else:
if self.printInfo:
print(
"key save publicKey -> {} privateKey -> {}".format(filename_base + ".pub", filename_base))
print("key save publicKey:{} privateKey:{}".format(
key_pair.public_key[:5] + "..." + key_pair.public_key[-5:],
key_pair.private_key[:5] + "**...**" + key_pair.private_key[-5:],
))
return None
# =============================================
def AddAssetQuantity(self, argv):
name = "AddAssetQuantity"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
        # TODO: precision is currently fixed at 0; when a value like 1.03 is entered,
        #       precision should be set to 2 automatically
        # TODO: set Amount.value correctly
return Command(add_asset_quantity=AddAssetQuantity(
account_id=argv["account_id"],
asset_id=argv["asset_id"],
amount=Amount(value=uint256(
first=int(float(argv["amount"])),
second=0,
third=0,
fourth=0,
), precision=0)
))
def CreateAccount(self, argv):
name = "CreateAccount"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
# ToDo validate and print check
        # auto-generate a keypair for the new account
key_pair = crypto.generate_keypair()
try:
if "keypair_name" in argv:
filename_base = argv["keypair_name"]
else:
filename_base = argv["account_name"] + "@" + argv["domain_id"]
            with open(filename_base + ".pub", "w") as pub:
                pub.write(key_pair.public_key)
            with open(filename_base + ".pri", "w") as pri:
                pri.write(key_pair.private_key)
        except (OSError, IOError, CliException) as e:
print(e)
print("file error")
return None
else:
if self.printInfo:
print(
"key save publicKey -> {} privateKey -> {}".format(filename_base + ".pub", filename_base))
print("key save publicKey:{} privateKey:{}".format(
key_pair.public_key[:5] + "..." + key_pair.public_key[-5:],
key_pair.private_key[:5] + "**...**" + key_pair.private_key[-5:],
))
return Command(create_account=CreateAccount(
account_name=argv["account_name"],
domain_id=argv["domain_id"],
main_pubkey=binascii.hexlify(key_pair.raw_public_key)
))
def CreateAsset(self, argv):
name = "CreateAsset"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
return Command(create_asset=CreateAsset(
asset_name=argv["asset_name"],
domain_id=argv["domain_id"],
precision=int(argv.get("precision", 0))
))
def CreateDomain(self, argv):
name = "CreateDomain"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
return Command(create_domain=CreateDomain(
domain_name=argv["domain_name"]
))
def TransferAsset(self, argv):
name = "CreateDomain"
argv_info = self.commands[name]["option"]
self.validate(argv_info, argv)
self.printTransaction(name, argv_info, argv)
# ToDo validate and print check
return Command(transfer_asset=TransferAsset(
src_account_id=argv["src_account_id"],
dest_account_id=argv["dest_account_id"],
asset_id=argv["asset_id"],
description=argv.get("description", ""),
amount=Amount(value=uint256(
first=int(float(argv["amount"])),
second=0,
third=0,
fourth=0,
), precision=0)
))
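# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original CLI source: the ToDo notes in
# AddAssetQuantity above ask for the precision to be derived from the entered
# amount instead of being hard-coded to 0.  A self-contained helper along
# these lines could turn a decimal string such as "1.03" into the integer
# value / precision pair that Amount(value=uint256(...), precision=...)
# expects.  The helper name is hypothetical.
def split_amount(amount_str):
    """Return (integer_value, precision) for a decimal amount string."""
    if "." in amount_str:
        whole, frac = amount_str.split(".", 1)
        # "1.03" -> (103, 2); "2.5" -> (25, 1)
        return int(whole + frac), len(frac)
    # "100" -> (100, 0)
    return int(amount_str), 0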
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Repository external dependency resolution functions."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def _include_if_not_defined(repo_rule, name, **kwargs):
if not native.existing_rule(name):
repo_rule(name = name, **kwargs)
def stardoc_repositories():
"""Adds the external repositories used by the Starlark rules."""
_include_if_not_defined(
http_archive,
name = "bazel_skylib",
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/0.8.0/bazel-skylib.0.8.0.tar.gz"],
sha256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e",
)
_include_if_not_defined(
http_archive,
name = "rules_java",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_java/archive/7cf3cefd652008d0a64a419c34c13bdca6c8f178.zip",
"https://github.com/bazelbuild/rules_java/archive/7cf3cefd652008d0a64a419c34c13bdca6c8f178.zip",
],
sha256 = "bc81f1ba47ef5cc68ad32225c3d0e70b8c6f6077663835438da8d5733f917598",
strip_prefix = "rules_java-7cf3cefd652008d0a64a419c34c13bdca6c8f178",
)
Update Skylib dependency (#61)
The update includes a fix for the flag --incompatible_disable_depset_items.
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Repository external dependency resolution functions."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def _include_if_not_defined(repo_rule, name, **kwargs):
if not native.existing_rule(name):
repo_rule(name = name, **kwargs)
def stardoc_repositories():
"""Adds the external repositories used by the Starlark rules."""
_include_if_not_defined(
http_archive,
name = "bazel_skylib",
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/16de038c484145363340eeaf0e97a0c9889a931b.tar.gz"], # 2020-08-11
sha256 = "96e0cd3f731f0caef9e9919aa119ecc6dace36b149c2f47e40aa50587790402b",
strip_prefix = "bazel-skylib-16de038c484145363340eeaf0e97a0c9889a931b",
)
_include_if_not_defined(
http_archive,
name = "rules_java",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_java/archive/7cf3cefd652008d0a64a419c34c13bdca6c8f178.zip",
"https://github.com/bazelbuild/rules_java/archive/7cf3cefd652008d0a64a419c34c13bdca6c8f178.zip",
],
sha256 = "bc81f1ba47ef5cc68ad32225c3d0e70b8c6f6077663835438da8d5733f917598",
strip_prefix = "rules_java-7cf3cefd652008d0a64a419c34c13bdca6c8f178",
)
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import httplib
import six
from st2tests.fixturesloader import FixturesLoader
from tests.base import APIControllerWithRBACTestCase
http_client = six.moves.http_client
__all__ = [
'APIControllersRBACTestCase'
]
FIXTURES_PACK = 'generic'
TEST_FIXTURES = OrderedDict([
('runners', ['testrunner1.yaml']),
('sensors', ['sensor1.yaml']),
('actions', ['action1.yaml', 'local.yaml']),
('aliases', ['alias1.yaml']),
('triggers', ['trigger1.yaml']),
('rules', ['rule1.yaml']),
('triggertypes', ['triggertype1.yaml']),
('executions', ['execution1.yaml']),
('liveactions', ['liveaction1.yaml', 'parentliveaction.yaml', 'childliveaction.yaml']),
('enforcements', ['enforcement1.yaml']),
('apikeys', ['apikey1.yaml']),
])
MOCK_RUNNER_1 = {
'name': 'test-runner-1',
'description': 'test',
'enabled': False
}
MOCK_ACTION_1 = {
'name': 'ma.dummy.action',
'pack': 'examples',
'description': 'test description',
'enabled': True,
'entry_point': '/tmp/test/action2.py',
'runner_type': 'local-shell-script',
'parameters': {
'c': {'type': 'string', 'default': 'C1', 'position': 0},
'd': {'type': 'string', 'default': 'D1', 'immutable': True}
}
}
MOCK_ACTION_ALIAS_1 = {
'name': 'alias3',
'pack': 'aliases',
'description': 'test description',
'action_ref': 'core.local',
'formats': ['a', 'b']
}
MOCK_RULE_1 = {
'enabled': True,
'name': 'st2.test.rule2',
'pack': 'yoyohoneysingh',
'trigger': {
'type': 'wolfpack.triggertype-1'
},
'criteria': {
'trigger.k1': {
'pattern': 't1_p_v',
'type': 'equals'
}
},
'action': {
'ref': 'sixpack.st2.test.action',
'parameters': {
'ip2': '{{rule.k1}}',
'ip1': '{{trigger.t1_p}}'
}
},
'description': ''
}
class APIControllersRBACTestCase(APIControllerWithRBACTestCase):
"""
Test class which hits all the API endpoints which are behind the RBAC wall with a user which
has no permissions and makes sure API returns access denied.
"""
register_packs = True
fixtures_loader = FixturesLoader()
def setUp(self):
super(APIControllersRBACTestCase, self).setUp()
# Register packs
if self.register_packs:
self._register_packs()
# Insert mock objects - those objects are used to test get one, edit and delete operations
self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES)
def test_api_endpoints_behind_rbac_wall(self):
# alias_model = self.models['aliases']['alias1.yaml']
sensor_model = self.models['sensors']['sensor1.yaml']
rule_model = self.models['rules']['rule1.yaml']
enforcement_model = self.models['enforcements']['enforcement1.yaml']
execution_model = self.models['executions']['execution1.yaml']
supported_endpoints = [
# Runners
{
'path': '/v1/runnertypes',
'method': 'GET'
},
{
'path': '/v1/runnertypes/test-runner-1',
'method': 'GET'
},
{
'path': '/v1/runnertypes/test-runner-1',
'method': 'PUT',
'payload': MOCK_RUNNER_1
},
# Packs
{
'path': '/v1/packs',
'method': 'GET'
},
{
'path': '/v1/packs/dummy_pack_1',
'method': 'GET'
},
# Pack management
{
'path': '/v1/packs/install',
'method': 'POST',
'payload': {'packs': 'libcloud'}
},
{
'path': '/v1/packs/uninstall',
'method': 'POST',
'payload': {'packs': 'libcloud'}
},
{
'path': '/v1/packs/register',
'method': 'POST',
'payload': {'types': ['actions']}
},
{
'path': '/v1/packs/index/search',
'method': 'POST',
'payload': {'query': 'cloud'}
},
{
'path': '/v1/packs/index/health',
'method': 'GET'
},
# Pack views
{
'path': '/v1/packs/views/files/dummy_pack_1',
'method': 'GET'
},
# Pack config schemas
{
'path': '/v1/config_schemas',
'method': 'GET'
},
{
'path': '/v1/config_schemas/dummy_pack_1',
'method': 'GET'
},
{
'path': '/v1/packs/views/file/dummy_pack_1/pack.yaml',
'method': 'GET'
},
# Pack configs
{
'path': '/v1/configs/',
'method': 'GET'
},
{
'path': '/v1/configs/dummy_pack_1',
'method': 'GET'
},
{
'path': '/v1/configs/dummy_pack_1',
'method': 'PUT',
'payload': {
'foo': 'bar'
}
},
# Sensors
{
'path': '/v1/sensortypes',
'method': 'GET'
},
{
'path': '/v1/sensortypes/%s' % (sensor_model.ref),
'method': 'GET'
},
{
'path': '/v1/sensortypes/%s' % (sensor_model.ref),
'method': 'PUT',
'payload': {'enabled': False}
},
# Actions
{
'path': '/v1/actions',
'method': 'GET'
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'GET'
},
{
'path': '/v1/actions',
'method': 'POST',
'payload': MOCK_ACTION_1
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'PUT',
'payload': MOCK_ACTION_1
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'DELETE'
},
# Action aliases
{
'path': '/v1/actionalias',
'method': 'GET'
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'GET'
},
{
'path': '/v1/actionalias',
'method': 'POST',
'payload': MOCK_ACTION_ALIAS_1
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'PUT',
'payload': MOCK_ACTION_ALIAS_1
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'DELETE'
},
{
'path': '/v1/actionalias/match',
'method': 'POST',
'payload': {'command': 'test command string'}
},
# Rules
{
'path': '/v1/rules',
'method': 'GET'
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'GET'
},
{
'path': '/v1/rules',
'method': 'POST',
'payload': MOCK_RULE_1
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'PUT',
'payload': MOCK_RULE_1
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'DELETE'
},
# Rule enforcements
{
'path': '/v1/ruleenforcements',
'method': 'GET'
},
{
'path': '/v1/ruleenforcements/%s' % (enforcement_model.id),
'method': 'GET'
},
# Action Executions
{
'path': '/v1/executions',
'method': 'GET'
},
{
'path': '/v1/executions/%s' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions',
'method': 'POST',
'payload': {'action': 'core.local'} # schedule execution / run action
},
{
'path': '/v1/executions/%s' % (execution_model.id),
'method': 'DELETE' # stop execution
},
{
'path': '/v1/executions/%s/re_run' % (execution_model.id),
'method': 'POST', # re-run execution
'payload': {'parameters': {}}
},
# Action execution nested controllers
{
'path': '/v1/executions/%s/attribute/trigger_instance' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions/%s/children' % (execution_model.id),
'method': 'GET'
},
# Alias executions
{
'path': '/v1/aliasexecution',
'method': 'POST',
'payload': {'name': 'alias1', 'format': 'foo bar ponies',
'command': 'foo bar ponies',
'user': 'channel', 'source_channel': 'bar'}
},
# Webhook
{
'path': '/v1/webhooks/st2',
'method': 'POST',
'payload': {
'trigger': 'some',
'payload': {
'some': 'thing'
}
}
}
]
self.use_user(self.users['no_permissions'])
for endpoint in supported_endpoints:
response = self._perform_request_for_endpoint(endpoint=endpoint)
msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'],
endpoint['path'],
response.body)
self.assertEqual(response.status_code, httplib.FORBIDDEN, msg)
def test_icon_png_file_is_whitelisted(self):
self.use_user(self.users['no_permissions'])
# Test that access to icon.png file doesn't require any permissions
response = self.app.get('/v1/packs/views/file/dummy_pack_2/icon.png')
self.assertEqual(response.status_code, httplib.OK)
# Other files should return forbidden
response = self.app.get('/v1/packs/views/file/dummy_pack_2/pack.yaml',
expect_errors=True)
self.assertEqual(response.status_code, httplib.FORBIDDEN)
def _perform_request_for_endpoint(self, endpoint):
if endpoint['method'] == 'GET':
response = self.app.get(endpoint['path'], expect_errors=True)
elif endpoint['method'] == 'POST':
return self.app.post_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'PUT':
return self.app.put_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'DELETE':
return self.app.delete(endpoint['path'], expect_errors=True)
else:
raise ValueError('Unsupported method: %s' % (endpoint['method']))
return response
Add RBAC API endpoint tests for traces API endpoints.
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import httplib
import six
from st2tests.fixturesloader import FixturesLoader
from tests.base import APIControllerWithRBACTestCase
http_client = six.moves.http_client
__all__ = [
'APIControllersRBACTestCase'
]
FIXTURES_PACK = 'generic'
TEST_FIXTURES = OrderedDict([
('runners', ['testrunner1.yaml']),
('sensors', ['sensor1.yaml']),
('actions', ['action1.yaml', 'local.yaml']),
('aliases', ['alias1.yaml']),
('triggers', ['trigger1.yaml']),
('rules', ['rule1.yaml']),
('triggertypes', ['triggertype1.yaml']),
('executions', ['execution1.yaml']),
('liveactions', ['liveaction1.yaml', 'parentliveaction.yaml', 'childliveaction.yaml']),
('enforcements', ['enforcement1.yaml']),
('apikeys', ['apikey1.yaml']),
('traces', ['trace_for_test_enforce.yaml'])
])
MOCK_RUNNER_1 = {
'name': 'test-runner-1',
'description': 'test',
'enabled': False
}
MOCK_ACTION_1 = {
'name': 'ma.dummy.action',
'pack': 'examples',
'description': 'test description',
'enabled': True,
'entry_point': '/tmp/test/action2.py',
'runner_type': 'local-shell-script',
'parameters': {
'c': {'type': 'string', 'default': 'C1', 'position': 0},
'd': {'type': 'string', 'default': 'D1', 'immutable': True}
}
}
MOCK_ACTION_ALIAS_1 = {
'name': 'alias3',
'pack': 'aliases',
'description': 'test description',
'action_ref': 'core.local',
'formats': ['a', 'b']
}
MOCK_RULE_1 = {
'enabled': True,
'name': 'st2.test.rule2',
'pack': 'yoyohoneysingh',
'trigger': {
'type': 'wolfpack.triggertype-1'
},
'criteria': {
'trigger.k1': {
'pattern': 't1_p_v',
'type': 'equals'
}
},
'action': {
'ref': 'sixpack.st2.test.action',
'parameters': {
'ip2': '{{rule.k1}}',
'ip1': '{{trigger.t1_p}}'
}
},
'description': ''
}
class APIControllersRBACTestCase(APIControllerWithRBACTestCase):
"""
Test class which hits all the API endpoints which are behind the RBAC wall with a user which
has no permissions and makes sure API returns access denied.
"""
register_packs = True
fixtures_loader = FixturesLoader()
def setUp(self):
super(APIControllersRBACTestCase, self).setUp()
# Register packs
if self.register_packs:
self._register_packs()
# Insert mock objects - those objects are used to test get one, edit and delete operations
self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES)
def test_api_endpoints_behind_rbac_wall(self):
# alias_model = self.models['aliases']['alias1.yaml']
sensor_model = self.models['sensors']['sensor1.yaml']
rule_model = self.models['rules']['rule1.yaml']
enforcement_model = self.models['enforcements']['enforcement1.yaml']
execution_model = self.models['executions']['execution1.yaml']
trace_model = self.models['traces']['trace_for_test_enforce.yaml']
supported_endpoints = [
# Runners
{
'path': '/v1/runnertypes',
'method': 'GET'
},
{
'path': '/v1/runnertypes/test-runner-1',
'method': 'GET'
},
{
'path': '/v1/runnertypes/test-runner-1',
'method': 'PUT',
'payload': MOCK_RUNNER_1
},
# Packs
{
'path': '/v1/packs',
'method': 'GET'
},
{
'path': '/v1/packs/dummy_pack_1',
'method': 'GET'
},
# Pack management
{
'path': '/v1/packs/install',
'method': 'POST',
'payload': {'packs': 'libcloud'}
},
{
'path': '/v1/packs/uninstall',
'method': 'POST',
'payload': {'packs': 'libcloud'}
},
{
'path': '/v1/packs/register',
'method': 'POST',
'payload': {'types': ['actions']}
},
{
'path': '/v1/packs/index/search',
'method': 'POST',
'payload': {'query': 'cloud'}
},
{
'path': '/v1/packs/index/health',
'method': 'GET'
},
# Pack views
{
'path': '/v1/packs/views/files/dummy_pack_1',
'method': 'GET'
},
# Pack config schemas
{
'path': '/v1/config_schemas',
'method': 'GET'
},
{
'path': '/v1/config_schemas/dummy_pack_1',
'method': 'GET'
},
{
'path': '/v1/packs/views/file/dummy_pack_1/pack.yaml',
'method': 'GET'
},
# Pack configs
{
'path': '/v1/configs/',
'method': 'GET'
},
{
'path': '/v1/configs/dummy_pack_1',
'method': 'GET'
},
{
'path': '/v1/configs/dummy_pack_1',
'method': 'PUT',
'payload': {
'foo': 'bar'
}
},
# Sensors
{
'path': '/v1/sensortypes',
'method': 'GET'
},
{
'path': '/v1/sensortypes/%s' % (sensor_model.ref),
'method': 'GET'
},
{
'path': '/v1/sensortypes/%s' % (sensor_model.ref),
'method': 'PUT',
'payload': {'enabled': False}
},
# Actions
{
'path': '/v1/actions',
'method': 'GET'
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'GET'
},
{
'path': '/v1/actions',
'method': 'POST',
'payload': MOCK_ACTION_1
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'PUT',
'payload': MOCK_ACTION_1
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'DELETE'
},
# Action aliases
{
'path': '/v1/actionalias',
'method': 'GET'
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'GET'
},
{
'path': '/v1/actionalias',
'method': 'POST',
'payload': MOCK_ACTION_ALIAS_1
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'PUT',
'payload': MOCK_ACTION_ALIAS_1
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'DELETE'
},
{
'path': '/v1/actionalias/match',
'method': 'POST',
'payload': {'command': 'test command string'}
},
# Rules
{
'path': '/v1/rules',
'method': 'GET'
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'GET'
},
{
'path': '/v1/rules',
'method': 'POST',
'payload': MOCK_RULE_1
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'PUT',
'payload': MOCK_RULE_1
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'DELETE'
},
# Rule enforcements
{
'path': '/v1/ruleenforcements',
'method': 'GET'
},
{
'path': '/v1/ruleenforcements/%s' % (enforcement_model.id),
'method': 'GET'
},
# Action Executions
{
'path': '/v1/executions',
'method': 'GET'
},
{
'path': '/v1/executions/%s' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions',
'method': 'POST',
'payload': {'action': 'core.local'} # schedule execution / run action
},
{
'path': '/v1/executions/%s' % (execution_model.id),
'method': 'DELETE' # stop execution
},
{
'path': '/v1/executions/%s/re_run' % (execution_model.id),
'method': 'POST', # re-run execution
'payload': {'parameters': {}}
},
# Action execution nested controllers
{
'path': '/v1/executions/%s/attribute/trigger_instance' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions/%s/children' % (execution_model.id),
'method': 'GET'
},
# Alias executions
{
'path': '/v1/aliasexecution',
'method': 'POST',
'payload': {'name': 'alias1', 'format': 'foo bar ponies',
'command': 'foo bar ponies',
'user': 'channel', 'source_channel': 'bar'}
},
# Webhook
{
'path': '/v1/webhooks/st2',
'method': 'POST',
'payload': {
'trigger': 'some',
'payload': {
'some': 'thing'
}
}
},
# Traces
{
'path': '/v1/traces/',
'method': 'GET'
},
{
'path': '/v1/traces/%s' % (trace_model.id),
'method': 'GET'
},
]
self.use_user(self.users['no_permissions'])
for endpoint in supported_endpoints:
response = self._perform_request_for_endpoint(endpoint=endpoint)
msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'],
endpoint['path'],
response.body)
self.assertEqual(response.status_code, httplib.FORBIDDEN, msg)
def test_icon_png_file_is_whitelisted(self):
self.use_user(self.users['no_permissions'])
# Test that access to icon.png file doesn't require any permissions
response = self.app.get('/v1/packs/views/file/dummy_pack_2/icon.png')
self.assertEqual(response.status_code, httplib.OK)
# Other files should return forbidden
response = self.app.get('/v1/packs/views/file/dummy_pack_2/pack.yaml',
expect_errors=True)
self.assertEqual(response.status_code, httplib.FORBIDDEN)
def _perform_request_for_endpoint(self, endpoint):
if endpoint['method'] == 'GET':
response = self.app.get(endpoint['path'], expect_errors=True)
elif endpoint['method'] == 'POST':
return self.app.post_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'PUT':
return self.app.put_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'DELETE':
return self.app.delete(endpoint['path'], expect_errors=True)
else:
raise ValueError('Unsupported method: %s' % (endpoint['method']))
return response
|
# (c) 2013, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.vars import combine_vars
#FIXME: make into plugins
from ansible.inventory.ini import InventoryParser as InventoryINIParser
from ansible.inventory.yaml import InventoryParser as InventoryYAMLParser
from ansible.inventory.script import InventoryScript
__all__ = ['get_file_parser']
def get_file_parser(hostsfile, groups, loader):
# check to see if the specified file starts with a
# shebang (#!/), so if an error is raised by the parser
# class we can show a more apropos error
shebang_present = False
processed = False
myerr = []
parser = None
try:
inv_file = open(hostsfile)
first_line = inv_file.readlines()[0]
inv_file.close()
if first_line.startswith('#!'):
shebang_present = True
except:
pass
#FIXME: make this 'plugin loop'
# script
if loader.is_executable(hostsfile):
try:
parser = InventoryScript(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append(str(e))
elif shebang_present:
myerr.append("The file %s looks like it should be an executable inventory script, but is not marked executable. Perhaps you want to correct this with `chmod +x %s`?" % (hostsfile, hostsfile))
# YAML/JSON
if not processed and not shebang_present and os.path.splitext(hostsfile)[-1] in C.YAML_FILENAME_EXTENSIONS:
try:
parser = InventoryYAMLParser(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append(str(e))
# ini
if not processed and not shebang_present:
try:
parser = InventoryINIParser(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append(str(e))
if not processed and myerr:
raise AnsibleError( '\n'.join(myerr) )
return parser
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
if groups is None:
groups = dict()
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
self.parsers = []
self.hosts = {}
self.groups = groups
self._loader = loader
for i in self.names:
# Skip files that end with certain extensions or characters
if any(i.endswith(ext) for ext in C.DEFAULT_INVENTORY_IGNORE):
continue
# Skip hidden files
if i.startswith('.') and not i.startswith('./'):
continue
# These are things inside of an inventory basedir
if i in ("host_vars", "group_vars", "vars_plugins"):
continue
fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath):
parser = InventoryDirectory(loader=loader, groups=groups, filename=fullpath)
else:
parser = get_file_parser(fullpath, self.groups, loader)
if parser is None:
#FIXME: needs to use display
import warnings
warnings.warn("Could not find parser for %s, skipping" % fullpath)
continue
self.parsers.append(parser)
# retrieve all groups and hosts from the parser and add them to
# self, don't look at group lists yet, to avoid
# recursion trouble, but just make sure all objects exist in self
newgroups = parser.groups.values()
for group in newgroups:
for host in group.hosts:
self._add_host(host)
for group in newgroups:
self._add_group(group)
# now check the objects lists so they contain only objects from
# self; membership data in groups is already fine (except all &
# ungrouped, see later), but might still reference objects not in self
for group in self.groups.values():
# iterate on a copy of the lists, as those lists get changed in
# the loop
# list with group's child group objects:
for child in group.child_groups[:]:
if child != self.groups[child.name]:
group.child_groups.remove(child)
group.child_groups.append(self.groups[child.name])
# list with group's parent group objects:
for parent in group.parent_groups[:]:
if parent != self.groups[parent.name]:
group.parent_groups.remove(parent)
group.parent_groups.append(self.groups[parent.name])
# list with group's host objects:
for host in group.hosts[:]:
if host != self.hosts[host.name]:
group.hosts.remove(host)
group.hosts.append(self.hosts[host.name])
# also check here that the group that contains host, is
# also contained in the host's group list
if group not in self.hosts[host.name].groups:
self.hosts[host.name].groups.append(group)
# extra checks on special groups all and ungrouped
# remove hosts from 'ungrouped' if they became member of other groups
if 'ungrouped' in self.groups:
ungrouped = self.groups['ungrouped']
# loop on a copy of ungrouped hosts, as we want to change that list
for host in frozenset(ungrouped.hosts):
if len(host.groups) > 1:
host.groups.remove(ungrouped)
ungrouped.hosts.remove(host)
# remove hosts from 'all' if they became member of other groups
# all should only contain direct children, not grandchildren
# direct children should have depth == 1
if 'all' in self.groups:
allgroup = self.groups['all' ]
# loop on a copy of all's child groups, as we want to change that list
for group in allgroup.child_groups[:]:
# groups might once have been added to all, and later be added
# to another group: we need to remove the link with all then
if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
# real children of all have just 1 parent, all
# this one has more, so not a direct child of all anymore
group.parent_groups.remove(allgroup)
allgroup.child_groups.remove(group)
elif allgroup not in group.parent_groups:
# this group was once added to all, but doesn't list it as
# a parent any more; the info in the group is the correct
# info
allgroup.child_groups.remove(group)
def _add_group(self, group):
""" Merge an existing group or add a new one;
Track parent and child groups, and hosts of the new one """
if group.name not in self.groups:
# it's brand new, add him!
self.groups[group.name] = group
# the Group class does not (yet) implement __eq__/__ne__,
# so unlike Host we do a regular comparison here
if self.groups[group.name] != group:
# different object, merge
self._merge_groups(self.groups[group.name], group)
def _add_host(self, host):
if host.name not in self.hosts:
# Papa's got a brand new host
self.hosts[host.name] = host
# because the __eq__/__ne__ methods in Host() compare the
# name fields rather than references, we use id() here to
# do the object comparison for merges
if self.hosts[host.name] != host:
# different object, merge
self._merge_hosts(self.hosts[host.name], host)
def _merge_groups(self, group, newgroup):
""" Merge all of instance newgroup into group,
update parent/child relationships
group lists may still contain group objects that exist in self with
same name, but were instantiated as different objects in some other
inventory parser; these are handled later """
# name
if group.name != newgroup.name:
raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
# depth
group.depth = max([group.depth, newgroup.depth])
# hosts list (host objects are by now already added to self.hosts)
for host in newgroup.hosts:
grouphosts = dict([(h.name, h) for h in group.hosts])
if host.name in grouphosts:
# same host name but different object, merge
self._merge_hosts(grouphosts[host.name], host)
else:
# new membership, add host to group from self
# group from self will also be added again to host.groups, but
# as different object
group.add_host(self.hosts[host.name])
# now remove the old group object from host.groups
for hostgroup in [g for g in host.groups]:
if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
self.hosts[host.name].groups.remove(hostgroup)
# group child membership relation
for newchild in newgroup.child_groups:
# dict with existing child groups:
childgroups = dict([(g.name, g) for g in group.child_groups])
# check if child of new group is already known as a child
if newchild.name not in childgroups:
self.groups[group.name].add_child_group(newchild)
# group parent membership relation
for newparent in newgroup.parent_groups:
# dict with existing parent groups:
parentgroups = dict([(g.name, g) for g in group.parent_groups])
# check if parent of new group is already known as a parent
if newparent.name not in parentgroups:
if newparent.name not in self.groups:
# group does not exist yet in self, import him
self.groups[newparent.name] = newparent
# group now exists but not yet as a parent here
self.groups[newparent.name].add_child_group(group)
# variables
group.vars = combine_vars(group.vars, newgroup.vars)
def _merge_hosts(self, host, newhost):
""" Merge all of instance newhost into host """
# name
if host.name != newhost.name:
raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
# group membership relation
for newgroup in newhost.groups:
# dict with existing groups:
hostgroups = dict([(g.name, g) for g in host.groups])
# check if new group is already known as a group
if newgroup.name not in hostgroups:
if newgroup.name not in self.groups:
# group does not exist yet in self, import him
self.groups[newgroup.name] = newgroup
# group now exists but doesn't have host yet
self.groups[newgroup.name].add_host(host)
# variables
host.vars = combine_vars(host.vars, newhost.vars)
def get_host_variables(self, host):
""" Gets additional host variables from all inventories """
vars = {}
for i in self.parsers:
vars.update(i.get_host_variables(host))
return vars
Improve inventory script error messages. (#17589)
When an inventory file looks executable (with a #!) but
isn't, the error message could be confusing. Especially
if the inventory file was named something like 'inventory'
or 'hosts'. Add some context and quote the filename.
This is based on https://github.com/ansible/ansible/pull/15758
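A minimal standalone sketch (not the Ansible implementation itself) of the behaviour the patch below targets: a file whose first line is a shebang is only treated as an inventory script when it is actually executable, and otherwise the user gets a quoted, self-explanatory hint.
import os

def inventory_script_hint(path):
    """Return a hint when a file has a shebang but is not marked executable."""
    with open(path) as handle:
        first_line = handle.readline()
    if first_line.startswith('#!') and not os.access(path, os.X_OK):
        return ("The inventory file '%s' looks like it should be an executable "
                "inventory script, but is not marked executable. Perhaps you "
                "want to correct this with `chmod +x %s`?" % (path, path))
    return None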
# (c) 2013, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.vars import combine_vars
#FIXME: make into plugins
from ansible.inventory.ini import InventoryParser as InventoryINIParser
from ansible.inventory.yaml import InventoryParser as InventoryYAMLParser
from ansible.inventory.script import InventoryScript
__all__ = ['get_file_parser']
def get_file_parser(hostsfile, groups, loader):
# check to see if the specified file starts with a
# shebang (#!/), so if an error is raised by the parser
# class we can show a more apropos error
shebang_present = False
processed = False
myerr = []
parser = None
try:
inv_file = open(hostsfile)
first_line = inv_file.readlines()[0]
inv_file.close()
if first_line.startswith('#!'):
shebang_present = True
except:
pass
#FIXME: make this 'plugin loop'
# script
if loader.is_executable(hostsfile):
try:
parser = InventoryScript(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append(str(e))
elif shebang_present:
myerr.append("The inventory file \'%s\' looks like it should be an executable inventory script, but is not marked executable. Perhaps you want to correct this with `chmod +x %s`?" % (hostsfile, hostsfile))
# YAML/JSON
if not processed and not shebang_present and os.path.splitext(hostsfile)[-1] in C.YAML_FILENAME_EXTENSIONS:
try:
parser = InventoryYAMLParser(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append(str(e))
# ini
if not processed and not shebang_present:
try:
parser = InventoryINIParser(loader=loader, groups=groups, filename=hostsfile)
processed = True
except Exception as e:
myerr.append(str(e))
if not processed and myerr:
raise AnsibleError('\n'.join(myerr))
return parser
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
if groups is None:
groups = dict()
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
self.parsers = []
self.hosts = {}
self.groups = groups
self._loader = loader
for i in self.names:
# Skip files that end with certain extensions or characters
if any(i.endswith(ext) for ext in C.DEFAULT_INVENTORY_IGNORE):
continue
# Skip hidden files
if i.startswith('.') and not i.startswith('./'):
continue
# These are things inside of an inventory basedir
if i in ("host_vars", "group_vars", "vars_plugins"):
continue
fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath):
parser = InventoryDirectory(loader=loader, groups=groups, filename=fullpath)
else:
parser = get_file_parser(fullpath, self.groups, loader)
if parser is None:
#FIXME: needs to use display
import warnings
warnings.warn("Could not find parser for %s, skipping" % fullpath)
continue
self.parsers.append(parser)
# retrieve all groups and hosts from the parser and add them to
# self, don't look at group lists yet, to avoid
# recursion trouble, but just make sure all objects exist in self
newgroups = parser.groups.values()
for group in newgroups:
for host in group.hosts:
self._add_host(host)
for group in newgroups:
self._add_group(group)
# now check the objects lists so they contain only objects from
# self; membership data in groups is already fine (except all &
# ungrouped, see later), but might still reference objects not in self
for group in self.groups.values():
# iterate on a copy of the lists, as those lists get changed in
# the loop
# list with group's child group objects:
for child in group.child_groups[:]:
if child != self.groups[child.name]:
group.child_groups.remove(child)
group.child_groups.append(self.groups[child.name])
# list with group's parent group objects:
for parent in group.parent_groups[:]:
if parent != self.groups[parent.name]:
group.parent_groups.remove(parent)
group.parent_groups.append(self.groups[parent.name])
# list with group's host objects:
for host in group.hosts[:]:
if host != self.hosts[host.name]:
group.hosts.remove(host)
group.hosts.append(self.hosts[host.name])
# also check here that the group that contains host, is
# also contained in the host's group list
if group not in self.hosts[host.name].groups:
self.hosts[host.name].groups.append(group)
# extra checks on special groups all and ungrouped
# remove hosts from 'ungrouped' if they became member of other groups
if 'ungrouped' in self.groups:
ungrouped = self.groups['ungrouped']
# loop on a copy of ungrouped hosts, as we want to change that list
for host in frozenset(ungrouped.hosts):
if len(host.groups) > 1:
host.groups.remove(ungrouped)
ungrouped.hosts.remove(host)
# remove hosts from 'all' if they became member of other groups
# all should only contain direct children, not grandchildren
# direct children should have depth == 1
if 'all' in self.groups:
allgroup = self.groups['all' ]
# loop on a copy of all's child groups, as we want to change that list
for group in allgroup.child_groups[:]:
# groups might once have been added to all, and later be added
# to another group: we need to remove the link with all then
if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
# real children of all have just 1 parent, all
# this one has more, so not a direct child of all anymore
group.parent_groups.remove(allgroup)
allgroup.child_groups.remove(group)
elif allgroup not in group.parent_groups:
# this group was once added to all, but doesn't list it as
# a parent any more; the info in the group is the correct
# info
allgroup.child_groups.remove(group)
def _add_group(self, group):
""" Merge an existing group or add a new one;
Track parent and child groups, and hosts of the new one """
if group.name not in self.groups:
# it's brand new, add him!
self.groups[group.name] = group
# the Group class does not (yet) implement __eq__/__ne__,
# so unlike Host we do a regular comparison here
if self.groups[group.name] != group:
# different object, merge
self._merge_groups(self.groups[group.name], group)
def _add_host(self, host):
if host.name not in self.hosts:
# Papa's got a brand new host
self.hosts[host.name] = host
# because the __eq__/__ne__ methods in Host() compare the
# name fields rather than references, we use id() here to
# do the object comparison for merges
if self.hosts[host.name] != host:
# different object, merge
self._merge_hosts(self.hosts[host.name], host)
def _merge_groups(self, group, newgroup):
""" Merge all of instance newgroup into group,
update parent/child relationships
group lists may still contain group objects that exist in self with
same name, but were instantiated as different objects in some other
inventory parser; these are handled later """
# name
if group.name != newgroup.name:
raise AnsibleError("Cannot merge inventory group %s with %s" % (group.name, newgroup.name))
# depth
group.depth = max([group.depth, newgroup.depth])
# hosts list (host objects are by now already added to self.hosts)
for host in newgroup.hosts:
grouphosts = dict([(h.name, h) for h in group.hosts])
if host.name in grouphosts:
# same host name but different object, merge
self._merge_hosts(grouphosts[host.name], host)
else:
# new membership, add host to group from self
# group from self will also be added again to host.groups, but
# as different object
group.add_host(self.hosts[host.name])
# now remove the old group object from host.groups
for hostgroup in [g for g in host.groups]:
if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
self.hosts[host.name].groups.remove(hostgroup)
# group child membership relation
for newchild in newgroup.child_groups:
# dict with existing child groups:
childgroups = dict([(g.name, g) for g in group.child_groups])
# check if child of new group is already known as a child
if newchild.name not in childgroups:
self.groups[group.name].add_child_group(newchild)
# group parent membership relation
for newparent in newgroup.parent_groups:
# dict with existing parent groups:
parentgroups = dict([(g.name, g) for g in group.parent_groups])
# check if parent of new group is already known as a parent
if newparent.name not in parentgroups:
if newparent.name not in self.groups:
# group does not exist yet in self, import him
self.groups[newparent.name] = newparent
# group now exists but not yet as a parent here
self.groups[newparent.name].add_child_group(group)
# variables
group.vars = combine_vars(group.vars, newgroup.vars)
def _merge_hosts(self, host, newhost):
""" Merge all of instance newhost into host """
# name
if host.name != newhost.name:
raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
# group membership relation
for newgroup in newhost.groups:
# dict with existing groups:
hostgroups = dict([(g.name, g) for g in host.groups])
# check if new group is already known as a group
if newgroup.name not in hostgroups:
if newgroup.name not in self.groups:
# group does not exist yet in self, import him
self.groups[newgroup.name] = newgroup
# group now exists but doesn't have host yet
self.groups[newgroup.name].add_host(host)
# variables
host.vars = combine_vars(host.vars, newhost.vars)
def get_host_variables(self, host):
""" Gets additional host variables from all inventories """
vars = {}
for i in self.parsers:
vars.update(i.get_host_variables(host))
return vars
|
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# FIXME: copied mostly from old code, needs py3 improvements
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import textwrap
import os
import random
import subprocess
import sys
import time
import logging
import getpass
from struct import unpack, pack
from termios import TIOCGWINSZ
from multiprocessing import Lock
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.color import stringc
from ansible.utils.unicode import to_bytes, to_unicode
try:
# Python 2
input = raw_input
except NameError:
# Python 3
pass
# These are module level as we currently fork and serialize the whole process and locks in the objects don't play well with that
debug_lock = Lock()
#TODO: make this a logging callback instead
if C.DEFAULT_LOG_PATH:
path = C.DEFAULT_LOG_PATH
if (os.path.exists(path) and not os.access(path, os.W_OK)) or not os.access(os.path.dirname(path), os.W_OK):
print("[WARNING]: log file at %s is not writeable, aborting\n" % path, file=sys.stderr)
logging.basicConfig(filename=path, level=logging.DEBUG, format='%(asctime)s %(name)s %(message)s')
mypid = str(os.getpid())
user = getpass.getuser()
logger = logging.getLogger("p=%s u=%s | " % (mypid, user))
else:
logger = None
class Display:
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.cowsay = None
self.noncow = os.getenv("ANSIBLE_COW_SELECTION",None)
self.set_cowsay_info()
self._set_column_width()
def set_cowsay_info(self):
if not C.ANSIBLE_NOCOWS:
if os.path.exists("/usr/bin/cowsay"):
self.cowsay = "/usr/bin/cowsay"
elif os.path.exists("/usr/games/cowsay"):
self.cowsay = "/usr/games/cowsay"
elif os.path.exists("/usr/local/bin/cowsay"):
# BSD path for cowsay
self.cowsay = "/usr/local/bin/cowsay"
elif os.path.exists("/opt/local/bin/cowsay"):
# MacPorts path for cowsay
self.cowsay = "/opt/local/bin/cowsay"
if self.cowsay and self.noncow == 'random':
cmd = subprocess.Popen([self.cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
cows = out.split()
cows.append(False)
self.noncow = random.choice(cows)
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
# FIXME: this needs to be implemented
#msg = utils.sanitize_output(msg)
if color:
msg = stringc(msg, color)
if not log_only:
if not msg.endswith(u'\n'):
msg2 = msg + u'\n'
else:
msg2 = msg
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr))
if not stderr:
sys.stdout.write(msg2)
sys.stdout.flush()
else:
sys.stderr.write(msg2)
sys.stderr.flush()
if logger and not screen_only:
msg2 = msg.lstrip(u'\n')
msg2 = to_bytes(msg2)
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr))
if color == 'red':
logger.error(msg2)
else:
logger.info(msg2)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg):
if C.DEFAULT_DEBUG:
debug_lock.acquire()
self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color='dark gray')
debug_lock.release()
def verbose(self, msg, host=None, caplevel=2):
# FIXME: this needs to be implemented
#msg = utils.sanitize_output(msg)
if self.verbosity > caplevel:
if host is None:
self.display(msg, color='blue')
else:
self.display("<%s> %s" % (host, msg), color='blue', screen_only=True)
def deprecated(self, msg, version=None, removed=False):
''' used to print out a deprecation message.'''
if not removed and not C.DEPRECATION_WARNINGS:
return
if not removed:
if version:
new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
else:
new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
else:
raise AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
wrapped = textwrap.wrap(new_msg, self.columns, replace_whitespace=False, drop_whitespace=False)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in self._deprecations:
self.display(new_msg.strip(), color='purple', stderr=True)
self._deprecations[new_msg] = 1
def warning(self, msg):
new_msg = "\n[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in self._warns:
self.display(new_msg, color='bright purple', stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None):
'''
Prints a header-looking line with stars taking up to 80 columns
of width (3 columns, minimum)
'''
if self.cowsay:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
#FIXME: make this dynamic on tty size (look at ansible-doc)
msg = msg.strip()
star_len = (79 - len(msg))
if star_len < 0:
star_len = 3
stars = "*" * star_len
self.display("\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if ": [" in msg:
msg = msg.replace("[","")
if msg.endswith("]"):
msg = msg[:-1]
runcmd = [self.cowsay,"-W", "60"]
if self.noncow:
runcmd.append('-f')
runcmd.append(self.noncow)
runcmd.append(msg)
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display("%s\n" % out, color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = msg
if new_msg not in self._errors:
self.display(new_msg, color='red', stderr=True)
self._errors[new_msg] = 1
def prompt(self, msg):
prompt_string = to_bytes(msg, encoding=self._output_encoding())
if sys.version_info >= (3,):
# Convert back into text on python3. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_unicode(prompt_string)
return input(prompt_string)
@staticmethod
def _output_encoding(stderr=False):
if stderr:
return sys.stderr.encoding or 'utf-8'
return sys.stdout.encoding or 'utf-8'
def _set_column_width(self):
if os.isatty(0):
tty_size = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size)
Fix display when run through a testing framework that overrides stderr/stdout
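A minimal Python 3 sketch (not the Ansible code itself) of why the patch below moves the encoding lookup from the stream to the locale: test frameworks often replace sys.stdout with an in-memory buffer whose encoding attribute is None or missing, so asking the stream can pick the wrong codec, while locale.getpreferredencoding() keeps working (with the obsolete mac-roman value mapped to utf-8, as in the patch).
import io
import locale
import sys

def stream_encoding(stream):
    # pre-patch behaviour: trust whatever the (possibly replaced) stream reports
    return getattr(stream, 'encoding', None) or 'utf-8'

def locale_encoding():
    # post-patch behaviour: ask the locale instead of the stream
    encoding = locale.getpreferredencoding()
    return 'utf-8' if encoding in ('mac-roman',) else encoding

if __name__ == '__main__':
    sys.stdout = io.StringIO()  # what a test runner might install
    print("stream-based: %s" % stream_encoding(sys.stdout), file=sys.stderr)
    print("locale-based: %s" % locale_encoding(), file=sys.stderr)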
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# FIXME: copied mostly from old code, needs py3 improvements
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import textwrap
import os
import random
import subprocess
import sys
import time
import locale
import logging
import getpass
from struct import unpack, pack
from termios import TIOCGWINSZ
from multiprocessing import Lock
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.color import stringc
from ansible.utils.unicode import to_bytes, to_unicode
try:
# Python 2
input = raw_input
except NameError:
# Python 3
pass
# These are module level as we currently fork and serialize the whole process and locks in the objects don't play well with that
debug_lock = Lock()
#TODO: make this a logging callback instead
if C.DEFAULT_LOG_PATH:
path = C.DEFAULT_LOG_PATH
if (os.path.exists(path) and not os.access(path, os.W_OK)) or not os.access(os.path.dirname(path), os.W_OK):
print("[WARNING]: log file at %s is not writeable, aborting\n" % path, file=sys.stderr)
logging.basicConfig(filename=path, level=logging.DEBUG, format='%(asctime)s %(name)s %(message)s')
mypid = str(os.getpid())
user = getpass.getuser()
logger = logging.getLogger("p=%s u=%s | " % (mypid, user))
else:
logger = None
class Display:
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.cowsay = None
self.noncow = os.getenv("ANSIBLE_COW_SELECTION",None)
self.set_cowsay_info()
self._set_column_width()
def set_cowsay_info(self):
if not C.ANSIBLE_NOCOWS:
if os.path.exists("/usr/bin/cowsay"):
self.cowsay = "/usr/bin/cowsay"
elif os.path.exists("/usr/games/cowsay"):
self.cowsay = "/usr/games/cowsay"
elif os.path.exists("/usr/local/bin/cowsay"):
# BSD path for cowsay
self.cowsay = "/usr/local/bin/cowsay"
elif os.path.exists("/opt/local/bin/cowsay"):
# MacPorts path for cowsay
self.cowsay = "/opt/local/bin/cowsay"
if self.cowsay and self.noncow == 'random':
cmd = subprocess.Popen([self.cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
cows = out.split()
cows.append(False)
self.noncow = random.choice(cows)
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
# FIXME: this needs to be implemented
#msg = utils.sanitize_output(msg)
if color:
msg = stringc(msg, color)
if not log_only:
if not msg.endswith(u'\n'):
msg2 = msg + u'\n'
else:
msg2 = msg
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr))
if not stderr:
sys.stdout.write(msg2)
sys.stdout.flush()
else:
sys.stderr.write(msg2)
sys.stderr.flush()
if logger and not screen_only:
msg2 = msg.lstrip(u'\n')
msg2 = to_bytes(msg2)
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr))
if color == 'red':
logger.error(msg2)
else:
logger.info(msg2)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg):
if C.DEFAULT_DEBUG:
debug_lock.acquire()
self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color='dark gray')
debug_lock.release()
def verbose(self, msg, host=None, caplevel=2):
# FIXME: this needs to be implemented
#msg = utils.sanitize_output(msg)
if self.verbosity > caplevel:
if host is None:
self.display(msg, color='blue')
else:
self.display("<%s> %s" % (host, msg), color='blue', screen_only=True)
def deprecated(self, msg, version=None, removed=False):
''' used to print out a deprecation message.'''
if not removed and not C.DEPRECATION_WARNINGS:
return
if not removed:
if version:
new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
else:
new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
else:
raise AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
wrapped = textwrap.wrap(new_msg, self.columns, replace_whitespace=False, drop_whitespace=False)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in self._deprecations:
self.display(new_msg.strip(), color='purple', stderr=True)
self._deprecations[new_msg] = 1
def warning(self, msg):
new_msg = "\n[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in self._warns:
self.display(new_msg, color='bright purple', stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None):
'''
Prints a header-looking line with stars taking up to 80 columns
of width (3 columns, minimum)
'''
if self.cowsay:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
#FIXME: make this dynamic on tty size (look at ansible-doc)
msg = msg.strip()
star_len = (79 - len(msg))
if star_len < 0:
star_len = 3
stars = "*" * star_len
self.display("\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if ": [" in msg:
msg = msg.replace("[","")
if msg.endswith("]"):
msg = msg[:-1]
runcmd = [self.cowsay,"-W", "60"]
if self.noncow:
runcmd.append('-f')
runcmd.append(self.noncow)
runcmd.append(msg)
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display("%s\n" % out, color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = msg
if new_msg not in self._errors:
self.display(new_msg, color='red', stderr=True)
self._errors[new_msg] = 1
def prompt(self, msg):
prompt_string = to_bytes(msg, encoding=self._output_encoding())
if sys.version_info >= (3,):
# Convert back into text on python3. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_unicode(prompt_string)
return input(prompt_string)
@staticmethod
def _output_encoding(stderr=False):
encoding = locale.getpreferredencoding()
# https://bugs.python.org/issue6202
# Python2 hardcodes an obsolete value on Mac. Use MacOSX defaults
# instead.
if encoding in ('mac-roman',):
encoding = 'utf-8'
return encoding
def _set_column_width(self):
if os.isatty(0):
tty_size = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size)
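# Added standalone sketch (not part of the original file): the same TIOCGWINSZ
# ioctl used in _set_column_width(), wrapped in a helper with a fallback when
# stdin is not a tty. POSIX only; the helper name is illustrative.
def _example_tty_columns(default=79):
    import fcntl
    import os
    import termios
    from struct import pack, unpack
    if not os.isatty(0):
        return default
    # returned fields are (rows, columns, xpixels, ypixels)
    winsize = unpack('HHHH', fcntl.ioctl(0, termios.TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))
    return max(default, winsize[1])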
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from collections import defaultdict
from collections import MutableMapping
from six import iteritems
from jinja2.exceptions import UndefinedError
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound
from ansible.inventory.host import Host
from ansible.parsing import DataLoader
from ansible.plugins import lookup_loader
from ansible.plugins.cache import FactCache
from ansible.template import Templar
from ansible.utils.debug import debug
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.vars import combine_vars
from ansible.vars.hostvars import HostVars
from ansible.vars.unsafe_proxy import wrap_var
VARIABLE_CACHE = dict()
HOSTVARS_CACHE = dict()
try:
from __main__ import display
display = display
except ImportError:
from ansible.utils.display import Display
display = Display()
def preprocess_vars(a):
'''
Ensures that vars contained in the parameter passed in are
returned as a list of dictionaries, to ensure for instance
that vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [ a ]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
class VariableManager:
def __init__(self):
self._fact_cache = FactCache()
self._nonpersistent_fact_cache = defaultdict(dict)
self._vars_cache = defaultdict(dict)
self._extra_vars = defaultdict(dict)
self._host_vars_files = defaultdict(dict)
self._group_vars_files = defaultdict(dict)
self._inventory = None
self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
def __getstate__(self):
data = dict(
fact_cache = self._fact_cache.copy(),
np_fact_cache = self._nonpersistent_fact_cache.copy(),
vars_cache = self._vars_cache.copy(),
extra_vars = self._extra_vars.copy(),
host_vars_files = self._host_vars_files.copy(),
group_vars_files = self._group_vars_files.copy(),
omit_token = self._omit_token,
)
return data
def __setstate__(self, data):
self._fact_cache = data.get('fact_cache', defaultdict(dict))
self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
self._vars_cache = data.get('vars_cache', defaultdict(dict))
self._extra_vars = data.get('extra_vars', dict())
self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
self._inventory = None
def _get_cache_entry(self, play=None, host=None, task=None):
play_id = "NONE"
if play:
play_id = play._uuid
host_id = "NONE"
if host:
host_id = host.get_name()
task_id = "NONE"
if task:
task_id = task._uuid
return "PLAY:%s;HOST:%s;TASK:%s" % (play_id, host_id, task_id)
@property
def extra_vars(self):
''' ensures a clean copy of the extra_vars are made '''
return self._extra_vars.copy()
@extra_vars.setter
def extra_vars(self, value):
''' ensures a clean copy of the extra_vars are used to set the value '''
assert isinstance(value, MutableMapping)
self._extra_vars = value.copy()
def set_inventory(self, inventory):
self._inventory = inventory
def _preprocess_vars(self, a):
'''
Ensures that vars contained in the parameter passed in are
returned as a list of dictionaries, to ensure for instance
that vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [ a ]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
'''
Returns the variables, with optional "context" given via the parameters
for the play, host, and task (which could possibly result in different
sets of variables being returned due to the additional context).
The order of precedence is:
- play->roles->get_default_vars (if there is a play context)
- group_vars_files[host] (if there is a host context)
- host_vars_files[host] (if there is a host context)
- host->get_vars (if there is a host context)
- fact_cache[host] (if there is a host context)
- play vars (if there is a play context)
- play vars_files (if there's no host context, ignore
file names that cannot be templated)
- task->get_vars (if there is a task context)
- vars_cache[host] (if there is a host context)
- extra vars
'''
debug("in VariableManager get_vars()")
cache_entry = self._get_cache_entry(play=play, host=host, task=task)
if cache_entry in VARIABLE_CACHE and use_cache:
debug("vars are cached, returning them now")
return VARIABLE_CACHE[cache_entry]
all_vars = defaultdict(dict)
magic_variables = self._get_magic_variables(
loader=loader,
play=play,
host=host,
task=task,
include_hostvars=include_hostvars,
include_delegate_to=include_delegate_to,
)
if play:
# first we compile any vars specified in defaults/main.yml
# for all roles within the specified play
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_default_vars())
# if we have a task in this context, and that task has a role, make
# sure it sees its defaults above any other roles, as we previously
# (v1) made sure each task had a copy of its roles default vars
if task and task._role is not None:
all_vars = combine_vars(all_vars, task._role.get_default_vars())
if host:
# next, if a host is specified, we load any vars from group_vars
# files and then any vars from host_vars files which may apply to
# this host or the groups it belongs to
# we merge in vars from groups specified in the inventory (INI or script)
all_vars = combine_vars(all_vars, host.get_group_vars())
# then we merge in the special 'all' group_vars first, if they exist
if 'all' in self._group_vars_files:
data = preprocess_vars(self._group_vars_files['all'])
for item in data:
all_vars = combine_vars(all_vars, item)
for group in host.get_groups():
if group.name in self._group_vars_files and group.name != 'all':
for data in self._group_vars_files[group.name]:
data = preprocess_vars(data)
for item in data:
all_vars = combine_vars(all_vars, item)
# then we merge in vars from the host specified in the inventory (INI or script)
all_vars = combine_vars(all_vars, host.get_vars())
# then we merge in the host_vars/<hostname> file, if it exists
host_name = host.get_name()
if host_name in self._host_vars_files:
for data in self._host_vars_files[host_name]:
data = preprocess_vars(data)
for item in data:
all_vars = combine_vars(all_vars, item)
# finally, the facts caches for this host, if it exists
try:
host_facts = wrap_var(self._fact_cache.get(host.name, dict()))
all_vars = combine_vars(all_vars, host_facts)
except KeyError:
pass
if play:
all_vars = combine_vars(all_vars, play.get_vars())
for vars_file_item in play.get_vars_files():
# create a set of temporary vars here, which incorporate the extra
# and magic vars so we can properly template the vars_files entries
temp_vars = combine_vars(all_vars, self._extra_vars)
temp_vars = combine_vars(temp_vars, magic_variables)
templar = Templar(loader=loader, variables=temp_vars)
# we assume each item in the list is itself a list, as we
# support "conditional includes" for vars_files, which mimics
# the with_first_found mechanism.
#vars_file_list = templar.template(vars_file_item)
vars_file_list = vars_file_item
if not isinstance(vars_file_list, list):
vars_file_list = [ vars_file_list ]
# now we iterate through the (potential) files, and break out
# as soon as we read one from the list. If none are found, we
# raise an error, which is silently ignored at this point.
try:
for vars_file in vars_file_list:
vars_file = templar.template(vars_file)
try:
data = preprocess_vars(loader.load_from_file(vars_file))
if data is not None:
for item in data:
all_vars = combine_vars(all_vars, item)
break
except AnsibleFileNotFound as e:
# we continue on loader failures
continue
except AnsibleParserError as e:
raise
else:
raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
except (UndefinedError, AnsibleUndefinedVariable):
if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item)
else:
# we do not have a full context here, and the missing variable could be
# because of that, so just show a warning and continue
display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
continue
if not C.DEFAULT_PRIVATE_ROLE_VARS:
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_vars(include_params=False))
if task:
if task._role:
all_vars = combine_vars(all_vars, task._role.get_vars())
all_vars = combine_vars(all_vars, task.get_vars())
if host:
all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))
all_vars = combine_vars(all_vars, self._extra_vars)
all_vars = combine_vars(all_vars, magic_variables)
# if we have a task and we're delegating to another host, figure out the
# variables for that host now so we don't have to rely on hostvars later
if task and task.delegate_to is not None and include_delegate_to:
all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars)
#VARIABLE_CACHE[cache_entry] = all_vars
debug("done with get_vars()")
return all_vars
def _get_magic_variables(self, loader, play, host, task, include_hostvars, include_delegate_to):
'''
Returns a dictionary of so-called "magic" variables in Ansible,
which are special variables we set internally for use.
'''
variables = dict()
variables['playbook_dir'] = loader.get_basedir()
if host:
variables['group_names'] = [group.name for group in host.get_groups()]
if self._inventory is not None:
variables['groups'] = dict()
for (group_name, group) in iteritems(self._inventory.groups):
variables['groups'][group_name] = [h.name for h in group.get_hosts()]
if include_hostvars:
hostvars_cache_entry = self._get_cache_entry(play=play)
if hostvars_cache_entry in HOSTVARS_CACHE:
hostvars = HOSTVARS_CACHE[hostvars_cache_entry]
else:
hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self)
HOSTVARS_CACHE[hostvars_cache_entry] = hostvars
variables['hostvars'] = hostvars
variables['vars'] = hostvars[host.get_name()]
if task:
if task._role:
variables['role_path'] = task._role._role_path
if self._inventory is not None:
variables['inventory_dir'] = self._inventory.basedir()
variables['inventory_file'] = self._inventory.src()
if play:
# add the list of hosts in the play, as adjusted for limit/filters
# DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
# however this would take work in the templating engine, so for now
# we'll add both so we can give users something transitional to use
host_list = [x.name for x in self._inventory.get_hosts()]
variables['play_hosts'] = host_list
variables['ansible_play_hosts'] = host_list
# the 'omit' value allows params to be left out if the variable they are based on is undefined
variables['omit'] = self._omit_token
variables['ansible_version'] = CLI.version_info(gitinfo=False)
return variables
def _get_delegated_vars(self, loader, play, task, existing_variables):
# we unfortunately need to template the delegate_to field here,
# as we're fetching vars before post_validate has been called on
# the task that has been passed in
vars_copy = existing_variables.copy()
templar = Templar(loader=loader, variables=vars_copy)
items = []
if task.loop is not None:
if task.loop in lookup_loader:
#TODO: remove convert_bare true and deprecate this in with_
try:
loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar, loader=loader, fail_on_undefined=True, convert_bare=True)
except AnsibleUndefinedVariable as e:
if 'has no attribute' in str(e):
loop_terms = []
display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
else:
raise
items = lookup_loader.get(task.loop, loader=loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop)
else:
items = [None]
delegated_host_vars = dict()
for item in items:
# update the variables with the item value for templating, in case we need it
if item is not None:
vars_copy['item'] = item
templar.set_available_variables(vars_copy)
delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
if delegated_host_name in delegated_host_vars:
# no need to repeat ourselves, as the delegate_to value
# does not appear to be tied to the loop item variable
continue
# a dictionary of variables to use if we have to create a new host below
new_delegated_host_vars = dict(
ansible_host=delegated_host_name,
ansible_user=C.DEFAULT_REMOTE_USER,
ansible_connection=C.DEFAULT_TRANSPORT,
)
# now try to find the delegated-to host in inventory, or failing that,
# create a new host on the fly so we can fetch variables for it
delegated_host = None
if self._inventory is not None:
delegated_host = self._inventory.get_host(delegated_host_name)
# try looking it up based on the address field, and finally
# fall back to creating a host on the fly to use for the var lookup
if delegated_host is None:
for h in self._inventory.get_hosts(ignore_limits_and_restrictions=True):
# check if the address matches, or if both the delegated_to host
# and the current host are in the list of localhost aliases
if h.address == delegated_host_name or h.name in C.LOCALHOST and delegated_host_name in C.LOCALHOST:
delegated_host = h
break
else:
delegated_host = Host(name=delegated_host_name)
delegated_host.vars.update(new_delegated_host_vars)
else:
delegated_host = Host(name=delegated_host_name)
delegated_host.vars.update(new_delegated_host_vars)
# now we go fetch the vars for the delegated-to host and save them in our
# master dictionary of variables to be used later in the TaskExecutor/PlayContext
delegated_host_vars[delegated_host_name] = self.get_vars(
loader=loader,
play=play,
host=delegated_host,
task=task,
include_delegate_to=False,
include_hostvars=False,
)
return delegated_host_vars
def _get_inventory_basename(self, path):
'''
Returns the basename minus the extension of the given path, so the
bare filename can be matched against host/group names later
'''
(name, ext) = os.path.splitext(os.path.basename(path))
if ext not in ('.yml', '.yaml'):
return os.path.basename(path)
else:
return name
def _load_inventory_file(self, path, loader):
'''
helper function, which loads the file and gets the
basename of the file without the extension
'''
if loader.is_directory(path):
data = dict()
try:
names = loader.list_directory(path)
except os.error as err:
raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))
# evaluate files in a stable order rather than whatever
# order the filesystem lists them.
names.sort()
# do not parse hidden files or dirs, e.g. .svn/
paths = [os.path.join(path, name) for name in names if not name.startswith('.')]
for p in paths:
_found, results = self._load_inventory_file(path=p, loader=loader)
if results is not None:
data = combine_vars(data, results)
else:
file_name, ext = os.path.splitext(path)
data = None
if not ext or ext not in C.YAML_FILENAME_EXTENSIONS:
for test_ext in C.YAML_FILENAME_EXTENSIONS:
new_path = path + test_ext
if loader.path_exists(new_path):
data = loader.load_from_file(new_path)
break
else:
if loader.path_exists(path):
data = loader.load_from_file(path)
name = self._get_inventory_basename(path)
return (name, data)
def add_host_vars_file(self, path, loader):
'''
Loads and caches a host_vars file in the _host_vars_files dict,
where the key to that dictionary is the basename of the file, minus
the extension, for matching against a given inventory host name
'''
(name, data) = self._load_inventory_file(path, loader)
if data:
if name not in self._host_vars_files:
self._host_vars_files[name] = []
self._host_vars_files[name].append(data)
return data
else:
return dict()
def add_group_vars_file(self, path, loader):
'''
Loads and caches a group_vars file in the _group_vars_files dict,
where the key to that dictionary is the basename of the file, minus
the extension, for matching against a given inventory group name
'''
(name, data) = self._load_inventory_file(path, loader)
if data:
if name not in self._group_vars_files:
self._group_vars_files[name] = []
self._group_vars_files[name].append(data)
return data
else:
return dict()
def set_host_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict)
if host.name not in self._fact_cache:
self._fact_cache[host.name] = facts
else:
try:
self._fact_cache.update(host.name, facts)
except KeyError:
self._fact_cache[host.name] = facts
def set_nonpersistent_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict)
if host.name not in self._nonpersistent_fact_cache:
self._nonpersistent_fact_cache[host.name] = facts
else:
try:
self._nonpersistent_fact_cache[host.name].update(facts)
except KeyError:
self._nonpersistent_fact_cache[host.name] = facts
def set_host_variable(self, host, varname, value):
'''
Sets a value in the vars_cache for a host.
'''
host_name = host.get_name()
if host_name not in self._vars_cache:
self._vars_cache[host_name] = dict()
self._vars_cache[host_name][varname] = value
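# Added illustrative sketch (not part of the original module): the precedence
# documented in get_vars() comes from folding sources into one mapping, later
# (higher-precedence) sources overriding earlier ones. combine_vars() also
# honours the 'merge' hash behaviour; this toy version shows only the default
# replace-style behaviour.
def _example_precedence_fold(*sources):
    result = {}
    for source in sources:
        result.update(source or {})
    return result

# e.g. role defaults < inventory host vars < extra vars:
# _example_precedence_fold({'port': 80}, {'port': 8080}, {'port': 9090})
# -> {'port': 9090}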
group_names should not include implied 'all', fixes #12763
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from collections import defaultdict
from collections import MutableMapping
from six import iteritems
from jinja2.exceptions import UndefinedError
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound
from ansible.inventory.host import Host
from ansible.parsing import DataLoader
from ansible.plugins import lookup_loader
from ansible.plugins.cache import FactCache
from ansible.template import Templar
from ansible.utils.debug import debug
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.vars import combine_vars
from ansible.vars.hostvars import HostVars
from ansible.vars.unsafe_proxy import wrap_var
VARIABLE_CACHE = dict()
HOSTVARS_CACHE = dict()
try:
from __main__ import display
display = display
except ImportError:
from ansible.utils.display import Display
display = Display()
def preprocess_vars(a):
'''
Ensures that vars contained in the parameter passed in are
returned as a list of dictionaries, to ensure for instance
that vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [ a ]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
class VariableManager:
def __init__(self):
self._fact_cache = FactCache()
self._nonpersistent_fact_cache = defaultdict(dict)
self._vars_cache = defaultdict(dict)
self._extra_vars = defaultdict(dict)
self._host_vars_files = defaultdict(dict)
self._group_vars_files = defaultdict(dict)
self._inventory = None
self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
def __getstate__(self):
data = dict(
fact_cache = self._fact_cache.copy(),
np_fact_cache = self._nonpersistent_fact_cache.copy(),
vars_cache = self._vars_cache.copy(),
extra_vars = self._extra_vars.copy(),
host_vars_files = self._host_vars_files.copy(),
group_vars_files = self._group_vars_files.copy(),
omit_token = self._omit_token,
)
return data
def __setstate__(self, data):
self._fact_cache = data.get('fact_cache', defaultdict(dict))
self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
self._vars_cache = data.get('vars_cache', defaultdict(dict))
self._extra_vars = data.get('extra_vars', dict())
self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
self._inventory = None
def _get_cache_entry(self, play=None, host=None, task=None):
play_id = "NONE"
if play:
play_id = play._uuid
host_id = "NONE"
if host:
host_id = host.get_name()
task_id = "NONE"
if task:
task_id = task._uuid
return "PLAY:%s;HOST:%s;TASK:%s" % (play_id, host_id, task_id)
@property
def extra_vars(self):
''' ensures a clean copy of the extra_vars are made '''
return self._extra_vars.copy()
@extra_vars.setter
def extra_vars(self, value):
''' ensures a clean copy of the extra_vars are used to set the value '''
assert isinstance(value, MutableMapping)
self._extra_vars = value.copy()
def set_inventory(self, inventory):
self._inventory = inventory
def _preprocess_vars(self, a):
'''
Ensures that vars contained in the parameter passed in are
returned as a list of dictionaries, to ensure for instance
that vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [ a ]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
'''
Returns the variables, with optional "context" given via the parameters
for the play, host, and task (which could possibly result in different
sets of variables being returned due to the additional context).
The order of precedence is:
- play->roles->get_default_vars (if there is a play context)
- group_vars_files[host] (if there is a host context)
- host_vars_files[host] (if there is a host context)
- host->get_vars (if there is a host context)
- fact_cache[host] (if there is a host context)
- play vars (if there is a play context)
- play vars_files (if there's no host context, ignore
file names that cannot be templated)
- task->get_vars (if there is a task context)
- vars_cache[host] (if there is a host context)
- extra vars
'''
debug("in VariableManager get_vars()")
cache_entry = self._get_cache_entry(play=play, host=host, task=task)
if cache_entry in VARIABLE_CACHE and use_cache:
debug("vars are cached, returning them now")
return VARIABLE_CACHE[cache_entry]
all_vars = defaultdict(dict)
magic_variables = self._get_magic_variables(
loader=loader,
play=play,
host=host,
task=task,
include_hostvars=include_hostvars,
include_delegate_to=include_delegate_to,
)
if play:
# first we compile any vars specified in defaults/main.yml
# for all roles within the specified play
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_default_vars())
# if we have a task in this context, and that task has a role, make
# sure it sees its defaults above any other roles, as we previously
# (v1) made sure each task had a copy of its roles default vars
if task and task._role is not None:
all_vars = combine_vars(all_vars, task._role.get_default_vars())
if host:
# next, if a host is specified, we load any vars from group_vars
# files and then any vars from host_vars files which may apply to
# this host or the groups it belongs to
# we merge in vars from groups specified in the inventory (INI or script)
all_vars = combine_vars(all_vars, host.get_group_vars())
# then we merge in the special 'all' group_vars first, if they exist
if 'all' in self._group_vars_files:
data = preprocess_vars(self._group_vars_files['all'])
for item in data:
all_vars = combine_vars(all_vars, item)
for group in host.get_groups():
if group.name in self._group_vars_files and group.name != 'all':
for data in self._group_vars_files[group.name]:
data = preprocess_vars(data)
for item in data:
all_vars = combine_vars(all_vars, item)
# then we merge in vars from the host specified in the inventory (INI or script)
all_vars = combine_vars(all_vars, host.get_vars())
# then we merge in the host_vars/<hostname> file, if it exists
host_name = host.get_name()
if host_name in self._host_vars_files:
for data in self._host_vars_files[host_name]:
data = preprocess_vars(data)
for item in data:
all_vars = combine_vars(all_vars, item)
# finally, the facts caches for this host, if it exists
try:
host_facts = wrap_var(self._fact_cache.get(host.name, dict()))
all_vars = combine_vars(all_vars, host_facts)
except KeyError:
pass
if play:
all_vars = combine_vars(all_vars, play.get_vars())
for vars_file_item in play.get_vars_files():
# create a set of temporary vars here, which incorporate the extra
# and magic vars so we can properly template the vars_files entries
temp_vars = combine_vars(all_vars, self._extra_vars)
temp_vars = combine_vars(temp_vars, magic_variables)
templar = Templar(loader=loader, variables=temp_vars)
# we assume each item in the list is itself a list, as we
# support "conditional includes" for vars_files, which mimics
# the with_first_found mechanism.
#vars_file_list = templar.template(vars_file_item)
vars_file_list = vars_file_item
if not isinstance(vars_file_list, list):
vars_file_list = [ vars_file_list ]
# now we iterate through the (potential) files, and break out
# as soon as we read one from the list. If none are found, we
# raise an error, which is silently ignored at this point.
try:
for vars_file in vars_file_list:
vars_file = templar.template(vars_file)
try:
data = preprocess_vars(loader.load_from_file(vars_file))
if data is not None:
for item in data:
all_vars = combine_vars(all_vars, item)
break
except AnsibleFileNotFound as e:
# we continue on loader failures
continue
except AnsibleParserError as e:
raise
else:
raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
except (UndefinedError, AnsibleUndefinedVariable):
if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item)
else:
# we do not have a full context here, and the missing variable could be
# because of that, so just show a warning and continue
display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
continue
if not C.DEFAULT_PRIVATE_ROLE_VARS:
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_vars(include_params=False))
if task:
if task._role:
all_vars = combine_vars(all_vars, task._role.get_vars())
all_vars = combine_vars(all_vars, task.get_vars())
if host:
all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))
all_vars = combine_vars(all_vars, self._extra_vars)
all_vars = combine_vars(all_vars, magic_variables)
# if we have a task and we're delegating to another host, figure out the
# variables for that host now so we don't have to rely on hostvars later
if task and task.delegate_to is not None and include_delegate_to:
all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars)
#VARIABLE_CACHE[cache_entry] = all_vars
debug("done with get_vars()")
return all_vars
def _get_magic_variables(self, loader, play, host, task, include_hostvars, include_delegate_to):
'''
Returns a dictionary of so-called "magic" variables in Ansible,
which are special variables we set internally for use.
'''
variables = dict()
variables['playbook_dir'] = loader.get_basedir()
if host:
variables['group_names'] = [group.name for group in host.get_groups() if group.name != 'all']
if self._inventory is not None:
variables['groups'] = dict()
for (group_name, group) in iteritems(self._inventory.groups):
variables['groups'][group_name] = [h.name for h in group.get_hosts()]
if include_hostvars:
hostvars_cache_entry = self._get_cache_entry(play=play)
if hostvars_cache_entry in HOSTVARS_CACHE:
hostvars = HOSTVARS_CACHE[hostvars_cache_entry]
else:
hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self)
HOSTVARS_CACHE[hostvars_cache_entry] = hostvars
variables['hostvars'] = hostvars
variables['vars'] = hostvars[host.get_name()]
if task:
if task._role:
variables['role_path'] = task._role._role_path
if self._inventory is not None:
variables['inventory_dir'] = self._inventory.basedir()
variables['inventory_file'] = self._inventory.src()
if play:
# add the list of hosts in the play, as adjusted for limit/filters
# DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
# however this would take work in the templating engine, so for now
# we'll add both so we can give users something transitional to use
host_list = [x.name for x in self._inventory.get_hosts()]
variables['play_hosts'] = host_list
variables['ansible_play_hosts'] = host_list
# the 'omit' value allows params to be left out if the variable they are based on is undefined
variables['omit'] = self._omit_token
variables['ansible_version'] = CLI.version_info(gitinfo=False)
return variables
def _get_delegated_vars(self, loader, play, task, existing_variables):
# we unfortunately need to template the delegate_to field here,
# as we're fetching vars before post_validate has been called on
# the task that has been passed in
vars_copy = existing_variables.copy()
templar = Templar(loader=loader, variables=vars_copy)
items = []
if task.loop is not None:
if task.loop in lookup_loader:
#TODO: remove convert_bare true and deprecate this in with_
try:
loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar, loader=loader, fail_on_undefined=True, convert_bare=True)
except AnsibleUndefinedVariable as e:
if 'has no attribute' in str(e):
loop_terms = []
display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
else:
raise
items = lookup_loader.get(task.loop, loader=loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop)
else:
items = [None]
delegated_host_vars = dict()
for item in items:
# update the variables with the item value for templating, in case we need it
if item is not None:
vars_copy['item'] = item
templar.set_available_variables(vars_copy)
delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
if delegated_host_name in delegated_host_vars:
# no need to repeat ourselves, as the delegate_to value
# does not appear to be tied to the loop item variable
continue
# a dictionary of variables to use if we have to create a new host below
new_delegated_host_vars = dict(
ansible_host=delegated_host_name,
ansible_user=C.DEFAULT_REMOTE_USER,
ansible_connection=C.DEFAULT_TRANSPORT,
)
# now try to find the delegated-to host in inventory, or failing that,
# create a new host on the fly so we can fetch variables for it
delegated_host = None
if self._inventory is not None:
delegated_host = self._inventory.get_host(delegated_host_name)
# try looking it up based on the address field, and finally
# fall back to creating a host on the fly to use for the var lookup
if delegated_host is None:
for h in self._inventory.get_hosts(ignore_limits_and_restrictions=True):
# check if the address matches, or if both the delegated_to host
# and the current host are in the list of localhost aliases
if h.address == delegated_host_name or h.name in C.LOCALHOST and delegated_host_name in C.LOCALHOST:
delegated_host = h
break
else:
delegated_host = Host(name=delegated_host_name)
delegated_host.vars.update(new_delegated_host_vars)
else:
delegated_host = Host(name=delegated_host_name)
delegated_host.vars.update(new_delegated_host_vars)
# now we go fetch the vars for the delegated-to host and save them in our
# master dictionary of variables to be used later in the TaskExecutor/PlayContext
delegated_host_vars[delegated_host_name] = self.get_vars(
loader=loader,
play=play,
host=delegated_host,
task=task,
include_delegate_to=False,
include_hostvars=False,
)
return delegated_host_vars
def _get_inventory_basename(self, path):
'''
Returns the basename minus the extension of the given path, so the
bare filename can be matched against host/group names later
'''
(name, ext) = os.path.splitext(os.path.basename(path))
if ext not in ('.yml', '.yaml'):
return os.path.basename(path)
else:
return name
def _load_inventory_file(self, path, loader):
'''
helper function, which loads the file and gets the
basename of the file without the extension
'''
if loader.is_directory(path):
data = dict()
try:
names = loader.list_directory(path)
except os.error as err:
raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))
# evaluate files in a stable order rather than whatever
# order the filesystem lists them.
names.sort()
# do not parse hidden files or dirs, e.g. .svn/
paths = [os.path.join(path, name) for name in names if not name.startswith('.')]
for p in paths:
_found, results = self._load_inventory_file(path=p, loader=loader)
if results is not None:
data = combine_vars(data, results)
else:
file_name, ext = os.path.splitext(path)
data = None
if not ext or ext not in C.YAML_FILENAME_EXTENSIONS:
for test_ext in C.YAML_FILENAME_EXTENSIONS:
new_path = path + test_ext
if loader.path_exists(new_path):
data = loader.load_from_file(new_path)
break
else:
if loader.path_exists(path):
data = loader.load_from_file(path)
name = self._get_inventory_basename(path)
return (name, data)
def add_host_vars_file(self, path, loader):
'''
Loads and caches a host_vars file in the _host_vars_files dict,
where the key to that dictionary is the basename of the file, minus
the extension, for matching against a given inventory host name
'''
(name, data) = self._load_inventory_file(path, loader)
if data:
if name not in self._host_vars_files:
self._host_vars_files[name] = []
self._host_vars_files[name].append(data)
return data
else:
return dict()
def add_group_vars_file(self, path, loader):
'''
Loads and caches a group_vars file in the _group_vars_files dict,
where the key to that dictionary is the basename of the file, minus
the extension, for matching against a given inventory group name
'''
(name, data) = self._load_inventory_file(path, loader)
if data:
if name not in self._group_vars_files:
self._group_vars_files[name] = []
self._group_vars_files[name].append(data)
return data
else:
return dict()
def set_host_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict)
if host.name not in self._fact_cache:
self._fact_cache[host.name] = facts
else:
try:
self._fact_cache.update(host.name, facts)
except KeyError:
self._fact_cache[host.name] = facts
def set_nonpersistent_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict)
if host.name not in self._nonpersistent_fact_cache:
self._nonpersistent_fact_cache[host.name] = facts
else:
try:
self._nonpersistent_fact_cache[host.name].update(facts)
except KeyError:
self._nonpersistent_fact_cache[host.name] = facts
def set_host_variable(self, host, varname, value):
'''
Sets a value in the vars_cache for a host.
'''
host_name = host.get_name()
if host_name not in self._vars_cache:
self._vars_cache[host_name] = dict()
self._vars_cache[host_name][varname] = value
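# Added illustrative sketch (not part of the original module): the omit
# placeholder built in __init__() is just a random, effectively unguessable
# string, so arguments set to `{{ omit }}` can be recognised and dropped later
# without colliding with real values.
def _example_make_omit_token():
    import os
    from hashlib import sha1
    return '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()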
|
from flask_restful.reqparse import Argument
from flask_restful.reqparse import RequestParser as OriginalRequestParser
class RequestParser(OriginalRequestParser):
def __init__(self, *args, **kargs):
arguments = kargs.pop('arguments', None)
select = kargs.pop('select', None)
update = kargs.pop('update', None)
remove = kargs.pop('remove', None)
super(RequestParser, self).__init__(*args, **kargs)
if arguments:
if select: # add just selected arguments
arguments = dict((k, arguments[k]) for k in select)
self.add_arguments(arguments)
if update:
self.update_arguments(update)
if remove:
self.remove_arguments(remove)
def add_arguments(self, arguments):
'''Add arguments to be parsed. Accepts either a dictionary of arguments
or a list of `Argument`.'''
if isinstance(arguments, dict): # {'argument': {'required': ...}, }
# expand each options dict into Argument keyword arguments
arguments = [Argument(name, **options) for name, options in arguments.items()]
for argument in arguments:
self.add_argument(argument)
def update_arguments(self, dictionary):
''' Update the arguments matching the given dictionary keys. '''
for arg in self.args:
if arg.name in dictionary.keys():
setattr(arg, arg.name, dictionary[arg.name])
def remove_arguments(self, names):
''' Remove the arguments matching the given names. '''
for name in names:
self.remove_argument(name)
def copy(self, update=None, remove=None):
''' Creates a copy with the same set of arguments. '''
parsercopy = super(RequestParser, self).copy()
if update:
parsercopy.update_arguments(update)
if remove:
parsercopy.remove_arguments(remove)
return parsercopy
Creating parse_args alias.
from flask_restful.reqparse import Argument
from flask_restful.reqparse import RequestParser as OriginalRequestParser
class RequestParser(OriginalRequestParser):
def __init__(self, *args, **kargs):
arguments = kargs.pop('arguments', None)
select = kargs.pop('select', None)
update = kargs.pop('update', None)
super(RequestParser, self).__init__(*args, **kargs)
if arguments:
if select: # add just selected arguments
arguments = dict((k, arguments[k]) for k in select)
self.add_arguments(arguments)
if update:
self.update_arguments(update)
def add_arguments(self, arguments):
'''Add arguments to be parsed. Accepts either a dictionary of arguments
or a list of `Argument`.'''
if isinstance(arguments, dict): # {'argument': {'required': ...}, }
# expand each options dict into Argument keyword arguments
arguments = [Argument(name, **options) for name, options in arguments.items()]
for argument in arguments:
self.add_argument(argument)
def update_arguments(self, dictionary):
''' Update the arguments matching the given dictionary keys. '''
for arg in self.args:
if arg.name in dictionary.keys():
setattr(arg, arg.name, dictionary[arg.name])
def remove_arguments(self, names):
''' Remove the arguments matching the given names. '''
for name in names:
self.remove_argument(name)
def copy(self, update=None, remove=None):
''' Creates a copy with the same set of arguments. '''
parsercopy = super(RequestParser, self).copy()
if update:
parsercopy.update_arguments(update)
if remove:
parsercopy.remove_arguments(remove)
return parsercopy
def parse(self, *args, **kwargs):
'''`parse_args` alias.'''
return self.parse_args(*args, **kwargs)
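# Added illustrative usage (argument names are hypothetical; real parsing needs
# an active Flask request context, so this stays commented out):
#
#   parser = RequestParser(
#       arguments={'name': dict(required=True), 'age': dict(type=int)},
#       select=['name', 'age'],
#   )
#   args = parser.parse()   # alias for parser.parse_args()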
|
import argparse
import csv
import os
from collections import namedtuple
from math import sqrt, pi, atan2
import platform
import cairo
import gi
import numpy as np
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, Gio, GdkPixbuf, GObject
def cl_arg():
parser = argparse.ArgumentParser(
formatter_class=argparse.MetavarTypeHelpFormatter,
description='GUI to annotate images.')
parser.add_argument('-i', '--images',
type=str,
help='Folder with images (%(type)s).')
parser.add_argument('-t', '--types',
type=str,
help='File with point types in csv (%(type)s).')
parser.add_argument('-p', '--points',
type=str,
help='File of saved points in csv (%(type)s).')
arguments = parser.parse_args()
return arguments
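# Added illustrative invocation (script and file names are hypothetical):
#   python annotate.py -i ./images -t point_types.csv -p saved_points.csv
# opens a folder of images, loads the point-type definitions and restores
# previously saved markings via the handler calls in main() below.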
def main(handler):
args = cl_arg()
if args.images:
if os.path.isdir(args.images):
handler.open_image_folder(args.images)
else:
handler.open_image(args.images)
if args.types:
handler.load_point_types(args.types)
if args.points:
handler.load_points(args.points)
class App(Gtk.Application):
def __init__(self):
super().__init__(application_id='org.annotate.images',
flags=Gio.ApplicationFlags.FLAGS_NONE)
self.window = None
self.handler = None
def do_startup(self):
Gtk.Application.do_startup(self)
self.make_action('preferences', self.on_preferences)
self.make_action('open_image_folder', self.on_open_image_folder)
self.make_action('open_image', self.on_open_image)
self.make_action('open_markings', self.on_open_markings)
self.make_action('open_markings_types', self.on_open_marking_types)
self.make_action('save_markings', self.on_save_markings)
self.make_action('save_as_markings', self.on_save_as_markings)
self.make_action('quit', self.on_quit)
self.make_action('previous_image', self.on_previous_image)
self.make_action('next_image', self.on_next_image)
self.make_action('switch_image', self.on_switch_image)
self.make_action('switch_to_boundingbox', self.on_switch_bounding_box)
self.make_action('zoom_out', self.on_zoom_out)
self.make_action('zoom_in', self.on_zoom_in)
self.make_action('zoom_normal', self.on_zoom_normal)
self.make_action('about', self.on_about)
def do_activate(self):
menu_builder = Gtk.Builder()
menu_builder.add_from_file('data/menu.glade')
menu_bar = menu_builder.get_object('menu_bar')
self.set_menubar(menu_bar)
win_builder = Gtk.Builder()
win_builder.add_from_file('data/GUI.glade')
self.handler = Handler(win_builder)
win_builder.connect_signals(self.handler)
self.window = win_builder.get_object('main_window')
self.window.set_title('Image Annotating')
self.window.set_application(self)
self.window.show_all()
main(self.handler)
def make_action(self, name, func):
action = Gio.SimpleAction.new(name, None)
action.connect('activate', func)
self.add_action(action)
def on_about(self, action, param):
about_dialog = AboutDialog(self.window)
response = about_dialog.run()
if response:
about_dialog.destroy()
def on_quit(self, action, param):
self.quit()
def on_open_image_folder(self, action, param):
self.handler.file_dialog(self.handler.open_dir_button)
def on_open_image(self, action, param):
self.handler.file_dialog(self.handler.open_image_button)
def on_open_markings(self, action, param):
self.handler.file_dialog(self.handler.load_points_button)
def on_open_marking_types(self, action, param):
self.handler.file_dialog(self.handler.load_point_type_button)
def on_save_markings(self, action, param):
self.handler.save_points_shortcut()
def on_save_as_markings(self, action, param):
self.handler.file_dialog(self.handler.save_points_button)
def on_previous_image(self, action, param):
self.handler.open_next_image(self.handler.previous_image_button)
def on_next_image(self, action, param):
self.handler.open_next_image(self.handler.next_image_button)
def on_switch_image(self, action, param):
self.handler.switch_things_shortcut(self.handler.switch_image_button)
def on_switch_bounding_box(self, action, param):
self.handler.switch_things_shortcut(self.handler.switch_box_button)
def on_zoom_out(self, action, param):
self.handler.zoom_pressed(self.handler.zoom_out_button)
def on_zoom_in(self, action, param):
self.handler.zoom_pressed(self.handler.zoom_in_button)
def on_zoom_normal(self, action, param):
self.handler.zoom_pressed(self.handler.zoom_normal)
def on_preferences(self, action, param):
preferences_dialog = PreferencesDialog(self.window)
response = preferences_dialog.run()
if response:
preferences_dialog.destroy()
class PreferencesDialog(Gtk.Dialog):
def __init__(self, parent):
header = 'Preferences'
response = (Gtk.STOCK_OK, Gtk.ResponseType.OK)
Gtk.Dialog.__init__(self, header, parent, 0, response)
self.set_default_size(150, 100)
label = Gtk.Label('Coming soon')
label2 = Gtk.Label('')
box = self.get_content_area()
box.add(label)
box.add(label2)
self.show_all()
# drawing size
# ending of annotated files
# working dir
class AboutDialog(Gtk.Dialog):
def __init__(self, parent):
header = 'About'
response = (Gtk.STOCK_OK, Gtk.ResponseType.OK)
Gtk.Dialog.__init__(self, header, parent, 0, response)
self.set_default_size(150, 100)
label = Gtk.Label('Image annotation program')
label2 = Gtk.Label('Used to make markings on images.')
box = self.get_content_area()
box.add(label)
box.add(label2)
self.show_all()
class PointsNotSavedDialog(Gtk.Dialog):
def __init__(self, parent):
header = 'Points not saved!'
response = (Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK,
Gtk.ResponseType.OK)
Gtk.Dialog.__init__(self, header, parent, 0, response)
self.set_default_size(150, 100)
label = Gtk.Label('The current points have not been saved.')
label2 = Gtk.Label('Use Cancel to return and then save.')
label3 = Gtk.Label('Use OK to discard and continue.')
box = self.get_content_area()
box.add(label)
box.add(label2)
box.add(label3)
self.show_all()
class OverridePointImageDialog(Gtk.Dialog):
def __init__(self, parent):
header = 'Point - Image mismatch'
response = (Gtk.STOCK_NO,
Gtk.ResponseType.NO,
Gtk.STOCK_YES,
Gtk.ResponseType.YES)
Gtk.Dialog.__init__(self, header, parent, 0, response)
self.set_default_size(150, 100)
label = Gtk.Label('The points loaded do not match the current image.')
label2 = Gtk.Label('Do you want to show the point anyway?')
box = self.get_content_area()
box.add(label)
box.add(label2)
self.show_all()
class Handler:
def __init__(self, gui_builder):
self.dir_delimiter = '/'
if platform.system().startswith('Win'):
self.dir_delimiter = '\\'
# named tuples used.
self.buf_and_image = namedtuple('buf_and_image', ['buf', 'image'])
self.color = namedtuple('color', ['r', 'g', 'b', 'a'])
self.point = namedtuple('point', ('image', 'type',
'x', 'y', 'x2', 'y2', 'box')
+ self.color._fields)
self.summary_values = namedtuple('summary_values', ['amount', 'size',
'color'])
# handles to different widgets
self.main_window = gui_builder.get_object('main_window')
self.scroll_window = gui_builder.get_object('scroll_window')
self.v_adjust = self.scroll_window.get_vadjustment()
self.h_adjust = self.scroll_window.get_hadjustment()
self.layout = gui_builder.get_object('layout')
self.draw_image = gui_builder.get_object('draw_image')
self.draw_image_and_buf = self.buf_and_image(
self.draw_image.get_pixbuf(),
self.draw_image)
self.save_points_button = gui_builder.get_object('save_points')
self.open_image_button = gui_builder.get_object('open_image')
self.load_point_type_button = gui_builder.get_object('load_point_type')
self.load_points_button = gui_builder.get_object('load_points')
self.open_dir_button = gui_builder.get_object('open_image_folder')
self.zoom_in_button = gui_builder.get_object('zoom_in')
self.zoom_out_button = gui_builder.get_object('zoom_out')
self.zoom_normal = gui_builder.get_object('zoom_too_normal')
self.zoom_slider = gui_builder.get_object('zoom_scale')
self.gtk_point_type_list = gui_builder.get_object('point_type_list')
self.gtk_point_summary_list = gui_builder.get_object('point_summary')
self.point_type_button = gui_builder.get_object('select_point_type_box')
self.switch_image_button = gui_builder.get_object('switch_image')
self.switch_image_button.set_sensitive(False)
self.switch_box_button = gui_builder.get_object('draw_boxes')
self.progress_bar = gui_builder.get_object('progress_bar')
self.last_entry_label = gui_builder.get_object('last_entry')
self.next_image_button = gui_builder.get_object('open_next_image')
self.next_image_button.set_sensitive(False)
self.previous_image_button = gui_builder.get_object(
'open_previous_image')
self.previous_image_button.set_sensitive(False)
# setup the status bar
self.status_bar = gui_builder.get_object('status_bar')
self.status_msg = self.status_bar.get_context_id('Message')
self.status_warning = self.status_bar.get_context_id('Warning')
self.show_missing_image_warning = True
# ready the draw area
self.scroll_speed = 78
self.radius = 10
self.buffers_and_images = {}
self.init_draw_area(gui_builder)
self.window_height = 0
self.window_width = 0
self.do_scroll = False
self.do_drag = False
self.pressed_on_point = False
self.pressed_on_point_head = False
self.pressed_on_point_tail = False
self.point_clicked = None
self.pressed_x = None
self.pressed_y = None
self.draw_temp = None
self.draw_buf_temp = None
self.do_draw_bounding_boxes = False
# ready the point type selection
self.point_type_color = self.hex_color_to_rgba('#FF0000')
self.point_type = None
self.current_image = 'None'
self.list_of_images = []
self.tree_image_index = {}
self.image_folder = None
self.current_point_file = None
self.font = 'arial 11'
self.bold_font = 'arial bold 11'
self.background_color = '#FFFFFF'
self.point_summary_dict = {}
self.point_type_button.set_active(0)
# init list to store points in
self.point_list = []
self.points_saved = True
self.override_point_image_match = False
# init variables for zooming
self.slider_pressed = False
self.zoom_percent = 100
self.image_width = 100
self.image_height = 100
self.do_run_idle_tasks = True
task = self.do_draw_markings_when_idle()
GObject.idle_add(task.__next__)
def set_cursor(self, cursor_type=None):
cursor = Gdk.Cursor(Gdk.CursorType.ARROW)
if cursor_type == 'cross':
cursor = Gdk.Cursor(Gdk.CursorType.CROSSHAIR)
self.layout.get_bin_window().set_cursor(cursor)
def do_draw_markings_when_idle(self):
while self.do_run_idle_tasks:
if not self.do_drag and not self.do_scroll and \
not self.slider_pressed:
self.draw_markings()
yield True
yield False
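# Added note: GObject.idle_add(task.__next__) in __init__ drives this generator
# once per idle cycle; yielding True keeps the idle source scheduled, and the
# final `yield False` (reached once do_run_idle_tasks is cleared) removes it.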
def summary_init_values(self, color='#FFFFFF'):
return self.summary_values(0, 0, color)
def init_draw_area(self, gui_builder):
images = ['original_image',
'bw_image']
for im in images:
image = gui_builder.get_object(im)
buf = image.get_pixbuf()
bi = self.buf_and_image(buf, image)
# slice off the '_image' suffix (rstrip strips characters, not a suffix)
self.buffers_and_images[im[:-len('_image')]] = bi
def delete_window(self, *args):
if self.warning_dialog_response():
return True
self.do_run_idle_tasks = False
self.main_window.destroy()
def warning_dialog_response(self):
if not self.points_saved:
warning_dialog = PointsNotSavedDialog(self.main_window)
response = warning_dialog.run()
if response == Gtk.ResponseType.OK:
warning_dialog.destroy()
return False
elif response == Gtk.ResponseType.CANCEL:
warning_dialog.destroy()
return True
def warning_point_image_mismatch(self):
warning_dialog = OverridePointImageDialog(self.main_window)
response = warning_dialog.run()
if response == Gtk.ResponseType.YES:
warning_dialog.destroy()
return True
elif response == Gtk.ResponseType.NO:
warning_dialog.destroy()
return False
def hex_color_to_rgba(self, hex_color):
h = hex_color.lstrip('#')
rgb = [int(h[i:i + 2], 16) / 255 for i in (0, 2, 4)]
rgb.append(1)
rgba = self.color._make(rgb)
return rgba
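# Added example: hex_color_to_rgba('#FF8000') -> color(r=1.0, g=0.502..., b=0.0, a=1)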
@staticmethod
def rgba_color_to_hex(rgba):
rgb = (int(rgba.r * 255), int(rgba.g * 255), int(rgba.b * 255))
hex_color = '#%02X%02X%02X' % rgb
return hex_color
def switch_images(self, button):
original = self.buffers_and_images.get('original')
bw = self.buffers_and_images.get('bw')
if button.get_active():
original.image.hide()
bw.image.show()
else:
original.image.show()
bw.image.hide()
def switch_to_bounding_box(self, button):
if button.get_active():
self.do_draw_bounding_boxes = True
self.set_cursor('cross')
else:
self.do_draw_bounding_boxes = False
self.set_cursor()
def zoom_slide(self, slider, scroll, value):
self.zoom_percent = round(value)
if abs(slider.get_value() - value) >= 10:
self.check_zoom_range()
self.zoom()
def check_zoom_range(self):
if self.zoom_percent > 250:
self.zoom_percent = 250
elif self.zoom_percent < 10:
self.zoom_percent = 10
def zoom_slide_pressed(self, scale, event):
self.slider_pressed = True
def zoom_slide_release(self, scale, event):
self.slider_pressed = False
self.check_zoom_range()
self.zoom()
def mouse_wheel(self, event_box, event):
if event.state & Gdk.ModifierType.CONTROL_MASK:
self.zoom_mouse_wheel(event)
else:
self.do_scroll_step(event)
self.move_draw_image()
self.draw_markings()
return True
def do_scroll_step(self, event):
y_updated = self.v_adjust.get_value()
y_updated = y_updated + event.delta_y * self.scroll_speed
self.v_adjust.set_value(y_updated)
def scale_to_zoom(self, *numbers, divide=False, offset=None):
if divide:
factor = 100 / self.zoom_percent
else:
factor = self.zoom_percent / 100
if offset is None:
offset = (0,) * len(numbers)
if len(numbers) == 1:
return numbers[0] * factor + offset[0]
args_out = []
for n, o in zip(numbers, offset):
if n is None:
args_out.append(None)
else:
args_out.append(n * factor - o)
return args_out
def zoom_mouse_wheel(self, event):
if event.delta_y == 1:
self.zoom_percent = self.zoom_percent - 10
elif event.delta_y == -1:
self.zoom_percent = self.zoom_percent + 10
self.check_zoom_range()
self.pressed_x = event.x
self.pressed_y = event.y
self.zoom()
self.scroll(event)
self.move_draw_image()
def zoom_pressed(self, button):
if button.get_label() == 'Zoom too normal':
self.zoom_percent = 100
elif button.get_label() == 'Zoom in':
self.zoom_percent = self.zoom_percent + 10
elif button.get_label() == 'Zoom out':
self.zoom_percent = self.zoom_percent - 10
self.check_zoom_range()
self.zoom()
def zoom(self):
self.zoom_slider.set_value(self.zoom_percent)
self.progress_bar.set_text(None)
task = self.zoom_with_progress()
GObject.idle_add(task.__next__)
def zoom_with_progress(self):
progress = 0
self.progress_bar.set_fraction(0.0)
yield True
width, height = self.scale_to_zoom(self.image_width, self.image_height)
self.layout.set_size(width, height)
for bi in self.buffers_and_images.values():
try:
self.scale_image(bi, height, width)
except AttributeError:
self.warn_annotated_image()
progress = progress + 0.50
self.progress_bar.set_fraction(progress)
yield True
self.draw_markings()
self.progress_bar.set_text('Done!')
yield False
@staticmethod
def scale_image(buf_image, height, width):
buf_new = buf_image.buf.scale_simple(width,
height,
GdkPixbuf.InterpType.BILINEAR)
buf_image.image.set_from_pixbuf(buf_new)
return buf_new
def resize(self, widget, event):
if event.width != self.window_width \
or event.height != self.window_height:
self.resize_draw_image()
self.draw_markings()
self.window_height = event.height
self.window_width = event.width
def resize_draw_image(self):
width = self.h_adjust.get_page_size()
height = self.v_adjust.get_page_size()
draw = self.draw_image_and_buf
buf_new = self.scale_image(draw, height, width)
self.draw_image_and_buf = self.buf_and_image(buf_new, draw.image)
def move_draw_image(self):
x = self.h_adjust.get_value()
y = self.v_adjust.get_value()
self.layout.move(self.draw_image, x, y)
def scroll(self, event):
scroll_x = self.h_adjust.get_value()
scroll_y = self.v_adjust.get_value()
change_x = self.pressed_x - event.x
change_y = self.pressed_y - event.y
new_scroll_x = scroll_x + change_x
self.h_adjust.set_value(new_scroll_x)
new_scroll_y = scroll_y + change_y
self.v_adjust.set_value(new_scroll_y)
def warn_annotated_image(self):
if self.show_missing_image_warning:
status_string = 'Computer annotated image not loaded!'
self.status_bar.push(self.status_warning, status_string)
self.switch_image_button.set_sensitive(False)
self.show_missing_image_warning = False
def point_type_changed(self, button):
model = button.get_model()
active = button.get_active()
if active >= 0:
code = model[active][0]
color = self.hex_color_to_rgba(code)
self.point_type_color = color
self.point_type = model[active][1]
self.update_summary()
def handle_shortcuts(self, event_box, event):
key_name = Gdk.keyval_name(event.keyval)
self.switch_point_type(key_name)
def save_points_shortcut(self):
if self.current_point_file is None:
self.file_dialog(self.save_points_button)
else:
self.save_points(self.current_point_file)
@staticmethod
def switch_things_shortcut(button):
if button.get_sensitive():
if button.get_active():
button.set_active(False)
else:
button.set_active(True)
def switch_point_type(self, key_name):
try:
idx = int(key_name) - 1
if idx in range(len(self.gtk_point_type_list)):
self.point_type_button.set_active(idx)
except ValueError:
pass
def mouse_move(self, event_box, event):
if self.do_drag:
self.make_line_marking(event)
elif self.do_scroll:
self.scroll(event)
elif self.pressed_on_point:
self.point_clicked = self.move_marking_live(event)
def make_point(self, x, y, x2=None, y2=None, box=False):
args = (self.current_image, self.point_type, x, y, x2, y2, box)
point = self.point(*args, *self.point_type_color)
return point
def make_line_marking(self, event):
args = (self.pressed_x, self.pressed_y, event.x, event.y)
point = self.make_point(*args)
self.draw_live(point)
def add_remove_point(self, event_box, event):
if event.button == 1:
if event.state & Gdk.ModifierType.CONTROL_MASK:
self.remove_marking(event)
else:
self.pressed_on_point = self.find_closest_point(event)
if not self.pressed_on_point or self.do_drag:
self.add_marking(event)
elif event.button == 2:
self.button_scroll(event)
elif event.button == 3:
self.remove_marking(event)
self.draw_markings()
def button_scroll(self, event):
if event.type == Gdk.EventType.BUTTON_PRESS:
self.do_scroll = True
self.pressed_x = event.x
self.pressed_y = event.y
elif event.type == Gdk.EventType.BUTTON_RELEASE:
self.do_scroll = False
self.move_draw_image()
def find_closest_point(self, point):
scaled_p = self.scale_to_zoom(point.x, point.y, divide=True)
dist_keep = np.inf
p_keep = None
for p in self.point_list:
if p.image == self.current_image:
dist_head = self.get_size(p, scaled_p)
dist_tail = self.get_size(p, scaled_p, head=False)
dist = min(dist_head, dist_tail)
if dist < dist_keep:
dist_keep = dist
p_keep = p
if dist == dist_head:
self.pressed_on_point_head = True
self.pressed_on_point_tail = False
else:
self.pressed_on_point_tail = True
self.pressed_on_point_head = False
dist_keep = self.scale_to_zoom(dist_keep)
smaller_than_radius = dist_keep < self.radius
if smaller_than_radius:
self.point_clicked = p_keep
return smaller_than_radius
@staticmethod
def get_size(marking, point=None, head=True):
x1 = marking[2]
y1 = marking[3]
if point is None:
x2 = marking[4]
y2 = marking[5]
if x2 is None:
return 0
else:
x2 = point[0]
y2 = point[1]
if not head:
x1 = marking[4]
y1 = marking[5]
if x1 is None:
return np.inf
return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
@staticmethod
def get_angle(marking):
if marking.x2 is None:
return 0
else:
angle = atan2(-(marking.y2 - marking.y),
(marking.x2 - marking.x)) / pi * 180
return angle
def check_if_clicked_on_marking(self, event):
if self.point_type is not None:
if self.check_if_click(event):
if self.find_closest_point(event):
return True
else:
status_string = 'No point types loaded!'
self.status_bar.push(self.status_msg, status_string)
return False
def remove_marking(self, event):
if self.check_if_clicked_on_marking(event):
self.points_saved = False
self.point_list.remove(self.point_clicked)
label_text = 'removed: (%i, %i)' % (int(event.x), int(event.y))
self.update_label(label_text)
self.make_new_summary(self.point_clicked, add=False)
self.update_summary()
def update_label(self, text):
self.last_entry_label.set_text(text)
def make_new_summary(self, point, *, add):
if add:
sign = 1
else:
sign = -1
key = self.current_image + '--' + point.type
summary = self.point_summary_dict.get(key)
size = self.get_size(point)
new_summary = self.summary_values(summary.amount + sign*1,
summary.size + sign*size,
summary.color)
self.point_summary_dict[key] = new_summary
def check_if_click(self, event, do_drag=False):
if event.type == Gdk.EventType.BUTTON_PRESS:
self.do_drag = do_drag
self.pressed_x = event.x
self.pressed_y = event.y
self.draw_temp = self.draw_image_and_buf
self.draw_buf_temp = self.draw_temp.image.get_pixbuf()
if event.type == Gdk.EventType.BUTTON_RELEASE:
self.do_drag = False
sensitivity = 5
if abs(self.pressed_x - event.x) < sensitivity and \
abs(self.pressed_y - event.y) < sensitivity:
return True
return False
def add_marking(self, event):
if self.point_type is not None:
if self.check_if_click(event, do_drag=True):
self.add_point(event)
elif event.type == Gdk.EventType.BUTTON_RELEASE:
self.add_size_mark(event)
else:
status_string = 'No point types loaded!'
self.status_bar.push(self.status_msg, status_string)
def add_size_mark(self, event):
self.points_saved = False
args = self.scale_to_zoom(self.pressed_x,
self.pressed_y,
event.x,
event.y,
divide=True)
box = self.do_draw_bounding_boxes
point = self.make_point(*args, box)
self.point_list.append(point)
label_text = '%s %i px, %i degrees' % (self.point_type,
int(self.get_size(point)),
int(self.get_angle(point)))
self.update_label(label_text)
self.make_new_summary(point, add=True)
self.update_summary()
def add_point(self, event):
self.points_saved = False
args = self.scale_to_zoom(event.x, event.y, divide=True)
point = self.make_point(*args)
self.point_list.append(point)
label_text = '%s (%i, %i)' % (self.point_type,
int(point.x),
int(point.y))
self.update_label(label_text)
self.make_new_summary(point, add=True)
self.update_summary()
def move_marking_live(self, event):
point = self.point_clicked
new_coord = self.scale_to_zoom(event.x, event.y, divide=True)
if self.pressed_on_point_head:
new_point = point._replace(x=new_coord[0], y=new_coord[1])
else:
new_point = point._replace(x2=new_coord[0], y2=new_coord[1])
self.point_list.remove(point)
self.point_list.append(new_point)
self.change_size_in_summary(point, new_point)
self.update_summary()
return new_point
def change_size_in_summary(self, point_old, point_new):
size_old = self.get_size(point_old)
size_new = self.get_size(point_new)
key = self.current_image + '--' + point_old.type
summary = self.point_summary_dict.get(key)
new_summary = self.summary_values(summary.amount,
summary.size + size_new - size_old,
summary.color)
self.point_summary_dict[key] = new_summary
def update_summary(self):
self.gtk_point_summary_list.clear()
old_image = ''
idx = 0
self.tree_image_index = {}
dict_sort = sorted(self.point_summary_dict.items(), key=lambda x: x[0])
for key, summary in dict_sort:
full_image, point_type = key.split('--')
image_font, point_font = self.get_font(full_image, point_type)
image = full_image.split(self.dir_delimiter)[-1]
if image != old_image:
self.gtk_point_summary_list.append([image, '', '',
image_font,
self.background_color])
old_image = image
self.tree_image_index.update({idx: full_image})
idx = idx + 1
self.gtk_point_summary_list.append([point_type,
str(summary.amount),
str(int(summary.size)),
point_font,
summary.color])
idx = idx + 1
def get_font(self, image, point_type):
if image == self.current_image:
image_font = self.bold_font
if point_type == self.point_type:
point_font = self.bold_font
else:
point_font = self.font
else:
image_font = self.font
point_font = self.font
return image_font, point_font
def draw_live(self, point):
width = self.draw_buf_temp.get_width()
height = self.draw_buf_temp.get_height()
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
cr = cairo.Context(surface)
Gdk.cairo_set_source_pixbuf(cr, self.draw_buf_temp, 0, 0)
cr.paint()
cr.set_source_rgba(point.r, point.g, point.b, point.a)
args = self.shift_coordinates(point)
self.draw_circle(cr, args[0], args[1])
if self.do_draw_bounding_boxes:
self.draw_box(cr, *args)
else:
self.draw_line(cr, *args)
surface = cr.get_target()
draw_buf = Gdk.pixbuf_get_from_surface(surface, 0, 0, width, height)
self.draw_temp.image.set_from_pixbuf(draw_buf)
def shift_coordinates(self, point):
offset = (self.h_adjust.get_value(), self.v_adjust.get_value())
args = (point.x - offset[0], point.y - offset[1],
point.x2 - offset[0], point.y2 - offset[1])
return args
def get_draw_coordinate(self, p):
offset = (self.h_adjust.get_value(), self.v_adjust.get_value(),
self.h_adjust.get_value(), self.v_adjust.get_value())
args = self.scale_to_zoom(p.x, p.y, p.x2, p.y2, offset=offset)
return args
def draw_markings(self):
draw = self.draw_image_and_buf
draw_buf = draw.buf
width = draw_buf.get_width()
height = draw_buf.get_height()
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
cr = cairo.Context(surface)
Gdk.cairo_set_source_pixbuf(cr, draw_buf, 0, 0)
cr.paint()
for point in self.point_list:
if point.image == self.current_image or \
self.override_point_image_match:
args = self.get_draw_coordinate(point)
cr.set_source_rgba(point.r, point.g, point.b, point.a)
self.draw_circle(cr, args[0], args[1])
if point.box:
self.draw_box(cr, *args)
elif args[3] is not None:
self.draw_line(cr, *args)
surface = cr.get_target()
draw_buf = Gdk.pixbuf_get_from_surface(surface, 0, 0, width, height)
draw.image.set_from_pixbuf(draw_buf)
def draw_circle(self, cr, x, y):
cr.arc(x, y, self.radius, 0, 2 * pi)
cr.fill()
def draw_line(self, cr, x, y, x2, y2):
cr.move_to(x, y)
cr.line_to(x2, y2)
cr.set_line_width(3)
cr.stroke()
cr.arc(x2, y2, self.radius / 2, 0, 2 * pi)
cr.fill()
def draw_box(self, cr, x, y, x2, y2):
cr.move_to(x, y)
cr.line_to(x, y2)
cr.line_to(x2, y2)
cr.line_to(x2, y)
cr.line_to(x, y)
cr.set_line_width(3)
cr.stroke()
cr.arc(x2, y2, self.radius / 2, 0, 2 * pi)
cr.fill()
def open_image_from_tree(self, tree, path, col):
idx = Gtk.TreePath.get_indices(path)[0]
if idx in self.tree_image_index:
self.open_image(self.tree_image_index.get(idx))
def open_next_image(self, button):
shift = 1
if button.get_label() == 'Open previous image':
shift = -1
if not self.list_of_images:
self.get_list_of_images()
try:
idx = self.list_of_images.index(self.current_image) + shift
except ValueError:
idx = 0
if 0 <= idx < len(self.list_of_images):
new_image = self.list_of_images[idx]
self.open_image(new_image)
if idx + 1 == len(self.list_of_images):
self.next_image_button.set_sensitive(False)
elif idx == 0:
self.previous_image_button.set_sensitive(False)
elif idx + 1 > len(self.list_of_images) or idx < 0:
status_string = 'No more images in folder'
self.status_bar.push(self.status_msg, status_string)
def get_list_of_images(self):
files = list(self.get_files_in_dir())
self.list_of_images = sorted(files, key=lambda x: x)
def get_files_in_dir(self):
for file in os.listdir(self.image_folder):
if file.endswith('JPG'):
yield os.path.join(self.image_folder, file)
elif file.endswith('_annotated.png'):
pass
elif file.endswith('png'):
yield os.path.join(self.image_folder, file)
@staticmethod
def add_image_filters(dialog):
filter_jpg = Gtk.FileFilter()
filter_jpg.set_name('JPG images')
filter_jpg.add_mime_type('image/jpeg')
dialog.add_filter(filter_jpg)
filter_png = Gtk.FileFilter()
filter_png.set_name('Png images')
filter_png.add_mime_type('image/png')
dialog.add_filter(filter_png)
filter_any = Gtk.FileFilter()
filter_any.set_name('Any files')
filter_any.add_pattern('*')
dialog.add_filter(filter_any)
@staticmethod
def add_text_filters(dialog):
filter_csv = Gtk.FileFilter()
filter_csv.set_name('csv')
filter_csv.add_mime_type('text/csv')
dialog.add_filter(filter_csv)
filter_plain = Gtk.FileFilter()
filter_plain.set_name('Plain text')
filter_plain.add_mime_type('text/plain')
dialog.add_filter(filter_plain)
filter_any = Gtk.FileFilter()
filter_any.set_name('Any files')
filter_any.add_pattern('*')
dialog.add_filter(filter_any)
def open_image_folder(self, filename):
self.image_folder = filename
self.open_next_image(self.next_image_button)
def open_image(self, filename):
self.current_image = filename
self.image_folder = os.path.dirname(filename)
status_string = 'Image and computer annotated image opened.'
self.status_bar.push(self.status_msg, status_string)
self.next_image_button.set_sensitive(True)
self.previous_image_button.set_sensitive(True)
self.switch_image_button.set_sensitive(True)
self.show_missing_image_warning = True
original = self.buffers_and_images.get('original')
original.image.set_from_file(filename)
new_original_buf = original.image.get_pixbuf()
new_original = self.buf_and_image(new_original_buf,
original.image)
self.buffers_and_images['original'] = new_original
bw = self.buffers_and_images.get('bw')
bw_filename = filename[0:-4] + '_annotated.png'
bw.image.set_from_file(bw_filename)
new_bw_buf = bw.image.get_pixbuf()
new_bw = self.buf_and_image(new_bw_buf, bw.image)
self.buffers_and_images['bw'] = new_bw
self.zoom_percent = 100
self.image_width = new_original.buf.get_width()
self.image_height = new_original.buf.get_height()
for pt in self.gtk_point_type_list:
key = self.current_image + '--' + pt[1]
if key not in self.point_summary_dict:
new_dict = {key: self.summary_init_values(pt[0])}
self.point_summary_dict.update(new_dict)
self.update_summary()
self.zoom()
def load_point_types(self, filename):
status_string = 'Point types loaded.'
self.status_bar.push(self.status_msg, status_string)
self.gtk_point_type_list.clear()
image = self.current_image.split(self.dir_delimiter)
self.gtk_point_summary_list.append([image[-1], '', '', self.font,
self.background_color])
with open(filename, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
reader.__next__()
sort_points = sorted(reader, key=lambda x: x[1])
for point in sort_points:
self.update_point_types(point)
self.point_type_button.set_active(0)
self.draw_markings()
def update_point_types(self, row):
self.gtk_point_type_list.append(row)
key = self.current_image + '--' + row[1]
self.point_summary_dict.update({key: self.summary_init_values(row[0])})
self.update_summary()
def save_points(self, filename):
self.current_point_file = filename
status_string = 'Points saved.'
self.status_bar.push(self.status_msg, status_string)
self.points_saved = True
header = ['image', 'type', 'x1', 'y1', 'x2', 'y2', 'box',
'red', 'green', 'blue', 'alpha']
with open(filename, 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(header)
for p in self.point_list:
writer.writerow(p)
def load_points(self, filename):
self.current_point_file = filename
status_string = 'Points loaded.'
self.status_bar.push(self.status_msg, status_string)
self.point_list = []
self.gtk_point_summary_list.clear()
with open(filename, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
reader.__next__()
image_point_match = self.points_parser(reader)
if not image_point_match:
if self.warning_point_image_mismatch():
self.override_point_image_match = True
self.make_summary_dict()
self.update_summary()
self.points_saved = True
self.draw_markings()
@staticmethod
def point_parser(row):
args = []
for data in row:
try:
args.append(float(data))
except ValueError:
if data == 'True':
args.append(True)
elif data == 'False':
args.append(False)
elif not data:
args.append(None)
else:
args.append(data)
return args
def points_parser(self, reader):
image_point_match = False
for row in reader:
if not row:
pass
else:
args = self.point_parser(row)
if args[0] == self.current_image:
image_point_match = True
self.point_list.append(self.point(*args))
return image_point_match
def make_summary_dict(self):
self.point_summary_dict.clear()
for p in self.point_list:
color = self.rgba_color_to_hex(p)
key = p.image + '--' + p.type
size = self.get_size(p)
if key not in self.point_summary_dict:
values = self.summary_values(1, size, color)
else:
values = self.point_summary_dict.get(key)
values = self.summary_values(values.amount + 1,
values.size + size,
color)
self.point_summary_dict.update({key: values})
for pt in self.gtk_point_type_list:
key = self.current_image + '--' + pt[1]
if key not in self.point_summary_dict:
new_dict = {key: self.summary_init_values(pt[0])}
self.point_summary_dict.update(new_dict)
def file_dialog(self, button):
text = 'Choose a file'
action = Gtk.FileChooserAction.OPEN
file_button = Gtk.STOCK_OPEN
if button.get_label() == 'Save points':
text = 'Save points as'
action = Gtk.FileChooserAction.SAVE
file_button = Gtk.STOCK_SAVE
elif button.get_label() == 'Load points':
if self.warning_dialog_response():
return True
text = 'Choose a file with the points'
elif button.get_label() == 'Open image':
text = 'Choose an image to open'
elif button.get_label() == 'Load point types':
text = 'Choose a file with the point types'
elif button.get_label() == 'Open image folder':
text = 'Choose a folder with images'
action = Gtk.FileChooserAction.SELECT_FOLDER
response = (Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
file_button,
Gtk.ResponseType.OK)
dialog = Gtk.FileChooserDialog(text,
self.main_window,
action,
response)
if button.get_label() == 'Open image':
self.add_image_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.open_image(dialog.get_filename())
elif button.get_label() == 'Load point types':
self.add_text_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.load_point_types(dialog.get_filename())
elif button.get_label() == 'Save points':
dialog.set_do_overwrite_confirmation(True)
dialog.set_current_name('untitled.csv')
if self.current_point_file is None:
if self.image_folder is not None:
dialog.set_current_folder(self.image_folder)
else:
dialog.set_filename(self.current_point_file)
self.add_text_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.save_points(dialog.get_filename())
elif button.get_label() == 'Load points':
self.add_text_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.load_points(dialog.get_filename())
elif button.get_label() == 'Open image folder':
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.open_image_folder(dialog.get_filename())
dialog.destroy()
if __name__ == '__main__':
app = App()
app.run()
code clean up
import argparse
import csv
import os
from collections import namedtuple
from math import sqrt, pi, atan2
import platform
import cairo
import gi
import numpy as np
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, Gio, GdkPixbuf, GObject
def cl_arg():
parser = argparse.ArgumentParser(
formatter_class=argparse.MetavarTypeHelpFormatter,
description='GUI to annotate images.')
parser.add_argument('-i', '--images',
type=str,
help='Folder with images (%(type)s).')
parser.add_argument('-t', '--types',
type=str,
help='File with point types in csv (%(type)s).')
parser.add_argument('-p', '--points',
type=str,
help='File of saved points in csv (%(type)s).')
arguments = parser.parse_args()
return arguments
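# A typical invocation of the options defined above (the script and file names
# are hypothetical, purely to illustrate the flags):
#
#     python annotation_gui.py -i ./images -t point_types.csv -p saved_points.csv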
def main(handler):
args = cl_arg()
if args.images:
if os.path.isdir(args.images):
handler.open_image_folder(args.images)
else:
handler.open_image(args.images)
if args.types:
handler.load_point_types(args.types)
if args.points:
handler.load_points(args.points)
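# App is the Gtk.Application shell: it loads the menu and main window from the
# Glade files, registers the menu/keyboard actions, and forwards each action to
# the corresponding Handler method.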
class App(Gtk.Application):
def __init__(self):
super().__init__(application_id='org.annotate.images',
flags=Gio.ApplicationFlags.FLAGS_NONE)
self.window = None
self.handler = None
def do_startup(self):
Gtk.Application.do_startup(self)
self.make_action('preferences', self.on_preferences)
self.make_action('open_image_folder', self.on_open_image_folder)
self.make_action('open_image', self.on_open_image)
self.make_action('open_markings', self.on_open_markings)
self.make_action('open_markings_types', self.on_open_marking_types)
self.make_action('save_markings', self.on_save_markings)
self.make_action('save_as_markings', self.on_save_as_markings)
self.make_action('quit', self.on_quit)
self.make_action('previous_image', self.on_previous_image)
self.make_action('next_image', self.on_next_image)
self.make_action('switch_image', self.on_switch_image)
self.make_action('switch_to_boundingbox', self.on_switch_bounding_box)
self.make_action('zoom_out', self.on_zoom_out)
self.make_action('zoom_in', self.on_zoom_in)
self.make_action('zoom_normal', self.on_zoom_normal)
self.make_action('about', self.on_about)
def do_activate(self):
menu_builder = Gtk.Builder()
menu_builder.add_from_file('data/menu.glade')
menu_bar = menu_builder.get_object('menu_bar')
self.set_menubar(menu_bar)
win_builder = Gtk.Builder()
win_builder.add_from_file('data/GUI.glade')
self.handler = Handler(win_builder)
win_builder.connect_signals(self.handler)
self.window = win_builder.get_object('main_window')
self.window.set_title('Image Annotating')
self.window.set_application(self)
self.window.show_all()
main(self.handler)
def make_action(self, name, func):
action = Gio.SimpleAction.new(name, None)
action.connect('activate', func)
self.add_action(action)
def on_about(self, action, param):
about_dialog = AboutDialog(self.window)
response = about_dialog.run()
if response:
about_dialog.destroy()
def on_quit(self, action, param):
self.quit()
def on_open_image_folder(self, action, param):
self.handler.file_dialog(self.handler.open_dir_button)
def on_open_image(self, action, param):
self.handler.file_dialog(self.handler.open_image_button)
def on_open_markings(self, action, param):
self.handler.file_dialog(self.handler.load_points_button)
def on_open_marking_types(self, action, param):
self.handler.file_dialog(self.handler.load_point_type_button)
def on_save_markings(self, action, param):
self.handler.save_points_shortcut()
def on_save_as_markings(self, action, param):
self.handler.file_dialog(self.handler.save_points_button)
def on_previous_image(self, action, param):
self.handler.open_next_image(self.handler.previous_image_button)
def on_next_image(self, action, param):
self.handler.open_next_image(self.handler.next_image_button)
def on_switch_image(self, action, param):
self.handler.switch_things_shortcut(self.handler.switch_image_button)
def on_switch_bounding_box(self, action, param):
self.handler.switch_things_shortcut(self.handler.switch_box_button)
def on_zoom_out(self, action, param):
self.handler.zoom_pressed(self.handler.zoom_out_button)
def on_zoom_in(self, action, param):
self.handler.zoom_pressed(self.handler.zoom_in_button)
def on_zoom_normal(self, action, param):
self.handler.zoom_pressed(self.handler.zoom_normal)
def on_preferences(self, action, pram):
preferences_dialog = PreferencesDialog(self.window)
response = preferences_dialog.run()
if response:
preferences_dialog.destroy()
class PreferencesDialog(Gtk.Dialog):
def __init__(self, parent):
header = 'Preferences'
response = (Gtk.STOCK_OK, Gtk.ResponseType.OK)
Gtk.Dialog.__init__(self, header, parent, 0, response)
self.set_default_size(150, 100)
label = Gtk.Label('Coming soon')
label2 = Gtk.Label('')
box = self.get_content_area()
box.add(label)
box.add(label2)
self.show_all()
# drawing size
# ending of annotated files
# working dir
class AboutDialog(Gtk.Dialog):
def __init__(self, parent):
header = 'About'
response = (Gtk.STOCK_OK, Gtk.ResponseType.OK)
Gtk.Dialog.__init__(self, header, parent, 0, response)
self.set_default_size(150, 100)
label = Gtk.Label('Image annotation program')
label2 = Gtk.Label('Used to make markings on images.')
box = self.get_content_area()
box.add(label)
box.add(label2)
self.show_all()
class PointsNotSavedDialog(Gtk.Dialog):
def __init__(self, parent):
header = 'Points not saved!'
response = (Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK,
Gtk.ResponseType.OK)
Gtk.Dialog.__init__(self, header, parent, 0, response)
self.set_default_size(150, 100)
label = Gtk.Label('The current points have not been saved.')
label2 = Gtk.Label('Use Cancel to return and then save.')
label3 = Gtk.Label('Use OK to discard and continue.')
box = self.get_content_area()
box.add(label)
box.add(label2)
box.add(label3)
self.show_all()
class OverridePointImageDialog(Gtk.Dialog):
def __init__(self, parent):
header = 'Point - Image mismatch'
response = (Gtk.STOCK_NO,
Gtk.ResponseType.NO,
Gtk.STOCK_YES,
Gtk.ResponseType.YES)
Gtk.Dialog.__init__(self, header, parent, 0, response)
self.set_default_size(150, 100)
label = Gtk.Label('The points loaded do not match the current image.')
label2 = Gtk.Label('Do you want to show the points anyway?')
box = self.get_content_area()
box.add(label)
box.add(label2)
self.show_all()
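# Handler holds the widget references and the annotation state (current image,
# point list, summaries, zoom level) and implements the signal callbacks wired
# up via connect_signals in App.do_activate.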
class Handler:
def __init__(self, gui_builder):
self.dir_delimiter = '/'
if platform.system().startswith('Win'):
self.dir_delimiter = '\\'
# named tuples used.
self.buf_and_image = namedtuple('buf_and_image', ['buf', 'image'])
self.color = namedtuple('color', ['r', 'g', 'b', 'a'])
self.point = namedtuple('point', ('image', 'type',
'x', 'y', 'x2', 'y2', 'box')
+ self.color._fields)
self.summary_values = namedtuple('summary_values', ['amount', 'size',
'color'])
# handles to different widgets
self.main_window = gui_builder.get_object('main_window')
self.scroll_window = gui_builder.get_object('scroll_window')
self.v_adjust = self.scroll_window.get_vadjustment()
self.h_adjust = self.scroll_window.get_hadjustment()
self.layout = gui_builder.get_object('layout')
self.draw_image = gui_builder.get_object('draw_image')
self.draw_image_and_buf = self.buf_and_image(
self.draw_image.get_pixbuf(),
self.draw_image)
self.save_points_button = gui_builder.get_object('save_points')
self.open_image_button = gui_builder.get_object('open_image')
self.load_point_type_button = gui_builder.get_object('load_point_type')
self.load_points_button = gui_builder.get_object('load_points')
self.open_dir_button = gui_builder.get_object('open_image_folder')
self.zoom_in_button = gui_builder.get_object('zoom_in')
self.zoom_out_button = gui_builder.get_object('zoom_out')
self.zoom_normal = gui_builder.get_object('zoom_too_normal')
self.zoom_slider = gui_builder.get_object('zoom_scale')
self.gtk_point_type_list = gui_builder.get_object('point_type_list')
self.gtk_point_summary_list = gui_builder.get_object('point_summary')
self.point_type_button = gui_builder.get_object('select_point_type_box')
self.switch_image_button = gui_builder.get_object('switch_image')
self.switch_image_button.set_sensitive(False)
self.switch_box_button = gui_builder.get_object('draw_boxes')
self.progress_bar = gui_builder.get_object('progress_bar')
self.last_entry_label = gui_builder.get_object('last_entry')
self.next_image_button = gui_builder.get_object('open_next_image')
self.next_image_button.set_sensitive(False)
self.previous_image_button = gui_builder.get_object(
'open_previous_image')
self.previous_image_button.set_sensitive(False)
# setup the status bar
self.status_bar = gui_builder.get_object('status_bar')
self.status_msg = self.status_bar.get_context_id('Message')
self.status_warning = self.status_bar.get_context_id('Warning')
self.show_missing_image_warning = True
# ready the draw area
self.scroll_speed = 78
self.radius = 10
self.buffers_and_images = {}
self.init_draw_area(gui_builder)
self.window_height = 0
self.window_width = 0
self.do_scroll = False
self.do_drag = False
self.pressed_on_point = False
self.pressed_on_point_head = False
self.pressed_on_point_tail = False
self.point_clicked = None
self.pressed_x = None
self.pressed_y = None
self.draw_temp = None
self.draw_buf_temp = None
self.do_draw_bounding_boxes = False
# ready the point type selection
self.point_type_color = self.hex_color_to_rgba('#FF0000')
self.point_type = None
self.current_image = 'None'
self.list_of_images = []
self.tree_image_index = {}
self.image_folder = None
self.current_point_file = None
self.font = 'arial 11'
self.bold_font = 'arial bold 11'
self.background_color = '#FFFFFF'
self.point_summary_dict = {}
self.point_type_button.set_active(0)
# init list to store points in
self.point_list = []
self.points_saved = True
self.override_point_image_match = False
# init variables for zooming
self.slider_pressed = False
self.zoom_percent = 100
self.image_width = 100
self.image_height = 100
self.do_run_idle_tasks = True
task = self.do_draw_markings_when_idle()
GObject.idle_add(task.__next__)
def set_cursor(self, cursor_type=None):
cursor = Gdk.Cursor(Gdk.CursorType.ARROW)
if cursor_type == 'cross':
cursor = Gdk.Cursor(Gdk.CursorType.CROSSHAIR)
self.layout.get_bin_window().set_cursor(cursor)
def do_draw_markings_when_idle(self):
while self.do_run_idle_tasks:
if not self.do_drag and not self.do_scroll and \
not self.slider_pressed:
self.draw_markings()
yield True
yield False
def summary_init_values(self, color='#FFFFFF'):
return self.summary_values(0, 0, color)
def init_draw_area(self, gui_builder):
images = ['original_image',
'bw_image']
for im in images:
image = gui_builder.get_object(im)
buf = image.get_pixbuf()
bi = self.buf_and_image(buf, image)
self.buffers_and_images[im.rstrip('_image')] = bi
def delete_window(self, *args):
if self.warning_dialog_response():
return True
self.do_run_idle_tasks = False
self.main_window.destroy()
def warning_dialog_response(self):
if not self.points_saved:
warning_dialog = PointsNotSavedDialog(self.main_window)
response = warning_dialog.run()
if response == Gtk.ResponseType.OK:
warning_dialog.destroy()
return False
elif response == Gtk.ResponseType.CANCEL:
warning_dialog.destroy()
return True
def warning_point_image_mismatch(self):
warning_dialog = OverridePointImageDialog(self.main_window)
response = warning_dialog.run()
if response == Gtk.ResponseType.YES:
warning_dialog.destroy()
return True
elif response == Gtk.ResponseType.NO:
warning_dialog.destroy()
return False
def hex_color_to_rgba(self, hex_color):
h = hex_color.lstrip('#')
rgb = [int(h[i:i + 2], 16) / 255 for i in (0, 2, 4)]
rgb.append(1)
rgba = self.color._make(rgb)
return rgba
@staticmethod
def rgba_color_to_hex(rgba):
rgb = (int(rgba.r * 255), int(rgba.g * 255), int(rgba.b * 255))
hex_color = '#%02X%02X%02X' % rgb
return hex_color
def switch_images(self, button):
original = self.buffers_and_images.get('original')
bw = self.buffers_and_images.get('bw')
if button.get_active():
original.image.hide()
bw.image.show()
else:
original.image.show()
bw.image.hide()
def switch_to_bounding_box(self, button):
if button.get_active():
self.do_draw_bounding_boxes = True
self.set_cursor('cross')
else:
self.do_draw_bounding_boxes = False
self.set_cursor()
def zoom_slide(self, slider, scroll, value):
self.zoom_percent = round(value)
if abs(slider.get_value() - value) >= 10:
self.check_zoom_range()
self.zoom()
def check_zoom_range(self):
if self.zoom_percent > 250:
self.zoom_percent = 250
elif self.zoom_percent < 10:
self.zoom_percent = 10
def zoom_slide_pressed(self, scale, event):
self.slider_pressed = True
def zoom_slide_release(self, scale, event):
self.slider_pressed = False
self.check_zoom_range()
self.zoom()
def mouse_wheel(self, event_box, event):
if event.state & Gdk.ModifierType.CONTROL_MASK:
self.zoom_mouse_wheel(event)
else:
self.do_scroll_step(event)
self.move_draw_image()
self.draw_markings()
return True
def do_scroll_step(self, event):
y_updated = self.v_adjust.get_value()
y_updated = y_updated + event.delta_y * self.scroll_speed
self.v_adjust.set_value(y_updated)
def scale_to_zoom(self, *numbers, divide=False, offset=None):
if divide:
factor = 100 / self.zoom_percent
else:
factor = self.zoom_percent / 100
if offset is None:
offset = (0,) * len(numbers)
if len(numbers) == 1:
return numbers[0] * factor - offset[0]
args_out = []
for n, o in zip(numbers, offset):
if n is None:
args_out.append(None)
else:
args_out.append(n * factor - o)
return args_out
def zoom_mouse_wheel(self, event):
x, y = self.scale_to_zoom(event.x, event.y)
if event.delta_y == 1:
self.zoom_percent = self.zoom_percent - 10
elif event.delta_y == -1:
self.zoom_percent = self.zoom_percent + 10
self.check_zoom_range()
self.zoom()
delta_x = self.scale_to_zoom(event.x) - x
delta_y = self.scale_to_zoom(event.y) - y
delta_x = self.scale_to_zoom(delta_x, divide=True)
delta_y = self.scale_to_zoom(delta_y, divide=True)
self.scroll(delta_x, delta_y, delta=True)
self.move_draw_image()
def zoom_pressed(self, button):
if button.get_label() == 'Zoom too normal':
self.zoom_percent = 100
elif button.get_label() == 'Zoom in':
self.zoom_percent = self.zoom_percent + 10
elif button.get_label() == 'Zoom out':
self.zoom_percent = self.zoom_percent - 10
self.check_zoom_range()
self.zoom()
def zoom(self):
self.zoom_slider.set_value(self.zoom_percent)
self.progress_bar.set_text(None)
task = self.zoom_with_progress()
GObject.idle_add(task.__next__)
def zoom_with_progress(self):
progress = 0
self.progress_bar.set_fraction(0.0)
yield True
width, height = self.scale_to_zoom(self.image_width, self.image_height)
self.layout.set_size(width, height)
for bi in self.buffers_and_images.values():
try:
self.scale_image(bi, height, width)
except AttributeError:
self.warn_annotated_image()
progress = progress + 0.50
self.progress_bar.set_fraction(progress)
yield True
self.draw_markings()
self.progress_bar.set_text('Done!')
yield False
@staticmethod
def scale_image(buf_image, height, width):
buf_new = buf_image.buf.scale_simple(width,
height,
GdkPixbuf.InterpType.BILINEAR)
buf_image.image.set_from_pixbuf(buf_new)
return buf_new
def resize(self, widget, event):
if event.width != self.window_width \
or event.height != self.window_height:
self.resize_draw_image()
self.draw_markings()
self.window_height = event.height
self.window_width = event.width
def resize_draw_image(self):
width = self.h_adjust.get_page_size()
height = self.v_adjust.get_page_size()
draw = self.draw_image_and_buf
buf_new = self.scale_image(draw, height, width)
self.draw_image_and_buf = self.buf_and_image(buf_new, draw.image)
def move_draw_image(self):
x = self.h_adjust.get_value()
y = self.v_adjust.get_value()
self.layout.move(self.draw_image, x, y)
def scroll(self, x, y, *, delta=False):
scroll_x = self.h_adjust.get_value()
scroll_y = self.v_adjust.get_value()
if delta:
change_x = x
change_y = y
else:
change_x = self.pressed_x - x
change_y = self.pressed_y - y
new_scroll_x = scroll_x + change_x
self.h_adjust.set_value(new_scroll_x)
new_scroll_y = scroll_y + change_y
self.v_adjust.set_value(new_scroll_y)
def warn_annotated_image(self):
if self.show_missing_image_warning:
status_string = 'Computer annotated image not loaded!'
self.status_bar.push(self.status_warning, status_string)
self.switch_image_button.set_sensitive(False)
self.show_missing_image_warning = False
def point_type_changed(self, button):
model = button.get_model()
active = button.get_active()
if active >= 0:
code = model[active][0]
color = self.hex_color_to_rgba(code)
self.point_type_color = color
self.point_type = model[active][1]
self.update_summary()
def handle_shortcuts(self, event_box, event):
key_name = Gdk.keyval_name(event.keyval)
self.switch_point_type(key_name)
def save_points_shortcut(self):
if self.current_point_file is None:
self.file_dialog(self.save_points_button)
else:
self.save_points(self.current_point_file)
@staticmethod
def switch_things_shortcut(button):
if button.get_sensitive():
if button.get_active():
button.set_active(False)
else:
button.set_active(True)
def switch_point_type(self, key_name):
try:
idx = int(key_name) - 1
if idx in range(len(self.gtk_point_type_list)):
self.point_type_button.set_active(idx)
except ValueError:
pass
def mouse_move(self, event_box, event):
if self.do_drag:
self.make_line_marking(event)
elif self.do_scroll:
self.scroll(event.x, event.y)
elif self.pressed_on_point:
self.point_clicked = self.move_marking_live(event)
def make_point(self, x, y, x2=None, y2=None, box=False):
args = (self.current_image, self.point_type, x, y, x2, y2, box)
point = self.point(*args, *self.point_type_color)
return point
def make_line_marking(self, event):
args = (self.pressed_x, self.pressed_y, event.x, event.y)
point = self.make_point(*args)
self.draw_live(point)
def add_remove_point(self, event_box, event):
if event.button == 1:
if event.state & Gdk.ModifierType.CONTROL_MASK:
self.remove_marking(event)
else:
self.pressed_on_point = self.find_closest_point(event)
if not self.pressed_on_point or self.do_drag:
self.add_marking(event)
elif event.button == 2:
self.button_scroll(event)
elif event.button == 3:
self.remove_marking(event)
self.draw_markings()
def button_scroll(self, event):
if event.type == Gdk.EventType.BUTTON_PRESS:
self.do_scroll = True
self.pressed_x = event.x
self.pressed_y = event.y
elif event.type == Gdk.EventType.BUTTON_RELEASE:
self.do_scroll = False
self.move_draw_image()
def find_closest_point(self, point):
scaled_p = self.scale_to_zoom(point.x, point.y, divide=True)
dist_keep = np.inf
p_keep = None
for p in self.point_list:
if p.image == self.current_image:
dist_head = self.get_dist(p, scaled_p)
dist_tail = self.get_dist(p, scaled_p, head=False)
dist = min(dist_head, dist_tail)
if dist < dist_keep:
dist_keep = dist
p_keep = p
if dist == dist_head:
self.pressed_on_point_head = True
self.pressed_on_point_tail = False
else:
self.pressed_on_point_tail = True
self.pressed_on_point_head = False
dist_keep = self.scale_to_zoom(dist_keep)
smaller_than_radius = dist_keep < self.radius
if smaller_than_radius:
self.point_clicked = p_keep
return smaller_than_radius
@staticmethod
def get_dist(marking, point=None, head=True):
x1 = marking[2]
y1 = marking[3]
if point is None:
x2 = marking[4]
y2 = marking[5]
if x2 is None:
return 0
else:
x2 = point[0]
y2 = point[1]
if not head:
x1 = marking[4]
y1 = marking[5]
if x1 is None:
return np.inf
return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
@staticmethod
def get_angle(marking):
if marking.x2 is None:
return 0
else:
angle = atan2(-(marking.y2 - marking.y),
(marking.x2 - marking.x)) / pi * 180
return angle
def check_if_clicked_on_marking(self, event):
if self.point_type is not None:
if self.check_if_click(event):
if self.find_closest_point(event):
return True
else:
status_string = 'No point types loaded!'
self.status_bar.push(self.status_msg, status_string)
return False
def remove_marking(self, event):
if self.check_if_clicked_on_marking(event):
self.points_saved = False
self.point_list.remove(self.point_clicked)
label_text = 'removed: (%i, %i)' % (int(event.x), int(event.y))
self.update_label(label_text)
self.make_new_summary(self.point_clicked, add=False)
self.update_summary()
def update_label(self, text):
self.last_entry_label.set_text(text)
def make_new_summary(self, point, *, add):
if add:
sign = 1
else:
sign = -1
key = self.current_image + '--' + point.type
summary = self.point_summary_dict.get(key)
size = self.get_dist(point)
new_summary = self.summary_values(summary.amount + sign*1,
summary.size + sign*size,
summary.color)
self.point_summary_dict[key] = new_summary
def check_if_click(self, event, do_drag=False):
if event.type == Gdk.EventType.BUTTON_PRESS:
self.do_drag = do_drag
self.pressed_x = event.x
self.pressed_y = event.y
self.draw_temp = self.draw_image_and_buf
self.draw_buf_temp = self.draw_temp.image.get_pixbuf()
if event.type == Gdk.EventType.BUTTON_RELEASE:
self.do_drag = False
sensitivity = 5
if abs(self.pressed_x - event.x) < sensitivity and \
abs(self.pressed_y - event.y) < sensitivity:
return True
return False
def add_marking(self, event):
if self.point_type is not None:
if self.check_if_click(event, do_drag=True):
self.add_point(event)
elif event.type == Gdk.EventType.BUTTON_RELEASE:
self.add_size_mark(event)
else:
status_string = 'No point types loaded!'
self.status_bar.push(self.status_msg, status_string)
def add_size_mark(self, event):
self.points_saved = False
args = self.scale_to_zoom(self.pressed_x,
self.pressed_y,
event.x,
event.y,
divide=True)
box = self.do_draw_bounding_boxes
point = self.make_point(*args, box)
self.point_list.append(point)
label_text = '%s %i px, %i degrees' % (self.point_type,
int(self.get_dist(point)),
int(self.get_angle(point)))
self.update_label(label_text)
self.make_new_summary(point, add=True)
self.update_summary()
def add_point(self, event):
self.points_saved = False
args = self.scale_to_zoom(event.x, event.y, divide=True)
point = self.make_point(*args)
self.point_list.append(point)
label_text = '%s (%i, %i)' % (self.point_type,
int(point.x),
int(point.y))
self.update_label(label_text)
self.make_new_summary(point, add=True)
self.update_summary()
def move_marking_live(self, event):
point = self.point_clicked
new_coord = self.scale_to_zoom(event.x, event.y, divide=True)
if self.pressed_on_point_head:
new_point = point._replace(x=new_coord[0], y=new_coord[1])
else:
new_point = point._replace(x2=new_coord[0], y2=new_coord[1])
self.point_list.remove(point)
self.point_list.append(new_point)
self.change_size_in_summary(point, new_point)
self.update_summary()
return new_point
def change_size_in_summary(self, point_old, point_new):
size_old = self.get_dist(point_old)
size_new = self.get_dist(point_new)
key = self.current_image + '--' + point_old.type
summary = self.point_summary_dict.get(key)
new_summary = self.summary_values(summary.amount,
summary.size + size_new - size_old,
summary.color)
self.point_summary_dict[key] = new_summary
def update_summary(self):
self.gtk_point_summary_list.clear()
old_image = ''
idx = 0
self.tree_image_index = {}
dict_sort = sorted(self.point_summary_dict.items(), key=lambda x: x[0])
for key, summary in dict_sort:
full_image, point_type = key.split('--')
image_font, point_font = self.get_font(full_image, point_type)
image = full_image.split(self.dir_delimiter)[-1]
if image != old_image:
self.gtk_point_summary_list.append([image, '', '',
image_font,
self.background_color])
old_image = image
self.tree_image_index.update({idx: full_image})
idx = idx + 1
self.gtk_point_summary_list.append([point_type,
str(summary.amount),
str(int(summary.size)),
point_font,
summary.color])
idx = idx + 1
def get_font(self, image, point_type):
if image == self.current_image:
image_font = self.bold_font
if point_type == self.point_type:
point_font = self.bold_font
else:
point_font = self.font
else:
image_font = self.font
point_font = self.font
return image_font, point_font
def draw_live(self, point):
width = self.draw_buf_temp.get_width()
height = self.draw_buf_temp.get_height()
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
cr = cairo.Context(surface)
Gdk.cairo_set_source_pixbuf(cr, self.draw_buf_temp, 0, 0)
cr.paint()
cr.set_source_rgba(point.r, point.g, point.b, point.a)
args = self.shift_coordinates(point)
self.draw_circle(cr, args[0], args[1])
if self.do_draw_bounding_boxes:
self.draw_box(cr, *args)
else:
self.draw_line(cr, *args)
surface = cr.get_target()
draw_buf = Gdk.pixbuf_get_from_surface(surface, 0, 0, width, height)
self.draw_temp.image.set_from_pixbuf(draw_buf)
def shift_coordinates(self, point):
offset = (self.h_adjust.get_value(), self.v_adjust.get_value())
args = (point.x - offset[0], point.y - offset[1],
point.x2 - offset[0], point.y2 - offset[1])
return args
def get_draw_coordinate(self, p):
offset = (self.h_adjust.get_value(), self.v_adjust.get_value(),
self.h_adjust.get_value(), self.v_adjust.get_value())
args = self.scale_to_zoom(p.x, p.y, p.x2, p.y2, offset=offset)
return args
def draw_markings(self):
draw = self.draw_image_and_buf
draw_buf = draw.buf
width = draw_buf.get_width()
height = draw_buf.get_height()
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
cr = cairo.Context(surface)
Gdk.cairo_set_source_pixbuf(cr, draw_buf, 0, 0)
cr.paint()
for point in self.point_list:
if point.image == self.current_image or \
self.override_point_image_match:
args = self.get_draw_coordinate(point)
cr.set_source_rgba(point.r, point.g, point.b, point.a)
self.draw_circle(cr, args[0], args[1])
if point.box:
self.draw_box(cr, *args)
elif args[3] is not None:
self.draw_line(cr, *args)
surface = cr.get_target()
draw_buf = Gdk.pixbuf_get_from_surface(surface, 0, 0, width, height)
draw.image.set_from_pixbuf(draw_buf)
def draw_circle(self, cr, x, y):
cr.arc(x, y, self.radius, 0, 2 * pi)
cr.fill()
def draw_line(self, cr, x, y, x2, y2):
cr.move_to(x, y)
cr.line_to(x2, y2)
cr.set_line_width(3)
cr.stroke()
cr.arc(x2, y2, self.radius / 2, 0, 2 * pi)
cr.fill()
def draw_box(self, cr, x, y, x2, y2):
cr.move_to(x, y)
cr.line_to(x, y2)
cr.line_to(x2, y2)
cr.line_to(x2, y)
cr.line_to(x, y)
cr.set_line_width(3)
cr.stroke()
cr.arc(x2, y2, self.radius / 2, 0, 2 * pi)
cr.fill()
def open_image_from_tree(self, tree, path, col):
idx = Gtk.TreePath.get_indices(path)[0]
if idx in self.tree_image_index:
self.open_image(self.tree_image_index.get(idx))
def open_next_image(self, button):
shift = 1
if button.get_label() == 'Open previous image':
shift = -1
if not self.list_of_images:
self.get_list_of_images()
try:
idx = self.list_of_images.index(self.current_image) + shift
except ValueError:
idx = 0
if 0 <= idx < len(self.list_of_images):
new_image = self.list_of_images[idx]
self.open_image(new_image)
if idx + 1 == len(self.list_of_images):
self.next_image_button.set_sensitive(False)
elif idx == 0:
self.previous_image_button.set_sensitive(False)
elif idx + 1 > len(self.list_of_images) or idx < 0:
status_string = 'No more images in folder'
self.status_bar.push(self.status_msg, status_string)
def get_list_of_images(self):
files = list(self.get_files_in_dir())
self.list_of_images = sorted(files, key=lambda x: x)
def get_files_in_dir(self):
for file in os.listdir(self.image_folder):
if file.endswith('JPG'):
yield os.path.join(self.image_folder, file)
elif file.endswith('_annotated.png'):
pass
elif file.endswith('png'):
yield os.path.join(self.image_folder, file)
@staticmethod
def add_image_filters(dialog):
filter_jpg = Gtk.FileFilter()
filter_jpg.set_name('JPG images')
filter_jpg.add_mime_type('image/jpeg')
dialog.add_filter(filter_jpg)
filter_png = Gtk.FileFilter()
filter_png.set_name('Png images')
filter_png.add_mime_type('image/png')
dialog.add_filter(filter_png)
filter_any = Gtk.FileFilter()
filter_any.set_name('Any files')
filter_any.add_pattern('*')
dialog.add_filter(filter_any)
@staticmethod
def add_text_filters(dialog):
filter_csv = Gtk.FileFilter()
filter_csv.set_name('csv')
filter_csv.add_mime_type('text/csv')
dialog.add_filter(filter_csv)
filter_plain = Gtk.FileFilter()
filter_plain.set_name('Plain text')
filter_plain.add_mime_type('text/plain')
dialog.add_filter(filter_plain)
filter_any = Gtk.FileFilter()
filter_any.set_name('Any files')
filter_any.add_pattern('*')
dialog.add_filter(filter_any)
def open_image_folder(self, filename):
self.image_folder = filename
self.open_next_image(self.next_image_button)
def open_image(self, filename):
self.current_image = filename
self.image_folder = os.path.dirname(filename)
status_string = 'Image and computer annotated image opened.'
self.status_bar.push(self.status_msg, status_string)
self.next_image_button.set_sensitive(True)
self.previous_image_button.set_sensitive(True)
self.switch_image_button.set_sensitive(True)
self.show_missing_image_warning = True
original = self.buffers_and_images.get('original')
original.image.set_from_file(filename)
new_original_buf = original.image.get_pixbuf()
new_original = self.buf_and_image(new_original_buf,
original.image)
self.buffers_and_images['original'] = new_original
bw = self.buffers_and_images.get('bw')
bw_filename = filename[0:-4] + '_annotated.png'
bw.image.set_from_file(bw_filename)
new_bw_buf = bw.image.get_pixbuf()
new_bw = self.buf_and_image(new_bw_buf, bw.image)
self.buffers_and_images['bw'] = new_bw
self.zoom_percent = 100
self.image_width = new_original.buf.get_width()
self.image_height = new_original.buf.get_height()
for pt in self.gtk_point_type_list:
key = self.current_image + '--' + pt[1]
if key not in self.point_summary_dict:
new_dict = {key: self.summary_init_values(pt[0])}
self.point_summary_dict.update(new_dict)
self.update_summary()
self.zoom()
def load_point_types(self, filename):
status_string = 'Point types loaded.'
self.status_bar.push(self.status_msg, status_string)
self.gtk_point_type_list.clear()
image = self.current_image.split(self.dir_delimiter)
self.gtk_point_summary_list.append([image[-1], '', '', self.font,
self.background_color])
with open(filename, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
reader.__next__()
sort_points = sorted(reader, key=lambda x: x[1])
for point in sort_points:
self.update_point_types(point)
self.point_type_button.set_active(0)
self.draw_markings()
def update_point_types(self, row):
self.gtk_point_type_list.append(row)
key = self.current_image + '--' + row[1]
self.point_summary_dict.update({key: self.summary_init_values(row[0])})
self.update_summary()
def save_points(self, filename):
self.current_point_file = filename
status_string = 'Points saved.'
self.status_bar.push(self.status_msg, status_string)
self.points_saved = True
header = ['image', 'type', 'x1', 'y1', 'x2', 'y2', 'box',
'red', 'green', 'blue', 'alpha']
with open(filename, 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(header)
for p in self.point_list:
writer.writerow(p)
def load_points(self, filename):
self.current_point_file = filename
status_string = 'Points loaded.'
self.status_bar.push(self.status_msg, status_string)
self.point_list = []
self.gtk_point_summary_list.clear()
with open(filename, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
reader.__next__()
image_point_match = self.points_parser(reader)
if not image_point_match:
if self.warning_point_image_mismatch():
self.override_point_image_match = True
self.make_summary_dict()
self.update_summary()
self.points_saved = True
self.draw_markings()
@staticmethod
def point_parser(row):
args = []
for data in row:
try:
args.append(float(data))
except ValueError:
if data == 'True':
args.append(True)
elif data == 'False':
args.append(False)
elif not data:
args.append(None)
else:
args.append(data)
return args
def points_parser(self, reader):
image_point_match = False
for row in reader:
if not row:
pass
else:
args = self.point_parser(row)
if args[0] == self.current_image:
image_point_match = True
self.point_list.append(self.point(*args))
return image_point_match
def make_summary_dict(self):
self.point_summary_dict.clear()
for p in self.point_list:
color = self.rgba_color_to_hex(p)
key = p.image + '--' + p.type
size = self.get_dist(p)
if key not in self.point_summary_dict:
values = self.summary_values(1, size, color)
else:
values = self.point_summary_dict.get(key)
values = self.summary_values(values.amount + 1,
values.size + size,
color)
self.point_summary_dict.update({key: values})
for pt in self.gtk_point_type_list:
key = self.current_image + '--' + pt[1]
if key not in self.point_summary_dict:
new_dict = {key: self.summary_init_values(pt[0])}
self.point_summary_dict.update(new_dict)
def file_dialog(self, button):
text = 'Choose a file'
action = Gtk.FileChooserAction.OPEN
file_button = Gtk.STOCK_OPEN
if button.get_label() == 'Save points':
text = 'Save points as'
action = Gtk.FileChooserAction.SAVE
file_button = Gtk.STOCK_SAVE
elif button.get_label() == 'Load points':
if self.warning_dialog_response():
return True
text = 'Choose a file with the points'
elif button.get_label() == 'Open image':
text = 'Choose an image to open'
elif button.get_label() == 'Load point types':
text = 'Choose a file with the point types'
elif button.get_label() == 'Open image folder':
text = 'Choose a folder with images'
action = Gtk.FileChooserAction.SELECT_FOLDER
response = (Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
file_button,
Gtk.ResponseType.OK)
dialog = Gtk.FileChooserDialog(text,
self.main_window,
action,
response)
if button.get_label() == 'Open image':
self.add_image_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.open_image(dialog.get_filename())
elif button.get_label() == 'Load point types':
self.add_text_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.load_point_types(dialog.get_filename())
elif button.get_label() == 'Save points':
dialog.set_do_overwrite_confirmation(True)
dialog.set_current_name('untitled.csv')
if self.current_point_file is None:
if self.image_folder is not None:
dialog.set_current_folder(self.image_folder)
else:
dialog.set_filename(self.current_point_file)
self.add_text_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.save_points(dialog.get_filename())
elif button.get_label() == 'Load points':
self.add_text_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.load_points(dialog.get_filename())
elif button.get_label() == 'Open image folder':
response = dialog.run()
if response == Gtk.ResponseType.OK:
self.open_image_folder(dialog.get_filename())
dialog.destroy()
if __name__ == '__main__':
app = App()
app.run()
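For reference, the points file that save_points writes and load_points reads back is plain CSV whose columns follow the point namedtuple: image path, point type, the two coordinate pairs, the bounding-box flag, and the four colour channels. A minimal sketch of that round trip, with a hypothetical file name and made-up values, just to show the shape point_parser expects:
import csv

header = ['image', 'type', 'x1', 'y1', 'x2', 'y2', 'box',
          'red', 'green', 'blue', 'alpha']
row = ['photos/img_0001.png', 'crack', 120.0, 85.5, 240.0, 90.0, False,
       1.0, 0.0, 0.0, 1.0]

with open('example_points.csv', 'w', newline='') as f:  # hypothetical file name
    writer = csv.writer(f)
    writer.writerow(header)
    writer.writerow(row)

with open('example_points.csv', newline='') as f:
    reader = csv.reader(f)
    next(reader)  # skip the header row, as load_points does
    for raw in reader:
        # point_parser turns numbers into floats, 'True'/'False' into bools,
        # empty fields into None, and keeps everything else as text
        print(raw)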
|
from models import Boat, Telemetry, engine
from flask import Flask, jsonify
from memcached import mc_client
from oboeware import OboeMiddleware
from werkzeug.serving import run_simple
import oboe
app = Flask(__name__)
@app.route("/data/v1")
def seabus():
telemetry = {'boats': []}
for boat in Boat.all_seabuses():
lat = lon = None
cached_telemetry = mc_client.get(str(boat.mmsi))
if not cached_telemetry:
seabus_telemetry = Telemetry.latest_for_boat(boat)
lat = seabus_telemetry.lat
lon = seabus_telemetry.lon
# cache for next time
cached_telemetry = {'lat': telemetry.lat, 'lon': telemetry.lon}
mc_client.set(str(boat.mmsi), cached_telemetry)
else:
lat = cached_telemetry.get('lat')
lon = cached_telemetry.get('lon')
if None not in (lat, lon):
name = boat.name
id = boat.id
telemetry['boats'].append(
{'lat': lat,
'lon': lon,
'name': name,
'id': id
}
)
return jsonify(telemetry)
if __name__ == '__main__':
tv_app = OboeMiddleware(app)
if not tv_app:
app.run(debug=True)
else:
run_simple('127.0.0.1', 5000, tv_app)
[mc] cache correct seabus data on the web side
from models import Boat, Telemetry, engine
from flask import Flask, jsonify
from memcached import mc_client
from oboeware import OboeMiddleware
from werkzeug.serving import run_simple
import oboe
app = Flask(__name__)
@app.route("/data/v1")
def seabus():
telemetry = {'boats': []}
for boat in Boat.all_seabuses():
lat = lon = None
cached_telemetry = mc_client.get(str(boat.mmsi))
if not cached_telemetry:
seabus_telemetry = Telemetry.latest_for_boat(boat)
lat = seabus_telemetry.lat
lon = seabus_telemetry.lon
# cache for next time
cached_telemetry = {'lat': seabus_telemetry.lat, 'lon': seabus_telemetry.lon}
mc_client.set(str(boat.mmsi), cached_telemetry)
else:
lat = cached_telemetry.get('lat')
lon = cached_telemetry.get('lon')
if None not in (lat, lon):
name = boat.name
id = boat.id
telemetry['boats'].append(
{'lat': lat,
'lon': lon,
'name': name,
'id': id
}
)
return jsonify(telemetry)
if __name__ == '__main__':
tv_app = OboeMiddleware(app)
if not tv_app:
app.run(debug=True)
else:
run_simple('127.0.0.1', 5000, tv_app)
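For completeness, a client of the endpoint above only needs a plain GET; the view returns a JSON object holding a single boats list. A minimal sketch using only the standard library, assuming the development server from the __main__ block is running locally on port 5000:
import json
from urllib.request import urlopen

with urlopen('http://127.0.0.1:5000/data/v1') as resp:  # local dev server assumed
    payload = json.load(resp)

for boat in payload['boats']:
    # each entry carries lat, lon, name and id, served either from memcached
    # or freshly read from the latest Telemetry row for that boat
    print(boat['name'], boat['lat'], boat['lon'])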
|
import matplotlib.pyplot as plt
def generate_chart(total_files, occurrencies):
labels = 'With @Refactoring', 'Without @Refactoring'
sizes = [total_files, occurrencies]
colors = ['yellowgreen', 'gold', ]
explode = (0, 0.1)
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=False, startangle=180)
plt.axis('equal')
plt.savefig('chart.png')
plt.show()
changing chart labels and fixes to visualization
import matplotlib.pyplot as plt
def generate_chart(total_files, occurrencies):
labels = 'Annotation occurrences', 'Files without annotation'
sizes = [total_files, occurrencies]
colors = ['yellowgreen', 'gold', ]
explode = (0.2, 0)
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90)
#plt.axis('equal')
plt.savefig('chart.png')
plt.show()
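A quick way to exercise the function, with made-up counts purely to see the two slices rendered and chart.png written to the working directory (the module name is hypothetical):
from chart import generate_chart  # assumes the file above is saved as chart.py

generate_chart(total_files=120, occurrencies=35)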
|
# -*- coding: UTF-8 -*-
# Copyright 2002-2017 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
# Note that this module may not have a docstring because any global
# variable defined here will override the global namespace of modules
# like `lino_welfare/__init__.py` who include this file with execfile
# This module is part of the Lino Welfare test suite.
# To test only this module:
#
# $ python setup.py test -s tests.PackagesTests
import six
requires = ['lino-cosi',
# 'vobject',
'pytidylib', 'channels',
'django-iban', 'metafone', 'weasyprint',
# 'cairocffi<0.7'] # seems that < 0.7 no longer required
'cairocffi']
if six.PY2:
requires.append('suds')
else:
requires.append('suds-py3')
SETUP_INFO = dict(
name='lino-welfare',
version='17.10.0',
install_requires=requires,
test_suite='tests',
tests_require=['pytest'],
include_package_data=True,
zip_safe=False,
description=u"A Lino application for Belgian Centres for Public Welfare",
long_description="""\
Lino Welfare is a modular
`Lino <http://www.lino-framework.org>`__
application for Belgian
*Public Centres for Social Welfare*.
- For *introductions* and *commercial information* about Lino Welfare
please see `www.saffre-rumma.net
<http://www.saffre-rumma.net/welfare/>`__.
- The central project homepage is http://welfare.lino-framework.org
- There are separate *user guides* in `French
<http://fr.welfare.lino-framework.org>`_ and `German
<http://de.welfare.lino-framework.org>`_.
- Online demo site at http://welfare-demo.lino-framework.org
""",
author='Luc Saffre',
author_email='luc.saffre@gmail.com',
url="http://welfare.lino-framework.org",
license='GNU Affero General Public License v3',
classifiers="""\
Programming Language :: Python
Programming Language :: Python :: 2
Development Status :: 5 - Production/Stable
Environment :: Web Environment
Framework :: Django
Intended Audience :: Developers
Intended Audience :: System Administrators
License :: OSI Approved :: GNU Affero General Public License v3
Natural Language :: English
Natural Language :: French
Natural Language :: German
Operating System :: OS Independent
Topic :: Database :: Front-Ends
Topic :: Home Automation
Topic :: Office/Business
Topic :: Sociology :: Genealogy
Topic :: Education""".splitlines())
SETUP_INFO.update(packages=[
'lino_welfare',
'lino_welfare.modlib',
'lino_welfare.modlib.active_job_search',
'lino_welfare.modlib.active_job_search.fixtures',
'lino_welfare.modlib.aids',
'lino_welfare.modlib.aids.fixtures',
'lino_welfare.modlib.art61',
'lino_welfare.modlib.art61.fixtures',
'lino_welfare.modlib.badges',
'lino_welfare.modlib.cal',
'lino_welfare.modlib.cal.fixtures',
'lino_welfare.modlib.cbss',
'lino_welfare.modlib.cbss.fixtures',
'lino_welfare.modlib.cbss.management',
'lino_welfare.modlib.cbss.management.commands',
'lino_welfare.modlib.client_vouchers',
'lino_welfare.modlib.contacts',
'lino_welfare.modlib.contacts.fixtures',
'lino_welfare.modlib.contacts.management',
'lino_welfare.modlib.contacts.management.commands',
'lino_welfare.modlib.xcourses',
'lino_welfare.modlib.xcourses.fixtures',
'lino_welfare.modlib.cv',
'lino_welfare.modlib.cv.fixtures',
'lino_welfare.modlib.debts',
'lino_welfare.modlib.debts.fixtures',
'lino_welfare.modlib.dupable_clients',
'lino_welfare.modlib.dupable_clients.fixtures',
'lino_welfare.modlib.finan',
'lino_welfare.modlib.finan.fixtures',
'lino_welfare.modlib.esf',
'lino_welfare.modlib.esf.fixtures',
'lino_welfare.modlib.households',
'lino_welfare.modlib.households.fixtures',
'lino_welfare.modlib.integ',
'lino_welfare.modlib.integ.fixtures',
'lino_welfare.modlib.isip',
'lino_welfare.modlib.jobs',
'lino_welfare.modlib.jobs.fixtures',
'lino_welfare.modlib.ledger',
'lino_welfare.modlib.ledger.fixtures',
'lino_welfare.modlib.notes',
'lino_welfare.modlib.notes.fixtures',
'lino_welfare.modlib.newcomers',
'lino_welfare.modlib.newcomers.fixtures',
'lino_welfare.modlib.pcsw',
'lino_welfare.modlib.pcsw.fixtures',
'lino_welfare.modlib.polls',
'lino_welfare.modlib.polls.fixtures',
'lino_welfare.modlib.projects',
'lino_welfare.modlib.reception',
'lino_welfare.modlib.sales',
'lino_welfare.modlib.sepa',
'lino_welfare.modlib.sepa.fixtures',
'lino_welfare.modlib.system',
'lino_welfare.modlib.immersion',
'lino_welfare.modlib.immersion.fixtures',
'lino_welfare.modlib.users',
'lino_welfare.modlib.users.fixtures',
'lino_welfare.modlib.welfare',
'lino_welfare.modlib.welfare.fixtures',
'lino_welfare.modlib.welfare.management',
'lino_welfare.modlib.welfare.management.commands',
'lino_welfare.projects',
'lino_welfare.projects.chatelet',
'lino_welfare.projects.chatelet.modlib',
'lino_welfare.projects.chatelet.modlib.courses',
'lino_welfare.projects.chatelet.modlib.courses.fixtures',
'lino_welfare.projects.chatelet.modlib.cv',
'lino_welfare.projects.chatelet.modlib.cv.fixtures',
'lino_welfare.projects.chatelet.modlib.isip',
'lino_welfare.projects.chatelet.modlib.pcsw',
'lino_welfare.projects.chatelet.modlib.pcsw.fixtures',
'lino_welfare.projects.chatelet.settings',
'lino_welfare.projects.chatelet.tests',
'lino_welfare.projects.std',
'lino_welfare.projects.std.settings',
'lino_welfare.projects.std.tests',
'lino_welfare.projects.eupen',
'lino_welfare.projects.eupen.modlib',
'lino_welfare.projects.eupen.modlib.pcsw',
'lino_welfare.projects.eupen.modlib.pcsw.fixtures',
'lino_welfare.projects.eupen.settings',
'lino_welfare.projects.eupen.tests',
'lino_welfare.projects.y2k',
'lino_welfare.projects.y2k.settings',
'lino_welfare.scripts',
])
SETUP_INFO.update(message_extractors={
'lino_welfare': [
('**/cache/**', 'ignore', None),
('**.py', 'python', None),
('**.js', 'javascript', None),
('**/config/**.html', 'jinja2', None),
#~ ('**/templates/**.txt', 'genshi', {
#~ 'template_class': 'genshi.template:TextTemplate'
#~ })
],
})
SETUP_INFO.update(package_data=dict())
# def add_package_data(package, *patterns):
# l = SETUP_INFO['package_data'].setdefault(package, [])
# l.extend(patterns)
# return l
# add_package_data('lino_welfare.modlib.cbss',
# 'WSDL/*.wsdl',
# 'XSD/*.xsd',
# 'XSD/SSDN/Common/*.xsd',
# 'XSD/SSDN/OCMW_CPAS/IdentifyPerson/*.xsd',
# 'XSD/SSDN/OCMW_CPAS/ManageAccess/*.xsd',
# 'XSD/SSDN/OCMW_CPAS/PerformInvestigation/*.xsd',
# 'XSD/SSDN/OCMW_CPAS/Loi65Wet65/*.xsd',
# 'XSD/SSDN/Person/*.xsd',
# 'XSD/SSDN/Service/*.xsd')
# add_package_data('lino_welfare.modlib.cbss',
# 'config/cbss/RetrieveTIGroupsRequest/*.odt')
# add_package_data('lino_welfare.modlib.cbss',
# 'config/cbss/IdentifyPersonRequest/*.odt')
# add_package_data('lino_welfare.modlib.cbss', 'fixtures/*.csv')
# add_package_data('lino_welfare.modlib.cbss', 'fixtures/*.xml')
# add_package_data('lino_welfare.modlib.debts', 'config/debts/Budget/*.odt')
# add_package_data('lino_welfare.modlib.courses', 'config/courses/Course/*.odt')
# add_package_data('lino_welfare.modlib.pcsw', 'config/pcsw/Client/*.odt')
# add_package_data('lino_welfare.modlib.cal', 'config/cal/Guest/*.odt')
# add_package_data('lino_welfare.modlib.jobs',
# 'config/jobs/ContractsSituation/*.odt')
# add_package_data('lino_welfare.modlib.jobs',
# 'config/jobs/OldJobsOverview/*.odt')
# add_package_data('lino_welfare.modlib.jobs', 'config/jobs/JobsOverview/*.odt')
# add_package_data('lino_welfare.settings', 'media/pictures/contacts.Person.jpg')
# add_package_data('lino_welfare', 'config/lino_welfare/ActivityReport/*.odt')
# add_package_data('lino_welfare', 'config/admin_main.html')
# l = add_package_data('lino_welfare.modlib.welfare')
# for lng in 'fr de nl'.split():
# l.append('lino_welfare/modlib/welfare/locale/%s/LC_MESSAGES/*.mo' % lng)
http://luc.lino-framework.org/blog/2018/0202.html
# -*- coding: UTF-8 -*-
# Copyright 2002-2018 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
# Note that this module may not have a docstring because any global
# variable defined here will override the global namespace of modules
# like `lino_welfare/__init__.py` who include this file with execfile
# This module is part of the Lino Welfare test suite.
# To test only this module:
#
# $ python setup.py test -s tests.PackagesTests
import six
requires = ['lino-cosi',
# 'vobject',
'pytidylib', 'channels<2',
'django-iban', 'metafone', 'weasyprint',
# 'cairocffi<0.7'] # seems that < 0.7 no longer required
'cairocffi']
if six.PY2:
requires.append('suds')
else:
requires.append('suds-py3')
SETUP_INFO = dict(
name='lino-welfare',
version='17.10.0',
install_requires=requires,
test_suite='tests',
tests_require=['pytest'],
include_package_data=True,
zip_safe=False,
description=u"A Lino application for Belgian Centres for Public Welfare",
long_description="""\
Lino Welfare is a modular
`Lino <http://www.lino-framework.org>`__
application for Belgian
*Public Centres for Social Welfare*.
- For *introductions* and *commercial information* about Lino Welfare
please see `www.saffre-rumma.net
<http://www.saffre-rumma.net/welfare/>`__.
- The central project homepage is http://welfare.lino-framework.org
- There are separate *user guides* in `French
<http://fr.welfare.lino-framework.org>`_ and `German
<http://de.welfare.lino-framework.org>`_.
- Online demo site at http://welfare-demo.lino-framework.org
""",
author='Luc Saffre',
author_email='luc.saffre@gmail.com',
url="http://welfare.lino-framework.org",
license='GNU Affero General Public License v3',
classifiers="""\
Programming Language :: Python
Programming Language :: Python :: 2
Development Status :: 5 - Production/Stable
Environment :: Web Environment
Framework :: Django
Intended Audience :: Developers
Intended Audience :: System Administrators
License :: OSI Approved :: GNU Affero General Public License v3
Natural Language :: English
Natural Language :: French
Natural Language :: German
Operating System :: OS Independent
Topic :: Database :: Front-Ends
Topic :: Home Automation
Topic :: Office/Business
Topic :: Sociology :: Genealogy
Topic :: Education""".splitlines())
SETUP_INFO.update(packages=[
'lino_welfare',
'lino_welfare.modlib',
'lino_welfare.modlib.active_job_search',
'lino_welfare.modlib.active_job_search.fixtures',
'lino_welfare.modlib.aids',
'lino_welfare.modlib.aids.fixtures',
'lino_welfare.modlib.art61',
'lino_welfare.modlib.art61.fixtures',
'lino_welfare.modlib.badges',
'lino_welfare.modlib.cal',
'lino_welfare.modlib.cal.fixtures',
'lino_welfare.modlib.cbss',
'lino_welfare.modlib.cbss.fixtures',
'lino_welfare.modlib.cbss.management',
'lino_welfare.modlib.cbss.management.commands',
'lino_welfare.modlib.client_vouchers',
'lino_welfare.modlib.contacts',
'lino_welfare.modlib.contacts.fixtures',
'lino_welfare.modlib.contacts.management',
'lino_welfare.modlib.contacts.management.commands',
'lino_welfare.modlib.xcourses',
'lino_welfare.modlib.xcourses.fixtures',
'lino_welfare.modlib.cv',
'lino_welfare.modlib.cv.fixtures',
'lino_welfare.modlib.debts',
'lino_welfare.modlib.debts.fixtures',
'lino_welfare.modlib.dupable_clients',
'lino_welfare.modlib.dupable_clients.fixtures',
'lino_welfare.modlib.finan',
'lino_welfare.modlib.finan.fixtures',
'lino_welfare.modlib.esf',
'lino_welfare.modlib.esf.fixtures',
'lino_welfare.modlib.households',
'lino_welfare.modlib.households.fixtures',
'lino_welfare.modlib.integ',
'lino_welfare.modlib.integ.fixtures',
'lino_welfare.modlib.isip',
'lino_welfare.modlib.jobs',
'lino_welfare.modlib.jobs.fixtures',
'lino_welfare.modlib.ledger',
'lino_welfare.modlib.ledger.fixtures',
'lino_welfare.modlib.notes',
'lino_welfare.modlib.notes.fixtures',
'lino_welfare.modlib.newcomers',
'lino_welfare.modlib.newcomers.fixtures',
'lino_welfare.modlib.pcsw',
'lino_welfare.modlib.pcsw.fixtures',
'lino_welfare.modlib.polls',
'lino_welfare.modlib.polls.fixtures',
'lino_welfare.modlib.projects',
'lino_welfare.modlib.reception',
'lino_welfare.modlib.sales',
'lino_welfare.modlib.sepa',
'lino_welfare.modlib.sepa.fixtures',
'lino_welfare.modlib.system',
'lino_welfare.modlib.immersion',
'lino_welfare.modlib.immersion.fixtures',
'lino_welfare.modlib.users',
'lino_welfare.modlib.users.fixtures',
'lino_welfare.modlib.welfare',
'lino_welfare.modlib.welfare.fixtures',
'lino_welfare.modlib.welfare.management',
'lino_welfare.modlib.welfare.management.commands',
'lino_welfare.projects',
'lino_welfare.projects.chatelet',
'lino_welfare.projects.chatelet.modlib',
'lino_welfare.projects.chatelet.modlib.courses',
'lino_welfare.projects.chatelet.modlib.courses.fixtures',
'lino_welfare.projects.chatelet.modlib.cv',
'lino_welfare.projects.chatelet.modlib.cv.fixtures',
'lino_welfare.projects.chatelet.modlib.isip',
'lino_welfare.projects.chatelet.modlib.pcsw',
'lino_welfare.projects.chatelet.modlib.pcsw.fixtures',
'lino_welfare.projects.chatelet.settings',
'lino_welfare.projects.chatelet.tests',
'lino_welfare.projects.std',
'lino_welfare.projects.std.settings',
'lino_welfare.projects.std.tests',
'lino_welfare.projects.eupen',
'lino_welfare.projects.eupen.modlib',
'lino_welfare.projects.eupen.modlib.pcsw',
'lino_welfare.projects.eupen.modlib.pcsw.fixtures',
'lino_welfare.projects.eupen.settings',
'lino_welfare.projects.eupen.tests',
'lino_welfare.projects.y2k',
'lino_welfare.projects.y2k.settings',
'lino_welfare.scripts',
])
SETUP_INFO.update(message_extractors={
'lino_welfare': [
('**/cache/**', 'ignore', None),
('**.py', 'python', None),
('**.js', 'javascript', None),
('**/config/**.html', 'jinja2', None),
#~ ('**/templates/**.txt', 'genshi', {
#~ 'template_class': 'genshi.template:TextTemplate'
#~ })
],
})
SETUP_INFO.update(package_data=dict())
# def add_package_data(package, *patterns):
# l = SETUP_INFO['package_data'].setdefault(package, [])
# l.extend(patterns)
# return l
# add_package_data('lino_welfare.modlib.cbss',
# 'WSDL/*.wsdl',
# 'XSD/*.xsd',
# 'XSD/SSDN/Common/*.xsd',
# 'XSD/SSDN/OCMW_CPAS/IdentifyPerson/*.xsd',
# 'XSD/SSDN/OCMW_CPAS/ManageAccess/*.xsd',
# 'XSD/SSDN/OCMW_CPAS/PerformInvestigation/*.xsd',
# 'XSD/SSDN/OCMW_CPAS/Loi65Wet65/*.xsd',
# 'XSD/SSDN/Person/*.xsd',
# 'XSD/SSDN/Service/*.xsd')
# add_package_data('lino_welfare.modlib.cbss',
# 'config/cbss/RetrieveTIGroupsRequest/*.odt')
# add_package_data('lino_welfare.modlib.cbss',
# 'config/cbss/IdentifyPersonRequest/*.odt')
# add_package_data('lino_welfare.modlib.cbss', 'fixtures/*.csv')
# add_package_data('lino_welfare.modlib.cbss', 'fixtures/*.xml')
# add_package_data('lino_welfare.modlib.debts', 'config/debts/Budget/*.odt')
# add_package_data('lino_welfare.modlib.courses', 'config/courses/Course/*.odt')
# add_package_data('lino_welfare.modlib.pcsw', 'config/pcsw/Client/*.odt')
# add_package_data('lino_welfare.modlib.cal', 'config/cal/Guest/*.odt')
# add_package_data('lino_welfare.modlib.jobs',
# 'config/jobs/ContractsSituation/*.odt')
# add_package_data('lino_welfare.modlib.jobs',
# 'config/jobs/OldJobsOverview/*.odt')
# add_package_data('lino_welfare.modlib.jobs', 'config/jobs/JobsOverview/*.odt')
# add_package_data('lino_welfare.settings', 'media/pictures/contacts.Person.jpg')
# add_package_data('lino_welfare', 'config/lino_welfare/ActivityReport/*.odt')
# add_package_data('lino_welfare', 'config/admin_main.html')
# l = add_package_data('lino_welfare.modlib.welfare')
# for lng in 'fr de nl'.split():
# l.append('lino_welfare/modlib/welfare/locale/%s/LC_MESSAGES/*.mo' % lng)
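SETUP_INFO above is a plain dict; per the header comment it is pulled into other modules with execfile, and a setup.py would typically hand it straight to setuptools. A hedged sketch of that step (the file path is an assumption, not taken from the project):
# Hypothetical setup.py consuming SETUP_INFO; the path to this file is an
# assumption.
from setuptools import setup

fn = 'lino_welfare/setup_info.py'
exec(compile(open(fn, 'rb').read(), fn, 'exec'))

if __name__ == '__main__':
    setup(**SETUP_INFO)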
|
#! /usr/bin/env python
"""Collection of classes for representing finite element meshes."""
import numpy as np
from collections import namedtuple
import ap.mesh.meshtools as meshtools
import ap.mesh.parsers as parsers
def mesh_factory(*args, **kwargs):
"""
Parse a finite element mesh representation and then convert it to a
Mesh or ArgyrisMesh object.
Required Arguments
------------------
* mesh_files : text files comprising the finite element mesh.
Keyword Arguments
-----------------
* argyris : boolean to specify if the mesh should have
additional nodes added to transform it in to an
Argyris mesh. Defaults to False.
* order : Mesh element order. The nodes will be renumbered
appropriately (1 for linears, 2 for quadratics).
Defaults to None. This is not implemented yet.
* projection : function that projects nodes. Defaults to None
(no projection)
* borders : a dictionary correlating names with GMSH 'Physical
Line' attributes. For example,
borders = {'open' : (1, 2)}
will correlate edges on Physical Lines 1 and 2 with the 'open' edge
collection.
* default_border : the default edge collection for any edges that
are not in a special_border collection. Defaults
to 'land'.
"""
parsed_mesh = parsers.parser_factory(*args)
if 'argyris' in kwargs:
keywords = kwargs.copy()
del keywords['argyris']
return ArgyrisMesh(parsed_mesh, **keywords)
elif 'Argyris' in kwargs:
keywords = kwargs.copy()
del keywords['Argyris']
return ArgyrisMesh(parsed_mesh, **keywords)
else:
return Mesh(parsed_mesh, **kwargs)
class Mesh(object):
"""
Representation of a finite element mesh. If every node shares the
same final coordinate value (e.g. all z-values are the same) then
this dimension is dropped.
Required Arguments
------------------
* parsed_mesh : Something that has the same interface as a MeshParser
(has fields elements, nodes, and edge_collections)
Optional Arguments
------------------
* borders : A dictionary correlating names with a tuple of
GMSH physical line numbers. for example:
borders = {'no_flow' : (1, 2, 3), 'coast' : (4, 5, 6)}
* default_border : the name corresponding to the default edge
collection. Defaults to "land".
* ignore_given_edges : If True, then throw out the provided edges and
extract them automatically from the element
connectivity matrix. Defaults to False. Useful when
the edges supplied by the parsed mesh could be
erroneous (contain non-mesh information).
* projection : function for transforming nodes (say from 3D to
2D); for example,
projection = lambda x : x[0:2]
will project the nodes down to the XY plane. Defaults to the
identity function.
Properties
----------
* elements : element connectivity matrix.
* nodes : coordinates of nodes.
* edge_collections : a dictionary relating the border names to the
edge tuples that fall along that border. If possible, the last
number in the tuple is the geometrical item number that the edge
falls upon from GMSH. Otherwise it is -1. For example,
print(t.edge_collections)
=> {'land': set([(3, 4, 7, 3), (4, 1, 8, 4), (2, 3, 6, 2),
(1, 2, 5, 1)])}
* boundary_nodes : Set containing the node numbers of nodes on the
boundary.
* interior_nodes : Set containing the node numbers of nodes in the
interior.
Methods
-------
* get_nnz() : Calculate the number of nonzero entries in a typical
finite element matrix (e.g. stiffness matrix) based on
the total number of inner products. This will be
exactly the length of one of the
triplet-form vectors.
* savetxt(prefix="") : Save the mesh as text files
1. prefix + nodes.txt
2. prefix + elements.txt
3. prefix + interior_nodes.txt
4. prefix + boundary_nodes.txt
and, additionally, for each edge collection save
prefix + name + _edges.txt.
"""
def __init__(self, parsed_mesh, borders = None, default_border="land",
ignore_given_edges = False, projection = lambda x : x):
if borders == None:
borders = {}
self.elements = parsed_mesh.elements
self.nodes = meshtools.project_nodes(projection, parsed_mesh.elements,
parsed_mesh.nodes,
attempt_flatten = True)
self.edge_collections = \
meshtools.organize_edges(parsed_mesh.edges, borders = borders,
default_border = default_border)
if max(map(len, self.edge_collections.values())) == 0 \
or ignore_given_edges:
self.edge_collections = \
{default_border :
set(meshtools.extract_boundary_edges(self.elements))}
if len(np.unique(self.elements)) != self.nodes.shape[0]:
self._fix_unused_nodes()
self.boundary_nodes = {}
interior_nodes = set(range(1, len(self.nodes)+1))
for name, edge_collection in self.edge_collections.items():
self.boundary_nodes[name] = \
np.fromiter(set(node for edge in edge_collection
for node in edge[0:-1]), int)
interior_nodes -= set(self.boundary_nodes[name])
self.interior_nodes = np.fromiter(interior_nodes, int)
def get_nnz(self):
"""
Estimate the number of nonzero entries present in some IJV-format
sparse matrix constructed from inner products on this collection of
elements.
"""
return self.elements.shape[1]**2 * self.elements.shape[0]
def savetxt(self, prefix=""):
"""
Save the mesh as a series of text files:
* prefix_nodes.txt : nodal coordinates of the mesh.
* prefix_elements.txt : element connectivity matrix of the mesh.
* prefix_interior_nodes.txt : list of interior nodes of the mesh.
* prefix_NAME_boundary_nodes.txt : list of boundary nodes of the mesh
corresponding to the border with name NAME.
* prefix_NAME_edges.txt : edges in boundary collection NAME, saved in the
traditional GMSH order.
Optional Arguments
------------------
prefix : a string prepended to each of the file names. If nonempty,
prepend exactly 'prefix + "_".'
"""
if prefix:
prefix += "_"
np.savetxt(prefix + "nodes.txt", self.nodes)
np.savetxt(prefix + "elements.txt", self.elements, fmt="%d")
np.savetxt(prefix + "interior_nodes.txt", self.interior_nodes, fmt="%d")
for (name, collection) in self.boundary_nodes.items():
np.savetxt(prefix + name + "_boundary_nodes.txt", collection,
fmt="%d")
for name, collection in self.edge_collections.items():
np.savetxt(prefix + name + '_edges.txt', [t for t in collection],
fmt='%d')
def _fix_unused_nodes(self):
"""
GMSH has a bug where it saves non-mesh nodes (that is, nodes that
are not members of any element) to some files. Get around that
issue by deleting the extra nodes and renumbering accordingly.
"""
number_of_mesh_nodes = len(np.unique(self.elements))
old_to_new = dict(zip(np.unique(self.elements),
range(1, number_of_mesh_nodes + 1)))
new_to_old = {new_node : old_node
for (old_node, new_node) in old_to_new.items()}
new_elements = np.array([[old_to_new[node] for node in element]
for element in self.elements])
new_nodes = np.array([self.nodes[new_to_old[new_node_number] - 1]
for new_node_number in new_to_old.keys()])
try:
edge_size = {3 : 2, 6 : 3, 10 : 4, 15 : 5}[self.elements.shape[1]]
except KeyError:
raise ValueError("Unsupported mesh type")
new_edge_collections = dict()
for key, collection in self.edge_collections.items():
new_edge_collections[key] = set()
for edge in collection:
# geometrical information available
if len(edge) == edge_size + 1:
new_edge_collections[key].add(tuple([old_to_new[node]
for node in edge[0:-1]] + [edge[-1]]))
# geometrical information not available
elif len(edge) == edge_size:
new_edge_collections[key].add(tuple([old_to_new[node]
for node in edge]))
else:
raise ValueError("Mismatch between size of mesh and" +
" size of edges")
self.edge_collections = new_edge_collections
self.elements = new_elements
self.nodes = new_nodes
ArgyrisEdge = namedtuple('ArgyrisEdge', ['element_number', 'edge_type', 'edge'])
class ArgyrisMesh(object):
"""
Class to build an Argyris mesh from a parsed mesh. Can handle a mesh
with multiple boundary conditions.
The algorithm is as follows:
1. Treat the current midpoint nodes as the normal derivative basis
functions.
2. Extract the corner nodes as a separate array. Associate each corner node
with five new nodes stacked at the same location.
3. Update nodal coordinates and fix the element order.
Required Arguments
------------------
* mesh : a parsed mesh (inherits from the MeshParser class)
Properties
----------
* elements : a numpy array listing the node numbers of every element.
* edges_by_midpoint : a dictionary associating each element with a certain
edge (indexed by the normal derivative basis function number)
* node_collections : a list of ArgyrisNodeCollection objects.
* nodes : a numpy array of node coordinates.
Methods
-------
* savetxt: save the mesh in multiple text files.
"""
def __init__(self, parsed_mesh, borders = None, default_border="land",
ignore_given_edges = False, projection = lambda x : x):
if borders == None:
borders = dict()
if parsed_mesh.elements.shape[1] != 6:
raise NotImplementedError("Support for changing mesh order is not "
+ "implemented.")
# parsed_mesh = meshtools.change_order(parsed_mesh, 2)
lagrange_mesh = Mesh(parsed_mesh, borders = borders,
default_border = default_border,
projection = projection,
ignore_given_edges = ignore_given_edges)
# if not projected, try to flatten as a last resort.
if (lagrange_mesh.nodes.shape[1] == 3 and
np.all(lagrange_mesh.nodes[:, 2] == lagrange_mesh.nodes[0, 2])):
lagrange_mesh.nodes = lagrange_mesh.nodes[:, 0:2]
if lagrange_mesh.nodes.shape[1] != 2:
raise ValueError("Requires a 2D mesh; try a different projection.")
self.elements = np.zeros((lagrange_mesh.elements.shape[0], 21),
dtype = np.int)
self.elements[:, 0:6] = lagrange_mesh.elements
# solve a lot of orientation problems later by ensuring that the corner
# nodes are in sorted order.
for element in self.elements:
self._sort_corners_increasing(element[0:6])
# stack the extra basis function nodes on the corners.
max_lagrange_mesh = lagrange_mesh.elements.max() + 1
self.stacked_nodes = \
{node_number : np.arange(max_lagrange_mesh + 5*count,
max_lagrange_mesh + 5*count + 5)
for count, node_number in \
enumerate(np.unique(self.elements[:, 0:3]))}
for element in self.elements:
element[6:11] = self.stacked_nodes[element[0]]
element[11:16] = self.stacked_nodes[element[1]]
element[16:21] = self.stacked_nodes[element[2]]
self._fix_argyris_node_order()
# update the edges by elements.
self.edges_by_midpoint = dict()
edge_type_to_nodes = {1 : (0, 1, 18), 2 : (0, 2, 19), 3 : (1, 2, 20)}
for element_number, element in enumerate(self.elements):
for edge_type in range(1, 4):
(i, j, k) = edge_type_to_nodes[edge_type]
edge = ArgyrisEdge(element_number = element_number + 1,
edge_type = edge_type,
edge = (element[i], element[j], element[k]))
if element[17 + edge_type] in self.edges_by_midpoint:
if (self.edges_by_midpoint[element[17 + edge_type]].edge
!= edge.edge):
raise ValueError("Mesh is not consistent")
else:
self.edges_by_midpoint[element[17 + edge_type]] = edge
# set coordinates for the new nodes.
self.nodes = np.zeros((self.elements.max(), 2))
self.nodes.fill(np.nan)
self.nodes[0:lagrange_mesh.nodes.shape[0], :] = lagrange_mesh.nodes
for stacked_node, new_nodes in self.stacked_nodes.items():
self.nodes[new_nodes - 1] = self.nodes[stacked_node - 1]
# Construct the edge collections.
self.node_collections = []
self._build_node_collections(lagrange_mesh)
def savetxt(self, prefix = ""):
"""
Save the following text files:
nodes.txt : nodal coordinates
elements.txt : element connectivity matrix
and for each collection of nodes with key NAME:
NAME_edges.txt : all edge tuples (end, end, midpoint)
NAME_all.txt : all numbers of nodes in the collection.
"""
if prefix:
prefix += "_"
np.savetxt(prefix + 'nodes.txt', self.nodes)
np.savetxt(prefix + 'elements.txt', self.elements, fmt="%d")
for collection in self.node_collections:
prefix = prefix[0:-1]
collection.savetxt(prefix)
def _sort_corners_increasing(self, element):
"""
Ensure that the corners of the input quadratic element are in increasing
order. For example, convert
1 3 2 4 6 5
to
1 2 3 5 6 4
"""
if element[0] > element[1]:
element[0], element[1] = element[1], element[0]
element[4], element[5] = element[5], element[4]
if element[1] > element[2]:
element[2], element[1] = element[1], element[2]
element[3], element[5] = element[5], element[3]
if element[0] > element[2]:
element[2], element[0] = element[0], element[2]
element[3], element[4] = element[4], element[3]
if element[0] > element[1]:
element[0], element[1] = element[1], element[0]
element[4], element[5] = element[5], element[4]
def _build_node_collections(self, lagrange_mesh):
"""
Handle the edges by building a list of ArgyrisNodeCollection
objects. This is done by extracting the information regarding
corner nodes and midpoints from the lagrange edge data and saving
the interior nodes as everything that was not a boundary node.
"""
interior_function_values = set(lagrange_mesh.elements[:, 0:3].flatten())
interior_normal_derivatives = \
set(lagrange_mesh.elements[:, 3:6].flatten())
for border_name, collection in lagrange_mesh.edge_collections.items():
# save left points of edges.
function_values = {x[0] for x in collection}
normal_derivatives = {x[2] for x in collection}
self.node_collections.append(ArgyrisNodeCollection(function_values,
normal_derivatives, collection, self, name = border_name))
interior_function_values.difference_update(function_values)
interior_normal_derivatives.difference_update(normal_derivatives)
self.node_collections.append(ArgyrisNodeCollection(
interior_function_values, interior_normal_derivatives, [], self,
name = 'interior'))
def _fix_argyris_node_order(self):
"""
Fix the node orderings from the constructed format
[1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21]
to the usual Argyris format of
[1 2 3 7 8 12 13 17 18 9 10 11 14 15 16 19 20 21 4 6 5].
"""
normal_derivatives1 = self.elements[:, 3].copy()
normal_derivatives2 = self.elements[:, 5].copy()
normal_derivatives3 = self.elements[:, 4].copy()
first_nodes = self.elements[:, 6:11].copy()
second_nodes = self.elements[:, 11:16].copy()
third_nodes = self.elements[:, 16:21].copy()
self.elements[:, 18] = normal_derivatives1
self.elements[:, 19] = normal_derivatives2
self.elements[:, 20] = normal_derivatives3
self.elements[:, 3:5] = first_nodes[:, 0:2]
self.elements[:, 9:12] = first_nodes[:, 2:5]
self.elements[:, 5:7] = second_nodes[:, 0:2]
self.elements[:, 12:15] = second_nodes[:, 2:5]
self.elements[:, 7:9] = third_nodes[:, 0:2]
self.elements[:, 15:18] = third_nodes[:, 2:5]
class ArgyrisNodeCollection(object):
"""
Contains information about a group of nodes in an Argyris Mesh and any
relevant edge data.
Required Arguments
------------------
* function_values : set of basis function numbers that approximate function
values on the Argyris mesh.
* normal_derivatives : set of the node numbers corresponding to normal
derivative basis functions.
* edges : set of tuples corresponding to (endpoint, endpoint, midpoint)
* mesh : the relevant Argyris mesh.
Optional Arguments
------------------
* name : prefix on the output files. Defaults to 'inner'.
"""
def __init__(self, function_values, normal_derivatives,
edges, mesh, name = 'inner'):
self.function_values = function_values
self.normal_derivatives = normal_derivatives
self.name = name
self.stacked_nodes = {node : mesh.stacked_nodes[node] for node in
self.function_values}
self.edges = [mesh.edges_by_midpoint[edge[-2]] for edge in edges]
def savetxt(self, prefix = ""):
"""
Save the data to text files; place all node numbers in the collection
in one file and all information on edges in another.
"""
if prefix:
prefix += "_"
if self.edges: # don't save if there are no edges.
edge_array = np.array([[edge.element_number, edge.edge_type,
edge.edge[0], edge.edge[1], edge.edge[2]]
for edge in self.edges])
np.savetxt(prefix + self.name + "_edges.txt", edge_array, "%d")
# Use list comprehensions because they do the same thing in python2.7
# and python3.*; *.values() became an iterator in python3000.
np.savetxt(prefix + self.name + "_all.txt",
np.unique(np.hstack(
[x for x in self.stacked_nodes.values()] +
[x for x in self.stacked_nodes.keys()] +
[x for x in self.normal_derivatives])), "%d")
def __str__(self):
"""For interactive debugging use."""
return ("Node collection name: " + self.name + "\n" +
"function values:\n" + str(self.function_values) + "\n" +
"normal derivatives:\n" + str(self.normal_derivatives) + "\n")
compliant (up to alignment of =s) with pep8
#! /usr/bin/env python
"""Collection of classes for representing finite element meshes."""
import numpy as np
from collections import namedtuple
import ap.mesh.meshtools as meshtools
import ap.mesh.parsers as parsers
def mesh_factory(*args, **kwargs):
"""
Parse a finite element mesh representation and then convert it to a
Mesh or ArgyrisMesh object.
Required Arguments
------------------
* mesh_files : text files comprising the finite element mesh.
Keyword Arguments
-----------------
* argyris : boolean to specify if the mesh should have
additional nodes added to transform it into an
Argyris mesh. Defaults to False.
* order : Mesh element order. The nodes will be renumbered
appropriately (1 for linears, 2 for quadratics).
Defaults to None. This is not implemented yet.
* projection : function that projects nodes. Defaults to None
(no projection)
* borders : a dictionary correlating names with GMSH 'Physical
Line' attributes. For example,
borders = {'open' : (1, 2)}
will correlate edges on Physical Lines 1 and 2 with the 'open' edge
collection.
* default_border : the default edge collection for any edges that
are not in a special_border collection. Defaults
to 'land'.
"""
parsed_mesh = parsers.parser_factory(*args)
if 'argyris' in kwargs:
keywords = kwargs.copy()
del keywords['argyris']
return ArgyrisMesh(parsed_mesh, **keywords)
elif 'Argyris' in kwargs:
keywords = kwargs.copy()
del keywords['Argyris']
return ArgyrisMesh(parsed_mesh, **keywords)
else:
return Mesh(parsed_mesh, **kwargs)
class Mesh(object):
"""
Representation of a finite element mesh. If every node shares the
same final coordinate value (e.g. all z-values are the same) then
this dimension is dropped.
Required Arguments
------------------
* parsed_mesh : Something that has the same interface as a MeshParser
(has fields elements, nodes, and edge_collections)
Optional Arguments
------------------
* borders : A dictionary correlating names with a tuple of
GMSH physical line numbers. for example:
borders = {'no_flow' : (1, 2, 3), 'coast' : (4, 5, 6)}
* default_border : the name corresponding to the default edge
collection. Defaults to "land".
* ignore_given_edges : If True, then throw out the provided edges and
extract them automatically from the element
connectivity matrix. Defaults to False. Useful when
the edges supplied by the parsed mesh could be
erroneous (contain non-mesh information).
* projection : function for transforming nodes (say from 3D to
2D); for example,
projection = lambda x : x[0:2]
will project the nodes down to the XY plane. Defaults to the
identity function.
Properties
----------
* elements : element connectivity matrix.
* nodes : coordinates of nodes.
* edge_collections : a dictionary relating the border names to the
edge tuples that fall along that border. If possible, the last
number in the tuple is the geometrical item number that the edge
falls upon from GMSH. Otherwise it is -1. For example,
print(t.edge_collections)
=> {'land': set([(3, 4, 7, 3), (4, 1, 8, 4), (2, 3, 6, 2),
(1, 2, 5, 1)])}
* boundary_nodes : Set containing the node numbers of nodes on the
boundary.
* interior_nodes : Set containing the node numbers of nodes in the
interior.
Methods
-------
* get_nnz() : Calculate the number of nonzero entries in a typical
finite element matrix (e.g. stiffness matrix) based on
the total number of inner products. This will be
exactly the length of one of the
triplet-form vectors.
* savetxt(prefix="") : Save the mesh as text files
1. prefix + nodes.txt
2. prefix + elements.txt
3. prefix + interior_nodes.txt
4. prefix + boundary_nodes.txt
and, additionally, for each edge collection save
prefix + name + _edges.txt.
"""
def __init__(self, parsed_mesh, borders=None, default_border="land",
ignore_given_edges=False, projection=lambda x: x):
if borders is None:
borders = {}
self.elements = parsed_mesh.elements
self.nodes = meshtools.project_nodes(projection, parsed_mesh.elements,
parsed_mesh.nodes,
attempt_flatten=True)
self.edge_collections = \
meshtools.organize_edges(parsed_mesh.edges, borders=borders,
default_border=default_border)
if max(map(len, self.edge_collections.values())) == 0 \
or ignore_given_edges:
self.edge_collections = \
{default_border:
set(meshtools.extract_boundary_edges(self.elements))}
if len(np.unique(self.elements)) != self.nodes.shape[0]:
self._fix_unused_nodes()
self.boundary_nodes = {}
interior_nodes = set(range(1, len(self.nodes)+1))
for name, edge_collection in self.edge_collections.items():
self.boundary_nodes[name] = \
np.fromiter(set(node for edge in edge_collection
for node in edge[0:-1]), int)
interior_nodes -= set(self.boundary_nodes[name])
self.interior_nodes = np.fromiter(interior_nodes, int)
def get_nnz(self):
"""
Estimate the number of nonzero entries present in some IJV-format
sparse matrix constructed from inner products on this collection of
elements.
"""
return self.elements.shape[1]**2 * self.elements.shape[0]
def savetxt(self, prefix=""):
"""
Save the mesh as a series of text files:
* prefix_nodes.txt : nodal coordinates of the mesh.
* prefix_elements.txt : element connectivity matrix of the mesh.
* prefix_interior_nodes.txt : list of interior nodes of the mesh.
* prefix_NAME_boundary_nodes.txt : list of boundary nodes of the mesh
corresponding to the border with name NAME.
* prefix_NAME_edges.txt : edges in boundary collection NAME, saved in
the traditional GMSH order.
Optional Arguments
------------------
prefix : a string prepended to each of the file names. If nonempty,
prepend exactly 'prefix + "_".'
"""
if prefix:
prefix += "_"
np.savetxt(prefix + "nodes.txt", self.nodes)
np.savetxt(prefix + "elements.txt", self.elements, fmt="%d")
np.savetxt(prefix + "interior_nodes.txt", self.interior_nodes,
fmt="%d")
for (name, collection) in self.boundary_nodes.items():
np.savetxt(prefix + name + "_boundary_nodes.txt", collection,
fmt="%d")
for name, collection in self.edge_collections.items():
np.savetxt(prefix + name + '_edges.txt', [t for t in collection],
fmt='%d')
def _fix_unused_nodes(self):
"""
GMSH has a bug where it saves non-mesh nodes (that is, nodes that
are not members of any element) to some files. Get around that
issue by deleting the extra nodes and renumbering accordingly.
"""
number_of_mesh_nodes = len(np.unique(self.elements))
old_to_new = dict(zip(np.unique(self.elements),
range(1, number_of_mesh_nodes + 1)))
new_to_old = {new_node: old_node
for (old_node, new_node) in old_to_new.items()}
new_elements = np.array([[old_to_new[node] for node in element]
for element in self.elements])
new_nodes = np.array([self.nodes[new_to_old[new_node_number] - 1]
for new_node_number in new_to_old.keys()])
try:
edge_size = {3: 2, 6: 3, 10: 4, 15: 5}[self.elements.shape[1]]
except KeyError:
raise ValueError("Unsupported mesh type")
new_edge_collections = dict()
for key, collection in self.edge_collections.items():
new_edge_collections[key] = set()
for edge in collection:
# geometrical information available
if len(edge) == edge_size + 1:
new_edge = tuple([old_to_new[node]
for node in edge[0:-1]] + [edge[-1]])
new_edge_collections[key].add(new_edge)
# geometrical information not available
elif len(edge) == edge_size:
new_edge = tuple([old_to_new[node] for node in edge])
new_edge_collections[key].add(new_edge)
else:
raise ValueError("Mismatch between size of mesh and" +
" size of edges")
self.edge_collections = new_edge_collections
self.elements = new_elements
self.nodes = new_nodes
ArgyrisEdge = namedtuple('ArgyrisEdge',
['element_number', 'edge_type', 'edge'])
class ArgyrisMesh(object):
"""
Class to build an Argyris mesh from a parsed mesh. Can handle a mesh
with multiple boundary conditions.
The algorithm is as follows:
1. Treat the current midpoint nodes as the normal derivative basis
functions.
2. Extract the corner nodes as a separate array. Associate each corner node
with five new nodes stacked at the same location.
3. Update nodal coordinates and fix the element order.
Required Arguments
------------------
* mesh : a parsed mesh (inherits from the MeshParser class)
Properties
----------
* elements : a numpy array listing the node numbers of every element.
* edges_by_midpoint : a dictionary associating each element with a certain
edge (indexed by the normal derivative basis function number)
* node_collections : a list of ArgyrisNodeCollection objects.
* nodes : a numpy array of node coordinates.
Methods
-------
* savetxt: save the mesh in multiple text files.
"""
def __init__(self, parsed_mesh, borders=None, default_border="land",
ignore_given_edges=False, projection=lambda x: x):
if borders is None:
borders = dict()
if parsed_mesh.elements.shape[1] != 6:
raise NotImplementedError("Support for changing mesh order is not "
+ "implemented.")
# parsed_mesh = meshtools.change_order(parsed_mesh, 2)
lagrange_mesh = Mesh(parsed_mesh, borders=borders,
default_border=default_border,
projection=projection,
ignore_given_edges=ignore_given_edges)
# if not projected, try to flatten as a last resort.
if lagrange_mesh.nodes.shape[1] == 3:
if np.all(lagrange_mesh.nodes[:, 2] == lagrange_mesh.nodes[0, 2]):
lagrange_mesh.nodes = lagrange_mesh.nodes[:, 0:2]
if lagrange_mesh.nodes.shape[1] != 2:
raise ValueError("Requires a 2D mesh; try a different projection.")
self.elements = np.zeros((lagrange_mesh.elements.shape[0], 21),
dtype=np.int)
self.elements[:, 0:6] = lagrange_mesh.elements
# solve a lot of orientation problems later by ensuring that the corner
# nodes are in sorted order.
for element in self.elements:
self._sort_corners_increasing(element[0:6])
# stack the extra basis function nodes on the corners.
max_lagrange_mesh = lagrange_mesh.elements.max() + 1
self.stacked_nodes = \
{node_number: np.arange(max_lagrange_mesh + 5*count,
max_lagrange_mesh + 5*count + 5)
for count, node_number in
enumerate(np.unique(self.elements[:, 0:3]))}
for element in self.elements:
element[6:11] = self.stacked_nodes[element[0]]
element[11:16] = self.stacked_nodes[element[1]]
element[16:21] = self.stacked_nodes[element[2]]
self._fix_argyris_node_order()
# update the edges by elements.
self.edges_by_midpoint = dict()
edge_type_to_nodes = {1: (0, 1, 18), 2: (0, 2, 19), 3: (1, 2, 20)}
for element_number, element in enumerate(self.elements):
for edge_type in range(1, 4):
(i, j, k) = edge_type_to_nodes[edge_type]
edge = ArgyrisEdge(element_number=element_number + 1,
edge_type=edge_type,
edge=(element[i], element[j], element[k]))
if element[17 + edge_type] in self.edges_by_midpoint:
if (self.edges_by_midpoint[element[17 + edge_type]].edge
!= edge.edge):
raise ValueError("Mesh is not consistent")
else:
self.edges_by_midpoint[element[17 + edge_type]] = edge
# set coordinates for the new nodes.
self.nodes = np.zeros((self.elements.max(), 2))
self.nodes.fill(np.nan)
self.nodes[0:lagrange_mesh.nodes.shape[0], :] = lagrange_mesh.nodes
for stacked_node, new_nodes in self.stacked_nodes.items():
self.nodes[new_nodes - 1] = self.nodes[stacked_node - 1]
# Construct the edge collections.
self.node_collections = []
self._build_node_collections(lagrange_mesh)
def savetxt(self, prefix=""):
"""
Save the following text files:
nodes.txt : nodal coordinates
elements.txt : element connectivity matrix
and for each collection of nodes with key NAME:
NAME_edges.txt : all edge tuples (end, end, midpoint)
NAME_all.txt : all numbers of nodes in the collection.
"""
if prefix:
prefix += "_"
np.savetxt(prefix + 'nodes.txt', self.nodes)
np.savetxt(prefix + 'elements.txt', self.elements, fmt="%d")
for collection in self.node_collections:
prefix = prefix[0:-1]
collection.savetxt(prefix)
def _sort_corners_increasing(self, element):
"""
Ensure that the corners of the input quadratic element are in
increasing order. For example, convert
1 3 2 4 6 5
to
1 2 3 5 6 4
"""
if element[0] > element[1]:
element[0], element[1] = element[1], element[0]
element[4], element[5] = element[5], element[4]
if element[1] > element[2]:
element[2], element[1] = element[1], element[2]
element[3], element[5] = element[5], element[3]
if element[0] > element[2]:
element[2], element[0] = element[0], element[2]
element[3], element[4] = element[4], element[3]
if element[0] > element[1]:
element[0], element[1] = element[1], element[0]
element[4], element[5] = element[5], element[4]
def _build_node_collections(self, lagrange_mesh):
"""
Handle the edges by building a list of ArgyrisNodeCollection
objects. This is done by extracting the information regarding
corner nodes and midpoints from the lagrange edge data and saving
the interior nodes as everything that was not a boundary node.
"""
interior_function_values = set(lagrange_mesh.elements[:, 0:3].flat)
interior_normal_derivatives = set(lagrange_mesh.elements[:, 3:6].flat)
for border_name, collection in lagrange_mesh.edge_collections.items():
# save left points of edges.
function_values = {x[0] for x in collection}
normal_derivatives = {x[2] for x in collection}
self.node_collections.append(
ArgyrisNodeCollection(function_values, normal_derivatives,
collection, self, name=border_name))
interior_function_values.difference_update(function_values)
interior_normal_derivatives.difference_update(normal_derivatives)
self.node_collections.append(ArgyrisNodeCollection(
interior_function_values, interior_normal_derivatives, [], self,
name='interior'))
def _fix_argyris_node_order(self):
"""
Fix the node orderings from the constructed format
[1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21]
to the usual Argyris format of
[1 2 3 7 8 12 13 17 18 9 10 11 14 15 16 19 20 21 4 6 5].
"""
normal_derivatives1 = self.elements[:, 3].copy()
normal_derivatives2 = self.elements[:, 5].copy()
normal_derivatives3 = self.elements[:, 4].copy()
first_nodes = self.elements[:, 6:11].copy()
second_nodes = self.elements[:, 11:16].copy()
third_nodes = self.elements[:, 16:21].copy()
self.elements[:, 18] = normal_derivatives1
self.elements[:, 19] = normal_derivatives2
self.elements[:, 20] = normal_derivatives3
self.elements[:, 3:5] = first_nodes[:, 0:2]
self.elements[:, 9:12] = first_nodes[:, 2:5]
self.elements[:, 5:7] = second_nodes[:, 0:2]
self.elements[:, 12:15] = second_nodes[:, 2:5]
self.elements[:, 7:9] = third_nodes[:, 0:2]
self.elements[:, 15:18] = third_nodes[:, 2:5]
class ArgyrisNodeCollection(object):
"""
Contains information about a group of nodes in an Argyris Mesh and any
relevant edge data.
Required Arguments
------------------
* function_values : set of basis function numbers that approximate function
values on the Argyris mesh.
* normal_derivatives : set of the node numbers corresponding to normal
derivative basis functions.
* edges : set of tuples corresponding to (endpoint, endpoint, midpoint)
* mesh : the relevant Argyris mesh.
Optional Arguments
------------------
* name : prefix on the output files. Defaults to 'inner'.
"""
def __init__(self, function_values, normal_derivatives,
edges, mesh, name='inner'):
self.function_values = function_values
self.normal_derivatives = normal_derivatives
self.name = name
self.stacked_nodes = {node: mesh.stacked_nodes[node] for node in
self.function_values}
self.edges = [mesh.edges_by_midpoint[edge[-2]] for edge in edges]
def savetxt(self, prefix=""):
"""
Save the data to text files; place all node numbers in the collection
in one file and all information on edges in another.
"""
if prefix:
prefix += "_"
if self.edges: # don't save if there are no edges.
edge_array = np.array([[edge.element_number, edge.edge_type,
edge.edge[0], edge.edge[1], edge.edge[2]]
for edge in self.edges])
np.savetxt(prefix + self.name + "_edges.txt", edge_array, "%d")
# Use list comprehensions because they do the same thing in python2.7
# and python3.*; *.values() became an iterator in python3000.
np.savetxt(prefix + self.name + "_all.txt",
np.unique(np.hstack(
[x for x in self.stacked_nodes.values()] +
[x for x in self.stacked_nodes.keys()] +
[x for x in self.normal_derivatives])), "%d")
def __str__(self):
"""For interactive debugging use."""
return ("Node collection name: " + self.name + "\n" +
"function values:\n" + str(self.function_values) + "\n" +
"normal derivatives:\n" + str(self.normal_derivatives) + "\n")
|
# aws-pcf-quickstart
#
# Copyright (c) 2017-Present Pivotal Software, Inc. All Rights Reserved.
#
# This program and the accompanying materials are made available under
# the terms of the under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import tempfile
from mock import patch, Mock
import download_and_import
import om_manager
import settings
class TestDownloadAndImport(unittest.TestCase):
def setUp(self):
self.settings = Mock(settings.Settings)
self.settings.ert_release_version = "1.9.0"
self.settings.ert_release_sha256 = "xyz123"
self.settings.stemcell_release_version = "123"
self.settings.stemcell_release_sha256 = "123"
@patch('util.exponential_backoff_cmd')
@patch('download_and_import.do_pivnet_download')
@patch('download_and_import.do_github_download')
def test_download_asset_success(self, mock_do_github_download, mock_do_pivnet_download, mock_util):
mock_do_pivnet_download.return_value = "", "", 0
mock_do_github_download.return_value = "", "", 0
out, err, exit_code = download_and_import.download_assets(
self.settings, '/home/ubuntu/tiles/')
self.assertEqual(mock_do_pivnet_download.call_count, 3)
self.assertEqual(mock_do_github_download.call_count, 1)
self.assertEqual(exit_code, 0)
self.assertEqual(mock_do_pivnet_download.mock_calls[0][1][0], 'stemcells-ubuntu-xenial')
self.assertEqual(mock_do_pivnet_download.mock_calls[1][1][0], 'stemcells')
self.assertEqual(mock_do_pivnet_download.mock_calls[2][1][0], 'srt')
@patch('util.exponential_backoff_cmd')
@patch('download_and_import.do_pivnet_download')
@patch('download_and_import.do_github_download')
def test_download_asset_pivnet_failure(self, mock_do_github_download, mock_do_pivnet_download, mock_util):
mock_do_pivnet_download.return_value = "download failed", "", 1
mock_do_github_download.return_value = "", "", 0
out, err, exit_code = download_and_import.download_assets(
self.settings, '/home/ubuntu/tiles/')
self.assertEqual(mock_do_pivnet_download.call_count, 1)
self.assertEqual(mock_do_github_download.call_count, 1)
self.assertEqual(out, "download failed")
self.assertEqual(exit_code, 1)
@patch('util.exponential_backoff_cmd')
@patch('download_and_import.do_pivnet_download')
@patch('download_and_import.do_github_download')
def test_download_asset_github_failure(self, mock_do_github_download, mock_do_pivnet_download, mock_util):
mock_do_pivnet_download.return_value = "", "", 0
mock_do_github_download.return_value = "download failed", "", 1
out, err, exit_code = download_and_import.download_assets(
self.settings, '/home/ubuntu/tiles/')
self.assertEqual(mock_do_github_download.call_count, 1)
self.assertEqual(mock_do_pivnet_download.call_count, 0)
self.assertEqual(out, "download failed")
self.assertEqual(exit_code, 1)
@patch('util.exponential_backoff_cmd')
@patch('download_and_import.verify_sha256')
@patch('glob.glob')
def test_do_pivnet_download_success(self, mock_os_listdir, mock_verify_sha256, mock_util):
mock_util.return_value = "", "", 0
mock_os_listdir.return_value = ['/home/ubuntu/tiles/srt-1.9.0.pivotal']
mock_verify_sha256.return_value = 0
download_and_import.do_pivnet_download(
'srt', '1.9.0', 'srt*.pivotal', 'xyz123', '/home/ubuntu/tiles/')
mock_verify_sha256.assert_called_with(
'/home/ubuntu/tiles/srt-1.9.0.pivotal', 'xyz123')
def test_verify_sha256_match(self):
with tempfile.TemporaryDirectory() as temp_dir:
file_name = "{}/foo.txt".format(temp_dir)
with open(file_name, 'w') as f:
f.write('Test\n')
f.write('foo bar\n')
result = download_and_import.verify_sha256(
file_name, '9e7e95359fb81b4089289d58f9a38ff37d744db7c5941a156ff23216706da8cd'
)
self.assertEqual(result, 0)
def test_verify_sha256_no_match(self):
with tempfile.TemporaryDirectory() as temp_dir:
file_name = "{}/foo.txt".format(temp_dir)
with open(file_name, 'w') as f:
f.write('Test\n')
f.write('foo bar baz\n')
result = download_and_import.verify_sha256(
file_name, '9e7e95359fb81b4089289d58f9a38ff37d744db7c5941a156ff23216706da8cd'
)
self.assertEqual(result, 1)
@patch('util.exponential_backoff_cmd')
@patch('om_manager.get_om_with_auth')
@patch('os.listdir')
def test_upload_assets_success(self, mock_os_listdir, mock_get_om_with_auth, mock_util):
mock_os_listdir.return_value = [
'tile.pivotal', 'stemcell.tgz', 'password.txt', 'secondtile.pivotal']
mock_get_om_with_auth.return_value = [
"om", "-u", "username", "-p", "password"]
mock_util.return_value = "", "", 0
out, err, exit_code = download_and_import.upload_assets(
self.settings, "/home/ubuntu/tiles")
self.assertEqual(exit_code, 0)
self.assertEqual(mock_util.call_count, 2)
@patch('util.exponential_backoff_cmd')
@patch('om_manager.get_om_with_auth')
@patch('os.listdir')
def test_upload_stemcell_success(self, mock_os_listdir, mock_get_om_with_auth, mock_util):
mock_os_listdir.return_value = [
'tile.pivotal', 'stemcell.tgz', 'password.txt', 'secondtile.pivotal']
mock_get_om_with_auth.return_value = [
"om", "-u", "username", "-p", "password"]
mock_util.return_value = "", "", 0
out, err, exit_code = download_and_import.upload_stemcell(
self.settings, "/home/ubuntu/tiles")
self.assertEqual(exit_code, 0)
self.assertEqual(mock_util.call_count, 1)
@patch('util.exponential_backoff_cmd')
@patch('om_manager.get_om_with_auth')
@patch('os.listdir')
def test_upload_stemcell_failure(self, mock_os_listdir, mock_get_om_with_auth, mock_util):
mock_os_listdir.return_value = [
'tile.pivotal', 'stemcell.tgz', 'password.txt', 'secondtile.pivotal']
mock_get_om_with_auth.return_value = [
"om", "-u", "username", "-p", "password"]
mock_util.return_value = "icky stemcell", "", 1
out, err, exit_code = download_and_import.upload_stemcell(
self.settings, "/home/ubuntu/tiles")
self.assertEqual(exit_code, 1)
self.assertEqual(mock_util.call_count, 1)
self.assertEqual(out, "icky stemcell")
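The two verify_sha256 tests above pin down the helper's contract: hash the file with SHA-256 and return 0 on a match, 1 otherwise. A minimal sketch of such a helper, offered as an assumption about download_and_import rather than its actual code:
# Assumed implementation of verify_sha256 consistent with the tests above;
# not the project's real code.
import hashlib

def verify_sha256(file_name, expected_sha256):
    digest = hashlib.sha256()
    with open(file_name, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            digest.update(chunk)
    return 0 if digest.hexdigest() == expected_sha256 else 1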
Revert "test should also use srt"
This reverts commit 46b37098a5614e045c4dfd9fdaa378c8beb5e416.
# aws-pcf-quickstart
#
# Copyright (c) 2017-Present Pivotal Software, Inc. All Rights Reserved.
#
# This program and the accompanying materials are made available under
# the terms of the under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import tempfile
from mock import patch, Mock
import download_and_import
import om_manager
import settings
class TestDownloadAndImport(unittest.TestCase):
def setUp(self):
self.settings = Mock(settings.Settings)
self.settings.ert_release_version = "1.9.0"
self.settings.ert_release_sha256 = "xyz123"
self.settings.stemcell_release_version = "123"
self.settings.stemcell_release_sha256 = "123"
@patch('util.exponential_backoff_cmd')
@patch('download_and_import.do_pivnet_download')
@patch('download_and_import.do_github_download')
def test_download_asset_success(self, mock_do_github_download, mock_do_pivnet_download, mock_util):
mock_do_pivnet_download.return_value = "", "", 0
mock_do_github_download.return_value = "", "", 0
out, err, exit_code = download_and_import.download_assets(
self.settings, '/home/ubuntu/tiles/')
self.assertEqual(mock_do_pivnet_download.call_count, 3)
self.assertEqual(mock_do_github_download.call_count, 1)
self.assertEqual(exit_code, 0)
self.assertEqual(mock_do_pivnet_download.mock_calls[0][1][0], 'stemcells-ubuntu-xenial')
self.assertEqual(mock_do_pivnet_download.mock_calls[1][1][0], 'stemcells')
self.assertEqual(mock_do_pivnet_download.mock_calls[2][1][0], 'cf')
@patch('util.exponential_backoff_cmd')
@patch('download_and_import.do_pivnet_download')
@patch('download_and_import.do_github_download')
def test_download_asset_pivnet_failure(self, mock_do_github_download, mock_do_pivnet_download, mock_util):
mock_do_pivnet_download.return_value = "download failed", "", 1
mock_do_github_download.return_value = "", "", 0
out, err, exit_code = download_and_import.download_assets(
self.settings, '/home/ubuntu/tiles/')
self.assertEqual(mock_do_pivnet_download.call_count, 1)
self.assertEqual(mock_do_github_download.call_count, 1)
self.assertEqual(out, "download failed")
self.assertEqual(exit_code, 1)
@patch('util.exponential_backoff_cmd')
@patch('download_and_import.do_pivnet_download')
@patch('download_and_import.do_github_download')
def test_download_asset_github_failure(self, mock_do_github_download, mock_do_pivnet_download, mock_util):
mock_do_pivnet_download.return_value = "", "", 0
mock_do_github_download.return_value = "download failed", "", 1
out, err, exit_code = download_and_import.download_assets(
self.settings, '/home/ubuntu/tiles/')
self.assertEqual(mock_do_github_download.call_count, 1)
self.assertEqual(mock_do_pivnet_download.call_count, 0)
self.assertEqual(out, "download failed")
self.assertEqual(exit_code, 1)
@patch('util.exponential_backoff_cmd')
@patch('download_and_import.verify_sha256')
@patch('glob.glob')
def test_do_pivnet_download_success(self, mock_glob, mock_verify_sha256, mock_util):
mock_util.return_value = "", "", 0
mock_glob.return_value = ['/home/ubuntu/tiles/cf-1.9.0.pivotal']
mock_verify_sha256.return_value = 0
download_and_import.do_pivnet_download(
'cf', '1.9.0', 'cf*.pivotal', 'xyz123', '/home/ubuntu/tiles/')
mock_verify_sha256.assert_called_with(
'/home/ubuntu/tiles/cf-1.9.0.pivotal', 'xyz123')
def test_verify_sha256_match(self):
with tempfile.TemporaryDirectory() as temp_dir:
file_name = "{}/foo.txt".format(temp_dir)
with open(file_name, 'w') as f:
f.write('Test\n')
f.write('foo bar\n')
result = download_and_import.verify_sha256(
file_name, '9e7e95359fb81b4089289d58f9a38ff37d744db7c5941a156ff23216706da8cd'
)
self.assertEqual(result, 0)
def test_verify_sha256_no_match(self):
with tempfile.TemporaryDirectory() as temp_dir:
file_name = "{}/foo.txt".format(temp_dir)
with open(file_name, 'w') as f:
f.write('Test\n')
f.write('foo bar baz\n')
result = download_and_import.verify_sha256(
file_name, '9e7e95359fb81b4089289d58f9a38ff37d744db7c5941a156ff23216706da8cd'
)
self.assertEqual(result, 1)
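    # Added note (not in the original tests): the hex string above is expected to
    # be the SHA-256 digest of the exact bytes written to foo.txt in the matching
    # test. Assuming verify_sha256 compares against a lowercase hex digest, a fresh
    # expected value could be recomputed with hashlib, e.g.
    # hashlib.sha256(open(file_name, 'rb').read()).hexdigest().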
@patch('util.exponential_backoff_cmd')
@patch('om_manager.get_om_with_auth')
@patch('os.listdir')
def test_upload_assets_success(self, mock_os_listdir, mock_get_om_with_auth, mock_util):
mock_os_listdir.return_value = [
'tile.pivotal', 'stemcell.tgz', 'password.txt', 'secondtile.pivotal']
mock_get_om_with_auth.return_value = [
"om", "-u", "username", "-p", "password"]
mock_util.return_value = "", "", 0
out, err, exit_code = download_and_import.upload_assets(
self.settings, "/home/ubuntu/tiles")
self.assertEqual(exit_code, 0)
self.assertEqual(mock_util.call_count, 2)
@patch('util.exponential_backoff_cmd')
@patch('om_manager.get_om_with_auth')
@patch('os.listdir')
def test_upload_stemcell_success(self, mock_os_listdir, mock_get_om_with_auth, mock_util):
mock_os_listdir.return_value = [
'tile.pivotal', 'stemcell.tgz', 'password.txt', 'secondtile.pivotal']
mock_get_om_with_auth.return_value = [
"om", "-u", "username", "-p", "password"]
mock_util.return_value = "", "", 0
out, err, exit_code = download_and_import.upload_stemcell(
self.settings, "/home/ubuntu/tiles")
self.assertEqual(exit_code, 0)
self.assertEqual(mock_util.call_count, 1)
@patch('util.exponential_backoff_cmd')
@patch('om_manager.get_om_with_auth')
@patch('os.listdir')
def test_upload_stemcell_failure(self, mock_os_listdir, mock_get_om_with_auth, mock_util):
mock_os_listdir.return_value = [
'tile.pivotal', 'stemcell.tgz', 'password.txt', 'secondtile.pivotal']
mock_get_om_with_auth.return_value = [
"om", "-u", "username", "-p", "password"]
mock_util.return_value = "icky stemcell", "", 1
out, err, exit_code = download_and_import.upload_stemcell(
self.settings, "/home/ubuntu/tiles")
self.assertEqual(exit_code, 1)
self.assertEqual(mock_util.call_count, 1)
self.assertEqual(out, "icky stemcell")
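# Added sketch (not part of the original test module): a minimal direct entry
# point for this suite, assuming the standard library runner is acceptable;
# the project may instead rely on an external runner such as pytest.
if __name__ == '__main__':
    unittest.main()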
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import os
import itertools
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
import numpy as np
from lisa.analysis.base import TraceAnalysisBase
from lisa.utils import memoized
from lisa.trace import requires_events, requires_one_event_of, CPU, MissingTraceEventError
from lisa.datautils import series_integrate, df_refit_index, series_refit_index, series_deduplicate, df_add_delta, series_mean, df_window
class FrequencyAnalysis(TraceAnalysisBase):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :class:`trace.Trace`
"""
name = 'frequency'
@requires_one_event_of('cpu_frequency', 'userspace@cpu_frequency_devlib')
def df_cpus_frequency(self, signals_init=True):
"""
Similar to ``trace.df_event('cpu_frequency')``, with
``userspace@cpu_frequency_devlib`` support.
:param signals_init: If ``True``, an initial value for signals will be
provided. This includes initial values taken outside the window
boundaries and devlib-provided events.
The ``userspace@cpu_frequency_devlib`` user event is merged into the dataframe if
it provides earlier values for a CPU.
"""
def rename(df):
return df.rename(
{
'cpu_id': 'cpu',
'state': 'frequency',
},
axis=1,
)
df = self.trace.df_event('cpu_frequency', signals_init=signals_init)
df = rename(df)
if not signals_init:
return df
try:
devlib_df = self.trace.df_event('userspace@devlib_cpu_frequency')
except MissingTraceEventError:
return df
else:
devlib_df = rename(df)
# Get the initial values for each CPU
def init_freq(df, devlib):
df = df.groupby('cpu_id', observed=True, sort=False).head(1).copy()
df['devlib'] = devlib
return df
init_df = init_freq(df, False)
init_devlib_df = init_freq(devlib_df, True)
# Get the initial values as given by devlib and cpufreq.
# We want to select:
# * the first value
# * the 2nd value if that comes from cpufreq
init_df = pd.concat([init_df, init_devlib_df])
init_df.sort_index(inplace=True)
init_groups = init_df.groupby('cpu_id', observed=True, sort=False)
first_df = init_groups.head(1)
# devlib == False means it's already in the existing dataframe, and we
# don't want duplicates
first_df = first_df[first_df['devlib'] == True]
del first_df['devlib']
# The dataframe of the second values.
# If they are from cpufreq, we keep them, but if they are from devlib,
# they are useless (and actually harmful, since they correspond to no
# CPU transition)
second_df = init_groups.tail(1)
# Only keep non-devlib second events
second_df = second_df[second_df['devlib'] == False]
del second_df['devlib']
df = pd.concat([df, first_df, second_df])
df.sort_index(inplace=True)
return df
@df_cpus_frequency.used_events
def df_cpu_frequency(self, cpu, **kwargs):
"""
Same as :meth:`df_cpus_frequency` but for a single CPU.
:param cpu: CPU ID to get the frequency of.
:type cpu: int
:Variable keyword arguments: Forwarded to :meth:`df_cpus_frequency`.
"""
df = self.df_cpus_frequency(**kwargs)
return df[df['cpu'] == cpu]
@df_cpus_frequency.used_events
def _check_freq_domain_coherency(self, cpus=None):
"""
Check that all CPUs of a given frequency domain have the same frequency
transitions.
:param cpus: CPUs to take into account. All other CPUs are ignored.
If `None`, all CPUs will be checked.
:type cpus: list(int) or None
"""
domains = self.trace.plat_info['freq-domains']
if cpus is None:
cpus = list(itertools.chain.from_iterable(domains))
if len(cpus) < 2:
return
df = self.df_cpus_frequency()
for domain in domains:
# restrict the domain to the CPUs we care about. Other CPUs may have
# garbage data, but the caller is not going to look at it anyway.
domain = set(domain) & set(cpus)
if len(domain) < 2:
continue
# Get the frequency column for each CPU in the domain
freq_columns = [
# drop the index since we only care about the transitions, and
# not when they happened
df[df['cpu'] == cpu]['frequency'].reset_index(drop=True)
for cpu in domain
]
# Check that all columns are equal. If they are not, that means that
# at least one CPU has a frequency transition that is different
# from another one in the same domain, which is highly suspicious
ref = freq_columns[0]
for col in freq_columns:
# If the trace started in the middle of a group of transitions,
# ignore that transition by shifting and re-test
if not (ref.equals(col) or ref[:-1].equals(col.shift()[1:])):
raise ValueError('Frequencies of CPUs in the freq domain {} are not coherent'.format(cpus))
@TraceAnalysisBase.cache
@df_cpus_frequency.used_events
@requires_events('cpu_idle')
def _get_frequency_residency(self, cpus):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cpus: A tuple of CPU IDs
:type cpus: tuple(int)
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
freq_df = self.df_cpus_frequency()
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU.
self._check_freq_domain_coherency(cpus)
cluster_freqs = freq_df[freq_df.cpu == cpus[0]]
# Compute TOTAL Time
cluster_freqs = df_add_delta(cluster_freqs, col="total_time", window=self.trace.window)
time_df = cluster_freqs[["total_time", "frequency"]].groupby('frequency', observed=True, sort=False).sum()
# Compute ACTIVE Time
cluster_active = self.trace.analysis.idle.signal_cluster_active(cpus)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
# cluster_active[t] == 1 if at least one CPU is reported to be
# non-idle by CPUFreq at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
available_freqs = sorted(cluster_freqs.frequency.unique())
cluster_freqs = cluster_freqs.join(
cluster_active.to_frame(name='active'), how='outer')
cluster_freqs.fillna(method='ffill', inplace=True)
nonidle_time = []
for freq in available_freqs:
freq_active = cluster_freqs.frequency.apply(lambda x: 1 if x == freq else 0)
active_t = cluster_freqs.active * freq_active
# Compute total time by integrating the square wave
nonidle_time.append(series_integrate(active_t))
time_df["active_time"] = pd.DataFrame(index=available_freqs, data=nonidle_time)
return time_df
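    # Added illustration (comments only, hypothetical numbers): over a 4 s window,
    # if cluster_active is 1 on [0 s, 3 s) and 0 afterwards, and the cluster sits
    # at 1.2 GHz on [1 s, 4 s) (so freq_active for 1.2 GHz is 1 there), then
    # active_t = cluster_active * freq_active is 1 only on [1 s, 3 s), so
    # series_integrate(active_t) yields 2 s of active_time at 1.2 GHz, while
    # total_time for that frequency is 3 s.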
@_get_frequency_residency.used_events
def df_cpu_frequency_residency(self, cpu):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
if not isinstance(cpu, int):
raise TypeError('Input CPU parameter must be an integer')
return self._get_frequency_residency((cpu,))
@_get_frequency_residency.used_events
def df_domain_frequency_residency(self, cpu):
"""
Get per-frequency-domain frequency residency, i.e. the amount of time each
domain spent at each frequency.
:param cpu: Any CPU of the domain to analyse
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
domains = self.trace.plat_info['freq-domains']
for domain in domains:
if cpu in domain:
return self._get_frequency_residency(tuple(domain))
@TraceAnalysisBase.cache
@df_cpu_frequency.used_events
def df_cpu_frequency_transitions(self, cpu):
"""
Compute number of frequency transitions of a given CPU.
:param cpu: a CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``transitions`` column (the number of frequency transitions)
"""
freq_df = self.df_cpu_frequency(cpu, signals_init=False)
# Since we want to count the number of events appearing inside the
# window, make sure we don't get anything outside it
freq_df = df_window(
freq_df,
window=self.trace.window,
method='exclusive',
clip_window=False,
)
cpu_freqs = freq_df['frequency']
# Remove possible duplicates (example: when devlib sets trace markers
# a cpu_frequency event is triggered that can generate a duplicate)
cpu_freqs = series_deduplicate(cpu_freqs, keep='first', consecutives=True)
transitions = cpu_freqs.value_counts()
transitions.name = "transitions"
transitions.sort_index(inplace=True)
return pd.DataFrame(transitions)
@TraceAnalysisBase.cache
@df_cpu_frequency_transitions.used_events
def df_cpu_frequency_transition_rate(self, cpu):
"""
Compute frequency transition rate of a given CPU.
:param cpu: a CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``transitions`` column (the number of frequency transitions per second)
"""
transitions = self.df_cpu_frequency_transitions(cpu)['transitions']
return pd.DataFrame(dict(
transitions=transitions / self.trace.time_range,
))
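    # Added note (not in the original): this is plain event-count arithmetic,
    # e.g. 12 transitions observed over a 3 s trace give 4 transitions per second.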
@df_cpu_frequency.used_events
def get_average_cpu_frequency(self, cpu):
"""
Get the average frequency for a given CPU
:param cpu: The CPU to analyse
:type cpu: int
"""
df = self.df_cpu_frequency(cpu)
freq = series_refit_index(df['frequency'], window=self.trace.window)
return series_mean(freq)
@TraceAnalysisBase.cache
@requires_events('clock_set_rate', 'clock_enable', 'clock_disable')
def df_peripheral_clock_effective_rate(self, clk_name):
rate_df = self.trace.df_event('clock_set_rate')
enable_df = self.trace.df_event('clock_enable')
disable_df = self.trace.df_event('clock_disable')
freq = rate_df[rate_df.clk_name == clk_name]
enables = enable_df[enable_df.clk_name == clk_name]
disables = disable_df[disable_df.clk_name == clk_name]
freq = pd.concat([freq, enables, disables], sort=False).sort_index()
freq['start'] = freq.index
freq['len'] = (freq.start - freq.start.shift()).fillna(0).shift(-1)
# The last value will be NaN, fix to be appropriate length
freq.loc[freq.index[-1], 'len'] = self.trace.end - freq.index[-1]
freq.ffill(inplace=True)
freq['effective_rate'] = np.where(
freq['state'] == 0, 0,
np.where(freq['state'] == 1, freq['state'], float('nan'))
)
return freq
###############################################################################
# Plotting Methods
###############################################################################
@TraceAnalysisBase.plot_method(return_axis=True)
@df_peripheral_clock_effective_rate.used_events
def plot_peripheral_clock(self, clk, axis=None, **kwargs):
"""
Plot the frequency of a particular peripheral clock
:param clk: The clk name to chart
:type clk: str
"""
logger = self.get_logger()
window = self.trace.window
start, end = window
def plotter(axis, local_fig):
freq_axis, state_axis = axis
freq_axis.get_figure().suptitle('Peripheral frequency', y=.97, fontsize=16, horizontalalignment='center')
freq = self.df_peripheral_clock_effective_rate(clk)
freq = df_refit_index(freq, window=window)
# Plot frequency information (set rate)
freq_axis.set_title("Clock frequency for " + clk)
set_rate = freq['state'].dropna()
rate_axis_lib = 0
if len(set_rate) > 0:
rate_axis_lib = set_rate.max()
set_rate.plot(style=['b--'], ax=freq_axis, drawstyle='steps-post', alpha=0.4, label="clock_set_rate value")
freq_axis.hlines(set_rate.iloc[-1], set_rate.index[-1], end, linestyle='--', color='b', alpha=0.4)
else:
logger.warning('No clock_set_rate events to plot')
# Plot frequency information (effective rate)
eff_rate = freq['effective_rate'].dropna()
eff_rate = series_refit_index(eff_rate, window=window)
if len(eff_rate) > 0 and eff_rate.max() > 0:
rate_axis_lib = max(rate_axis_lib, eff_rate.max())
eff_rate.plot(style=['b-'], ax=freq_axis, drawstyle='steps-post', alpha=1.0, label="Effective rate (with on/off)")
freq_axis.hlines(eff_rate.iloc[-1], eff_rate.index[-1], end, linestyle='-', color='b', alpha=1.0)
else:
logger.warning('No effective frequency events to plot')
freq_axis.set_ylim(0, rate_axis_lib * 1.1)
freq_axis.set_xlabel('')
freq_axis.grid(True)
freq_axis.legend()
def mhz(x, pos):
return '{:1.2f} MHz'.format(x * 1e-6)
freq_axis.get_yaxis().set_major_formatter(FuncFormatter(mhz))
on = freq[freq.state == 1]
state_axis.hlines([0] * len(on),
on['start'], on['start'] + on['len'],
linewidth=10.0, label='clock on', color='green')
off = freq[freq.state == 0]
state_axis.hlines([0] * len(off),
off['start'], off['start'] + off['len'],
linewidth=10.0, label='clock off', color='red')
# Plot time period that the clock state was unknown from the trace
indeterminate = pd.concat([on, off]).sort_index()
if indeterminate.empty:
indet_range_max = end
else:
indet_range_max = indeterminate.index[0]
state_axis.hlines(0, 0, indet_range_max, linewidth=1.0, label='indeterminate clock state', linestyle='--')
state_axis.legend(bbox_to_anchor=(0., 1.02, 1., 0.102), loc=3, ncol=3, mode='expand')
state_axis.set_yticks([])
state_axis.set_xlabel('seconds')
state_axis.set_xlim(start, end)
return self.do_plot(plotter, height=8, nrows=2, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method()
@df_cpu_frequency.used_events
def plot_cpu_frequencies(self, cpu: CPU, axis, local_fig, average: bool=True):
"""
Plot frequency for the specified CPU
:param cpu: The CPU for which to plot frequencies
:type cpu: int
:param average: If ``True``, add a horizontal line which is the
frequency average.
:type average: bool
If ``sched_overutilized`` events are available, the plots will also
show the intervals of time where the system was overutilized.
"""
logger = self.get_logger()
df = self.df_cpu_frequency(cpu)
if "freqs" in self.trace.plat_info:
frequencies = self.trace.plat_info['freqs'][cpu]
else:
logger.info("Estimating CPU{} frequencies from trace".format(cpu))
frequencies = sorted(list(df.frequency.unique()))
logger.debug("Estimated frequencies: {}".format(frequencies))
avg = self.get_average_cpu_frequency(cpu)
logger.info(
"Average frequency for CPU{} : {:.3f} GHz".format(cpu, avg / 1e6))
df = df_refit_index(df, window=self.trace.window)
df['frequency'].plot(ax=axis, drawstyle='steps-post')
if average and avg > 0:
axis.axhline(avg, color=self.get_next_color(axis), linestyle='--',
label="average")
plot_overutilized = self.trace.analysis.status.plot_overutilized
if self.trace.has_events(plot_overutilized.used_events):
plot_overutilized(axis=axis)
axis.set_ylabel('Frequency (Hz)')
axis.set_ylim(frequencies[0] * 0.9, frequencies[-1] * 1.1)
axis.legend()
if local_fig:
axis.set_xlabel('Time')
axis.set_title('Frequency of CPU{}'.format(cpu))
axis.grid(True)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequencies.used_events
def plot_domain_frequencies(self, axis=None, **kwargs):
"""
Plot frequency trend for all frequency domains.
If ``sched_overutilized`` events are available, the plots will also show
the intervals of time where the cluster was overutilized.
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for idx, domain in enumerate(domains):
axis = axes[idx] if len(domains) > 1 else axes
self.plot_cpu_frequencies(domain[0], axis=axis)
axis.set_title('Frequencies of CPUs {}'.format(domain))
return self.do_plot(plotter, nrows=len(domains), sharex=True, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method(return_axis=True)
@df_cpu_frequency_residency.used_events
def plot_cpu_frequency_residency(self, cpu: CPU, pct: bool=False, axis=None, **kwargs):
"""
Plot per-CPU frequency residency.
:param cpu: The CPU to generate the plot for
:type cpu: int
:param pct: Plot residencies in percentage
:type pct: bool
"""
residency_df = self.df_cpu_frequency_residency(cpu)
total_df = residency_df.total_time
active_df = residency_df.active_time
if pct:
total_df = total_df * 100 / total_df.sum()
active_df = active_df * 100 / active_df.sum()
def plotter(axes, local_fig):
total_df.plot.barh(ax=axes[0])
axes[0].set_title("CPU{} total frequency residency".format(cpu))
active_df.plot.barh(ax=axes[1])
axes[1].set_title("CPU{} active frequency residency".format(cpu))
for axis in axes:
if pct:
axis.set_xlabel("Time share (%)")
else:
axis.set_xlabel("Time (s)")
axis.set_ylabel("Frequency (Hz)")
axis.grid(True)
return self.do_plot(plotter, nrows=2, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequency_residency.used_events
def plot_domain_frequency_residency(self, pct: bool=False, axis=None, **kwargs):
"""
Plot the frequency residency for all frequency domains.
:param pct: Plot residencies in percentage
:type pct: bool
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for idx, domain in enumerate(domains):
local_axes = axes[2 * idx: 2 * (idx + 1)]
self.plot_cpu_frequency_residency(domain[0],
pct=pct,
axis=local_axes,
)
for axis in local_axes:
title = axis.get_title()
axis.set_title(title.replace(
"CPU{}".format(domain[0]), "CPUs {}".format(domain)))
return self.do_plot(plotter, nrows=2 * len(domains), sharex=True, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method()
@df_cpu_frequency_transitions.used_events
def plot_cpu_frequency_transitions(self, cpu: CPU, axis, local_fig, pct: bool=False):
"""
Plot frequency transitions count of the specified CPU
:param cpu: The CPU to generate the plot for
:type cpu: int
:param pct: Plot frequency transitions in percentage
:type pct: bool
"""
df = self.df_cpu_frequency_transitions(cpu)
if pct:
df = df * 100 / df.sum()
if not df.empty:
df["transitions"].plot.barh(ax=axis)
axis.set_title('Frequency transitions of CPU{}'.format(cpu))
if pct:
axis.set_xlabel("Transitions share (%)")
else:
axis.set_xlabel("Transition count")
axis.set_ylabel("Frequency (Hz)")
axis.grid(True)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequency_transitions.used_events
def plot_domain_frequency_transitions(self, pct: bool=False, axis=None, **kwargs):
"""
Plot frequency transitions count for all frequency domains
:param pct: Plot frequency transitions in percentage
:type pct: bool
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for domain, axis in zip(domains, axes):
self.plot_cpu_frequency_transitions(
cpu=domain[0],
pct=pct,
axis=axis,
)
title = axis.get_title()
axis.set_title(title.replace("CPU{}".format(domain[0]),
"CPUs {}".format(domain)))
return self.do_plot(plotter, nrows=len(domains), axis=axis, **kwargs)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
lisa.analysis.frequency: Fix df_cpu_frequency()
* Only require one of cpu_frequency, userspace@cpu_frequency_devlib
* Fix typo in reference to userspace@cpu_frequency_devlib
* Fix wrong column names
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import os
import itertools
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
import numpy as np
from lisa.analysis.base import TraceAnalysisBase
from lisa.utils import memoized
from lisa.trace import requires_events, requires_one_event_of, CPU, MissingTraceEventError
from lisa.datautils import series_integrate, df_refit_index, series_refit_index, series_deduplicate, df_add_delta, series_mean, df_window
class FrequencyAnalysis(TraceAnalysisBase):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :class:`trace.Trace`
"""
name = 'frequency'
@requires_one_event_of('cpu_frequency', 'userspace@cpu_frequency_devlib')
def df_cpus_frequency(self, signals_init=True):
"""
Similar to ``trace.df_event('cpu_frequency')``, with
``userspace@cpu_frequency_devlib`` support.
:param signals_init: If ``True``, an initial value for signals will be
provided. This includes initial values taken outside the window
boundaries and devlib-provided events.
The ``userspace@cpu_frequency_devlib`` user event is merged into the dataframe if
it provides earlier values for a CPU.
"""
def rename(df):
return df.rename(
{
'cpu_id': 'cpu',
'state': 'frequency',
},
axis=1,
)
def check_empty(df, excep):
if df.empty:
raise excep
else:
return df
try:
df = self.trace.df_event('cpu_frequency', signals_init=signals_init)
except MissingTraceEventError as e:
excep = e
df = pd.DataFrame(columns=['cpu', 'frequency'])
else:
excep = None
df = rename(df)
if not signals_init:
return check_empty(df, excep)
try:
devlib_df = self.trace.df_event('userspace@cpu_frequency_devlib')
except MissingTraceEventError as e:
return check_empty(df, e)
else:
devlib_df = rename(devlib_df)
# Get the initial values for each CPU
def init_freq(df, devlib):
df = df.groupby('cpu', observed=True, sort=False).head(1).copy()
df['devlib'] = devlib
return df
init_df = init_freq(df, False)
init_devlib_df = init_freq(devlib_df, True)
# Get the initial values as given by devlib and cpufreq.
# We want to select:
# * the first value
# * the 2nd value if that comes from cpufreq
init_df = pd.concat([init_df, init_devlib_df])
init_df.sort_index(inplace=True)
init_groups = init_df.groupby('cpu', observed=True, sort=False)
first_df = init_groups.head(1)
# devlib == False means it's already in the existing dataframe, and we
# don't want duplicates
first_df = first_df[first_df['devlib'] == True]
del first_df['devlib']
# The dataframe of the second values.
# If they are from cpufreq, we keep them, but if they are from devlib,
# they are useless (and actually harmful, since they correspond to no
# CPU transition)
second_df = init_groups.tail(1)
# Only keep non-devlib second events
second_df = second_df[second_df['devlib'] == False]
del second_df['devlib']
df = pd.concat([df, first_df, second_df])
df.sort_index(inplace=True)
return check_empty(df, None)
@df_cpus_frequency.used_events
def df_cpu_frequency(self, cpu, **kwargs):
"""
Same as :meth:`df_cpus_frequency` but for a single CPU.
:param cpu: CPU ID to get the frequency of.
:type cpu: int
:Variable keyword arguments: Forwarded to :meth:`df_cpus_frequency`.
"""
df = self.df_cpus_frequency(**kwargs)
return df[df['cpu'] == cpu]
@df_cpus_frequency.used_events
def _check_freq_domain_coherency(self, cpus=None):
"""
Check that all CPUs of a given frequency domain have the same frequency
transitions.
:param cpus: CPUs to take into account. All other CPUs are ignored.
If `None`, all CPUs will be checked.
:type cpus: list(int) or None
"""
domains = self.trace.plat_info['freq-domains']
if cpus is None:
cpus = list(itertools.chain.from_iterable(domains))
if len(cpus) < 2:
return
df = self.df_cpus_frequency()
for domain in domains:
# restrict the domain to the CPUs we care about. Other CPUs may have
# garbage data, but the caller is not going to look at it anyway.
domain = set(domain) & set(cpus)
if len(domain) < 2:
continue
# Get the frequency column for each CPU in the domain
freq_columns = [
# drop the index since we only care about the transitions, and
# not when they happened
df[df['cpu'] == cpu]['frequency'].reset_index(drop=True)
for cpu in domain
]
# Check that all columns are equal. If they are not, that means that
# at least one CPU has a frequency transition that is different
# from another one in the same domain, which is highly suspicious
ref = freq_columns[0]
for col in freq_columns:
# If the trace started in the middle of a group of transitions,
# ignore that transition by shifting and re-test
if not (ref.equals(col) or ref[:-1].equals(col.shift()[1:])):
raise ValueError('Frequencies of CPUs in the freq domain {} are not coherent'.format(cpus))
@TraceAnalysisBase.cache
@df_cpus_frequency.used_events
@requires_events('cpu_idle')
def _get_frequency_residency(self, cpus):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cpus: A tuple of CPU IDs
:type cpus: tuple(int)
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
freq_df = self.df_cpus_frequency()
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU.
self._check_freq_domain_coherency(cpus)
cluster_freqs = freq_df[freq_df.cpu == cpus[0]]
# Compute TOTAL Time
cluster_freqs = df_add_delta(cluster_freqs, col="total_time", window=self.trace.window)
time_df = cluster_freqs[["total_time", "frequency"]].groupby('frequency', observed=True, sort=False).sum()
# Compute ACTIVE Time
cluster_active = self.trace.analysis.idle.signal_cluster_active(cpus)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
# cluster_active[t] == 1 if at least one CPU is reported to be
# non-idle by CPUFreq at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
available_freqs = sorted(cluster_freqs.frequency.unique())
cluster_freqs = cluster_freqs.join(
cluster_active.to_frame(name='active'), how='outer')
cluster_freqs.fillna(method='ffill', inplace=True)
nonidle_time = []
for freq in available_freqs:
freq_active = cluster_freqs.frequency.apply(lambda x: 1 if x == freq else 0)
active_t = cluster_freqs.active * freq_active
# Compute total time by integrating the square wave
nonidle_time.append(series_integrate(active_t))
time_df["active_time"] = pd.DataFrame(index=available_freqs, data=nonidle_time)
return time_df
@_get_frequency_residency.used_events
def df_cpu_frequency_residency(self, cpu):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
if not isinstance(cpu, int):
raise TypeError('Input CPU parameter must be an integer')
return self._get_frequency_residency((cpu,))
@_get_frequency_residency.used_events
def df_domain_frequency_residency(self, cpu):
"""
Get per-frequency-domain frequency residency, i.e. the amount of time each
domain spent at each frequency.
:param cpu: Any CPU of the domain to analyse
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
domains = self.trace.plat_info['freq-domains']
for domain in domains:
if cpu in domain:
return self._get_frequency_residency(tuple(domain))
@TraceAnalysisBase.cache
@df_cpu_frequency.used_events
def df_cpu_frequency_transitions(self, cpu):
"""
Compute number of frequency transitions of a given CPU.
:param cpu: a CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``transitions`` column (the number of frequency transitions)
"""
freq_df = self.df_cpu_frequency(cpu, signals_init=False)
# Since we want to count the number of events appearing inside the
# window, make sure we don't get anything outside it
freq_df = df_window(
freq_df,
window=self.trace.window,
method='exclusive',
clip_window=False,
)
cpu_freqs = freq_df['frequency']
# Remove possible duplicates (example: when devlib sets trace markers
# a cpu_frequency event is triggered that can generate a duplicate)
cpu_freqs = series_deduplicate(cpu_freqs, keep='first', consecutives=True)
transitions = cpu_freqs.value_counts()
transitions.name = "transitions"
transitions.sort_index(inplace=True)
return pd.DataFrame(transitions)
@TraceAnalysisBase.cache
@df_cpu_frequency_transitions.used_events
def df_cpu_frequency_transition_rate(self, cpu):
"""
Compute frequency transition rate of a given CPU.
:param cpu: a CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``transitions`` column (the number of frequency transitions per second)
"""
transitions = self.df_cpu_frequency_transitions(cpu)['transitions']
return pd.DataFrame(dict(
transitions=transitions / self.trace.time_range,
))
@df_cpu_frequency.used_events
def get_average_cpu_frequency(self, cpu):
"""
Get the average frequency for a given CPU
:param cpu: The CPU to analyse
:type cpu: int
"""
df = self.df_cpu_frequency(cpu)
freq = series_refit_index(df['frequency'], window=self.trace.window)
return series_mean(freq)
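    # Added note (assumption: series_mean() is a time-weighted average over the
    # series index, which is what makes it meaningful for a step-wise signal).
    # E.g. 1 s spent at 500 MHz followed by 3 s at 1500 MHz over a 4 s window
    # averages to (500 * 1 + 1500 * 3) / 4 = 1250 MHz.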
@TraceAnalysisBase.cache
@requires_events('clock_set_rate', 'clock_enable', 'clock_disable')
def df_peripheral_clock_effective_rate(self, clk_name):
rate_df = self.trace.df_event('clock_set_rate')
enable_df = self.trace.df_event('clock_enable')
disable_df = self.trace.df_event('clock_disable')
freq = rate_df[rate_df.clk_name == clk_name]
enables = enable_df[enable_df.clk_name == clk_name]
disables = disable_df[disable_df.clk_name == clk_name]
freq = pd.concat([freq, enables, disables], sort=False).sort_index()
freq['start'] = freq.index
freq['len'] = (freq.start - freq.start.shift()).fillna(0).shift(-1)
# The last value will be NaN, fix to be appropriate length
freq.loc[freq.index[-1], 'len'] = self.trace.end - freq.index[-1]
freq.ffill(inplace=True)
freq['effective_rate'] = np.where(
freq['state'] == 0, 0,
np.where(freq['state'] == 1, freq['state'], float('nan'))
)
return freq
###############################################################################
# Plotting Methods
###############################################################################
@TraceAnalysisBase.plot_method(return_axis=True)
@df_peripheral_clock_effective_rate.used_events
def plot_peripheral_clock(self, clk, axis=None, **kwargs):
"""
Plot the frequency of a particular peripheral clock
:param clk: The clk name to chart
:type clk: str
"""
logger = self.get_logger()
window = self.trace.window
start, end = window
def plotter(axis, local_fig):
freq_axis, state_axis = axis
freq_axis.get_figure().suptitle('Peripheral frequency', y=.97, fontsize=16, horizontalalignment='center')
freq = self.df_peripheral_clock_effective_rate(clk)
freq = df_refit_index(freq, window=window)
# Plot frequency information (set rate)
freq_axis.set_title("Clock frequency for " + clk)
set_rate = freq['state'].dropna()
rate_axis_lib = 0
if len(set_rate) > 0:
rate_axis_lib = set_rate.max()
set_rate.plot(style=['b--'], ax=freq_axis, drawstyle='steps-post', alpha=0.4, label="clock_set_rate value")
freq_axis.hlines(set_rate.iloc[-1], set_rate.index[-1], end, linestyle='--', color='b', alpha=0.4)
else:
logger.warning('No clock_set_rate events to plot')
# Plot frequency information (effective rate)
eff_rate = freq['effective_rate'].dropna()
eff_rate = series_refit_index(eff_rate, window=window)
if len(eff_rate) > 0 and eff_rate.max() > 0:
rate_axis_lib = max(rate_axis_lib, eff_rate.max())
eff_rate.plot(style=['b-'], ax=freq_axis, drawstyle='steps-post', alpha=1.0, label="Effective rate (with on/off)")
freq_axis.hlines(eff_rate.iloc[-1], eff_rate.index[-1], end, linestyle='-', color='b', alpha=1.0)
else:
logger.warning('No effective frequency events to plot')
freq_axis.set_ylim(0, rate_axis_lib * 1.1)
freq_axis.set_xlabel('')
freq_axis.grid(True)
freq_axis.legend()
def mhz(x, pos):
return '{:1.2f} MHz'.format(x * 1e-6)
freq_axis.get_yaxis().set_major_formatter(FuncFormatter(mhz))
on = freq[freq.state == 1]
state_axis.hlines([0] * len(on),
on['start'], on['start'] + on['len'],
linewidth=10.0, label='clock on', color='green')
off = freq[freq.state == 0]
state_axis.hlines([0] * len(off),
off['start'], off['start'] + off['len'],
linewidth=10.0, label='clock off', color='red')
# Plot time period that the clock state was unknown from the trace
indeterminate = pd.concat([on, off]).sort_index()
if indeterminate.empty:
indet_range_max = end
else:
indet_range_max = indeterminate.index[0]
state_axis.hlines(0, 0, indet_range_max, linewidth=1.0, label='indeterminate clock state', linestyle='--')
state_axis.legend(bbox_to_anchor=(0., 1.02, 1., 0.102), loc=3, ncol=3, mode='expand')
state_axis.set_yticks([])
state_axis.set_xlabel('seconds')
state_axis.set_xlim(start, end)
return self.do_plot(plotter, height=8, nrows=2, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method()
@df_cpu_frequency.used_events
def plot_cpu_frequencies(self, cpu: CPU, axis, local_fig, average: bool=True):
"""
Plot frequency for the specified CPU
:param cpu: The CPU for which to plot frequencies
:type cpu: int
:param average: If ``True``, add a horizontal line which is the
frequency average.
:type average: bool
If ``sched_overutilized`` events are available, the plots will also
show the intervals of time where the system was overutilized.
"""
logger = self.get_logger()
df = self.df_cpu_frequency(cpu)
if "freqs" in self.trace.plat_info:
frequencies = self.trace.plat_info['freqs'][cpu]
else:
logger.info("Estimating CPU{} frequencies from trace".format(cpu))
frequencies = sorted(list(df.frequency.unique()))
logger.debug("Estimated frequencies: {}".format(frequencies))
avg = self.get_average_cpu_frequency(cpu)
logger.info(
"Average frequency for CPU{} : {:.3f} GHz".format(cpu, avg / 1e6))
df = df_refit_index(df, window=self.trace.window)
df['frequency'].plot(ax=axis, drawstyle='steps-post')
if average and avg > 0:
axis.axhline(avg, color=self.get_next_color(axis), linestyle='--',
label="average")
plot_overutilized = self.trace.analysis.status.plot_overutilized
if self.trace.has_events(plot_overutilized.used_events):
plot_overutilized(axis=axis)
axis.set_ylabel('Frequency (Hz)')
axis.set_ylim(frequencies[0] * 0.9, frequencies[-1] * 1.1)
axis.legend()
if local_fig:
axis.set_xlabel('Time')
axis.set_title('Frequency of CPU{}'.format(cpu))
axis.grid(True)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequencies.used_events
def plot_domain_frequencies(self, axis=None, **kwargs):
"""
Plot frequency trend for all frequency domains.
If ``sched_overutilized`` events are available, the plots will also show
the intervals of time where the cluster was overutilized.
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for idx, domain in enumerate(domains):
axis = axes[idx] if len(domains) > 1 else axes
self.plot_cpu_frequencies(domain[0], axis=axis)
axis.set_title('Frequencies of CPUs {}'.format(domain))
return self.do_plot(plotter, nrows=len(domains), sharex=True, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method(return_axis=True)
@df_cpu_frequency_residency.used_events
def plot_cpu_frequency_residency(self, cpu: CPU, pct: bool=False, axis=None, **kwargs):
"""
Plot per-CPU frequency residency.
:param cpu: The CPU to generate the plot for
:type cpu: int
:param pct: Plot residencies in percentage
:type pct: bool
"""
residency_df = self.df_cpu_frequency_residency(cpu)
total_df = residency_df.total_time
active_df = residency_df.active_time
if pct:
total_df = total_df * 100 / total_df.sum()
active_df = active_df * 100 / active_df.sum()
def plotter(axes, local_fig):
total_df.plot.barh(ax=axes[0])
axes[0].set_title("CPU{} total frequency residency".format(cpu))
active_df.plot.barh(ax=axes[1])
axes[1].set_title("CPU{} active frequency residency".format(cpu))
for axis in axes:
if pct:
axis.set_xlabel("Time share (%)")
else:
axis.set_xlabel("Time (s)")
axis.set_ylabel("Frequency (Hz)")
axis.grid(True)
return self.do_plot(plotter, nrows=2, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequency_residency.used_events
def plot_domain_frequency_residency(self, pct: bool=False, axis=None, **kwargs):
"""
Plot the frequency residency for all frequency domains.
:param pct: Plot residencies in percentage
:type pct: bool
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for idx, domain in enumerate(domains):
local_axes = axes[2 * idx: 2 * (idx + 1)]
self.plot_cpu_frequency_residency(domain[0],
pct=pct,
axis=local_axes,
)
for axis in local_axes:
title = axis.get_title()
axis.set_title(title.replace(
"CPU{}".format(domain[0]), "CPUs {}".format(domain)))
return self.do_plot(plotter, nrows=2 * len(domains), sharex=True, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method()
@df_cpu_frequency_transitions.used_events
def plot_cpu_frequency_transitions(self, cpu: CPU, axis, local_fig, pct: bool=False):
"""
Plot frequency transitions count of the specified CPU
:param cpu: The CPU to generate the plot for
:type cpu: int
:param pct: Plot frequency transitions in percentage
:type pct: bool
"""
df = self.df_cpu_frequency_transitions(cpu)
if pct:
df = df * 100 / df.sum()
if not df.empty:
df["transitions"].plot.barh(ax=axis)
axis.set_title('Frequency transitions of CPU{}'.format(cpu))
if pct:
axis.set_xlabel("Transitions share (%)")
else:
axis.set_xlabel("Transition count")
axis.set_ylabel("Frequency (Hz)")
axis.grid(True)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequency_transitions.used_events
def plot_domain_frequency_transitions(self, pct: bool=False, axis=None, **kwargs):
"""
Plot frequency transitions count for all frequency domains
:param pct: Plot frequency transitions in percentage
:type pct: bool
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for domain, axis in zip(domains, axes):
self.plot_cpu_frequency_transitions(
cpu=domain[0],
pct=pct,
axis=axis,
)
title = axis.get_title()
axis.set_title(title.replace("CPU{}".format(domain[0]),
"CPUs {}".format(domain)))
return self.do_plot(plotter, nrows=len(domains), axis=axis, **kwargs)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
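# Added usage sketch (not part of the module above): a minimal way the analysis
# could be driven from a lisa Trace instance. The trace path and event list here
# are hypothetical placeholders.
if __name__ == '__main__':
    from lisa.trace import Trace
    trace = Trace('trace.dat', events=['cpu_frequency', 'cpu_idle'])
    ana = trace.analysis.frequency
    print(ana.df_cpus_frequency().head())
    print(ana.get_average_cpu_frequency(0))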
#! /usr/local/bin/python
"""
See LICENSE file for copyright and license details.
"""
from datetime import datetime
from database.databaseaccess import DatabaseAccess
from modules.core_module import CoreModule
from modules.statement import Statement
from modules.constant import *
from modules.function import *
from generic.modules.function import *
from database.mappings import T_TRADE
from generic.modules.calculator_finance import *
class Trade(CoreModule):
"""
Trade class.
"""
def __init__(self, config):
"""
Initialisation
"""
self.config = config
self.statement_trade = Statement(Table.TRADE)
self.flag_insupdel = StatementType.INSERT
self.trade_id = DEFAULT_INT
self.market_id = DEFAULT_INT
self.commodity_name = ''
self.date_buy = DEFAULT_DATE
self.year_buy = DEFAULT_INT
self.month_buy = DEFAULT_INT
self.day_buy = DEFAULT_INT
self.date_sell = DEFAULT_DATE
self.year_sell = DEFAULT_INT
self.month_sell = DEFAULT_INT
self.day_sell = DEFAULT_INT
self.long_flag = DEFAULT_INT
self.price_buy = DEFAULT_DECIMAL
self.price_sell = DEFAULT_DECIMAL
self.shares_buy = DEFAULT_DECIMAL
self.shares_sell = DEFAULT_DECIMAL
self.commission_buy = DEFAULT_DECIMAL
self.commission_sell = DEFAULT_DECIMAL
self.tax_buy = DEFAULT_DECIMAL
self.tax_sell = DEFAULT_DECIMAL
self.risk_input = DEFAULT_DECIMAL
self.risk_input_percent = DEFAULT_DECIMAL
self.risk_initial = DEFAULT_DECIMAL
self.risk_initial_percent = DEFAULT_DECIMAL
self.risk_actual = DEFAULT_DECIMAL
self.risk_actual_percent = DEFAULT_DECIMAL
self.cost_total = DEFAULT_DECIMAL
self.cost_other = DEFAULT_DECIMAL
self.amount_buy_simple = DEFAULT_DECIMAL
self.amount_sell_simple = DEFAULT_DECIMAL
self.stoploss = DEFAULT_DECIMAL
self.profit_loss = DEFAULT_DECIMAL
self.profit_loss_percent = DEFAULT_DECIMAL
self.r_multiple = DEFAULT_DECIMAL
self.win_flag = DEFAULT_DECIMAL
self.id_buy = DEFAULT_INT
self.id_sell = DEFAULT_INT
self.currency_exchange_id = DEFAULT_INT
self.drawdown_id = DEFAULT_INT
self.pool_at_start = DEFAULT_DECIMAL
self.date_expiration = DEFAULT_DATE
self.expired_flag = DEFAULT_INT
self.active = DEFAULT_INT
self.date_created = DEFAULT_DATE
self.date_modified = DEFAULT_DATE
self.trade_record = []
def create_statements(self, input_fields, statements_finance):
"""
Creates the records needed for Table.TRADE and returns them as a
Statement object.
"""
#NOTE: price_buy will be fields['i_amount']
#When we buy more, it will be overwritten!
#Trading without adding to positions is assumed by this code!
try:
dba = DatabaseAccess(self.config)
self.date_created = current_date()
self.date_modified = current_date()
records = 0
self.finance_id = dba.first_finance_id_from_latest()
if self.finance_id != -1:
for fields in input_fields:
if deals_with_commodities(
fields[Input.ACCOUNT_FROM]
, fields[Input.ACCOUNT_TO]):
#TODO: indent the below code appropriately.
records = records + 1
# GENERAL INFO AT START
self.general_info_at_start(dba, fields)
# UPDATE/INSERT
if dba.invade_already_started(self.market_id,
self.commodity_name_id, T_TRADE):
self.update_info(fields, self.trade_record)
else:
self.insert_info(fields, self.trade_record)
# GENERAL VARIABLES THAT CAN BE CALCULATED ON THE DATA WE HAVE
self.general_info_at_end(fields, self.trade_record)
# TEST INFO
self.print_test_info()
# ADDING THE STATEMENTS
self.add_to_statement(records)
self.finance_id = self.finance_id + 1
return self.statement_trade
except Exception as ex:
print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
finally:
dba = None
def general_info_at_start(self, dba, fields):
"""
General info at the start of the trade.
"""
try:
self.market_id = dba.market_id_from_market(
fields[Input.MARKET_CODE])
self.commodity_name_id = dba.commodity_name_id_from_commodity_name(
fields[Input.COMMODITY_NAME], self.market_id)
self.finance_record = dba.get_finance_record(self.finance_id)
self.trade_record = dba.get_invade_record(self.finance_id, T_TRADE)
self.long_flag = dba.get_long_flag_value(fields[Input.ACCOUNT_FROM],
fields[Input.ACCOUNT_TO], self.trade_record)
# TEST INFO
print 'test finance_record=', self.finance_record
print 'test trade_record=', self.trade_record
print 'test: long_flag =', self.long_flag
#print library_test()
except Exception as ex:
print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
def update_info(self, fields, trade_record):
"""
Update info.
"""
#NOTE: Correct way of updating = Supplier.query.filter(<your stuff here, or user filter_by, or whatever is in your where clause>).update(values)
#e.g.: session.query(Supplier).filter_by(id=2).update({"name": u"Mayowa"})
#TABLE_TRADE.query.filter(market_name=...,commodity_name=...).update({"date_...": date_... etc.})
try:
self.flag_insupdel = StatementType.UPDATE
trade_id = trade_record['trade_id']
## buy/sell related fields
if (we_are_buying(fields[Input.ACCOUNT_FROM], fields[Input.ACCOUNT_TO])
and T_TRADE.id_buy == -1):
id_buy = self.finance_id
id_sell = trade_record['id_sell']
date_buy = self.date_created
date_sell = trade_record['date_sell']
price_buy = abs(fields[Input.PRICE])
price_sell = abs(trade_record['price_sell'])
shares_buy = fields[Input.QUANTITY]
shares_sell = trade_record['shares_sell']
commission_buy = fields[Input.COMMISSION]
commission_sell = trade_record['commission_sell']
tax_buy = fields[Input.TAX]
tax_sell = trade_record['tax_sell']
elif (not we_are_buying(fields[Input.ACCOUNT_FROM], fields[Input.ACCOUNT_TO])
and T_TRADE.id_sell == -1):
id_buy = trade_record['id_buy']
id_sell = self.finance_id
date_buy = trade_record['date_buy']
date_sell = self.date_created
price_buy = abs(trade_record['price_buy'])
price_sell = abs(fields[Input.PRICE])
shares_buy = trade_record['shares_buy']
shares_sell = fields[Input.QUANTITY]
commission_buy = trade_record['commission_buy']
commission_sell = fields[Input.COMMISSION]
tax_buy = trade_record['tax_buy']
tax_sell = fields[Input.TAX]
else:
raise Exception(
"{0} already contains a sell or buy record" \
" and you are trying to add one like it" \
" again?".format(T_TRADE))
stoploss = trade_record['stoploss']
profit_loss = calculate_profit_loss(
trade_record['amount_sell'],
trade_record['amount_buy'])
pool_at_start = trade_record['pool_at_start']
self.date_created = trade_record['date_created']
amount_buy_simple = trade_record['amount_buy_simple']
amount_sell_simple = calculate_amount_simple(
Decimal(fields[Input.PRICE])
, Decimal(fields[Input.QUANTITY]))
risk_input = trade_record['risk_input']
risk_input_percent = trade_record['risk_input_percent']
risk_initial = trade_record['risk_initial']
risk_initial_percent = (risk_initial/amount_buy_simple)*Decimal(100.0)
risk_actual = calculate_risk_actual(
trade_record['price_buy'],
trade_record['shares_buy'],
trade_record['price_sell'],
trade_record['shares_sell'],
trade_record['stoploss'],
trade_record['risk_initial'])
risk_actual_percent = (risk_actual/amount_buy_simple)*Decimal(100.0)
cost_total = calculate_cost_total(
trade_record['tax_buy'],
trade_record['commission_buy'],
trade_record['tax_sell'],
trade_record['commission_sell'])
cost_other = calculate_cost_other(
cost_total,
profit_loss)
if we_are_buying(fields[Input.ACCOUNT_FROM], fields[Input.ACCOUNT_TO]):
win_flag = dba.get_win_flag_value(
price_buy,
trade_record['price_sell'],
long_flag)
else:
win_flag = dba.get_win_flag_value(
trade_record['price_buy'],
price_sell,
long_flag)
currency_exchange_id = trade_record['currency_exchange_id']
drawdown_id = trade_record['drawdown_id']
r_multiple = calculate_r_multiple(
trade_record['price_buy'],
trade_record['price_sell'],
trade_record['price_stoploss'])
date_expiration = trade_record['date_expiration']
#TODO: for investing, id_buy/sell is id_firstbuy and id_firstsell
# and expiration flag should only be set at the end of the trade, when
# the trade is closed. This means that date_buy and date_sell is not
# enough to determine if a trade is closed or not. The total shares
# should also be 0 when added up OR shares_buy = shares_sell.
# So add:
#if trade_closed: (or something like that)
expired_flag = (1 if date_sell > date_expiration else 0)
except Exception as ex:
print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
def insert_info(self, fields, trade_record):
"""
Insert info.
"""
try:
self.flag_insupdel = StatementType.INSERT
trade_id = None # insert: new one created automatically
## buy/sell related fields
if we_are_buying(fields[Input.ACCOUNT_FROM], fields[Input.ACCOUNT_TO]):
id_buy = self.finance_id
id_sell = -1
date_buy = self.date_created
date_sell = string_to_date(DEFAULT_DATE)
price_buy = abs(fields[Input.PRICE])
price_sell = DEFAULT_DECIMAL
shares_buy = fields[Input.QUANTITY]
shares_sell = DEFAULT_INT
#TODO: commission and tax from T_RATE, when fields[Input.AUTOMATIC_FLAG] is 1
commission_buy = fields[Input.COMMISSION]
commission_sell = DEFAULT_DECIMAL
tax_buy = fields[Input.TAX]
tax_sell = DEFAULT_DECIMAL
else:
id_buy = -1
id_sell = self.finance_id
date_sell = self.date_created
date_buy = string_to_date(DEFAULT_DATE)
price_buy = DEFAULT_DECIMAL
price_sell = abs(fields[Input.PRICE])
shares_buy = DEFAULT_INT
shares_sell = fields[Input.QUANTITY]
commission_buy = DEFAULT_DECIMAL
# TODO: commission and tax from T_RATE (see also higher)
commission_sell = fields[Input.COMMISSION]
tax_buy = DEFAULT_DECIMAL
tax_sell = fields[Input.TAX]
stoploss = calculate_stoploss(
abs(fields[Input.PRICE]),
fields[Input.QUANTITY],
fields[Input.TAX],
fields[Input.COMMISSION],
fields[Input.RISK],
fields[Input.POOL])
profit_loss = DEFAULT_DECIMAL #Only calculated at end of trade.
pool_at_start = fields[Input.POOL]
amount_buy_simple = calculate_amount_simple(
Decimal(fields[Input.PRICE])
, Decimal(fields[Input.QUANTITY]))
amount_sell_simple = DEFAULT_DECIMAL
risk_input = calculate_risk_input(
fields[Input.POOL],
fields[Input.RISK])
risk_input_percent = fields[Input.RISK]
risk_initial = calculate_risk_initial(
fields[Input.PRICE],
fields[Input.QUANTITY],
stoploss)
risk_initial_percent = Decimal(100.0)*risk_initial/amount_buy_simple
risk_actual = DEFAULT_DECIMAL
risk_actual_percent = DEFAULT_DECIMAL
cost_total = DEFAULT_DECIMAL
cost_other = DEFAULT_DECIMAL
win_flag = -1 #not yet finished, we can not know it yet.
currency_exchange_id = dba.first_currency_exchange_id_from_latest()
drawdown_id = dba.new_drawdown_record()
r_multiple = DEFAULT_DECIMAL
date_expiration = fields[Input.DATE_EXPIRATION]
expired_flag = DEFAULT_INT
except Exception as ex:
print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
def general_info_at_end(self, fields, trade_record):
"""
General info at the end of the trade.
"""
try:
profit_loss_percent = profit_loss/Decimal(100.0)
year_buy = date_buy.year
month_buy = date_buy.month
day_buy = date_buy.day
year_sell = date_sell.year
month_sell = date_sell.month
day_sell = date_sell.day
except Exception as ex:
print Error.CREATE_STATEMENTS_TABLE_TRADE, ex
def add_to_statement(self, records):
"""
Add the data to the statement list.
"""
self.statement_trade.add(
records,
{
'trade_id':self.trade_id,
'market_id':int(self.market_id),
'commodity_name_id':int(self.commodity_name_id),
'date_buy':self.date_buy,
'year_buy':self.year_buy,
'month_buy':self.month_buy,
'day_buy':self.day_buy,
'date_sell':self.date_sell,
'year_sell':self.year_sell,
'month_sell':self.month_sell,
'day_sell':self.day_sell,
'long_flag':int(self.long_flag),
'price_buy':Decimal(self.price_buy),
'price_sell':Decimal(self.price_sell),
'shares_buy':int(self.shares_buy),
'shares_sell':int(self.shares_sell),
'commission_buy':Decimal(self.commission_buy),
'commission_sell':Decimal(self.commission_sell),
'tax_buy':Decimal(self.tax_buy),
'tax_sell':Decimal(self.tax_sell),
'risk_input':Decimal(self.risk_input),
'risk_input_percent':Decimal(self.risk_input_percent),
'risk_initial':Decimal(self.risk_initial),
'risk_initial_percent':Decimal(self.risk_initial_percent),
'risk_actual':Decimal(self.risk_actual),
'risk_actual_percent':Decimal(self.risk_actual_percent),
'cost_total':Decimal(self.cost_total),
'cost_other':Decimal(self.cost_other),
'amount_buy_simple':Decimal(self.amount_buy_simple),
'amount_sell_simple':Decimal(self.amount_sell_simple),
'stoploss':Decimal(self.stoploss),
'profit_loss':Decimal(self.profit_loss),
'profit_loss_percent':Decimal(self.profit_loss_percent),
'r_multiple':Decimal(self.r_multiple),
'win_flag':int(self.win_flag),
'id_buy':int(self.id_buy),
'id_sell':int(self.id_sell),
'currency_exchange_id':int(self.currency_exchange_id),
'drawdown_id':int(self.drawdown_id),
'pool_at_start':Decimal(self.pool_at_start),
'date_expiration':self.date_expiration,
'expired_flag':self.expired_flag,
'active':1,
'date_created':self.date_created,
'date_modified':self.date_modified
},
self.flag_insupdel
)
def print_test_info(self):
"""
Print test info.
"""
print('<print>')
print('market_id =', self.market_id)
print('commodity_name_id =', self.commodity_name_id)
print('date_buy =', self.date_buy)
print('date_sell =', self.date_sell)
print('long_flag =', self.long_flag)
print('price_buy =', self.price_buy)
print('price_sell =', self.price_sell)
print('risk_input =', self.risk_input)
print('risk_input_percent =', self.risk_input_percent)
print('risk_initial =', self.risk_initial)
print('risk_initial_percent =', self.risk_initial_percent)
print('risk_actual =', self.risk_actual)
print('risk_actual_percent =', self.risk_actual_percent)
print('cost_total =', self.cost_total)
print('cost_other =', self.cost_other)
print('amount_buy_simple =', self.amount_buy_simple)
print('amount_sell_simple =', self.amount_sell_simple)
print('stoploss =', self.stoploss)
print('profit_loss =', self.profit_loss)
print('profit_loss_percent =', self.profit_loss_percent)
print('r_multiple =', self.r_multiple)
print('win_flag =', self.win_flag)
print('id_buy =', self.id_buy)
print('id_sell =', self.id_sell)
print('currency_exchange_id =', self.currency_exchange_id)
print('drawdown_id =', self.drawdown_id)
print('pool_at_start =', self.pool_at_start)
print('date_expiration =', self.date_expiration)
print('expired_flag =', self.expired_flag)
print('</print>')
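# Added illustration (comments only, hypothetical numbers) of the percentage
# formulas used above, assuming calculate_amount_simple(price, quantity) is
# simply price * quantity: with price_buy = 10.00 and shares_buy = 100 the
# simple buy amount is 1000.00, and a risk_initial of 25.00 gives
# risk_initial_percent = Decimal(100.0) * risk_initial / amount_buy_simple = 2.5;
# risk_actual_percent is derived the same way from risk_actual.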
Still trying to fix the trade module.
#! /usr/local/bin/python
"""
See LICENSE file for copyright and license details.
"""
from datetime import datetime
from database.databaseaccess import DatabaseAccess
from modules.core_module import CoreModule
from modules.statement import Statement
from modules.constant import *
from modules.function import *
from generic.modules.function import *
from database.mappings import T_TRADE
import generic.modules.calculator_finance as calc
class Trade(CoreModule):
"""
Trade class.
"""
def __init__(self, config):
"""
Initialisation
"""
self.config = config
self.statement_trade = Statement(Table.TRADE)
self.flag_insupdel = StatementType.INSERT
self.trade_id = DEFAULT_INT
self.market_id = DEFAULT_INT
self.commodity_name_id = DEFAULT_INT
self.date_buy = DEFAULT_DATE
self.year_buy = DEFAULT_INT
self.month_buy = DEFAULT_INT
self.day_buy = DEFAULT_INT
self.date_sell = DEFAULT_DATE
self.year_sell = DEFAULT_INT
self.month_sell = DEFAULT_INT
self.day_sell = DEFAULT_INT
self.long_flag = DEFAULT_INT
self.price_buy = DEFAULT_DECIMAL
self.price_sell = DEFAULT_DECIMAL
self.shares_buy = DEFAULT_DECIMAL
self.shares_sell = DEFAULT_DECIMAL
self.commission_buy = DEFAULT_DECIMAL
self.commission_sell = DEFAULT_DECIMAL
self.tax_buy = DEFAULT_DECIMAL
self.tax_sell = DEFAULT_DECIMAL
self.risk_input = DEFAULT_DECIMAL
self.risk_input_percent = DEFAULT_DECIMAL
self.risk_initial = DEFAULT_DECIMAL
self.risk_initial_percent = DEFAULT_DECIMAL
self.risk_actual = DEFAULT_DECIMAL
self.risk_actual_percent = DEFAULT_DECIMAL
self.cost_total = DEFAULT_DECIMAL
self.cost_other = DEFAULT_DECIMAL
self.amount_buy_simple = DEFAULT_DECIMAL
self.amount_sell_simple = DEFAULT_DECIMAL
self.stoploss = DEFAULT_DECIMAL
self.profit_loss = DEFAULT_DECIMAL
self.profit_loss_percent = DEFAULT_DECIMAL
self.r_multiple = DEFAULT_DECIMAL
self.win_flag = DEFAULT_DECIMAL
self.id_buy = DEFAULT_INT
self.id_sell = DEFAULT_INT
self.currency_exchange_id = DEFAULT_INT
self.drawdown_id = DEFAULT_INT
self.pool_at_start = DEFAULT_DECIMAL
self.date_expiration = DEFAULT_DATE
self.expired_flag = DEFAULT_INT
self.active = DEFAULT_INT
self.date_created = DEFAULT_DATE
self.date_modified = DEFAULT_DATE
self.trade_record = []
def create_statements(self, input_fields, statements_finance):
"""
Creates the records needed for Table.TRADE and returns them as a
Statement object.
"""
#NOTE: price_buy will be fields['i_amount']
#When we buy more, it will be overwritten!
#Trading without adding to positions is assumed by this code!
try:
dba = DatabaseAccess(self.config)
self.date_created = current_date()
self.date_modified = current_date()
records = 0
self.finance_id = dba.first_finance_id_from_latest()
if self.finance_id != -1:
for fields in input_fields:
if deals_with_commodities(
fields[Input.ACCOUNT_FROM]
, fields[Input.ACCOUNT_TO]):
#TODO: indent the below code appropriately.
records = records + 1
# GENERAL INFO AT START
self.general_info_at_start(dba, fields)
# UPDATE/INSERT
if dba.invade_already_started(self.market_id,
self.commodity_name_id, T_TRADE):
self.update_info(dba, fields, self.trade_record)
else:
self.insert_info(dba, fields, self.trade_record)
# GENERAL VARIABLES THAT CAN BE CALCULATED ON THE DATA WE HAVE
self.general_info_at_end(fields, self.trade_record)
# TEST INFO
self.print_test_info()
# ADDING THE STATEMENTS
self.add_to_statement(records)
self.finance_id = self.finance_id + 1
return self.statement_trade
except Exception as ex:
print(Error.CREATE_STATEMENTS_TABLE_TRADE, ex)
finally:
dba = None
def general_info_at_start(self, dba, fields):
"""
General info at the start of the trade.
"""
try:
self.market_id = dba.market_id_from_market(
fields[Input.MARKET_CODE])
self.commodity_name_id = dba.commodity_name_id_from_commodity_name(
fields[Input.COMMODITY_NAME], self.market_id)
self.finance_record = dba.get_finance_record(self.finance_id)
self.trade_record = dba.get_invade_record(self.finance_id, T_TRADE)
self.long_flag = dba.get_long_flag_value(fields[Input.ACCOUNT_FROM],
fields[Input.ACCOUNT_TO], self.trade_record)
# TEST INFO
print('test finance_record=', self.finance_record)
print('test trade_record=', self.trade_record)
print('test: long_flag =', self.long_flag)
#print library_test()
except Exception as ex:
print(Error.CREATE_STATEMENTS_TABLE_TRADE, ex)
def update_info(self, dba, fields, trade_record):
"""
Update info.
"""
#NOTE: Correct way of updating = Supplier.query.filter(<your stuff here, or user filter_by, or whatever is in your where clause>).update(values)
#e.g.: session.query(Supplier).filter_by(id=2).update({"name": u"Mayowa"})
#TABLE_TRADE.query.filter(market_name=...,commodity_name=...).update({"date_...": date_... etc.})
try:
self.flag_insupdel = StatementType.UPDATE
trade_id = trade_record['trade_id']
## buy/sell related fields
if (we_are_buying(fields[Input.ACCOUNT_FROM], fields[Input.ACCOUNT_TO])
and T_TRADE.id_buy == -1):
id_buy = self.finance_id
id_sell = trade_record['id_sell']
date_buy = self.date_created
date_sell = trade_record['date_sell']
price_buy = abs(fields[Input.PRICE])
price_sell = abs(trade_record['price_sell'])
shares_buy = fields[Input.QUANTITY]
shares_sell = trade_record['shares_sell']
commission_buy = fields[Input.COMMISSION]
commission_sell = trade_record['commission_sell']
tax_buy = fields[Input.TAX]
tax_sell = trade_record['tax_sell']
elif (not we_are_buying(fields[Input.ACCOUNT_FROM], fields[Input.ACCOUNT_TO])
and T_TRADE.id_sell == -1):
id_buy = trade_record['id_buy']
id_sell = self.finance_id
date_buy = trade_record['date_buy']
date_sell = self.date_created
price_buy = abs(trade_record['price_buy'])
price_sell = abs(fields[Input.PRICE])
shares_buy = trade_record['shares_buy']
shares_sell = fields[Input.QUANTITY]
commission_buy = trade_record['commission_buy']
commission_sell = fields[Input.COMMISSION]
tax_buy = trade_record['tax_buy']
tax_sell = fields[Input.TAX]
else:
raise Exception(
"{0} already contains a sell or buy record" \
" and you are trying to add one like it" \
" again?".format(T_TRADE))
stoploss = trade_record['stoploss']
profit_loss = calc.calculate_profit_loss(
trade_record['amount_sell'],
trade_record['amount_buy'])
pool_at_start = trade_record['pool_at_start']
self.date_created = trade_record['date_created']
amount_buy_simple = trade_record['amount_buy_simple']
amount_sell_simple = calc.calculate_amount_simple(
Decimal(fields[Input.PRICE])
, Decimal(fields[Input.QUANTITY]))
risk_input = trade_record['risk_input']
risk_input_percent = trade_record['risk_input_percent']
risk_initial = trade_record['risk_initial']
risk_initial_percent = (risk_initial/amount_buy_simple)*Decimal(100.0)
risk_actual = calc.calculate_risk_actual(
trade_record['price_buy'],
trade_record['shares_buy'],
trade_record['price_sell'],
trade_record['shares_sell'],
trade_record['stoploss'],
trade_record['risk_initial'])
risk_actual_percent = (risk_actual/amount_buy_simple)*Decimal(100.0)
cost_total = calc.calculate_cost_total(
trade_record['tax_buy'],
trade_record['commission_buy'],
trade_record['tax_sell'],
trade_record['commission_sell'])
cost_other = calc.calculate_cost_other(
cost_total,
profit_loss)
if we_are_buying(fields[Input.ACCOUNT_FROM], fields[Input.ACCOUNT_TO]):
win_flag = dba.get_win_flag_value(
price_buy,
trade_record['price_sell'],
self.long_flag)
else:
win_flag = dba.get_win_flag_value(
trade_record['price_buy'],
price_sell,
self.long_flag)
currency_exchange_id = trade_record['currency_exchange_id']
drawdown_id = trade_record['drawdown_id']
r_multiple = calc.calculate_r_multiple(
trade_record['price_buy'],
trade_record['price_sell'],
trade_record['price_stoploss'])
date_expiration = trade_record['date_expiration']
#TODO: for investing, id_buy/sell is id_firstbuy and id_firstsell
# and expiration flag should only be set at the end of the trade, when
# the trade is closed. This means that date_buy and date_sell is not
# enough to determine if a trade is closed or not. The total shares
# should also be 0 when added up OR shares_buy = shares_sell.
# So add:
#if trade_closed: (or something like that)
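# One possible shape for that check (a sketch of what the TODO describes, not
# part of the original logic; names assumed from this method's locals):
#   trade_closed = (shares_buy == shares_sell) and shares_buy > 0
#   if trade_closed:
#       expired_flag = (1 if date_sell > date_expiration else 0)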
expired_flag = (1 if date_sell > date_expiration else 0)
except Exception as ex:
print(Error.CREATE_STATEMENTS_TABLE_TRADE, ex)
def insert_info(self, dba, fields, trade_record):
"""
Insert info.
"""
try:
self.flag_insupdel = StatementType.INSERT
trade_id = None # insert: new one created automatically
## buy/sell related fields
if we_are_buying(fields[Input.ACCOUNT_FROM], fields[Input.ACCOUNT_TO]):
id_buy = self.finance_id
id_sell = -1
date_buy = self.date_created
date_sell = string_to_date(DEFAULT_DATE)
price_buy = abs(fields[Input.PRICE])
price_sell = DEFAULT_DECIMAL
shares_buy = fields[Input.QUANTITY]
shares_sell = DEFAULT_INT
#TODO: commission and tax from T_RATE, when fields[Input.AUTOMATIC_FLAG] is 1
commission_buy = fields[Input.COMMISSION]
commission_sell = DEFAULT_DECIMAL
tax_buy = fields[Input.TAX]
tax_sell = DEFAULT_DECIMAL
else:
id_buy = -1
id_sell = self.finance_id
date_sell = self.date_created
date_buy = string_to_date(DEFAULT_DATE)
price_buy = DEFAULT_DECIMAL
price_sell = abs(fields[Input.PRICE])
shares_buy = DEFAULT_INT
shares_sell = fields[Input.QUANTITY]
commission_buy = DEFAULT_DECIMAL
# TODO: commission and tax from T_RATE (see also higher)
commission_sell = fields[Input.COMMISSION]
tax_buy = DEFAULT_DECIMAL
tax_sell = fields[Input.TAX]
stoploss = calc.calculate_stoploss(
abs(fields[Input.PRICE]),
fields[Input.QUANTITY],
fields[Input.TAX],
fields[Input.COMMISSION],
fields[Input.RISK],
fields[Input.POOL])
profit_loss = DEFAULT_DECIMAL #Only calculated at end of trade.
pool_at_start = fields[Input.POOL]
amount_buy_simple = calc.calculate_amount_simple(
Decimal(fields[Input.PRICE])
, Decimal(fields[Input.QUANTITY]))
amount_sell_simple = DEFAULT_DECIMAL
risk_input = calc.calculate_risk_input(
fields[Input.POOL],
fields[Input.RISK])
risk_input_percent = fields[Input.RISK]
risk_initial = calc.calculate_risk_initial(
fields[Input.PRICE],
fields[Input.QUANTITY],
stoploss)
risk_initial_percent = Decimal(100.0)*risk_initial/amount_buy_simple
risk_actual = DEFAULT_DECIMAL
risk_actual_percent = DEFAULT_DECIMAL
cost_total = DEFAULT_DECIMAL
cost_other = DEFAULT_DECIMAL
win_flag = -1 #not yet finished, we can not know it yet.
currency_exchange_id = dba.first_currency_exchange_id_from_latest()
drawdown_id = dba.new_drawdown_record()
r_multiple = DEFAULT_DECIMAL
date_expiration = fields[Input.DATE_EXPIRATION]
expired_flag = DEFAULT_INT
except Exception as ex:
print(Error.CREATE_STATEMENTS_TABLE_TRADE, ex)
def general_info_at_end(self, fields, trade_record):
"""
General info at the end of the trade.
"""
try:
self.profit_loss_percent = self.profit_loss/Decimal(100.0)
self.year_buy = self.date_buy.year
self.month_buy = self.date_buy.month
self.day_buy = self.date_buy.day
self.year_sell = self.date_sell.year
self.month_sell = self.date_sell.month
self.day_sell = self.date_sell.day
except Exception as ex:
print(Error.CREATE_STATEMENTS_TABLE_TRADE, ex)
def add_to_statement(self, records):
"""
Add the data to the statement list.
"""
self.statement_trade.add(
records,
{
'trade_id':self.trade_id,
'market_id':int(self.market_id),
'commodity_name_id':int(self.commodity_name_id),
'date_buy':self.date_buy,
'year_buy':self.year_buy,
'month_buy':self.month_buy,
'day_buy':self.day_buy,
'date_sell':self.date_sell,
'year_sell':self.year_sell,
'month_sell':self.month_sell,
'day_sell':self.day_sell,
'long_flag':int(self.long_flag),
'price_buy':Decimal(self.price_buy),
'price_sell':Decimal(self.price_sell),
'shares_buy':int(self.shares_buy),
'shares_sell':int(self.shares_sell),
'commission_buy':Decimal(self.commission_buy),
'commission_sell':Decimal(self.commission_sell),
'tax_buy':Decimal(self.tax_buy),
'tax_sell':Decimal(self.tax_sell),
'risk_input':Decimal(self.risk_input),
'risk_input_percent':Decimal(self.risk_input_percent),
'risk_initial':Decimal(self.risk_initial),
'risk_initial_percent':Decimal(self.risk_initial_percent),
'risk_actual':Decimal(self.risk_actual),
'risk_actual_percent':Decimal(self.risk_actual_percent),
'cost_total':Decimal(self.cost_total),
'cost_other':Decimal(self.cost_other),
'amount_buy_simple':Decimal(self.amount_buy_simple),
'amount_sell_simple':Decimal(self.amount_sell_simple),
'stoploss':Decimal(self.stoploss),
'profit_loss':Decimal(self.profit_loss),
'profit_loss_percent':Decimal(self.profit_loss_percent),
'r_multiple':Decimal(self.r_multiple),
'win_flag':int(self.win_flag),
'id_buy':int(self.id_buy),
'id_sell':int(self.id_sell),
'currency_exchange_id':int(self.currency_exchange_id),
'drawdown_id':int(self.drawdown_id),
'pool_at_start':Decimal(self.pool_at_start),
'date_expiration':self.date_expiration,
'expired_flag':self.expired_flag,
'active':1,
'date_created':self.date_created,
'date_modified':self.date_modified
},
self.flag_insupdel
)
def print_test_info(self):
"""
Print test info.
"""
print('<print>')
print('market_id =', self.market_id)
print('commodity_name_id =', self.commodity_name_id)
print('date_buy =', self.date_buy)
print('date_sell =', self.date_sell)
print('long_flag =', self.long_flag)
print('price_buy =', self.price_buy)
print('price_sell =', self.price_sell)
print('risk_input =', self.risk_input)
print('risk_input_percent =', self.risk_input_percent)
print('risk_initial =', self.risk_initial)
print('risk_initial_percent =', self.risk_initial_percent)
print('risk_actual =', self.risk_actual)
print('risk_actual_percent =', self.risk_actual_percent)
print('cost_total =', self.cost_total)
print('cost_other =', self.cost_other)
print('amount_buy_simple =', self.amount_buy_simple)
print('amount_sell_simple =', self.amount_sell_simple)
print('stoploss =', self.stoploss)
print('profit_loss =', self.profit_loss)
print('profit_loss_percent =', self.profit_loss_percent)
print('r_multiple =', self.r_multiple)
print('win_flag =', self.win_flag)
print('id_buy =', self.id_buy)
print('id_sell =', self.id_sell)
print('currency_exchange_id =', self.currency_exchange_id)
print('drawdown_id =', self.drawdown_id)
print('pool_at_start =', self.pool_at_start)
print('date_expiration =', self.date_expiration)
print('expired_flag =', self.expired_flag)
print('</print>')
|
import glob
import logging
import os
import subprocess
from plugins import BaseAssembler
from yapsy.IPlugin import IPlugin
class KikiAssembler(BaseAssembler, IPlugin):
new_version = True
def run(self, reads=None):
### Run Kiki Assembler
self.arast_popen([self.executable, '-k', self.k, '-i'] + self.data.readfiles + ['-o', self.outpath + '/kiki'])
### Find Contig Files
contigs = glob.glob(self.outpath + '/*.contig')
contigs_renamed = [contig + '.fa' for contig in contigs]
### Convert to standard FastA
for i in range(len(contigs)):
self.tab_to_fasta(contigs[i], contigs_renamed[i], self.contig_threshold)
return {'contigs': contigs_renamed,
'scaffolds': contigs_renamed,
'some_stat': 50}
def tab_to_fasta(self, tabbed_file, outfile, threshold):
""" Converter for Kiki format """
tabbed = open(tabbed_file, 'r')
fasta = open(outfile, 'w')
prefixes = ['>_', ' len_', ' cov_', ' stdev_', ' GC_', ' seed_', '\n']
for line in tabbed:
l = line.split('\t')
if int(l[1]) >= int(threshold):
for i in range(len(l)):
fasta.write(prefixes[i] + l[i])
tabbed.close()
fasta.close()
clean up
import glob
import logging
import os
import subprocess
from plugins import BaseAssembler
from yapsy.IPlugin import IPlugin
class KikiAssembler(BaseAssembler, IPlugin):
new_version = True
def run(self, reads=None):
### Run Kiki Assembler
self.arast_popen([self.executable, '-k', self.k, '-i'] + self.data.readfiles + ['-o', self.outpath + '/kiki'])
### Find Contig Files
contigs = glob.glob(self.outpath + '/*.contig')
contigs_renamed = [contig + '.fa' for contig in contigs]
### Convert to standard FastA
for i in range(len(contigs)):
self.tab_to_fasta(contigs[i], contigs_renamed[i], self.contig_threshold)
return {'contigs': contigs_renamed}
def tab_to_fasta(self, tabbed_file, outfile, threshold):
""" Converter for Kiki format """
tabbed = open(tabbed_file, 'r')
fasta = open(outfile, 'w')
prefixes = ['>_', ' len_', ' cov_', ' stdev_', ' GC_', ' seed_', '\n']
for line in tabbed:
l = line.split('\t')
if int(l[1]) >= int(threshold):
for i in range(len(l)):
fasta.write(prefixes[i] + l[i])
tabbed.close()
fasta.close()
|
"""
TWLight email sending.
TWLight generates and sends emails using https://bameda.github.io/djmail/ .
Any view that wishes to send an email should do so using a task defined here.
Templates for these emails are available in emails/templates/emails. djmail
will look for files named {{ name }}-body-html.html, {{ name }}-body-text.html,
and {{ name }}-subject.html, where {{ name }} is the name attribute of the
TemplateMail subclass.
Email templates are normal Django templates. This means two important things:
1) They can be rendered with context;
2) They can use {% trans %} and {% blocktrans %}. In fact, they _should_, to
support internationalization.
Add a 'lang' attribute to the context passed into TemplateMail in order to
specify which language to render the template in.
There is no need to faff about with Celery in this file. djmail will decide
whether to send synchronously or asynchronously based on the value of
settings.DJMAIL_REAL_BACKEND.
"""
from djmail import template_mail
import logging
from django_comments.models import Comment
from django_comments.signals import comment_was_posted
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse_lazy
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.shortcuts import get_object_or_404
from TWLight.applications.models import Application
from TWLight.resources.models import Partner
logger = logging.getLogger(__name__)
# COMMENT NOTIFICATION
# ------------------------------------------------------------------------------
class CommentNotificationEmailEditors(template_mail.TemplateMail):
name = 'comment_notification_editors'
class CommentNotificationEmailOthers(template_mail.TemplateMail):
name = 'comment_notification_others'
class ApprovalNotification(template_mail.TemplateMail):
name = 'approval_notification'
class WaitlistNotification(template_mail.TemplateMail):
name = 'waitlist_notification'
class RejectionNotification(template_mail.TemplateMail):
name = 'rejection_notification'
@receiver(comment_was_posted)
def send_comment_notification_emails(sender, **kwargs):
"""
Any time a comment is posted on an application, this sends email to the
application owner and anyone else who previously commented.
"""
current_comment = kwargs['comment']
app = current_comment.content_object
assert isinstance(app, Application)
logger.info('Received comment signal on app number {app.pk}; preparing '
'to send notification emails'.format(app=app))
if 'request' in kwargs:
# This is the expected case; the comment_was_posted signal should send
# this.
request = kwargs['request']
else:
# But if there's no request somehow, get_current_site has a sensible
# default.
request = None
base_url = get_current_site(request).domain
logger.info('Site base_url is {base_url}'.format(base_url=base_url))
app_url = 'https://{base}{path}'.format(
base=base_url, path=app.get_absolute_url())
logger.info('app_url is {app_url}'.format(app_url=app_url))
# If the editor who owns this application was not the comment poster, notify
# them of the new comment.
if current_comment.user_email != app.editor.user.email:
if app.editor.user.email:
logger.info('we should notify the editor')
email = CommentNotificationEmailEditors()
logger.info('email constructed')
email.send(app.editor.user.email, {'app': app, 'app_url': app_url})
logger.info('Email queued for {app.editor.user.email} about '
'app #{app.pk}'.format(app=app))
# Send to any previous commenters on the thread, other than the editor and
# the person who left the comment just now.
all_comments = Comment.objects.filter(object_pk=app.pk,
content_type__model='application',
content_type__app_label='applications')
user_emails = set(
[comment.user.email for comment in all_comments]
)
user_emails.remove(current_comment.user_email)
try:
user_emails.remove(app.editor.user.email)
except KeyError:
# If the editor is not among the prior commenters, that's fine; no
# reason they should be.
pass
for user_email in user_emails:
if user_email:
email = CommentNotificationEmailOthers()
email.send(user_email, {'app': app, 'app_url': app_url})
logger.info('Email queued for {user_email} about app '
'#{app.pk}'.format(user_email=user_email, app=app))
def send_approval_notification_email(instance):
email = ApprovalNotification()
email.send(instance.user.email,
{'user': instance.user, 'partner': instance.partner})
def send_waitlist_notification_email(instance):
email = WaitlistNotification()
email.send(instance.user.email,
{'user': instance.user,
'partner': instance.partner,
'link': reverse_lazy('partners:list')})
def send_rejection_notification_email(instance):
base_url = get_current_site(None).domain
if instance.pk:
app_url = 'https://{base}{path}'.format(
base=base_url, path=instance.get_absolute_url())
else:
# If we are sending an email for a newly created instance, it won't have
# a pk, so instance.get_absolute_url() won't return successfully.
# This should lead to an imperfect but navigable user experience, given
# the text of the email - it won't take them straight to their app, but
# it will take them to a page *via which* they can perform the review
# steps described in the email template.
app_url = reverse_lazy('users:home')
email = RejectionNotification()
email.send(instance.user.email,
{'user': instance.user,
'partner': instance.partner,
'app_url': app_url})
@receiver(pre_save, sender=Application)
def update_app_status_on_save(sender, instance, **kwargs):
"""
If the Application's status has changed in a way that justifies sending
email, do so. Otherwise, do nothing.
"""
# Maps status indicators to the correct email handling function.
handlers = {
Application.APPROVED: send_approval_notification_email,
Application.NOT_APPROVED: send_rejection_notification_email,
# We can't use Partner.WAITLIST as the key, because that's actually an
# integer, and it happens to be the same as Application.APPROVED. If
# we're going to have a lot more keys on this list, we're going to have
# to start thinking about namespacing them.
'waitlist': send_waitlist_notification_email,
}
handler_key = None
# Case 1: Application already existed; status has been changed.
if instance.id:
orig_app = Application.objects.get(pk=instance.id)
orig_status = orig_app.status
if orig_status != instance.status:
handler_key = instance.status
# Case 2: Application was just created.
else:
# WAITLIST is a status adhering to Partner, not to Application. So
# to email editors when they apply to a waitlisted partner, we need
# to check Partner status on app submission.
if instance.partner.status == Partner.WAITLIST:
handler_key = 'waitlist'
else:
handler_key = instance.status
if handler_key:
try:
# Send email if it has an email-worthy status.
handlers[handler_key](instance)
except KeyError:
# This is probably okay - it probably means we were in case 2 above
# and the application was created with PENDING status. We'll only
# log the surprising cases.
if handler_key != Application.PENDING:
logger.exception('Email handler key was set to {handler_key}, '
'but no such handler exists'.format(handler_key=handler_key))
pass
@receiver(pre_save, sender=Partner)
def notify_applicants_when_waitlisted(sender, instance, **kwargs):
"""
When Partners are switched to WAITLIST status, anyone with open applications
should be notified.
"""
if instance.id:
orig_partner = get_object_or_404(Partner, pk=instance.id)
if ((orig_partner.status != instance.status) and
instance.status == Partner.WAITLIST):
for app in orig_partner.applications.filter(
status__in=[Application.PENDING, Application.QUESTION]):
send_waitlist_notification_email(app)
Don't try to send emails for applications created with a SENT status.
"""
TWLight email sending.
TWLight generates and sends emails using https://bameda.github.io/djmail/ .
Any view that wishes to send an email should do so using a task defined here.
Templates for these emails are available in emails/templates/emails. djmail
will look for files named {{ name }}-body-html.html, {{ name }}-body-text.html,
and {{ name }}-subject.html, where {{ name }} is the name attribute of the
TemplateMail subclass.
Email templates are normal Django templates. This means two important things:
1) They can be rendered with context;
2) They can use {% trans %} and {% blocktrans %}. In fact, they _should_, to
support internationalization.
Add a 'lang' attribute to the context passed into TemplateMail in order to
specify which language to render the template in.
There is no need to faff about with Celery in this file. djmail will decide
whether to send synchronously or asynchronously based on the value of
settings.DJMAIL_REAL_BACKEND.
"""
from djmail import template_mail
import logging
from django_comments.models import Comment
from django_comments.signals import comment_was_posted
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse_lazy
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.shortcuts import get_object_or_404
from TWLight.applications.models import Application
from TWLight.resources.models import Partner
logger = logging.getLogger(__name__)
# COMMENT NOTIFICATION
# ------------------------------------------------------------------------------
class CommentNotificationEmailEditors(template_mail.TemplateMail):
name = 'comment_notification_editors'
class CommentNotificationEmailOthers(template_mail.TemplateMail):
name = 'comment_notification_others'
class ApprovalNotification(template_mail.TemplateMail):
name = 'approval_notification'
class WaitlistNotification(template_mail.TemplateMail):
name = 'waitlist_notification'
class RejectionNotification(template_mail.TemplateMail):
name = 'rejection_notification'
@receiver(comment_was_posted)
def send_comment_notification_emails(sender, **kwargs):
"""
Any time a comment is posted on an application, this sends email to the
application owner and anyone else who previously commented.
"""
current_comment = kwargs['comment']
app = current_comment.content_object
assert isinstance(app, Application)
logger.info('Received comment signal on app number {app.pk}; preparing '
'to send notification emails'.format(app=app))
if 'request' in kwargs:
# This is the expected case; the comment_was_posted signal should send
# this.
request = kwargs['request']
else:
# But if there's no request somehow, get_current_site has a sensible
# default.
request = None
base_url = get_current_site(request).domain
logger.info('Site base_url is {base_url}'.format(base_url=base_url))
app_url = 'https://{base}{path}'.format(
base=base_url, path=app.get_absolute_url())
logger.info('app_url is {app_url}'.format(app_url=app_url))
# If the editor who owns this application was not the comment poster, notify
# them of the new comment.
if current_comment.user_email != app.editor.user.email:
if app.editor.user.email:
logger.info('we should notify the editor')
email = CommentNotificationEmailEditors()
logger.info('email constructed')
email.send(app.editor.user.email, {'app': app, 'app_url': app_url})
logger.info('Email queued for {app.editor.user.email} about '
'app #{app.pk}'.format(app=app))
# Send to any previous commenters on the thread, other than the editor and
# the person who left the comment just now.
all_comments = Comment.objects.filter(object_pk=app.pk,
content_type__model='application',
content_type__app_label='applications')
user_emails = set(
[comment.user.email for comment in all_comments]
)
user_emails.remove(current_comment.user_email)
try:
user_emails.remove(app.editor.user.email)
except KeyError:
# If the editor is not among the prior commenters, that's fine; no
# reason they should be.
pass
for user_email in user_emails:
if user_email:
email = CommentNotificationEmailOthers()
email.send(user_email, {'app': app, 'app_url': app_url})
logger.info('Email queued for {user_email} about app '
'#{app.pk}'.format(user_email=user_email, app=app))
def send_approval_notification_email(instance):
email = ApprovalNotification()
email.send(instance.user.email,
{'user': instance.user, 'partner': instance.partner})
def send_waitlist_notification_email(instance):
email = WaitlistNotification()
email.send(instance.user.email,
{'user': instance.user,
'partner': instance.partner,
'link': reverse_lazy('partners:list')})
def send_rejection_notification_email(instance):
base_url = get_current_site(None).domain
if instance.pk:
app_url = 'https://{base}{path}'.format(
base=base_url, path=instance.get_absolute_url())
else:
# If we are sending an email for a newly created instance, it won't have
# a pk, so instance.get_absolute_url() won't return successfully.
# This should lead to an imperfect but navigable user experience, given
# the text of the email - it won't take them straight to their app, but
# it will take them to a page *via which* they can perform the review
# steps described in the email template.
app_url = reverse_lazy('users:home')
email = RejectionNotification()
email.send(instance.user.email,
{'user': instance.user,
'partner': instance.partner,
'app_url': app_url})
@receiver(pre_save, sender=Application)
def update_app_status_on_save(sender, instance, **kwargs):
"""
If the Application's status has changed in a way that justifies sending
email, do so. Otherwise, do nothing.
"""
# Maps status indicators to the correct email handling function.
handlers = {
Application.APPROVED: send_approval_notification_email,
Application.NOT_APPROVED: send_rejection_notification_email,
# We can't use Partner.WAITLIST as the key, because that's actually an
# integer, and it happens to be the same as Application.APPROVED. If
# we're going to have a lot more keys on this list, we're going to have
# to start thinking about namespacing them.
'waitlist': send_waitlist_notification_email,
}
handler_key = None
# Case 1: Application already existed; status has been changed.
if instance.id:
orig_app = Application.objects.get(pk=instance.id)
orig_status = orig_app.status
if orig_status != instance.status:
handler_key = instance.status
# Case 2: Application was just created.
else:
# WAITLIST is a status adhering to Partner, not to Application. So
# to email editors when they apply to a waitlisted partner, we need
# to check Partner status on app submission.
# SENT is a post approval step that we don't need to send emails about.
if instance.partner.status == Partner.WAITLIST:
handler_key = 'waitlist'
elif instance.status == Application.SENT:
handler_key = None
else:
handler_key = instance.status
if handler_key:
try:
# Send email if it has an email-worthy status.
handlers[handler_key](instance)
except KeyError:
# This is probably okay - it probably means we were in case 2 above
# and the application was created with PENDING status. We'll only
# log the surprising cases.
if handler_key != Application.PENDING:
logger.exception('Email handler key was set to {handler_key}, '
'but no such handler exists'.format(handler_key=handler_key))
pass
@receiver(pre_save, sender=Partner)
def notify_applicants_when_waitlisted(sender, instance, **kwargs):
"""
When Partners are switched to WAITLIST status, anyone with open applications
should be notified.
"""
if instance.id:
orig_partner = get_object_or_404(Partner, pk=instance.id)
if ((orig_partner.status != instance.status) and
instance.status == Partner.WAITLIST):
for app in orig_partner.applications.filter(
status__in=[Application.PENDING, Application.QUESTION]):
send_waitlist_notification_email(app)
|
from mock import patch
from django_comments import get_form_target
from django_comments.models import Comment
from django_comments.signals import comment_was_posted
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from TWLight.applications.factories import ApplicationFactory
from TWLight.applications.models import Application
from TWLight.resources.factories import PartnerFactory
from TWLight.resources.models import Partner
from TWLight.resources.tests import EditorCraftRoom
from TWLight.users.factories import EditorFactory, UserFactory
from TWLight.users.groups import get_coordinators
# We need to import these in order to register the signal handlers; if we don't,
# when we test that those handler functions have been called, we will get
# False even when they work in real life.
from .tasks import (send_comment_notification_emails,
send_approval_notification_email,
send_rejection_notification_email)
class ApplicationCommentTest(TestCase):
def setUp(self):
super(ApplicationCommentTest, self).setUp()
self.editor = EditorFactory(user__email='editor@example.com').user
coordinators = get_coordinators()
self.coordinator1 = EditorFactory(user__email='c1@example.com',
user__username='c1').user
self.coordinator2 = EditorFactory(user__email='c2@example.com',
user__username='c2').user
coordinators.user_set.add(self.coordinator1)
coordinators.user_set.add(self.coordinator2)
self.partner = PartnerFactory()
def _create_comment(self, app, user):
CT = ContentType.objects.get_for_model
comm = Comment.objects.create(
content_type=CT(Application),
object_pk=app.pk,
user=user,
user_name=user.username,
user_email=user.email,
comment="Content!",
site=Site.objects.get_current(),
)
comm.save()
return comm
def _set_up_email_test_objects(self):
app = ApplicationFactory(editor=self.editor.editor,
partner=self.partner)
factory = RequestFactory()
request = factory.post(get_form_target())
return app, request
def test_comment_email_sending_1(self):
"""
A coordinator posts a comment to an Editor's application and an email
is sent to that Editor. An email is not sent to the coordinator.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
comment1 = self._create_comment(app, self.coordinator1)
comment_was_posted.send(
sender=Comment,
comment=comment1,
request=request
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.editor.email])
def test_comment_email_sending_2(self):
"""
After a coordinator posts a comment, the Editor posts an additional
comment. An email is sent to the coordinator who posted the earlier
comment. An email is not sent to the editor.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
_ = self._create_comment(app, self.coordinator1)
comment2 = self._create_comment(app, self.editor)
comment_was_posted.send(
sender=Comment,
comment=comment2,
request=request
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.coordinator1.email])
def test_comment_email_sending_3(self):
"""
After the editor and coordinator post a comment, an additional
coordinator posts a comment. One email is sent to the first coordinator,
and a distinct email is sent to the editor.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
_ = self._create_comment(app, self.coordinator1)
_ = self._create_comment(app, self.editor)
comment3 = self._create_comment(app, self.coordinator2)
comment_was_posted.send(
sender=Comment,
comment=comment3,
request=request
)
self.assertEqual(len(mail.outbox), 2)
# Either order of email sending is fine.
try:
self.assertEqual(mail.outbox[0].to, [self.coordinator1.email])
self.assertEqual(mail.outbox[1].to, [self.editor.email])
except AssertionError:
self.assertEqual(mail.outbox[1].to, [self.coordinator1.email])
self.assertEqual(mail.outbox[0].to, [self.editor.email])
def test_comment_email_sending_4(self):
"""
A comment made on an application that's any further along the process
than PENDING (i.e. a coordinator has taken some action on it) should
fire an email to the coordinator who took the last action on it.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
# Create a coordinator with a test client session
coordinator = EditorCraftRoom(self, Terms=True, Coordinator=True)
self.partner.coordinator = coordinator.user
self.partner.save()
# Take a coordinator action on the application (set status to QUESTION)
url = reverse('applications:evaluate',
kwargs={'pk': app.pk})
response = self.client.post(url,
data={'status': Application.QUESTION},
follow=True)
comment4 = self._create_comment(app, self.editor)
comment_was_posted.send(
sender=Comment,
comment=comment4,
request=request
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [coordinator.user.email])
def test_comment_email_sending_5(self):
"""
A comment from the applying editor made on an application that
has had no actions taken on it and no existing comments should
not fire an email to anyone.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
comment5 = self._create_comment(app, self.editor)
comment_was_posted.send(
sender=Comment,
comment=comment5,
request=request
)
self.assertEqual(len(mail.outbox), 0)
# We'd like to mock out send_comment_notification_emails and test that
# it is called when comment_was_posted is fired, but we can't; the signal
# handler is attached to the real send_comment_notification_emails, not
# the mocked one.
class ApplicationStatusTest(TestCase):
@patch('TWLight.emails.tasks.send_approval_notification_email')
def test_approval_calls_email_function(self, mock_email):
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.APPROVED
app.save()
self.assertTrue(mock_email.called)
@patch('TWLight.emails.tasks.send_approval_notification_email')
def test_reapproval_does_not_call_email_function(self, mock_email):
"""
Saving an Application with APPROVED status, when it already had an
APPROVED status, should not re-send the email.
"""
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.APPROVED
app.save()
app.save()
self.assertEqual(mock_email.call_count, 1)
@patch('TWLight.emails.tasks.send_rejection_notification_email')
def test_rejection_calls_email_function(self, mock_email):
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.NOT_APPROVED
app.save()
self.assertTrue(mock_email.called)
@patch('TWLight.emails.tasks.send_rejection_notification_email')
def test_rerejection_does_not_call_email_function(self, mock_email):
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.NOT_APPROVED
app.save()
app.save()
self.assertEqual(mock_email.call_count, 1)
def test_pending_does_not_call_email_function(self):
"""
Applications saved with a PENDING status should not generate email.
"""
orig_outbox = len(mail.outbox)
_ = ApplicationFactory(status=Application.PENDING)
self.assertEqual(len(mail.outbox), orig_outbox)
def test_question_does_not_call_email_function(self):
"""
Applications saved with a QUESTION status should not generate email.
"""
orig_outbox = len(mail.outbox)
_ = ApplicationFactory(status=Application.QUESTION)
self.assertEqual(len(mail.outbox), orig_outbox)
def test_sent_does_not_call_email_function(self):
"""
Applications saved with a SENT status should not generate email.
"""
orig_outbox = len(mail.outbox)
_ = ApplicationFactory(status=Application.SENT)
self.assertEqual(len(mail.outbox), orig_outbox)
@patch('TWLight.emails.tasks.send_waitlist_notification_email')
def test_waitlist_calls_email_function(self, mock_email):
partner = PartnerFactory(status=Partner.WAITLIST)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertTrue(mock_email.called)
partner.delete()
app.delete()
@patch('TWLight.emails.tasks.send_waitlist_notification_email')
def test_nonwaitlist_does_not_call_email_function(self, mock_email):
partner = PartnerFactory(status=Partner.AVAILABLE)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertFalse(mock_email.called)
partner.delete()
app.delete()
partner = PartnerFactory(status=Partner.NOT_AVAILABLE)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertFalse(mock_email.called)
partner.delete()
app.delete()
@patch('TWLight.emails.tasks.send_waitlist_notification_email')
def test_waitlisting_partner_calls_email_function(self, mock_email):
"""
Switching a Partner to WAITLIST status should call the email function
for apps to that partner with open statuses.
"""
partner = PartnerFactory(status=Partner.AVAILABLE)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertFalse(mock_email.called)
partner.status = Partner.WAITLIST
partner.save()
self.assertTrue(mock_email.called)
mock_email.assert_called_with(app)
@patch('TWLight.emails.tasks.send_waitlist_notification_email')
def test_waitlisting_partner_does_not_call_email_function(self, mock_email):
"""
Switching a Partner to WAITLIST status should NOT call the email
function for apps to that partner with closed statuses.
"""
partner = PartnerFactory(status=Partner.AVAILABLE)
app = ApplicationFactory(status=Application.APPROVED, partner=partner)
app = ApplicationFactory(status=Application.NOT_APPROVED, partner=partner)
app = ApplicationFactory(status=Application.SENT, partner=partner)
self.assertFalse(mock_email.called)
partner.status = Partner.WAITLIST
partner.save()
self.assertFalse(mock_email.called)
test coverage for contact us
from mock import patch
from django_comments import get_form_target
from django_comments.models import Comment
from django_comments.signals import comment_was_posted
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from TWLight.applications.factories import ApplicationFactory
from TWLight.applications.models import Application
from TWLight.resources.factories import PartnerFactory
from TWLight.resources.models import Partner
from TWLight.resources.tests import EditorCraftRoom
from TWLight.users.factories import EditorFactory, UserFactory
from TWLight.users.groups import get_coordinators
# We need to import these in order to register the signal handlers; if we don't,
# when we test that those handler functions have been called, we will get
# False even when they work in real life.
from .tasks import (send_comment_notification_emails,
send_approval_notification_email,
send_rejection_notification_email,
contact_us_emails)
from .tasks import ContactUsEmail
class ApplicationCommentTest(TestCase):
def setUp(self):
super(ApplicationCommentTest, self).setUp()
self.editor = EditorFactory(user__email='editor@example.com').user
coordinators = get_coordinators()
self.coordinator1 = EditorFactory(user__email='c1@example.com',
user__username='c1').user
self.coordinator2 = EditorFactory(user__email='c2@example.com',
user__username='c2').user
coordinators.user_set.add(self.coordinator1)
coordinators.user_set.add(self.coordinator2)
self.partner = PartnerFactory()
def _create_comment(self, app, user):
CT = ContentType.objects.get_for_model
comm = Comment.objects.create(
content_type=CT(Application),
object_pk=app.pk,
user=user,
user_name=user.username,
user_email=user.email,
comment="Content!",
site=Site.objects.get_current(),
)
comm.save()
return comm
def _set_up_email_test_objects(self):
app = ApplicationFactory(editor=self.editor.editor,
partner=self.partner)
factory = RequestFactory()
request = factory.post(get_form_target())
return app, request
def test_comment_email_sending_1(self):
"""
A coordinator posts a comment to an Editor's application and an email
is sent to that Editor. An email is not sent to the coordinator.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
comment1 = self._create_comment(app, self.coordinator1)
comment_was_posted.send(
sender=Comment,
comment=comment1,
request=request
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.editor.email])
def test_comment_email_sending_2(self):
"""
After a coordinator posts a comment, the Editor posts an additional
comment. An email is sent to the coordinator who posted the earlier
comment. An email is not sent to the editor.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
_ = self._create_comment(app, self.coordinator1)
comment2 = self._create_comment(app, self.editor)
comment_was_posted.send(
sender=Comment,
comment=comment2,
request=request
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.coordinator1.email])
def test_comment_email_sending_3(self):
"""
After the editor and coordinator post a comment, an additional
coordinator posts a comment. One email is sent to the first coordinator,
and a distinct email is sent to the editor.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
_ = self._create_comment(app, self.coordinator1)
_ = self._create_comment(app, self.editor)
comment3 = self._create_comment(app, self.coordinator2)
comment_was_posted.send(
sender=Comment,
comment=comment3,
request=request
)
self.assertEqual(len(mail.outbox), 2)
# Either order of email sending is fine.
try:
self.assertEqual(mail.outbox[0].to, [self.coordinator1.email])
self.assertEqual(mail.outbox[1].to, [self.editor.email])
except AssertionError:
self.assertEqual(mail.outbox[1].to, [self.coordinator1.email])
self.assertEqual(mail.outbox[0].to, [self.editor.email])
def test_comment_email_sending_4(self):
"""
A comment made on an application that's any further along the process
than PENDING (i.e. a coordinator has taken some action on it) should
fire an email to the coordinator who took the last action on it.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
# Create a coordinator with a test client session
coordinator = EditorCraftRoom(self, Terms=True, Coordinator=True)
self.partner.coordinator = coordinator.user
self.partner.save()
# Take a coordinator action on the application (set status to QUESTION)
url = reverse('applications:evaluate',
kwargs={'pk': app.pk})
response = self.client.post(url,
data={'status': Application.QUESTION},
follow=True)
comment4 = self._create_comment(app, self.editor)
comment_was_posted.send(
sender=Comment,
comment=comment4,
request=request
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [coordinator.user.email])
def test_comment_email_sending_5(self):
"""
A comment from the applying editor made on an application that
has had no actions taken on it and no existing comments should
not fire an email to anyone.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
comment5 = self._create_comment(app, self.editor)
comment_was_posted.send(
sender=Comment,
comment=comment5,
request=request
)
self.assertEqual(len(mail.outbox), 0)
# We'd like to mock out send_comment_notification_emails and test that
# it is called when comment_was_posted is fired, but we can't; the signal
# handler is attached to the real send_comment_notification_emails, not
# the mocked one.
class ApplicationStatusTest(TestCase):
@patch('TWLight.emails.tasks.send_approval_notification_email')
def test_approval_calls_email_function(self, mock_email):
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.APPROVED
app.save()
self.assertTrue(mock_email.called)
@patch('TWLight.emails.tasks.send_approval_notification_email')
def test_reapproval_does_not_call_email_function(self, mock_email):
"""
Saving an Application with APPROVED status, when it already had an
APPROVED status, should not re-send the email.
"""
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.APPROVED
app.save()
app.save()
self.assertEqual(mock_email.call_count, 1)
@patch('TWLight.emails.tasks.send_rejection_notification_email')
def test_rejection_calls_email_function(self, mock_email):
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.NOT_APPROVED
app.save()
self.assertTrue(mock_email.called)
@patch('TWLight.emails.tasks.send_rejection_notification_email')
def test_rerejection_does_not_call_email_function(self, mock_email):
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.NOT_APPROVED
app.save()
app.save()
self.assertEqual(mock_email.call_count, 1)
def test_pending_does_not_call_email_function(self):
"""
Applications saved with a PENDING status should not generate email.
"""
orig_outbox = len(mail.outbox)
_ = ApplicationFactory(status=Application.PENDING)
self.assertEqual(len(mail.outbox), orig_outbox)
def test_question_does_not_call_email_function(self):
"""
Applications saved with a QUESTION status should not generate email.
"""
orig_outbox = len(mail.outbox)
_ = ApplicationFactory(status=Application.QUESTION)
self.assertEqual(len(mail.outbox), orig_outbox)
def test_sent_does_not_call_email_function(self):
"""
Applications saved with a SENT status should not generate email.
"""
orig_outbox = len(mail.outbox)
_ = ApplicationFactory(status=Application.SENT)
self.assertEqual(len(mail.outbox), orig_outbox)
@patch('TWLight.emails.tasks.send_waitlist_notification_email')
def test_waitlist_calls_email_function(self, mock_email):
partner = PartnerFactory(status=Partner.WAITLIST)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertTrue(mock_email.called)
partner.delete()
app.delete()
@patch('TWLight.emails.tasks.send_waitlist_notification_email')
def test_nonwaitlist_does_not_call_email_function(self, mock_email):
partner = PartnerFactory(status=Partner.AVAILABLE)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertFalse(mock_email.called)
partner.delete()
app.delete()
partner = PartnerFactory(status=Partner.NOT_AVAILABLE)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertFalse(mock_email.called)
partner.delete()
app.delete()
@patch('TWLight.emails.tasks.send_waitlist_notification_email')
def test_waitlisting_partner_calls_email_function(self, mock_email):
"""
Switching a Partner to WAITLIST status should call the email function
for apps to that partner with open statuses.
"""
partner = PartnerFactory(status=Partner.AVAILABLE)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertFalse(mock_email.called)
partner.status = Partner.WAITLIST
partner.save()
self.assertTrue(mock_email.called)
mock_email.assert_called_with(app)
@patch('TWLight.emails.tasks.send_waitlist_notification_email')
def test_waitlisting_partner_does_not_call_email_function(self, mock_email):
"""
Switching a Partner to WAITLIST status should NOT call the email
function for apps to that partner with closed statuses.
"""
partner = PartnerFactory(status=Partner.AVAILABLE)
app = ApplicationFactory(status=Application.APPROVED, partner=partner)
app = ApplicationFactory(status=Application.NOT_APPROVED, partner=partner)
app = ApplicationFactory(status=Application.SENT, partner=partner)
self.assertFalse(mock_email.called)
partner.status = Partner.WAITLIST
partner.save()
self.assertFalse(mock_email.called)
class ContactUsTest(TestCase):
def setUp(self):
super(ContactUsTest, self).setUp()
self.editor = EditorFactory(user__email='editor@example.com').user
@patch('TWLight.emails.tasks.contact_us_emails')
def test_contact_us_emails(self, mock_email):
factory = RequestFactory()
request = factory.post(get_form_target())
request.user = UserFactory()
editor = EditorFactory()
self.assertEqual(len(mail.outbox), 0)
email = ContactUsEmail()
email.send('wikipedialibrary@wikimedia.org',
{'user_email': self.editor.email,
'editor_wp_username': editor.wp_username,
'body': 'This is a test email'})
self.assertEqual(len(mail.outbox), 1)
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.mail import BadHeaderError, send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from TWLight.emails.forms import ContactUsForm
from TWLight.emails.signals import ContactUs
@method_decorator(login_required, name="post")
class ContactUsView(FormView):
template_name = "emails/contact.html"
form_class = ContactUsForm
success_url = reverse_lazy("contact")
def get_initial(self):
initial = super(ContactUsView, self).get_initial()
# @TODO: This sort of gets repeated in ContactUsForm.
# We could probably be factored out to a common place for DRYness.
if self.request.user.is_authenticated():
if self.request.user.email:
initial.update({"email": self.request.user.email})
if "message" in self.request.GET:
initial.update({"message": self.request.GET["message"]})
initial.update({"next": reverse_lazy("contact")})
return initial
def form_valid(self, form):
# Adding an extra check to ensure the user is a wikipedia editor.
try:
assert self.request.user.editor
email = form.cleaned_data["email"]
message = form.cleaned_data["message"]
carbon_copy = form.cleaned_data["cc"]
ContactUs.new_email.send(
sender=self.__class__,
user_email=email,
cc=carbon_copy,
editor_wp_username=self.request.user.editor.wp_username,
body=message,
)
messages.add_message(
self.request,
messages.SUCCESS,
# Translators: Shown to users when they successfully submit a new message using the contact us form.
_("Your message has been sent. We'll get back to you soon!"),
)
return HttpResponseRedirect(reverse("contact"))
except (AssertionError, AttributeError) as e:
messages.add_message(
self.request,
messages.WARNING,
# Translators: This message is shown to non-wikipedia editors who attempt to post data to the contact us form.
_("You must be a Wikipedia editor to do that."),
)
raise PermissionDenied
return self.request.user.editor
Tidy unused imports
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from TWLight.emails.forms import ContactUsForm
from TWLight.emails.signals import ContactUs
@method_decorator(login_required, name="post")
class ContactUsView(FormView):
template_name = "emails/contact.html"
form_class = ContactUsForm
success_url = reverse_lazy("contact")
def get_initial(self):
initial = super(ContactUsView, self).get_initial()
# @TODO: This sort of gets repeated in ContactUsForm.
# We could probably be factored out to a common place for DRYness.
if self.request.user.is_authenticated():
if self.request.user.email:
initial.update({"email": self.request.user.email})
if "message" in self.request.GET:
initial.update({"message": self.request.GET["message"]})
initial.update({"next": reverse_lazy("contact")})
return initial
def form_valid(self, form):
# Adding an extra check to ensure the user is a wikipedia editor.
try:
assert self.request.user.editor
email = form.cleaned_data["email"]
message = form.cleaned_data["message"]
carbon_copy = form.cleaned_data["cc"]
ContactUs.new_email.send(
sender=self.__class__,
user_email=email,
cc=carbon_copy,
editor_wp_username=self.request.user.editor.wp_username,
body=message,
)
messages.add_message(
self.request,
messages.SUCCESS,
# Translators: Shown to users when they successfully submit a new message using the contact us form.
_("Your message has been sent. We'll get back to you soon!"),
)
return HttpResponseRedirect(reverse("contact"))
except (AssertionError, AttributeError) as e:
messages.add_message(
self.request,
messages.WARNING,
# Translators: This message is shown to non-wikipedia editors who attempt to post data to the contact us form.
_("You must be a Wikipedia editor to do that."),
)
raise PermissionDenied
return self.request.user.editor
|
print "hello world"
read the data from the webpage
import urllib2
import sys
data = urllib2.urlopen("http://www.mountainview.gov/depts/pw/recycling/hazard/default.asp")
for lines in data.readlines():
sys.stdout.write(lines)
|
import asyncio
import collections
from websockets.exceptions import ConnectionClosed
from .proto import encode, decode
states = {'open': 0, 'closing': 1, 'closed': 2}
class NoPacketException(Exception):
"""
This error is thrown when you attempt to get_packet() without
there being a packet in the queue. Instead, you should wait
using wait_message().
"""
pass
class Connection():
"""
This is used to interface with the Tetris Robot client. It
provides methods for reading data as well as pushing protobuf
packets on.
Much of the functionality here is inspired by aioredis. Kudos
to them for supplementing my woefully lacking knowledge of
Python concurrency features. You can check them out here:
https://github.com/aio-libs/aioredis
"""
def __init__(self, socket, loop):
self._socket = socket
self._loop = loop
self._state = states['open']
self._read_task = asyncio.Task(self._read_data(), loop=loop)
self._read_queue = collections.deque()
self._read_waiter = None
def _push_packet(self, packet):
"""
Appends a packet to the internal read queue, or notifies
a waiting listener that a packet just came in.
"""
self._read_queue.append((decode(packet), packet))
if self._read_waiter is not None:
w, self._read_waiter = self._read_waiter, None
w.set_result(None)
@asyncio.coroutine
def _read_data(self):
"""
Reads data from the connection and passes it to _push_packet,
until the connection is closed or the task is cancelled.
"""
while True:
try:
data = yield from self._socket.recv()
except asyncio.CancelledError:
break
except ConnectionClosed:
break
self._push_packet(data)
self._loop.call_soon(self.close)
@asyncio.coroutine
def wait_message(self):
"""
Waits until a message is available on the wire, or until
the connection is in a state where it can't accept messages.
It returns True if a message is available, False otherwise.
"""
if self._state != states['open']:
return False
if len(self._read_queue) > 0:
return True
assert self._read_waiter is None or self._read_waiter.cancelled(), \
"You may only use one wait_message() per connection."
self._read_waiter = asyncio.Future(loop=self._loop)
yield from self._read_waiter
return (yield from self.wait_message())
def get_packet(self):
"""
Returns the last packet from the queue of read packets.
If there are none, it throws a NoPacketException.
"""
if len(self._read_queue) == 0:
raise NoPacketException()
return self._read_queue.popleft()
@asyncio.coroutine
def send(self, packet):
"""
Sends a packet to the Interactive daemon over the wire.
"""
print("sending")
# print(packet)
yield from self._socket.send(encode(packet))
def _do_close(self):
"""
Underlying closer function.
"""
self._socket.close()
self._state = states['closed']
def close(self):
"""
Closes the connection if it is open.
"""
if self._state == states['open']:
self._do_close()
@property
def open(self):
"""
Returns true if the connection is still open.
"""
return self._state == states['open']
@property
def closed(self):
"""
Returns true if the connection is closed, or
in the process of closing.
"""
return not self.open
Removed leftover debugging print statement
import asyncio
import collections
from websockets.exceptions import ConnectionClosed
from .proto import encode, decode
states = {'open': 0, 'closing': 1, 'closed': 2}
class NoPacketException(Exception):
"""
This error is thrown when you attempt to get_packet() without
there being a packet in the queue. Instead, you should wait
using wait_message().
"""
pass
class Connection():
"""
This is used to interface with the Tetris Robot client. It
provides methods for reading data as well as pushing protobuf
packets onto the wire.
Much of the functionality here is inspired by aioredis. Kudos
to them for supplementing my woefully lacking knowledge of
Python concurrency features. You can check them out here:
https://github.com/aio-libs/aioredis
"""
def __init__(self, socket, loop):
self._socket = socket
self._loop = loop
self._state = states['open']
self._read_task = asyncio.Task(self._read_data(), loop=loop)
self._read_queue = collections.deque()
self._read_waiter = None
def _push_packet(self, packet):
"""
Appends a packet to the internal read queue, or notifies
a waiting listener that a packet just came in.
"""
self._read_queue.append((decode(packet), packet))
if self._read_waiter is not None:
w, self._read_waiter = self._read_waiter, None
w.set_result(None)
@asyncio.coroutine
def _read_data(self):
"""
Reads data from the connection and passes it to _push_packet,
until the connection is closed or the task is cancelled.
"""
while True:
try:
data = yield from self._socket.recv()
except asyncio.CancelledError:
break
except ConnectionClosed:
break
self._push_packet(data)
self._loop.call_soon(self.close)
@asyncio.coroutine
def wait_message(self):
"""
Waits until a message is available on the wire, or until
the connection is in a state where it can't accept messages.
It returns True if a message is available, False otherwise.
"""
if self._state != states['open']:
return False
if len(self._read_queue) > 0:
return True
assert self._read_waiter is None or self._read_waiter.cancelled(), \
"You may only use one wait_message() per connection."
self._read_waiter = asyncio.Future(loop=self._loop)
yield from self._read_waiter
return (yield from self.wait_message())
def get_packet(self):
"""
Returns the last packet from the queue of read packets.
If there are none, it throws a NoPacketException.
"""
if len(self._read_queue) == 0:
raise NoPacketException()
return self._read_queue.popleft()
@asyncio.coroutine
def send(self, packet):
"""
Sends a packet to the Interactive daemon over the wire.
"""
yield from self._socket.send(encode(packet))
def _do_close(self):
"""
Underlying closer function.
"""
self._socket.close()
self._state = states['closed']
def close(self):
"""
Closes the connection if it is open.
"""
if self._state == states['open']:
self._do_close()
@property
def open(self):
"""
Returns true if the connection is still open.
"""
return self._state == states['open']
@property
def closed(self):
"""
Returns true if the connection is closed, or
in the process of closing.
"""
return not self.open
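# A minimal usage sketch (not part of the original module): one way a caller could
# drive Connection, based on the wait_message()/get_packet() contract documented
# above. How the websocket and event loop are created is assumed, not shown here.
@asyncio.coroutine
def read_loop(connection):
    # Keep pulling packets until the connection can no longer produce messages.
    while (yield from connection.wait_message()):
        decoded, raw = connection.get_packet()
        print("received:", decoded)
    connection.close()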
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2019-2021 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------------
"""
The image test runner is used for image quality and performance testing.
It is designed to process directories of arbitrary test images, using the
directory structure and path naming conventions to self-describe how each image
is to be compressed. Some built-in test sets are provided in the ./Test/Images
directory, and others can be downloaded by running the astc_test_image_dl
script.
Attributes:
RESULT_THRESHOLD_WARN: The result threshold (dB) for getting a WARN.
RESULT_THRESHOLD_FAIL: The result threshold (dB) for getting a FAIL.
TEST_BLOCK_SIZES: The block sizes we can test. This is a subset of the
block sizes supported by ASTC, simply to keep test run times
manageable.
"""
import argparse
import os
import platform
import sys
import testlib.encoder as te
import testlib.testset as tts
import testlib.resultset as trs
# Require bit-exact results against the reference scores
RESULT_THRESHOLD_WARN = -0.00
RESULT_THRESHOLD_FAIL = -0.00
RESULT_THRESHOLD_3D_FAIL = -0.00
TEST_BLOCK_SIZES = ["4x4", "5x5", "6x6", "8x8", "12x12",
"3x3x3", "6x6x6"]
TEST_QUALITIES = ["fastest", "fast", "medium", "thorough"]
def is_3d(blockSize):
"""
Is the given block size a 3D block type?
Args:
blockSize (str): The block size.
Returns:
bool: ``True`` if the block string is a 3D block size, ``False`` if 2D.
"""
return blockSize.count("x") == 2
def count_test_set(testSet, blockSizes):
"""
Count the number of test executions needed for a test set.
Args:
testSet (TestSet): The test set to run.
blockSizes (list(str)): The block sizes to run.
Returns:
int: The number of test executions needed.
"""
count = 0
for blkSz in blockSizes:
for image in testSet.tests:
# 3D block sizes require 3D images
if is_3d(blkSz) != image.is3D:
continue
count += 1
return count
def determine_result(image, reference, result):
"""
Determine a test result against a reference and thresholds.
Args:
image (TestImage): The image being compressed.
reference (Record): The reference result to compare against.
result (Record): The test result.
Returns:
Result: The result code.
"""
dPSNR = result.psnr - reference.psnr
if (dPSNR < RESULT_THRESHOLD_FAIL) and (not image.is3D):
return trs.Result.FAIL
if (dPSNR < RESULT_THRESHOLD_3D_FAIL) and image.is3D:
return trs.Result.FAIL
if dPSNR < RESULT_THRESHOLD_WARN:
return trs.Result.WARN
return trs.Result.PASS
def format_solo_result(image, result):
"""
Format a metrics string for a single (no compare) result.
Args:
image (TestImage): The image being tested.
result (Record): The test result.
Returns:
str: The metrics string.
"""
name = "%5s %s" % (result.blkSz, result.name)
tPSNR = "%2.3f dB" % result.psnr
tTTime = "%.3f s" % result.tTime
tCTime = "%.3f s" % result.cTime
tCMTS = "%.3f MT/s" % result.cRate
return "%-32s | %8s | %9s | %9s | %10s" % \
(name, tPSNR, tTTime, tCTime, tCMTS)
def format_result(image, reference, result):
"""
Format a metrics string for a comparison result.
Args:
image (TestImage): The image being tested.
reference (Record): The reference result to compare against.
result (Record): The test result.
Returns:
str: The metrics string.
"""
dPSNR = result.psnr - reference.psnr
sTTime = reference.tTime / result.tTime
sCTime = reference.cTime / result.cTime
name = "%5s %s" % (result.blkSz, result.name)
tPSNR = "%2.3f dB (% 1.3f dB)" % (result.psnr, dPSNR)
tTTime = "%.3f s (%1.2fx)" % (result.tTime, sTTime)
tCTime = "%.3f s (%1.2fx)" % (result.cTime, sCTime)
tCMTS = "%.3f MT/s" % (result.cRate)
result = determine_result(image, reference, result)
return "%-32s | %22s | %15s | %15s | %10s | %s" % \
(name, tPSNR, tTTime, tCTime, tCMTS, result.name)
def run_test_set(encoder, testRef, testSet, quality, blockSizes, testRuns,
keepOutput):
"""
Execute all tests in the test set.
Args:
encoder (EncoderBase): The encoder to use.
testRef (ResultSet): The test reference results.
testSet (TestSet): The test set.
quality (str): The quality level to execute the test against.
blockSizes (list(str)): The block sizes to execute each test against.
testRuns (int): The number of test repeats to run for each image test.
keepOutput (bool): Should the test preserve output images? This is
only a hint and discarding output may be ignored if the encoder
version used can't do it natively.
Returns:
ResultSet: The test results.
"""
resultSet = trs.ResultSet(testSet.name)
curCount = 0
maxCount = count_test_set(testSet, blockSizes)
dat = (testSet.name, encoder.name, quality)
title = "Test Set: %s / Encoder: %s -%s" % dat
print(title)
print("=" * len(title))
for blkSz in blockSizes:
for image in testSet.tests:
# 3D block sizes require 3D images
if is_3d(blkSz) != image.is3D:
continue
curCount += 1
dat = (curCount, maxCount, blkSz, image.testFile)
print("Running %u/%u %s %s ... " % dat, end='', flush=True)
res = encoder.run_test(image, blkSz, "-%s" % quality, testRuns,
keepOutput)
res = trs.Record(blkSz, image.testFile, res[0], res[1], res[2], res[3])
resultSet.add_record(res)
if testRef:
refResult = testRef.get_matching_record(res)
res.set_status(determine_result(image, refResult, res))
res.tTimeRel = refResult.tTime / res.tTime
res.cTimeRel = refResult.cTime / res.cTime
res.psnrRel = res.psnr - refResult.psnr
res = format_result(image, refResult, res)
else:
res = format_solo_result(image, res)
print("\r[%3u] %s" % (curCount, res))
return resultSet
def get_encoder_params(encoderName, referenceName, imageSet):
"""
Get the encoder and image set parameters for a test run.
Args:
encoderName (str): The encoder name.
referenceName (str): The reference encoder name.
imageSet (str): The test image set.
Returns:
tuple(EncoderBase, str, str, str): The test parameters for the
requested encoder and test set. An instance of the encoder wrapper
class, the output data name, the output result directory, and the
reference to use.
"""
# 1.7 variants
if encoderName == "ref-1.7":
encoder = te.Encoder1_7()
name = "reference-1.7"
outDir = "Test/Images/%s" % imageSet
refName = None
return (encoder, name, outDir, refName)
if encoderName.startswith("ref"):
_, version, simd = encoderName.split("-")
# 2.x variants
if version.startswith("2."):
encoder = te.Encoder2xRel(version, simd)
name = f"reference-{version}-{simd}"
outDir = "Test/Images/%s" % imageSet
refName = None
return (encoder, name, outDir, refName)
# Latest main
if version == "main":
encoder = te.Encoder2x(simd)
name = f"reference-{version}-{simd}"
outDir = "Test/Images/%s" % imageSet
refName = None
return (encoder, name, outDir, refName)
assert False, f"Encoder {encoderName} not recognized"
encoder = te.Encoder2x(encoderName)
name = "develop-%s" % encoderName
outDir = "TestOutput/%s" % imageSet
refName = referenceName.replace("ref", "reference")
return (encoder, name, outDir, refName)
def parse_command_line():
"""
Parse the command line.
Returns:
Namespace: The parsed command line container.
"""
parser = argparse.ArgumentParser()
# All reference encoders
refcoders = ["ref-1.7",
"ref-2.5-neon", "ref-2.5-sse2", "ref-2.5-sse4.1", "ref-2.5-avx2",
"ref-main-neon", "ref-main-sse2", "ref-main-sse4.1", "ref-main-avx2"]
# All test encoders
testcoders = ["none", "neon", "sse2", "sse4.1", "avx2"]
testcodersAArch64 = ["none", "neon"]
testcodersX86 = ["none", "sse2", "sse4.1", "avx2"]
coders = refcoders + testcoders + ["all-aarch64", "all-x86"]
parser.add_argument("--encoder", dest="encoders", default="avx2",
choices=coders, help="test encoder variant")
parser.add_argument("--reference", dest="reference", default="ref-main-avx2",
choices=refcoders, help="reference encoder variant")
astcProfile = ["ldr", "ldrs", "hdr", "all"]
parser.add_argument("--color-profile", dest="profiles", default="all",
choices=astcProfile, help="test color profile")
imgFormat = ["l", "xy", "rgb", "rgba", "all"]
parser.add_argument("--color-format", dest="formats", default="all",
choices=imgFormat, help="test color format")
choices = list(TEST_BLOCK_SIZES) + ["all"]
parser.add_argument("--block-size", dest="blockSizes",
action="append", choices=choices,
help="test block size")
testDir = os.path.dirname(__file__)
testDir = os.path.join(testDir, "Images")
testSets = []
for path in os.listdir(testDir):
fqPath = os.path.join(testDir, path)
if os.path.isdir(fqPath):
testSets.append(path)
testSets.append("all")
parser.add_argument("--test-set", dest="testSets", default="Small",
choices=testSets, help="test image test set")
parser.add_argument("--test-image", dest="testImage", default=None,
help="select a specific test image from the test set")
choices = list(TEST_QUALITIES) + ["all"]
parser.add_argument("--test-quality", dest="testQual", default="thorough",
choices=choices, help="select a specific test quality")
parser.add_argument("--repeats", dest="testRepeats", default=1,
type=int, help="test iteration count")
parser.add_argument("--keep-output", dest="keepOutput", default=False,
action="store_true", help="keep image output")
args = parser.parse_args()
# Turn things into canonical format lists
if args.encoders == "all-aarch64":
args.encoders = testcodersAArch64
elif args.encoders == "all-x86":
args.encoders = testcodersX86
else:
args.encoders = [args.encoders]
args.testQual = TEST_QUALITIES if args.testQual == "all" \
else [args.testQual]
if not args.blockSizes or ("all" in args.blockSizes):
args.blockSizes = TEST_BLOCK_SIZES
args.testSets = testSets[:-1] if args.testSets == "all" \
else [args.testSets]
args.profiles = astcProfile[:-1] if args.profiles == "all" \
else [args.profiles]
args.formats = imgFormat[:-1] if args.formats == "all" \
else [args.formats]
return args
def main():
"""
The main function.
Returns:
int: The process return code.
"""
# Parse command lines
args = parse_command_line()
testSetCount = 0
worstResult = trs.Result.NOTRUN
for quality in args.testQual:
for imageSet in args.testSets:
for encoderName in args.encoders:
(encoder, name, outDir, refName) = \
get_encoder_params(encoderName, args.reference, imageSet)
testDir = "Test/Images/%s" % imageSet
testRes = "%s/astc_%s_%s_results.csv" % (outDir, name, quality)
testRef = None
if refName:
dat = (testDir, refName, quality)
testRefPath = "%s/astc_%s_%s_results.csv" % dat
testRef = trs.ResultSet(imageSet)
testRef.load_from_file(testRefPath)
testSetCount += 1
testSet = tts.TestSet(imageSet, testDir,
args.profiles, args.formats, args.testImage)
# The fast and fastest presets are now sufficiently fast that
# the results are noisy without more repeats
testRepeats = args.testRepeats
if quality == "fast" and testRepeats > 1:
testRepeats *= 2
elif quality == "fastest" and testRepeats > 1:
testRepeats *= 4
resultSet = run_test_set(encoder, testRef, testSet, quality,
args.blockSizes, testRepeats,
args.keepOutput)
resultSet.save_to_file(testRes)
if refName:
summary = resultSet.get_results_summary()
worstResult = max(summary.get_worst_result(), worstResult)
print(summary)
if (testSetCount > 1) and (worstResult != trs.Result.NOTRUN):
print("OVERALL STATUS: %s" % worstResult.name)
if worstResult == trs.Result.FAIL:
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
Widen MT/s column in test runner report
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2019-2021 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------------
"""
The image test runner is used for image quality and performance testing.
It is designed to process directories of arbitrary test images, using the
directory structure and path naming conventions to self-describe how each image
is to be compressed. Some built-in test sets are provided in the ./Test/Images
directory, and others can be downloaded by running the astc_test_image_dl
script.
Attributes:
RESULT_THRESHOLD_WARN: The result threshold (dB) for getting a WARN.
RESULT_THRESHOLD_FAIL: The result threshold (dB) for getting a FAIL.
TEST_BLOCK_SIZES: The block sizes we can test. This is a subset of the
block sizes supported by ASTC, simply to keep test run times
manageable.
"""
import argparse
import os
import platform
import sys
import testlib.encoder as te
import testlib.testset as tts
import testlib.resultset as trs
# Require bit-exact results against the reference scores
RESULT_THRESHOLD_WARN = -0.00
RESULT_THRESHOLD_FAIL = -0.00
RESULT_THRESHOLD_3D_FAIL = -0.00
TEST_BLOCK_SIZES = ["4x4", "5x5", "6x6", "8x8", "12x12",
"3x3x3", "6x6x6"]
TEST_QUALITIES = ["fastest", "fast", "medium", "thorough"]
def is_3d(blockSize):
"""
Is the given block size a 3D block type?
Args:
blockSize (str): The block size.
Returns:
bool: ``True`` if the block string is a 3D block size, ``False`` if 2D.
"""
return blockSize.count("x") == 2
def count_test_set(testSet, blockSizes):
"""
Count the number of test executions needed for a test set.
Args:
testSet (TestSet): The test set to run.
blockSizes (list(str)): The block sizes to run.
Returns:
int: The number of test executions needed.
"""
count = 0
for blkSz in blockSizes:
for image in testSet.tests:
# 3D block sizes require 3D images
if is_3d(blkSz) != image.is3D:
continue
count += 1
return count
def determine_result(image, reference, result):
"""
Determine a test result against a reference and thresholds.
Args:
image (TestImage): The image being compressed.
reference (Record): The reference result to compare against.
result (Record): The test result.
Returns:
Result: The result code.
"""
dPSNR = result.psnr - reference.psnr
if (dPSNR < RESULT_THRESHOLD_FAIL) and (not image.is3D):
return trs.Result.FAIL
if (dPSNR < RESULT_THRESHOLD_3D_FAIL) and image.is3D:
return trs.Result.FAIL
if dPSNR < RESULT_THRESHOLD_WARN:
return trs.Result.WARN
return trs.Result.PASS
def format_solo_result(image, result):
"""
Format a metrics string for a single (no compare) result.
Args:
image (TestImage): The image being tested.
result (Record): The test result.
Returns:
str: The metrics string.
"""
name = "%5s %s" % (result.blkSz, result.name)
tPSNR = "%2.3f dB" % result.psnr
tTTime = "%.3f s" % result.tTime
tCTime = "%.3f s" % result.cTime
tCMTS = "%.3f MT/s" % result.cRate
return "%-32s | %8s | %9s | %9s | %11s" % \
(name, tPSNR, tTTime, tCTime, tCMTS)
def format_result(image, reference, result):
"""
Format a metrics string for a comparison result.
Args:
image (TestImage): The image being tested.
reference (Record): The reference result to compare against.
result (Record): The test result.
Returns:
str: The metrics string.
"""
dPSNR = result.psnr - reference.psnr
sTTime = reference.tTime / result.tTime
sCTime = reference.cTime / result.cTime
name = "%5s %s" % (result.blkSz, result.name)
tPSNR = "%2.3f dB (% 1.3f dB)" % (result.psnr, dPSNR)
tTTime = "%.3f s (%1.2fx)" % (result.tTime, sTTime)
tCTime = "%.3f s (%1.2fx)" % (result.cTime, sCTime)
tCMTS = "%.3f MT/s" % (result.cRate)
result = determine_result(image, reference, result)
return "%-32s | %22s | %15s | %15s | %11s | %s" % \
(name, tPSNR, tTTime, tCTime, tCMTS, result.name)
def run_test_set(encoder, testRef, testSet, quality, blockSizes, testRuns,
keepOutput):
"""
Execute all tests in the test set.
Args:
encoder (EncoderBase): The encoder to use.
testRef (ResultSet): The test reference results.
testSet (TestSet): The test set.
quality (str): The quality level to execute the test against.
blockSizes (list(str)): The block sizes to execute each test against.
testRuns (int): The number of test repeats to run for each image test.
keepOutput (bool): Should the test preserve output images? This is
only a hint and discarding output may be ignored if the encoder
version used can't do it natively.
Returns:
ResultSet: The test results.
"""
resultSet = trs.ResultSet(testSet.name)
curCount = 0
maxCount = count_test_set(testSet, blockSizes)
dat = (testSet.name, encoder.name, quality)
title = "Test Set: %s / Encoder: %s -%s" % dat
print(title)
print("=" * len(title))
for blkSz in blockSizes:
for image in testSet.tests:
# 3D block sizes require 3D images
if is_3d(blkSz) != image.is3D:
continue
curCount += 1
dat = (curCount, maxCount, blkSz, image.testFile)
print("Running %u/%u %s %s ... " % dat, end='', flush=True)
res = encoder.run_test(image, blkSz, "-%s" % quality, testRuns,
keepOutput)
res = trs.Record(blkSz, image.testFile, res[0], res[1], res[2], res[3])
resultSet.add_record(res)
if testRef:
refResult = testRef.get_matching_record(res)
res.set_status(determine_result(image, refResult, res))
res.tTimeRel = refResult.tTime / res.tTime
res.cTimeRel = refResult.cTime / res.cTime
res.psnrRel = res.psnr - refResult.psnr
res = format_result(image, refResult, res)
else:
res = format_solo_result(image, res)
print("\r[%3u] %s" % (curCount, res))
return resultSet
def get_encoder_params(encoderName, referenceName, imageSet):
"""
Get the encoder and image set parameters for a test run.
Args:
encoderName (str): The encoder name.
referenceName (str): The reference encoder name.
imageSet (str): The test image set.
Returns:
tuple(EncoderBase, str, str, str): The test parameters for the
requested encoder and test set. An instance of the encoder wrapper
class, the output data name, the output result directory, and the
reference to use.
"""
# 1.7 variants
if encoderName == "ref-1.7":
encoder = te.Encoder1_7()
name = "reference-1.7"
outDir = "Test/Images/%s" % imageSet
refName = None
return (encoder, name, outDir, refName)
if encoderName.startswith("ref"):
_, version, simd = encoderName.split("-")
# 2.x variants
if version.startswith("2."):
encoder = te.Encoder2xRel(version, simd)
name = f"reference-{version}-{simd}"
outDir = "Test/Images/%s" % imageSet
refName = None
return (encoder, name, outDir, refName)
# 3.x variants
if version.startswith("3."):
encoder = te.Encoder2xRel(version, simd)
name = f"reference-{version}-{simd}"
outDir = "Test/Images/%s" % imageSet
refName = None
return (encoder, name, outDir, refName)
# Latest main
if version == "main":
encoder = te.Encoder2x(simd)
name = f"reference-{version}-{simd}"
outDir = "Test/Images/%s" % imageSet
refName = None
return (encoder, name, outDir, refName)
assert False, f"Encoder {encoderName} not recognized"
encoder = te.Encoder2x(encoderName)
name = "develop-%s" % encoderName
outDir = "TestOutput/%s" % imageSet
refName = referenceName.replace("ref", "reference")
return (encoder, name, outDir, refName)
def parse_command_line():
"""
Parse the command line.
Returns:
Namespace: The parsed command line container.
"""
parser = argparse.ArgumentParser()
# All reference encoders
refcoders = ["ref-1.7",
"ref-2.5-neon", "ref-2.5-sse2", "ref-2.5-sse4.1", "ref-2.5-avx2",
"ref-3.0-neon", "ref-3.0-sse2", "ref-3.0-sse4.1", "ref-3.0-avx2",
"ref-main-neon", "ref-main-sse2", "ref-main-sse4.1", "ref-main-avx2"]
# All test encoders
testcoders = ["none", "neon", "sse2", "sse4.1", "avx2"]
testcodersAArch64 = ["none", "neon"]
testcodersX86 = ["none", "sse2", "sse4.1", "avx2"]
coders = refcoders + testcoders + ["all-aarch64", "all-x86"]
parser.add_argument("--encoder", dest="encoders", default="avx2",
choices=coders, help="test encoder variant")
parser.add_argument("--reference", dest="reference", default="ref-main-avx2",
choices=refcoders, help="reference encoder variant")
astcProfile = ["ldr", "ldrs", "hdr", "all"]
parser.add_argument("--color-profile", dest="profiles", default="all",
choices=astcProfile, help="test color profile")
imgFormat = ["l", "xy", "rgb", "rgba", "all"]
parser.add_argument("--color-format", dest="formats", default="all",
choices=imgFormat, help="test color format")
choices = list(TEST_BLOCK_SIZES) + ["all"]
parser.add_argument("--block-size", dest="blockSizes",
action="append", choices=choices,
help="test block size")
testDir = os.path.dirname(__file__)
testDir = os.path.join(testDir, "Images")
testSets = []
for path in os.listdir(testDir):
fqPath = os.path.join(testDir, path)
if os.path.isdir(fqPath):
testSets.append(path)
testSets.append("all")
parser.add_argument("--test-set", dest="testSets", default="Small",
choices=testSets, help="test image test set")
parser.add_argument("--test-image", dest="testImage", default=None,
help="select a specific test image from the test set")
choices = list(TEST_QUALITIES) + ["all"]
parser.add_argument("--test-quality", dest="testQual", default="thorough",
choices=choices, help="select a specific test quality")
parser.add_argument("--repeats", dest="testRepeats", default=1,
type=int, help="test iteration count")
parser.add_argument("--keep-output", dest="keepOutput", default=False,
action="store_true", help="keep image output")
args = parser.parse_args()
# Turn things into canonical format lists
if args.encoders == "all-aarch64":
args.encoders = testcodersAArch64
elif args.encoders == "all-x86":
args.encoders = testcodersX86
else:
args.encoders = [args.encoders]
args.testQual = TEST_QUALITIES if args.testQual == "all" \
else [args.testQual]
if not args.blockSizes or ("all" in args.blockSizes):
args.blockSizes = TEST_BLOCK_SIZES
args.testSets = testSets[:-1] if args.testSets == "all" \
else [args.testSets]
args.profiles = astcProfile[:-1] if args.profiles == "all" \
else [args.profiles]
args.formats = imgFormat[:-1] if args.formats == "all" \
else [args.formats]
return args
def main():
"""
The main function.
Returns:
int: The process return code.
"""
# Parse command lines
args = parse_command_line()
testSetCount = 0
worstResult = trs.Result.NOTRUN
for quality in args.testQual:
for imageSet in args.testSets:
for encoderName in args.encoders:
(encoder, name, outDir, refName) = \
get_encoder_params(encoderName, args.reference, imageSet)
testDir = "Test/Images/%s" % imageSet
testRes = "%s/astc_%s_%s_results.csv" % (outDir, name, quality)
testRef = None
if refName:
dat = (testDir, refName, quality)
testRefPath = "%s/astc_%s_%s_results.csv" % dat
testRef = trs.ResultSet(imageSet)
testRef.load_from_file(testRefPath)
testSetCount += 1
testSet = tts.TestSet(imageSet, testDir,
args.profiles, args.formats, args.testImage)
# The fast and fastest presets are now sufficiently fast that
# the results are noisy without more repeats
testRepeats = args.testRepeats
if quality == "fast" and testRepeats > 1:
testRepeats *= 2
elif quality == "fastest" and testRepeats > 1:
testRepeats *= 4
resultSet = run_test_set(encoder, testRef, testSet, quality,
args.blockSizes, testRepeats,
args.keepOutput)
resultSet.save_to_file(testRes)
if refName:
summary = resultSet.get_results_summary()
worstResult = max(summary.get_worst_result(), worstResult)
print(summary)
if (testSetCount > 1) and (worstResult != trs.Result.NOTRUN):
print("OVERALL STATUS: %s" % worstResult.name)
if worstResult == trs.Result.FAIL:
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
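# Example invocations (a hedged sketch; the script filename below is an assumption,
# while every flag comes from parse_command_line() above):
#
#   python astc_test_image.py --encoder avx2 --test-set Small
#   python astc_test_image.py --encoder sse4.1 --reference ref-2.5-avx2 \
#       --block-size 4x4 --block-size 6x6 --test-quality medium --repeats 4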
|
# coding: utf-8
import logging
import tortilla
from optparse import make_option
from couchdb.http import ResourceNotFound
from django.core.management import BaseCommand, call_command
from django.conf import settings
from bilanci.utils import couch
from bilanci.utils import gdocs, email_utils
from bilanci.utils.comuni import FLMapper
__author__ = 'guglielmo'
class Command(BaseCommand):
"""
Reads data from a source couchdb instance and produces (upgrades) couchdb destination documents
by translating the document keys according to the content of the gdoc mappings.
Substitutes and supersedes the couchdb_scripts/translate_keys script.
"""
option_list = BaseCommand.option_list + (
make_option('--dry-run',
dest='dryrun',
action='store_true',
default=False,
help='Set the dry-run command mode: nothing is written in the couchdb'),
make_option('--type',
dest='type',
help='Select translation type: [(v)oce | (t)itolo]'),
make_option('--years',
dest='years',
default='',
help='Years to fetch. From 2002 to 2012. Use one of these formats: 2012 or 2003-2006 or 2002,2004,2006'),
make_option('--cities',
dest='cities',
default='',
help='City codes or slugs. Use commas to separate values: Roma,Napoli,Torino or "All"'),
make_option('--couchdb-server',
dest='couchdb_server',
default=settings.COUCHDB_DEFAULT_SERVER,
help='CouchDB server to connect to (defaults to staging).'),
make_option('--skip-existing',
dest='skip_existing',
action='store_true',
default=False,
help='Skip existing documents. Use to speed up long import of many cities, when errors occur'),
make_option('--force-google',
dest='force_google',
action='store_true',
default=False,
help='Force reloading mapping files from gdocs (invalidate the csv cache)'),
make_option('--design-documents',
dest='design_documents',
action='store_true',
default=False,
help='Copy design documents into destination db'),
make_option('--append',
dest='append',
action='store_true',
default=False,
help='Append to the log file instead of overwriting it (used when launching shell scripts)'),
)
help = 'Translate the keys of couchdb documents, normalizing them.'
logger = logging.getLogger('management')
comuni_dicts = {}
docs_bulk = []
bulk_size = 80
couchdb_source = None
couchdb_dest = None
couchdb_dest_tortilla = None
def handle(self, *args, **options):
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
dryrun = options['dryrun']
if options['append'] is True:
self.logger = logging.getLogger('management_append')
# type option, different values are accepted:
# v, V, voce, Voce, VOCE or
# t, T, titolo, Titolo, TITOLO, Title
if 'type' not in options:
raise Exception("Missing type parameter")
if options['type'].lower()[0] not in ('v', 't'):
raise Exception("Wrong type parameter value (voce|titolo)")
translation_type = options['type'][0].lower()
force_google = options['force_google']
skip_existing = options['skip_existing']
design_documents = options['design_documents']
cities_codes = options['cities']
if not cities_codes:
raise Exception("Missing city parameter")
self.logger.info("Opening Lista Comuni")
mapper = FLMapper()
cities = mapper.get_cities(cities_codes)
if cities_codes.lower() != 'all':
self.logger.info("Processing cities: {0}".format(cities))
years = options['years']
if not years:
raise Exception("Missing years parameter")
if "-" in years:
(start_year, end_year) = years.split("-")
years = range(int(start_year), int(end_year) + 1)
else:
years = [int(y.strip()) for y in years.split(",") if
settings.APP_START_YEAR <= int(y.strip()) <= settings.APP_END_YEAR]
if not years:
raise Exception("No suitable year found in {0}".format(years))
self.logger.info("Processing years: {0}".format(years))
###
# couchdb connections
###
couchdb_server_alias = options['couchdb_server']
# set couch source and destination names
if translation_type == 't':
couchdb_source_name = settings.COUCHDB_RAW_NAME
couchdb_dest_name = settings.COUCHDB_NORMALIZED_TITOLI_NAME
elif translation_type == 'v':
couchdb_source_name = settings.COUCHDB_NORMALIZED_TITOLI_NAME
couchdb_dest_name = settings.COUCHDB_NORMALIZED_VOCI_NAME
else:
self.logger.critical(u"Translation type not accepted:{}".format(translation_type))
exit()
if couchdb_server_alias not in settings.COUCHDB_SERVERS:
raise Exception("Unknown couchdb server alias.")
self.logger.info("Connecting to server: {}".format(couchdb_server_alias, ))
self.logger.info("Connecting source db: {}".format(couchdb_source_name))
try:
self.couchdb_source = couch.connect(
couchdb_source_name,
couchdb_server_settings=settings.COUCHDB_SERVERS[couchdb_server_alias]
)
except ResourceNotFound:
self.logger.error("Could not find source db. Quitting")
return
self.logger.info("Connecting to destination db: {0}".format(couchdb_dest_name))
couchdb_dest_settings = settings.COUCHDB_SERVERS[couchdb_server_alias]
try:
self.couchdb_dest = couch.connect(
couchdb_dest_name,
couchdb_server_settings=couchdb_dest_settings
)
except ResourceNotFound:
self.logger.error("Could not find destination db. Quitting")
return
server_connection_string = "http://{}:{}".format(couchdb_dest_settings['host'], couchdb_dest_settings['port'])
self.couchdb_dest_tortilla = tortilla.wrap(server_connection_string)
self.logger.info("Compact destination db...")
self.couchdb_dest.compact()
self.logger.info("Done")
###
# Mapping files from gdoc
###
# connect to google account and fetch tree mapping and simple tree structure
normalized_map = gdocs.get_normalized_map(translation_type, n_header_lines=2, force_google=force_google)
normalized_titoli_sheet = {'preventivo': [row[2] for row in normalized_map['preventivo']],
'consuntivo': [row[2] for row in normalized_map['consuntivo']],
}
normalized_voci_sheet = {'preventivo': [(row[2], row[3]) for row in normalized_map['preventivo']],
'consuntivo': [(row[2], row[3]) for row in normalized_map['consuntivo']],
}
# copying design documents
if design_documents:
self.logger.info(u"Copying design documents")
source_design_docs = self.couchdb_source.view("_all_docs",
startkey="_design/", endkey="_design0",
include_docs=True
)
for row in source_design_docs.rows:
source_design_doc = row.doc
self.logger.info(u" document id: ".format(source_design_doc.id))
destination_document = {'_id': source_design_doc.id}
destination_document['language'] = source_design_doc['language']
destination_document['views'] = source_design_doc['views']
if not dryrun:
self.couchdb_dest.save(destination_document)
if cities and years:
for city in cities:
self.logger.info(u"Updating {}".format(city))
for year in years:
doc_id = u"{0}_{1}".format(year, city)
if doc_id in self.couchdb_dest and skip_existing:
self.logger.info("Skipping city of {}, as already existing".format(city))
continue
# identify source document or skip
source_document = self.couchdb_source.get(doc_id)
if source_document is None:
self.logger.warning("{0} doc_id not found in source db. skipping.".format(doc_id))
continue
# create destination document, to REPLACE old one
destination_document = {'_id': doc_id, }
# if a doc with that id already exists in the destination db, get its _rev value
# and insert it into the destination document.
# this avoids a document conflict on writing;
# otherwise the old doc would have to be deleted before writing the new one
old_destination_doc = self.couchdb_dest.get(doc_id, None)
if old_destination_doc:
revision = old_destination_doc.get('_rev', None)
if revision:
destination_document['_rev'] = revision
self.logger.debug("Adds rev value to doc")
for bilancio_type in ['preventivo', 'consuntivo']:
if bilancio_type in source_document.keys():
bilancio_object = source_document[bilancio_type]
destination_document[bilancio_type] = {}
for quadro_name, quadro_object in bilancio_object.iteritems():
destination_document[bilancio_type][quadro_name] = {}
for titolo_name, titolo_object in quadro_object.iteritems():
if translation_type == 't':
# for each titolo, apply translation_map, if valid
try:
idx = normalized_titoli_sheet[bilancio_type].index(titolo_name)
titolo_name = normalized_map[bilancio_type][idx][3]
except ValueError:
pass
# create dest doc titolo dictionary
destination_document[bilancio_type][quadro_name][titolo_name] = {}
# copy meta
if 'meta' in titolo_object.keys():
destination_document[bilancio_type][quadro_name][titolo_name]['meta'] = {}
destination_document[bilancio_type][quadro_name][titolo_name]['meta'] = \
titolo_object['meta']
# copy data (normalize voci if needed)
if 'data' in titolo_object.keys():
destination_document[bilancio_type][quadro_name][titolo_name]['data'] = {}
if translation_type == 'v':
# voci translation
for voce_name, voce_obj in titolo_object['data'].iteritems():
# voci are always translated into lowercase, unicode strings
# a leading "- " marker is removed, if present
voce_name = unicode(voce_name.lower())
if voce_name.find("- ") == 0:
voce_name = voce_name.replace("- ", "")
# for each voce, apply translation_map, if valid
try:
idx = normalized_voci_sheet[bilancio_type].index(
(titolo_name, voce_name))
voce_name = normalized_map[bilancio_type][idx][4]
except ValueError:
pass
# create voice dictionary with normalized name
destination_document[bilancio_type][quadro_name][titolo_name]['data'][
voce_name] = {}
destination_document[bilancio_type][quadro_name][titolo_name]['data'][
voce_name] = voce_obj
else:
# copy all voci in data, with no normalization
destination_document[bilancio_type][quadro_name][titolo_name]['data'] = \
titolo_object['data']
# add the document to the list that will be written to couchdb in bulks
self.docs_bulk.append(destination_document)
if len(self.docs_bulk) == self.bulk_size:
if not dryrun:
ret_value = couch.write_bulk(
couchdb_dest=self.couchdb_dest_tortilla,
couchdb_name=couchdb_dest_name,
docs_bulk=self.docs_bulk,
logger=self.logger)
if ret_value is False:
email_utils.send_notification_email(msg_string='Couch translate key has encountered problems')
self.docs_bulk = []
# if the last set was < bulk_size write the last documents
if len(self.docs_bulk) > 0:
if not dryrun:
ret_value = couch.write_bulk(
couchdb_dest=self.couchdb_dest_tortilla,
couchdb_name=couchdb_dest_name,
docs_bulk=self.docs_bulk,
logger=self.logger)
if ret_value is False:
email_utils.send_notification_email(msg_string='Couch translate key has encountered problems')
self.docs_bulk = []
self.logger.info("Compact destination db...")
self.couchdb_dest.compact()
self.logger.info("Done compacting")
if not dryrun and couchdb_dest_name == settings.COUCHDB_NORMALIZED_VOCI_NAME and (settings.INSTANCE_TYPE == 'production' or settings.INSTANCE_TYPE == 'staging'):
self.logger.info(u"============Run patch 2013 for consuntivo======================")
call_command('consuntivo_13_patch', verbosity=2, interactive=False)
email_utils.send_notification_email(msg_string="Couch translate key has finished")
self.logger.info("finish couch translate keys")
couch translate: added --no-patch flag to skip the 2013 consuntivo patch
# coding: utf-8
import logging
import tortilla
from optparse import make_option
from couchdb.http import ResourceNotFound
from django.core.management import BaseCommand, call_command
from django.conf import settings
from bilanci.utils import couch
from bilanci.utils import gdocs, email_utils
from bilanci.utils.comuni import FLMapper
__author__ = 'guglielmo'
class Command(BaseCommand):
"""
Reads data from a source couchdb instance and produces (upgrades) couchdb destination documents
by translating the document keys according to the content of the gdoc mappings.
Substitutes and supersedes the couchdb_scripts/translate_keys script.
"""
option_list = BaseCommand.option_list + (
make_option('--dry-run',
dest='dryrun',
action='store_true',
default=False,
help='Set the dry-run command mode: nothing is written in the couchdb'),
make_option('--type',
dest='type',
help='Select translation type: [(v)oce | (t)itolo]'),
make_option('--years',
dest='years',
default='',
help='Years to fetch. From 2002 to 2012. Use one of these formats: 2012 or 2003-2006 or 2002,2004,2006'),
make_option('--cities',
dest='cities',
default='',
help='City codes or slugs. Use commas to separate values: Roma,Napoli,Torino or "All"'),
make_option('--couchdb-server',
dest='couchdb_server',
default=settings.COUCHDB_DEFAULT_SERVER,
help='CouchDB server to connect to (defaults to staging).'),
make_option('--skip-existing',
dest='skip_existing',
action='store_true',
default=False,
help='Skip existing documents. Use to speed up long import of many cities, when errors occur'),
make_option('--force-google',
dest='force_google',
action='store_true',
default=False,
help='Force reloading mapping files from gdocs (invalidate the csv cache)'),
make_option('--design-documents',
dest='design_documents',
action='store_true',
default=False,
help='Copy design documents into destination db'),
make_option('--append',
dest='append',
action='store_true',
default=False,
help='Append to the log file instead of overwriting it (used when launching shell scripts)'),
make_option('--no-patch',
dest='no_patch',
action='store_true',
default=False,
help='When translating Voci excludes Patch 2013 Consuntivo mng task (development only)'),
)
help = 'Translate the keys of couchdb documents, normalizing them.'
logger = logging.getLogger('management')
comuni_dicts = {}
docs_bulk = []
bulk_size = 80
couchdb_source = None
couchdb_dest = None
couchdb_dest_tortilla = None
def handle(self, *args, **options):
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
dryrun = options['dryrun']
no_patch = options['no_patch']
if options['append'] is True:
self.logger = logging.getLogger('management_append')
# type option, different values are accepted:
# v, V, voce, Voce, VOCE or
# t, T, titolo, Titolo, TITOLO, Title
if 'type' not in options:
raise Exception("Missing type parameter")
if options['type'].lower()[0] not in ('v', 't'):
raise Exception("Wrong type parameter value (voce|titolo)")
translation_type = options['type'][0].lower()
force_google = options['force_google']
skip_existing = options['skip_existing']
design_documents = options['design_documents']
cities_codes = options['cities']
if not cities_codes:
raise Exception("Missing city parameter")
self.logger.info("Opening Lista Comuni")
mapper = FLMapper()
cities = mapper.get_cities(cities_codes)
if cities_codes.lower() != 'all':
self.logger.info("Processing cities: {0}".format(cities))
years = options['years']
if not years:
raise Exception("Missing years parameter")
if "-" in years:
(start_year, end_year) = years.split("-")
years = range(int(start_year), int(end_year) + 1)
else:
years = [int(y.strip()) for y in years.split(",") if
settings.APP_START_YEAR <= int(y.strip()) <= settings.APP_END_YEAR]
if not years:
raise Exception("No suitable year found in {0}".format(years))
self.logger.info("Processing years: {0}".format(years))
###
# couchdb connections
###
couchdb_server_alias = options['couchdb_server']
# set couch source and destination names
if translation_type == 't':
couchdb_source_name = settings.COUCHDB_RAW_NAME
couchdb_dest_name = settings.COUCHDB_NORMALIZED_TITOLI_NAME
elif translation_type == 'v':
couchdb_source_name = settings.COUCHDB_NORMALIZED_TITOLI_NAME
couchdb_dest_name = settings.COUCHDB_NORMALIZED_VOCI_NAME
else:
self.logger.critical(u"Translation type not accepted:{}".format(translation_type))
exit()
if couchdb_server_alias not in settings.COUCHDB_SERVERS:
raise Exception("Unknown couchdb server alias.")
self.logger.info("Connecting to server: {}".format(couchdb_server_alias, ))
self.logger.info("Connecting source db: {}".format(couchdb_source_name))
try:
self.couchdb_source = couch.connect(
couchdb_source_name,
couchdb_server_settings=settings.COUCHDB_SERVERS[couchdb_server_alias]
)
except ResourceNotFound:
self.logger.error("Could not find source db. Quitting")
return
self.logger.info("Connecting to destination db: {0}".format(couchdb_dest_name))
couchdb_dest_settings = settings.COUCHDB_SERVERS[couchdb_server_alias]
try:
self.couchdb_dest = couch.connect(
couchdb_dest_name,
couchdb_server_settings=couchdb_dest_settings
)
except ResourceNotFound:
self.logger.error("Could not find destination db. Quitting")
return
server_connection_string = "http://{}:{}".format(couchdb_dest_settings['host'], couchdb_dest_settings['port'])
self.couchdb_dest_tortilla = tortilla.wrap(server_connection_string)
self.logger.info("Compact destination db...")
self.couchdb_dest.compact()
self.logger.info("Done")
###
# Mapping files from gdoc
###
# connect to google account and fetch tree mapping and simple tree structure
normalized_map = gdocs.get_normalized_map(translation_type, n_header_lines=2, force_google=force_google)
normalized_titoli_sheet = {'preventivo': [row[2] for row in normalized_map['preventivo']],
'consuntivo': [row[2] for row in normalized_map['consuntivo']],
}
normalized_voci_sheet = {'preventivo': [(row[2], row[3]) for row in normalized_map['preventivo']],
'consuntivo': [(row[2], row[3]) for row in normalized_map['consuntivo']],
}
# copying design documents
if design_documents:
self.logger.info(u"Copying design documents")
source_design_docs = self.couchdb_source.view("_all_docs",
startkey="_design/", endkey="_design0",
include_docs=True
)
for row in source_design_docs.rows:
source_design_doc = row.doc
self.logger.info(u" document id: ".format(source_design_doc.id))
destination_document = {'_id': source_design_doc.id}
destination_document['language'] = source_design_doc['language']
destination_document['views'] = source_design_doc['views']
if not dryrun:
self.couchdb_dest.save(destination_document)
if cities and years:
for city in cities:
self.logger.info(u"Updating {}".format(city))
for year in years:
doc_id = u"{0}_{1}".format(year, city)
if doc_id in self.couchdb_dest and skip_existing:
self.logger.info("Skipping city of {}, as already existing".format(city))
continue
# identify source document or skip
source_document = self.couchdb_source.get(doc_id)
if source_document is None:
self.logger.warning("{0} doc_id not found in source db. skipping.".format(doc_id))
continue
# create destination document, to REPLACE old one
destination_document = {'_id': doc_id, }
# if a doc with that id already exists in the destination db, get its _rev value
# and insert it into the destination document.
# this avoids a document conflict on writing;
# otherwise the old doc would have to be deleted before writing the new one
old_destination_doc = self.couchdb_dest.get(doc_id, None)
if old_destination_doc:
revision = old_destination_doc.get('_rev', None)
if revision:
destination_document['_rev'] = revision
self.logger.debug("Adds rev value to doc")
for bilancio_type in ['preventivo', 'consuntivo']:
if bilancio_type in source_document.keys():
bilancio_object = source_document[bilancio_type]
destination_document[bilancio_type] = {}
for quadro_name, quadro_object in bilancio_object.iteritems():
destination_document[bilancio_type][quadro_name] = {}
for titolo_name, titolo_object in quadro_object.iteritems():
if translation_type == 't':
# for each titolo, apply translation_map, if valid
try:
idx = normalized_titoli_sheet[bilancio_type].index(titolo_name)
titolo_name = normalized_map[bilancio_type][idx][3]
except ValueError:
pass
# create dest doc titolo dictionary
destination_document[bilancio_type][quadro_name][titolo_name] = {}
# copy meta
if 'meta' in titolo_object.keys():
destination_document[bilancio_type][quadro_name][titolo_name]['meta'] = {}
destination_document[bilancio_type][quadro_name][titolo_name]['meta'] = \
titolo_object['meta']
# copy data (normalize voci if needed)
if 'data' in titolo_object.keys():
destination_document[bilancio_type][quadro_name][titolo_name]['data'] = {}
if translation_type == 'v':
# voci translation
for voce_name, voce_obj in titolo_object['data'].iteritems():
# voci are always translated into lowercase, unicode strings
# a leading "- " marker is removed, if present
voce_name = unicode(voce_name.lower())
if voce_name.find("- ") == 0:
voce_name = voce_name.replace("- ", "")
# for each voce, apply translation_map, if valid
try:
idx = normalized_voci_sheet[bilancio_type].index(
(titolo_name, voce_name))
voce_name = normalized_map[bilancio_type][idx][4]
except ValueError:
pass
# create voice dictionary with normalized name
destination_document[bilancio_type][quadro_name][titolo_name]['data'][
voce_name] = {}
destination_document[bilancio_type][quadro_name][titolo_name]['data'][
voce_name] = voce_obj
else:
# copy all voci in data, with no normalization
destination_document[bilancio_type][quadro_name][titolo_name]['data'] = \
titolo_object['data']
# add the document to the list that will be written to couchdb in bulks
self.docs_bulk.append(destination_document)
if len(self.docs_bulk) == self.bulk_size:
if not dryrun:
ret_value = couch.write_bulk(
couchdb_dest=self.couchdb_dest_tortilla,
couchdb_name=couchdb_dest_name,
docs_bulk=self.docs_bulk,
logger=self.logger)
if ret_value is False:
email_utils.send_notification_email(msg_string='Couch translate key has encountered problems')
self.docs_bulk = []
# if the last set was < bulk_size write the last documents
if len(self.docs_bulk) > 0:
if not dryrun:
ret_value = couch.write_bulk(
couchdb_dest=self.couchdb_dest_tortilla,
couchdb_name=couchdb_dest_name,
docs_bulk=self.docs_bulk,
logger=self.logger)
if ret_value is False:
email_utils.send_notification_email(msg_string='Couch translate key has encountered problems')
self.docs_bulk = []
self.logger.info("Compact destination db...")
self.couchdb_dest.compact()
self.logger.info("Done compacting")
if not dryrun and couchdb_dest_name == settings.COUCHDB_NORMALIZED_VOCI_NAME and (settings.INSTANCE_TYPE == 'production' or settings.INSTANCE_TYPE == 'staging') and no_patch is False:
self.logger.info(u"============Run patch 2013 for consuntivo======================")
call_command('consuntivo_13_patch', verbosity=2, interactive=False)
email_utils.send_notification_email(msg_string="Couch translate key has finished")
self.logger.info("finish couch translate keys")
|
Prevent the Pool instance from having 'start' called more than once
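A hedged illustration of the idea only: a guard that stops start() from being called twice on a pool-like object might look roughly like the sketch below; the class and attribute names are assumptions, not the project's actual Pool.
class Pool(object):
    def __init__(self):
        self._started = False

    def start(self):
        # Reject any call after the first one.
        if self._started:
            raise RuntimeError("Pool.start() may only be called once")
        self._started = True
        # ... the real start-up work would go here ...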
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xmlrpclib
import ConfigParser
import optparse
import sys
import thread
import threading
import os
import time
import pickle
import base64
import socket
admin_passwd = 'admin'
waittime = 10
wait_count = 0
wait_limit = 12
def start_server(root_path, port, addons_path):
if root_path:
root_path += '/'
os.system('python2.5 '+root_path+'openerp-server.py --pidfile=openerp.pid --port=%s --no-netrpc --addons-path=%s' %(str(port),addons_path))
def clean():
if os.path.isfile('openerp.pid'):
ps = open('openerp.pid')
if ps:
pid = int(ps.read())
ps.close()
if pid:
os.kill(pid,9)
def execute(connector, method, *args):
global wait_count
res = False
try:
res = getattr(connector,method)(*args)
except socket.error,e:
if e.args[0] == 111:
if wait_count > wait_limit:
print "Server is taking too long to start, it has exceeded the maximum limit of %d seconds."%(wait_limit)
clean()
sys.exit(1)
print 'Waiting %d seconds for the server to start...'%(waittime)
wait_count += 1
time.sleep(waittime)
res = execute(connector, method, *args)
else:
raise e
wait_count = 0
return res
def login(uri, dbname, user, pwd):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
uid = execute(conn,'login',dbname, user, pwd)
return uid
def import_translate(uri, user, pwd, dbname, translate_in):
uid = login(uri, dbname, user, pwd)
if uid:
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
wiz_id = execute(conn,'create',dbname, uid, pwd, 'module.lang.import')
for trans_in in translate_in:
lang,ext = os.path.splitext(trans_in.split('/')[-1])
state = 'init'
datas = {'form':{}}
while state!='end':
res = execute(conn,'execute',dbname, uid, pwd, wiz_id, datas, state, {})
if 'datas' in res:
datas['form'].update( res['datas'].get('form',{}) )
if res['type']=='form':
for field in res['fields'].keys():
datas['form'][field] = res['fields'][field].get('value', False)
state = res['state'][-1][0]
trans_obj = open(trans_in)
datas['form'].update({
'name': lang,
'code': lang,
'data' : base64.encodestring(trans_obj.read())
})
trans_obj.close()
elif res['type']=='action':
state = res['state']
def check_quality(uri, user, pwd, dbname, modules):
uid = login(uri, dbname, user, pwd)
if uid:
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
qualityresult = {}
final = {}
test_detail = {}
for module in modules:
quality_result = execute(conn,'execute', dbname, uid, pwd,'module.quality.check','check_quality',module)
detail_html = ''
html = '''<html><html><html><html><body><a name="TOP"></a>'''
html +="<h1> Module : %s </h1>"%(quality_result['name'])
html += "<h2> Final score : %s</h2>"%(quality_result['final_score'])
html += "<div id='tabs'>"
html += "<ul>"
for x,y,detail in quality_result['check_detail_ids']:
test = detail.get('name')
msg = detail.get('message','')
score = round(float(detail.get('score',0)),2)
html += "<li><a href=\"#%s\">%s</a></li>"%(test.replace(' ','-'),test)
detail_html +="<div id=\"%s\"><h3>%s (Score : %s)</h3>%s</div>"%(test.replace(' ','-'),test,score,detail.get('detail'))
test_detail[test] = (score,msg,detail.get('detail',''))
html += "</ul>%s</body></html></html></html></html></html>"%(detail_html)
html += "</div>"
final[quality_result['name']] = (quality_result['final_score'],html,test_detail)
fp = open('quality_log.pck','wb')
pck_obj = pickle.dump(final,fp)
fp.close()
print "LOG PATH%s"%(os.path.realpath('quality_log.pck'))
return final
else:
print 'Login Failed...'
clean()
sys.exit(1)
def wait(id,url=''):
progress=0.0
sock2 = xmlrpclib.ServerProxy(url+'/db')
while not progress==1.0:
progress,users = execute(sock2,'get_progress',admin_passwd, id)
return True
def create_db(uri, dbname, user='admin', pwd='admin', lang='en_US'):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wiz_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
login_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
db_list = execute(conn, 'list')
if dbname not in db_list:
id = execute(conn,'create',admin_passwd, dbname, True, lang)
wait(id,uri)
uid = login_conn.login(dbname, user, pwd)
wiz_id = execute(wiz_conn,'create', dbname, uid, user, 'base_setup.base_setup')
state = 'init'
datas = {'form':{}}
while state!='config':
res = execute(wiz_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state=='init':
datas['form'].update( res['datas'] )
if res['type']=='form':
for field in res['fields'].keys():
datas['form'][field] = datas['form'].get(field,False)
state = res['state'][-1][0]
datas['form'].update({
'profile': -1
})
elif res['type']=='state':
state = res['state']
res = execute(wiz_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
install_module(uri, dbname, ['base_module_quality'],user=user,pwd=pwd)
return True
def drop_db(uri, dbname):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
db_list = execute(conn,'list')
if dbname in db_list:
execute(conn, 'drop', admin_passwd, dbname)
return True
def make_links(uri, uid, dbname, source, destination, module, user, pwd):
if module in ('base','quality_integration_server'):
return True
if not os.path.islink(destination + module):
if not os.path.isdir(destination + module):
for path in source:
if os.path.isdir(path + '/' + module):
os.symlink(path + '/' + module, destination + '/' + module)
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'update_list')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','=',module)])
if len(module_ids):
data = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'read', module_ids[0],['name','dependencies_id'])
dep_datas = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module.dependency', 'read', data['dependencies_id'],['name'])
for dep_data in dep_datas:
make_links(uri, uid, dbname, source, destination, dep_data['name'], user, pwd)
return True
return False
def install_module(uri, dbname, modules, addons='', extra_addons='', user='admin', pwd='admin'):
uid = login(uri, dbname, user, pwd)
if extra_addons:
extra_addons = extra_addons.split(',')
if uid:
if addons and extra_addons:
for module in modules:
make_links(uri, uid, dbname, extra_addons, addons, module, user, pwd)
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_install', module_ids)
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
state = 'init'
datas = {}
#while state!='menu':
while state!='end':
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state == 'init':
state = 'start'
elif state == 'start':
state = 'end'
return True
def upgrade_module(uri, dbname, modules, user='admin', pwd='admin'):
uid = login(uri, dbname, user, pwd)
if uid:
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_upgrade', module_ids)
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
state = 'init'
datas = {}
#while state!='menu':
while state!='end':
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state == 'init':
state = 'start'
elif state == 'start':
state = 'end'
return True
usage = """%prog command [options]
Basic Commands:
start-server Start Server
create-db Create new database
drop-db Drop database
install-module Install module
upgrade-module Upgrade module
install-translation Install translation file
check-quality Calculate quality and dump quality result into quality_log.pck using pickle
"""
parser = optparse.OptionParser(usage)
parser.add_option("--modules", dest="modules",
help="specify modules to install or check quality")
parser.add_option("--addons-path", dest="addons_path", help="specify the addons path")
parser.add_option("--root-path", dest="root_path", help="specify the root path")
parser.add_option("-p", "--port", dest="port", help="specify the TCP port", type="int")
parser.add_option("-d", "--database", dest="db_name", help="specify the database name")
parser.add_option("--login", dest="login", help="specify the User Login")
parser.add_option("--password", dest="pwd", help="specify the User Password")
parser.add_option("--translate-in", dest="translate_in",
help="specify .po files to import translation terms")
parser.add_option("--extra-addons", dest="extra_addons",
help="specify extra_addons and trunkCommunity modules path ")
(opt, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
command = args[0]
if command not in ('start-server','create-db','drop-db','install-module','upgrade-module','check-quality','install-translation'):
parser.error("incorrect command")
def die(cond, msg):
if cond:
print msg
sys.exit(1)
die(opt.modules and (not opt.db_name),
"the modules option cannot be used without the database (-d) option")
die(opt.translate_in and (not opt.db_name),
"the translate-in option cannot be used without the database (-d) option")
options = {
'addons-path' : opt.addons_path or 'addons',
'root-path' : opt.root_path or '',
'translate-in': opt.translate_in,
'port' : opt.port or 8069,
'database': opt.db_name or 'terp',
'modules' : opt.modules or [],
'login' : opt.login or 'admin',
'pwd' : opt.pwd or '',
'extra-addons':opt.extra_addons or []
}
options['modules'] = opt.modules and map(lambda m: m.strip(), opt.modules.split(',')) or []
options['translate_in'] = opt.translate_in and map(lambda m: m.strip(), opt.translate_in.split(',')) or []
uri = 'http://localhost:' + str(options['port'])
server_thread = threading.Thread(target=start_server,
args=(options['root-path'], options['port'], options['addons-path']))
try:
server_thread.start()
if command == 'create-db':
create_db(uri, options['database'], options['login'], options['pwd'])
if command == 'drop-db':
drop_db(uri, options['database'])
if command == 'install-module':
install_module(uri, options['database'], options['modules'],options['addons-path'],options['extra-addons'],options['login'], options['pwd'])
if command == 'upgrade-module':
upgrade_module(uri, options['database'], options['modules'], options['login'], options['pwd'])
if command == 'check-quality':
check_quality(uri, options['login'], options['pwd'], options['database'], options['modules'])
if command == 'install-translation':
import_translate(uri, options['login'], options['pwd'], options['database'], options['translate_in'])
clean()
sys.exit(0)
except xmlrpclib.Fault, e:
print e.faultString
clean()
sys.exit(1)
except Exception, e:
print e
clean()
sys.exit(1)
[FIX] quality_integration_server: quality log: stop the last test log from overwriting other test logs
bzr revid: hmo@tinyerp.com-20090917044714-onj9maung7o7gi97
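As a minimal illustration of the problem this revision addresses (not code from the repository; the module and test names below are invented): when the per-module dictionaries are created once before the loop, every module's entry in `final` ends up referencing the same `test_detail` object, so the logs of later modules leak into the entries of earlier ones. Re-creating the dictionary inside the loop, as the revised script below does, keeps each module's log separate.

final_shared = {}
final_per_module = {}

test_detail = {}                                   # created once: every module shares this dict
for module, test, score in [('sale', 'PEP-8 Test', 7.5), ('purchase', 'Unit Test', 9.0)]:
    test_detail[test] = score
    final_shared[module] = test_detail

for module, test, score in [('sale', 'PEP-8 Test', 7.5), ('purchase', 'Unit Test', 9.0)]:
    test_detail = {}                               # re-created per module: entries stay isolated
    test_detail[test] = score
    final_per_module[module] = test_detail

print(final_shared['sale'])                        # {'PEP-8 Test': 7.5, 'Unit Test': 9.0} -- leaked entry
print(final_per_module['sale'])                    # {'PEP-8 Test': 7.5}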
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xmlrpclib
import ConfigParser
import optparse
import sys
import thread
import threading
import os
import time
import pickle
import base64
import socket
admin_passwd = 'admin'
waittime = 10
wait_count = 0
wait_limit = 12
def start_server(root_path, port, addons_path):
if root_path:
root_path += '/'
os.system('python2.5 '+root_path+'openerp-server.py --pidfile=openerp.pid --port=%s --no-netrpc --addons-path=%s' %(str(port),addons_path))
def clean():
if os.path.isfile('openerp.pid'):
ps = open('openerp.pid')
if ps:
pid = int(ps.read())
ps.close()
if pid:
os.kill(pid,9)
def execute(connector, method, *args):
global wait_count
res = False
try:
res = getattr(connector,method)(*args)
except socket.error,e:
if e.args[0] == 111:
if wait_count > wait_limit:
print "Server is taking too long to start, it has exceeded the maximum limit of %d seconds."%(wait_limit)
clean()
sys.exit(1)
print 'Please wait %d sec to start server....'%(waittime)
wait_count += 1
time.sleep(waittime)
res = execute(connector, method, *args)
else:
raise e
wait_count = 0
return res
def login(uri, dbname, user, pwd):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
uid = execute(conn,'login',dbname, user, pwd)
return uid
def import_translate(uri, user, pwd, dbname, translate_in):
uid = login(uri, dbname, user, pwd)
if uid:
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
wiz_id = execute(conn,'create',dbname, uid, pwd, 'module.lang.import')
for trans_in in translate_in:
lang,ext = os.path.splitext(trans_in.split('/')[-1])
state = 'init'
datas = {'form':{}}
while state!='end':
res = execute(conn,'execute',dbname, uid, pwd, wiz_id, datas, state, {})
if 'datas' in res:
datas['form'].update( res['datas'].get('form',{}) )
if res['type']=='form':
for field in res['fields'].keys():
datas['form'][field] = res['fields'][field].get('value', False)
state = res['state'][-1][0]
trans_obj = open(trans_in)
datas['form'].update({
'name': lang,
'code': lang,
'data' : base64.encodestring(trans_obj.read())
})
trans_obj.close()
elif res['type']=='action':
state = res['state']
def check_quality(uri, user, pwd, dbname, modules):
uid = login(uri, dbname, user, pwd)
if uid:
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
final = {}
for module in modules:
qualityresult = {}
test_detail = {}
quality_result = execute(conn,'execute', dbname, uid, pwd,'module.quality.check','check_quality',module)
detail_html = ''
html = '''<html><html><html><html><body><a name="TOP"></a>'''
html +="<h1> Module : %s </h1>"%(quality_result['name'])
html += "<h2> Final score : %s</h2>"%(quality_result['final_score'])
html += "<div id='tabs'>"
html += "<ul>"
for x,y,detail in quality_result['check_detail_ids']:
test = detail.get('name')
msg = detail.get('message','')
score = round(float(detail.get('score',0)),2)
html += "<li><a href=\"#%s\">%s</a></li>"%(test.replace(' ','-'),test)
if test == 'Unit Test':
if not detail.get('detail',''):
detail['detail'] = '''<html><body><b>%s</b></body></html>'''%(detail.get('summary',''))
detail_html +="<div id=\"%s\"><h3>%s (Score : %s)</h3>%s</div>"%(test.replace(' ','-'),test,score,detail.get('detail',''))
test_detail[test] = (score,msg,detail.get('detail',''))
html += "</ul>%s</body></html></html></html></html></html>"%(detail_html)
html += "</div>"
final[quality_result['name']] = (quality_result['final_score'],html,test_detail)
fp = open('quality_log.pck','wb')
pck_obj = pickle.dump(final,fp)
fp.close()
print "LOG PATH%s"%(os.path.realpath('quality_log.pck'))
return final
else:
print 'Login Failed...'
clean()
sys.exit(1)
def wait(id,url=''):
progress=0.0
sock2 = xmlrpclib.ServerProxy(url+'/db')
while not progress==1.0:
progress,users = execute(sock2,'get_progress',admin_passwd, id)
return True
def create_db(uri, dbname, user='admin', pwd='admin', lang='en_US'):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wiz_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
login_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
db_list = execute(conn, 'list')
if dbname not in db_list:
id = execute(conn,'create',admin_passwd, dbname, True, lang)
wait(id,uri)
uid = login_conn.login(dbname, user, pwd)
wiz_id = execute(wiz_conn,'create', dbname, uid, user, 'base_setup.base_setup')
state = 'init'
datas = {'form':{}}
while state!='config':
res = execute(wiz_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state=='init':
datas['form'].update( res['datas'] )
if res['type']=='form':
for field in res['fields'].keys():
datas['form'][field] = datas['form'].get(field,False)
state = res['state'][-1][0]
datas['form'].update({
'profile': -1
})
elif res['type']=='state':
state = res['state']
res = execute(wiz_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
install_module(uri, dbname, ['base_module_quality'],user=user,pwd=pwd)
return True
def drop_db(uri, dbname):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
db_list = execute(conn,'list')
if dbname in db_list:
execute(conn, 'drop', admin_passwd, dbname)
return True
def make_links(uri, uid, dbname, source, destination, module, user, pwd):
if module in ('base','quality_integration_server'):
return True
if not os.path.islink(destination + module):
if not os.path.isdir(destination + module):
for path in source:
if os.path.isdir(path + '/' + module):
os.symlink(path + '/' + module, destination + '/' + module)
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'update_list')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','=',module)])
if len(module_ids):
data = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'read', module_ids[0],['name','dependencies_id'])
dep_datas = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module.dependency', 'read', data['dependencies_id'],['name'])
for dep_data in dep_datas:
make_links(uri, uid, dbname, source, destination, dep_data['name'], user, pwd)
return True
return False
def install_module(uri, dbname, modules, addons='', extra_addons='', user='admin', pwd='admin'):
uid = login(uri, dbname, user, pwd)
if extra_addons:
extra_addons = extra_addons.split(',')
if uid:
if addons and extra_addons:
for module in modules:
make_links(uri, uid, dbname, extra_addons, addons, module, user, pwd)
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_install', module_ids)
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
state = 'init'
datas = {}
#while state!='menu':
while state!='end':
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state == 'init':
state = 'start'
elif state == 'start':
state = 'end'
return True
def upgrade_module(uri, dbname, modules, user='admin', pwd='admin'):
uid = login(uri, dbname, user, pwd)
if uid:
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_upgrade', module_ids)
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
state = 'init'
datas = {}
#while state!='menu':
while state!='end':
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state == 'init':
state = 'start'
elif state == 'start':
state = 'end'
return True
usage = """%prog command [options]
Basic Commands:
start-server Start Server
create-db Create new database
drop-db Drop database
install-module Install module
upgrade-module Upgrade module
install-translation Install translation file
check-quality Calculate quality and dump quality result into quality_log.pck using pickle
"""
parser = optparse.OptionParser(usage)
parser.add_option("--modules", dest="modules",
help="specify modules to install or check quality")
parser.add_option("--addons-path", dest="addons_path", help="specify the addons path")
parser.add_option("--root-path", dest="root_path", help="specify the root path")
parser.add_option("-p", "--port", dest="port", help="specify the TCP port", type="int")
parser.add_option("-d", "--database", dest="db_name", help="specify the database name")
parser.add_option("--login", dest="login", help="specify the User Login")
parser.add_option("--password", dest="pwd", help="specify the User Password")
parser.add_option("--translate-in", dest="translate_in",
help="specify .po files to import translation terms")
parser.add_option("--extra-addons", dest="extra_addons",
help="specify extra_addons and trunkCommunity modules path ")
(opt, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
command = args[0]
if command not in ('start-server','create-db','drop-db','install-module','upgrade-module','check-quality','install-translation'):
parser.error("incorrect command")
def die(cond, msg):
if cond:
print msg
sys.exit(1)
die(opt.modules and (not opt.db_name),
"the modules option cannot be used without the database (-d) option")
die(opt.translate_in and (not opt.db_name),
"the translate-in option cannot be used without the database (-d) option")
options = {
'addons-path' : opt.addons_path or 'addons',
'root-path' : opt.root_path or '',
'translate-in': opt.translate_in,
'port' : opt.port or 8069,
'database': opt.db_name or 'terp',
'modules' : opt.modules or [],
'login' : opt.login or 'admin',
'pwd' : opt.pwd or '',
'extra-addons':opt.extra_addons or []
}
options['modules'] = opt.modules and map(lambda m: m.strip(), opt.modules.split(',')) or []
options['translate_in'] = opt.translate_in and map(lambda m: m.strip(), opt.translate_in.split(',')) or []
uri = 'http://localhost:' + str(options['port'])
server_thread = threading.Thread(target=start_server,
args=(options['root-path'], options['port'], options['addons-path']))
try:
server_thread.start()
if command == 'create-db':
create_db(uri, options['database'], options['login'], options['pwd'])
if command == 'drop-db':
drop_db(uri, options['database'])
if command == 'install-module':
install_module(uri, options['database'], options['modules'],options['addons-path'],options['extra-addons'],options['login'], options['pwd'])
if command == 'upgrade-module':
upgrade_module(uri, options['database'], options['modules'], options['login'], options['pwd'])
if command == 'check-quality':
check_quality(uri, options['login'], options['pwd'], options['database'], options['modules'])
if command == 'install-translation':
import_translate(uri, options['login'], options['pwd'], options['database'], options['translate_in'])
clean()
sys.exit(0)
except xmlrpclib.Fault, e:
print e.faultString
clean()
sys.exit(1)
except Exception, e:
print e
clean()
sys.exit(1)
#!/usr/bin/env python3
import os
import sys
import urllib.request
from socket import timeout
def main(api_key):
if len(sys.argv) == 3: query = sys.argv[2].replace(' ', '%20')
else: return print("No query was given.")
opening = "<plaintext>"
closure = "</plaintext>"
# weather
if sys.argv[1] == '-w':
url = "http://api.wolframalpha.com/v1/query?appid={}&input=weather%20{}".format(api_key, query)
try: resp = str( urllib.request.urlopen(url, timeout=12).read() )
except timeout: return print("Request timed out")
title_hook = resp.find(opening)
if title_hook != -1:
title = resp[title_hook+11:resp.find(closure)] + "\n"
hook = resp.find('"InstantaneousWeather:WeatherData"')
result = resp[resp.find(opening, hook)+11:resp.find(closure, hook)]
# cleaning up result because urllib...
result = result.replace(" \\xc2\\xb0C", "°").replace("\\n", "\n")
print(title.replace(" |", ":") + result.replace(" |", ":"))
else:
print("Not found.")
# population
elif sys.argv[1] == '-p':
url = "http://api.wolframalpha.com/v1/query?appid={}&input=population%20{}".format(api_key, query)
try: resp = str( urllib.request.urlopen(url, timeout=12).read() )
except timeout: return print("Request timed out")
title_opening = resp.find(opening)
if title_opening != -1:
title_closure = resp.find(closure)
title = resp[title_opening+11:title_closure] + "\n"
result = resp[resp.find(opening, title_opening+1)+11:resp.find(closure, title_closure+1)]
print(title.replace(" |", ":") + result)
else:
print("Not found.")
else:
print("No valid argument found.")
if __name__ == '__main__':
api_key = os.environ['WOLFRAMALPHA_API_KEY']
main(api_key)
Weather parsing major fix
The `hook` was `"InstantaneousWeather:WeatherData"`, with double quotes around it, when it should have been just `InstantaneousWeather:WeatherData`.
This caused a parsing malfunction, most probably because the WolframAlpha API changed its output format (`id='InstantaneousWeather:WeatherData'` instead of `id="InstantaneousWeather:WeatherData"`).
In addition, a hook check (`if hook != -1`) has been added to guard against any further unexpected malfunction.
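A minimal standalone sketch of the change (the response string below is invented, not a real API reply): the quote-sensitive search returns -1 once the API emits single-quoted attributes, while the unquoted hook still matches, and the added `-1` guard avoids slicing from a bogus position.

# Hypothetical response fragment using the API's newer single-quoted id attribute.
resp = "<pod id='InstantaneousWeather:WeatherData'><plaintext>temperature | 20 C</plaintext></pod>"

quoted_hook = resp.find('"InstantaneousWeather:WeatherData"')   # -1: the double quotes no longer match
plain_hook = resp.find('InstantaneousWeather:WeatherData')      # matches regardless of the quoting style

if plain_hook != -1:
    start = resp.find('<plaintext>', plain_hook) + len('<plaintext>')
    end = resp.find('</plaintext>', plain_hook)
    print(resp[start:end])                                       # -> temperature | 20 C
else:
    print('No weather data pod found.')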
#!/usr/bin/env python3
import os
import sys
import urllib.request
from socket import timeout
def main(api_key):
if len(sys.argv) == 3: query = sys.argv[2].replace(' ', '%20')
else: return print("No query was given.")
opening = "<plaintext>"
closure = "</plaintext>"
# weather
if sys.argv[1] == '-w':
url = "http://api.wolframalpha.com/v1/query?appid={}&input=weather%20{}".format(api_key, query)
try: resp = str( urllib.request.urlopen(url, timeout=12).read() )
except timeout: return print("Request timed out")
title_hook = resp.find(opening)
if title_hook != -1:
title = resp[title_hook+11:resp.find(closure)] + "\n"
hook = resp.find('InstantaneousWeather:WeatherData')
if hook != -1:
result = resp[resp.find(opening, hook)+11:resp.find(closure, hook)]
# cleaning up result because urllib...
result = result.replace(" \\xc2\\xb0C", "°").replace("\\n", "\n")
print(title.replace(" |", ":") + result.replace(" |", ":"))
else:
print("No weather found for result '{}'.".format(title.replace(" |", ":")))
else:
print("Not found.")
# population
elif sys.argv[1] == '-p':
url = "http://api.wolframalpha.com/v1/query?appid={}&input=population%20{}".format(api_key, query)
try: resp = str( urllib.request.urlopen(url, timeout=12).read() )
except timeout: return print("Request timed out")
title_opening = resp.find(opening)
if title_opening != -1:
title_closure = resp.find(closure)
title = resp[title_opening+11:title_closure] + "\n"
result = resp[resp.find(opening, title_opening+1)+11:resp.find(closure, title_closure+1)]
print(title.replace(" |", ":") + result)
else:
print("Not found.")
else:
print("No valid argument found.")
if __name__ == '__main__':
api_key = os.environ['WOLFRAMALPHA_API_KEY']
main(api_key)
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xmlrpclib
import ConfigParser
import optparse
import sys
import thread
import threading
import os
import time
import pickle
import base64
import socket
admin_passwd = 'admin'
waittime = 10
wait_count = 0
wait_limit = 12
def start_server(root_path, port, addons_path):
os.system('python2.5 %sopenerp-server.py --pidfile=openerp.pid --port=%s --no-netrpc --addons-path=%s' %(root_path, str(port), addons_path))
def clean():
if os.path.isfile('openerp.pid'):
ps = open('openerp.pid')
if ps:
pid = int(ps.read())
ps.close()
if pid:
os.kill(pid,9)
def execute(connector, method, *args):
global wait_count
res = False
try:
res = getattr(connector,method)(*args)
except socket.error,e:
if e.args[0] == 111:
if wait_count > wait_limit:
print "Server is taking too long to start, it has exceeded the maximum limit of %d seconds."%(wait_limit)
clean()
sys.exit(1)
print 'Please wait %d sec to start server....'%(waittime)
wait_count += 1
time.sleep(waittime)
res = execute(connector, method, *args)
else:
raise e
wait_count = 0
return res
def login(uri, dbname, user, pwd):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
uid = execute(conn,'login',dbname, user, pwd)
return uid
def import_translate(uri, user, pwd, dbname, translate_in):
uid = login(uri, dbname, user, pwd)
if uid:
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
wiz_id = execute(conn,'create',dbname, uid, pwd, 'module.lang.import')
for trans_in in translate_in:
lang,ext = os.path.splitext(trans_in.split('/')[-1])
state = 'init'
datas = {'form':{}}
while state!='end':
res = execute(conn,'execute',dbname, uid, pwd, wiz_id, datas, state, {})
if 'datas' in res:
datas['form'].update( res['datas'].get('form',{}) )
if res['type']=='form':
for field in res['fields'].keys():
datas['form'][field] = res['fields'][field].get('value', False)
state = res['state'][-1][0]
trans_obj = open(trans_in)
datas['form'].update({
'name': lang,
'code': lang,
'data' : base64.encodestring(trans_obj.read())
})
trans_obj.close()
elif res['type']=='action':
state = res['state']
def check_quality(uri, user, pwd, dbname, modules):
uid = login(uri, dbname, user, pwd)
if uid:
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
final = {}
for module in modules:
qualityresult = {}
test_detail = {}
quality_result = execute(conn,'execute', dbname, uid, pwd,'module.quality.check','check_quality',module)
detail_html = ''
html = '''<html><html><html><html><body><a name="TOP"></a>'''
html +="<h1> Module : %s </h1>"%(quality_result['name'])
html += "<h2> Final score : %s</h2>"%(quality_result['final_score'])
html += "<div id='tabs'>"
html += "<ul>"
for x,y,detail in quality_result['check_detail_ids']:
test = detail.get('name')
msg = detail.get('message','')
score = round(float(detail.get('score',0)),2)
html += "<li><a href=\"#%s\">%s</a></li>"%(test.replace(' ','-'),test)
if test == 'Unit Test':
if not detail.get('detail',''):
detail['detail'] = '''<html><body><b>%s</b></body></html>'''%(detail.get('summary',''))
detail_html +="<div id=\"%s\"><h3>%s (Score : %s)</h3>%s</div>"%(test.replace(' ','-'),test,score,detail.get('detail',''))
test_detail[test] = (score,msg,detail.get('detail',''))
html += "</ul>%s</body></html></html></html></html></html>"%(detail_html)
html += "</div>"
final[quality_result['name']] = (quality_result['final_score'],html,test_detail)
fp = open('quality_log.pck','wb')
pck_obj = pickle.dump(final,fp)
fp.close()
print "LOG PATH%s"%(os.path.realpath('quality_log.pck'))
return final
else:
print 'Login Failed...'
clean()
sys.exit(1)
def wait(id,url=''):
progress=0.0
sock2 = xmlrpclib.ServerProxy(url+'/db')
while not progress==1.0:
progress,users = execute(sock2,'get_progress',admin_passwd, id)
return True
def create_db(uri, dbname, user='admin', pwd='admin', lang='en_US'):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wiz_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
login_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
db_list = execute(conn, 'list')
if dbname not in db_list:
id = execute(conn,'create',admin_passwd, dbname, True, lang)
wait(id,uri)
uid = login_conn.login(dbname, user, pwd)
wiz_id = execute(wiz_conn,'create', dbname, uid, user, 'base_setup.base_setup')
state = 'init'
datas = {'form':{}}
while state!='config':
res = execute(wiz_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state=='init':
datas['form'].update( res['datas'] )
if res['type']=='form':
for field in res['fields'].keys():
datas['form'][field] = datas['form'].get(field,False)
state = res['state'][-1][0]
datas['form'].update({
'profile': -1
})
elif res['type']=='state':
state = res['state']
res = execute(wiz_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
install_module(uri, dbname, ['base_module_quality'],user=user,pwd=pwd)
return True
def drop_db(uri, dbname):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
db_list = execute(conn,'list')
if dbname in db_list:
execute(conn, 'drop', admin_passwd, dbname)
return True
def make_links(uri, uid, dbname, source, destination, module, user, pwd):
if module in ('base','quality_integration_server'):
return True
if not os.path.islink(destination + '/' + module):
if not os.path.isdir(destination + '/' + module):
for path in source:
if os.path.isdir(path + '/' + module):
os.symlink(path + '/' + module, destination + '/' + module)
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'update_list')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','=',module)])
if len(module_ids):
data = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'read', module_ids[0],['name','dependencies_id'])
dep_datas = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module.dependency', 'read', data['dependencies_id'],['name'])
for dep_data in dep_datas:
make_links(uri, uid, dbname, source, destination, dep_data['name'], user, pwd)
return True
return False
def install_module(uri, dbname, modules, addons='', extra_addons='', user='admin', pwd='admin'):
uid = login(uri, dbname, user, pwd)
if extra_addons:
extra_addons = extra_addons.split(',')
if uid:
if addons and extra_addons:
for module in modules:
make_links(uri, uid, dbname, extra_addons, addons, module, user, pwd)
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_install', module_ids)
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
state = 'init'
datas = {}
#while state!='menu':
while state!='end':
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state == 'init':
state = 'start'
elif state == 'start':
state = 'end'
return True
def upgrade_module(uri, dbname, modules, user='admin', pwd='admin'):
uid = login(uri, dbname, user, pwd)
if uid:
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_upgrade', module_ids)
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
state = 'init'
datas = {}
#while state!='menu':
while state!='end':
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state == 'init':
state = 'start'
elif state == 'start':
state = 'end'
return True
usage = """%prog command [options]
Basic Commands:
start-server Start Server
create-db Create new database
drop-db Drop database
install-module Install module
upgrade-module Upgrade module
install-translation Install translation file
check-quality Calculate quality and dump quality result into quality_log.pck using pickle
"""
parser = optparse.OptionParser(usage)
parser.add_option("--modules", dest="modules",
help="specify modules to install or check quality")
parser.add_option("--addons-path", dest="addons_path", help="specify the addons path")
parser.add_option("--root-path", dest="root_path", help="specify the root path")
parser.add_option("-p", "--port", dest="port", help="specify the TCP port", type="int")
parser.add_option("-d", "--database", dest="db_name", help="specify the database name")
parser.add_option("--login", dest="login", help="specify the User Login")
parser.add_option("--password", dest="pwd", help="specify the User Password")
parser.add_option("--translate-in", dest="translate_in",
help="specify .po files to import translation terms")
parser.add_option("--extra-addons", dest="extra_addons",
help="specify extra_addons and trunkCommunity modules path ")
(opt, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
command = args[0]
if command not in ('start-server','create-db','drop-db','install-module','upgrade-module','check-quality','install-translation'):
parser.error("incorrect command")
def die(cond, msg):
if cond:
print msg
sys.exit(1)
die(opt.modules and (not opt.db_name),
"the modules option cannot be used without the database (-d) option")
die(opt.translate_in and (not opt.db_name),
"the translate-in option cannot be used without the database (-d) option")
options = {
'addons-path' : opt.addons_path or 'addons',
'root-path' : opt.root_path or '',
'translate-in': [],
'port' : opt.port or 8069,
'database': opt.db_name or 'terp',
'modules' : opt.modules or [],
'login' : opt.login or 'admin',
'pwd' : opt.pwd or '',
'extra-addons':opt.extra_addons or []
}
options['modules'] = opt.modules and map(lambda m: m.strip(), opt.modules.split(',')) or []
# Hint:i18n-import=purchase:ar_AR.po+sale:fr_FR.po,nl_BE.po
if opt.translate_in:
translate = opt.translate_in
for module_name,po_files in map(lambda x:tuple(x.split(':')),translate.split('+')):
for po_file in po_files.split(','):
po_link = '%s/%s/i18n/%s'%(options['addons-path'], module_name, po_file)
options['translate-in'].append(po_link)
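# Using the hint's own example with the default addons-path of 'addons', the value
# purchase:ar_AR.po+sale:fr_FR.po,nl_BE.po expands into these options['translate-in'] entries:
#   addons/purchase/i18n/ar_AR.po
#   addons/sale/i18n/fr_FR.po
#   addons/sale/i18n/nl_BE.po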
uri = 'http://localhost:' + str(options['port'])
server_thread = threading.Thread(target=start_server,
args=(options['root-path'], options['port'], options['addons-path']))
try:
server_thread.start()
if command == 'create-db':
create_db(uri, options['database'], options['login'], options['pwd'])
if command == 'drop-db':
drop_db(uri, options['database'])
if command == 'install-module':
install_module(uri, options['database'], options['modules'],options['addons-path'],options['extra-addons'],options['login'], options['pwd'])
if command == 'upgrade-module':
upgrade_module(uri, options['database'], options['modules'], options['login'], options['pwd'])
if command == 'check-quality':
check_quality(uri, options['login'], options['pwd'], options['database'], options['modules'])
if command == 'install-translation':
import_translate(uri, options['login'], options['pwd'], options['database'], options['translate-in'])
clean()
sys.exit(0)
except xmlrpclib.Fault, e:
print e.faultString
clean()
sys.exit(1)
except Exception, e:
print e
clean()
sys.exit(1)
[IMP] quality_integration_server: quality log: add a new option to specify the quality-log path where the HTML log pages are stored
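A rough sketch of what the new behaviour amounts to (the directory and report contents are made up; this is not the script itself): rather than pickling everything into a single quality_log.pck, the revised check_quality writes one HTML page per module under a directory built from the new --quality-logs option.

import os

def write_quality_pages(quality_logs, reports):
    # reports maps a module name to its HTML report string;
    # quality_logs is the value of --quality-logs (the script appends 'quality-logs' to it).
    out_dir = quality_logs + 'quality-logs'
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    for module, html in reports.items():
        with open(os.path.join(out_dir, '%s.html' % module), 'w') as fp:
            fp.write(html)

write_quality_pages('/tmp/', {'base_module_quality': '<html><body>Final score : 8.0</body></html>'})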
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xmlrpclib
import ConfigParser
import optparse
import sys
import thread
import threading
import os
import time
import pickle
import base64
import socket
admin_passwd = 'admin'
waittime = 10
wait_count = 0
wait_limit = 12
def start_server(root_path, port, addons_path):
os.system('python2.5 %sopenerp-server.py --pidfile=openerp.pid --port=%s --no-netrpc --addons-path=%s' %(root_path, str(port), addons_path))
def clean():
if os.path.isfile('openerp.pid'):
ps = open('openerp.pid')
if ps:
pid = int(ps.read())
ps.close()
if pid:
os.kill(pid,9)
def execute(connector, method, *args):
global wait_count
res = False
try:
res = getattr(connector,method)(*args)
except socket.error,e:
if e.args[0] == 111:
if wait_count > wait_limit:
print "Server is taking too long to start, it has exceeded the maximum limit of %d seconds."%(wait_limit)
clean()
sys.exit(1)
print 'Please wait %d sec to start server....'%(waittime)
wait_count += 1
time.sleep(waittime)
res = execute(connector, method, *args)
else:
raise e
wait_count = 0
return res
def login(uri, dbname, user, pwd):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
uid = execute(conn,'login',dbname, user, pwd)
return uid
def import_translate(uri, user, pwd, dbname, translate_in):
uid = login(uri, dbname, user, pwd)
if uid:
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
wiz_id = execute(conn,'create',dbname, uid, pwd, 'module.lang.import')
for trans_in in translate_in:
lang,ext = os.path.splitext(trans_in.split('/')[-1])
state = 'init'
datas = {'form':{}}
while state!='end':
res = execute(conn,'execute',dbname, uid, pwd, wiz_id, datas, state, {})
if 'datas' in res:
datas['form'].update( res['datas'].get('form',{}) )
if res['type']=='form':
for field in res['fields'].keys():
datas['form'][field] = res['fields'][field].get('value', False)
state = res['state'][-1][0]
trans_obj = open(trans_in)
datas['form'].update({
'name': lang,
'code': lang,
'data' : base64.encodestring(trans_obj.read())
})
trans_obj.close()
elif res['type']=='action':
state = res['state']
def check_quality(uri, user, pwd, dbname, modules, quality_logs):
uid = login(uri, dbname, user, pwd)
quality_logs += 'quality-logs'
if uid:
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
final = {}
for module in modules:
qualityresult = {}
test_detail = {}
quality_result = execute(conn,'execute', dbname, uid, pwd,'module.quality.check','check_quality',module)
detail_html = ''
html = '''<html><body><a name="TOP"></a>'''
html +="<h1> Module : %s </h1>"%(quality_result['name'])
html += "<h2> Final score : %s</h2>"%(quality_result['final_score'])
html += "<div id='tabs'>"
html += "<ul>"
for x,y,detail in quality_result['check_detail_ids']:
test = detail.get('name')
msg = detail.get('message','')
score = round(float(detail.get('score',0)),2)
html += "<li><a href=\"#%s\">%s</a></li>"%(test.replace(' ','-'),test)
if test == 'Unit Test':
if not detail.get('detail',''):
detail['detail'] = '''<html><body><b>%s</b></body></html>'''%(detail.get('summary',''))
detail_html +="<div id=\"%s\"><h3>%s (Score : %s)</h3>%s</div>"%(test.replace(' ','-'),test,score,detail.get('detail',''))
test_detail[test] = (score,msg,detail.get('detail',''))
html += "</ul>"
html += "%s"%(detail_html)
html += "</div></body></html>"
if not os.path.isdir(quality_logs):
os.mkdir(quality_logs)
fp = open('%s/%s.html'%(quality_logs,module),'wb')
fp.write(str(html))
fp.close()
#final[quality_result['name']] = (quality_result['final_score'],html,test_detail)
#fp = open('quality_log.pck','wb')
#pck_obj = pickle.dump(final,fp)
#fp.close()
#print "LOG PATH%s"%(os.path.realpath('quality_log.pck'))
return True
else:
print 'Login Failed...'
clean()
sys.exit(1)
def wait(id,url=''):
progress=0.0
sock2 = xmlrpclib.ServerProxy(url+'/db')
while not progress==1.0:
progress,users = execute(sock2,'get_progress',admin_passwd, id)
return True
def create_db(uri, dbname, user='admin', pwd='admin', lang='en_US'):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wiz_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
login_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/common')
db_list = execute(conn, 'list')
if dbname not in db_list:
id = execute(conn,'create',admin_passwd, dbname, True, lang)
wait(id,uri)
uid = login_conn.login(dbname, user, pwd)
wiz_id = execute(wiz_conn,'create', dbname, uid, user, 'base_setup.base_setup')
state = 'init'
datas = {'form':{}}
while state!='config':
res = execute(wiz_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state=='init':
datas['form'].update( res['datas'] )
if res['type']=='form':
for field in res['fields'].keys():
datas['form'][field] = datas['form'].get(field,False)
state = res['state'][-1][0]
datas['form'].update({
'profile': -1
})
elif res['type']=='state':
state = res['state']
res = execute(wiz_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
install_module(uri, dbname, ['base_module_quality'],user=user,pwd=pwd)
return True
def drop_db(uri, dbname):
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
db_list = execute(conn,'list')
if dbname in db_list:
execute(conn, 'drop', admin_passwd, dbname)
return True
def make_links(uri, uid, dbname, source, destination, module, user, pwd):
if module in ('base','quality_integration_server'):
return True
if not os.path.islink(destination + '/' + module):
if not os.path.isdir(destination + '/' + module):
for path in source:
if os.path.isdir(path + '/' + module):
os.symlink(path + '/' + module, destination + '/' + module)
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'update_list')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','=',module)])
if len(module_ids):
data = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'read', module_ids[0],['name','dependencies_id'])
dep_datas = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module.dependency', 'read', data['dependencies_id'],['name'])
for dep_data in dep_datas:
make_links(uri, uid, dbname, source, destination, dep_data['name'], user, pwd)
return True
return False
def install_module(uri, dbname, modules, addons='', extra_addons='', user='admin', pwd='admin'):
uid = login(uri, dbname, user, pwd)
if extra_addons:
extra_addons = extra_addons.split(',')
if uid:
if addons and extra_addons:
for module in modules:
make_links(uri, uid, dbname, extra_addons, addons, module, user, pwd)
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_install', module_ids)
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
state = 'init'
datas = {}
#while state!='menu':
while state!='end':
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state == 'init':
state = 'start'
elif state == 'start':
state = 'end'
return True
def upgrade_module(uri, dbname, modules, user='admin', pwd='admin'):
uid = login(uri, dbname, user, pwd)
if uid:
obj_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/object')
wizard_conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/wizard')
module_ids = execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'search', [('name','in',modules)])
execute(obj_conn, 'execute', dbname, uid, pwd, 'ir.module.module', 'button_upgrade', module_ids)
wiz_id = execute(wizard_conn, 'create', dbname, uid, pwd, 'module.upgrade.simple')
state = 'init'
datas = {}
#while state!='menu':
while state!='end':
res = execute(wizard_conn, 'execute', dbname, uid, pwd, wiz_id, datas, state, {})
if state == 'init':
state = 'start'
elif state == 'start':
state = 'end'
return True
usage = """%prog command [options]
Basic Commands:
start-server Start Server
create-db Create new database
drop-db Drop database
install-module Install module
upgrade-module Upgrade module
install-translation Install translation file
check-quality Calculate quality and dump quality result into quality_log.pck using pickle
"""
parser = optparse.OptionParser(usage)
parser.add_option("--modules", dest="modules",
help="specify modules to install or check quality")
parser.add_option("--addons-path", dest="addons_path", help="specify the addons path")
parser.add_option("--quality-logs", dest="quality_logs", help="specify the path of quality logs files which has to stores")
parser.add_option("--root-path", dest="root_path", help="specify the root path")
parser.add_option("-p", "--port", dest="port", help="specify the TCP port", type="int")
parser.add_option("-d", "--database", dest="db_name", help="specify the database name")
parser.add_option("--login", dest="login", help="specify the User Login")
parser.add_option("--password", dest="pwd", help="specify the User Password")
parser.add_option("--translate-in", dest="translate_in",
help="specify .po files to import translation terms")
parser.add_option("--extra-addons", dest="extra_addons",
help="specify extra_addons and trunkCommunity modules path ")
(opt, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
command = args[0]
if command not in ('start-server','create-db','drop-db','install-module','upgrade-module','check-quality','install-translation'):
parser.error("incorrect command")
def die(cond, msg):
if cond:
print msg
sys.exit(1)
die(opt.modules and (not opt.db_name),
"the modules option cannot be used without the database (-d) option")
die(opt.translate_in and (not opt.db_name),
"the translate-in option cannot be used without the database (-d) option")
options = {
'addons-path' : opt.addons_path or 'addons',
'quality-logs' : opt.quality_logs or '',
'root-path' : opt.root_path or '',
'translate-in': [],
'port' : opt.port or 8069,
'database': opt.db_name or 'terp',
'modules' : opt.modules or [],
'login' : opt.login or 'admin',
'pwd' : opt.pwd or '',
'extra-addons':opt.extra_addons or []
}
options['modules'] = opt.modules and map(lambda m: m.strip(), opt.modules.split(',')) or []
# Hint:i18n-import=purchase:ar_AR.po+sale:fr_FR.po,nl_BE.po
if opt.translate_in:
translate = opt.translate_in
for module_name,po_files in map(lambda x:tuple(x.split(':')),translate.split('+')):
for po_file in po_files.split(','):
po_link = '%s/%s/i18n/%s'%(options['addons-path'], module_name, po_file)
options['translate-in'].append(po_link)
uri = 'http://localhost:' + str(options['port'])
server_thread = threading.Thread(target=start_server,
args=(options['root-path'], options['port'], options['addons-path']))
try:
server_thread.start()
if command == 'create-db':
create_db(uri, options['database'], options['login'], options['pwd'])
if command == 'drop-db':
drop_db(uri, options['database'])
if command == 'install-module':
install_module(uri, options['database'], options['modules'],options['addons-path'],options['extra-addons'],options['login'], options['pwd'])
if command == 'upgrade-module':
upgrade_module(uri, options['database'], options['modules'], options['login'], options['pwd'])
if command == 'check-quality':
check_quality(uri, options['login'], options['pwd'], options['database'], options['modules'], options['quality-logs'])
if command == 'install-translation':
import_translate(uri, options['login'], options['pwd'], options['database'], options['translate-in'])
clean()
sys.exit(0)
except xmlrpclib.Fault, e:
print e.faultString
clean()
sys.exit(1)
except Exception, e:
print e
clean()
sys.exit(1)
#!/usr/bin/python -OO
# -*- coding: utf-8 -*-
#
# This file is part of Archivematica.
#
# Copyright 2010-2011 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage Ingest
# @author Joseph Perry <joseph@artefactual.com>
# @version svn: $Id$
from archivematicaXMLNamesSpace import *
import lxml.etree as etree
from xml.sax.saxutils import quoteattr
import os
import sys
import MySQLdb
from archivematicaCreateMETSRights import archivematicaGetRights
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
import databaseInterface
from archivematicaFunctions import escape
from archivematicaFunctions import unicodeToStr
from archivematicaFunctions import strToUnicode
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", "--baseDirectoryPath", action="store", dest="baseDirectoryPath", default="")
parser.add_option("-b", "--baseDirectoryPathString", action="store", dest="baseDirectoryPathString", default="SIPDirectory") #transferDirectory/
parser.add_option("-f", "--fileGroupIdentifier", action="store", dest="fileGroupIdentifier", default="") #transferUUID/sipUUID
parser.add_option("-t", "--fileGroupType", action="store", dest="fileGroupType", default="sipUUID") #
parser.add_option("-x", "--xmlFile", action="store", dest="xmlFile", default="")
parser.add_option("-a", "--amdSec", action="store_true", dest="amdSec", default=False)
(opts, args) = parser.parse_args()
baseDirectoryPath = opts.baseDirectoryPath
XMLFile = opts.xmlFile
includeAmdSec = opts.amdSec
baseDirectoryPathString = "%%%s%%" % (opts.baseDirectoryPathString)
fileGroupIdentifier = opts.fileGroupIdentifier
fileGroupType = opts.fileGroupType
includeAmdSec = opts.amdSec
#Global Variables
globalFileGrps = {}
globalFileGrpsUses = ["original", "submissionDocumentation", "preservation", "service", "access", "license", "text/ocr"]
for use in globalFileGrpsUses:
grp = etree.Element("fileGrp")
grp.set("USE", use)
globalFileGrps[use] = grp
##counters
#globalCounter = 0
global globalErrorCount
globalErrorCount = 0
global amdSecs
amdSecs = []
global dmdSecs
dmdSecs = []
global globalDmdSecCounter
globalDmdSecCounter = 0
global globalAmdSecCounter
globalAmdSecCounter = 0
global globalTechMDCounter
globalTechMDCounter = 0
global globalRightsMDCounter
globalRightsMDCounter = 0
global globalDigiprovMDCounter
globalDigiprovMDCounter = 0
#GROUPID="G1" -> GROUPID="Group-%object's UUID%"
##group of the object and its related access, license
#move to common
def newChild(parent, tag, text=None, tailText=None, sets=[]):
child = etree.Element(tag)
parent.append(child)
child.text = strToUnicode(text)
if tailText:
child.tail = strToUnicode(tailText)
for set in sets:
key, value = set
child.set(key, value)
return child
def createAgent(agentIdentifierType, agentIdentifierValue, agentName, agentType):
ret = etree.Element("agent")
agentIdentifier = etree.SubElement( ret, "agentIdentifier")
etree.SubElement( agentIdentifier, "agentIdentifierType").text = agentIdentifierType
etree.SubElement( agentIdentifier, "agentIdentifierValue").text = agentIdentifierValue
etree.SubElement( ret, "agentName").text = agentName
etree.SubElement( ret, "agentType").text = agentType
return ret
SIPMetadataAppliesToType = 1
TransferMetadataAppliesToType = 2
FileMetadataAppliesToType = 3
def getDublinCore(type_, id):
sql = """SELECT title, creator, subject, description, publisher, contributor, date, type, format, identifier, source, isPartOf, language, coverage, rights
FROM Dublincore WHERE metadataAppliesToType = %s AND metadataAppliesToidentifier = '%s';""" % \
(type_.__str__(), id.__str__())
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
if row == None:
sqlLock.release()
return None
ret = etree.Element( "dublincore", nsmap = {None: dcNS, "dcterms": dctermsNS} )
ret.set(xsiBNS+"schemaLocation", dcNS + " http://dublincore.org/schemas/xmls/qdc/dc.xsd " + dctermsNS + " http://dublincore.org/schemas/xmls/qdc/2008/02/11/dcterms.xsd")
dctermsElements= ["isPartOf"]
while row != None:
key = ["title", "creator", "subject", "description", "publisher", "contributor", "date", "type", "format", "identifier", "source", "isPartOf", "language", "coverage", "rights"]
#title, creator, subject, description, publisher, contributor, date, type, format, identifier, source, isPartOf, language, coverage, rights = row
#key.index("title") == title
i = 0
for term in key:
if row[i] != None:
txt = row[i]
else:
txt = ""
if term in dctermsElements:
etree.SubElement(ret, dctermsBNS + term).text = txt
else:
newChild(ret, term, text=txt)
i+=1
row = c.fetchone()
sqlLock.release()
return ret
def createDublincoreDMDSec(type, id):
dc = getDublinCore(type, id)
if dc == None:
return None
global globalDmdSecCounter
globalDmdSecCounter += 1
dmdSec = etree.Element("dmdSec")
ID = "dmdSec_" + globalDmdSecCounter.__str__()
dmdSec.set("ID", ID)
mdWrap = newChild(dmdSec, "mdWrap")
mdWrap.set("MDTYPE", "DC")
xmlData = newChild(mdWrap, "xmlData")
xmlData.append(dc)
return (dmdSec, ID)
def createMDRefDMDSec(LABEL, itemdirectoryPath, directoryPathSTR):
global globalDmdSecCounter
globalDmdSecCounter += 1
dmdSec = etree.Element("dmdSec")
ID = "dmdSec_" + globalDmdSecCounter.__str__()
dmdSec.set("ID", ID)
XPTR = "xpointer(id("
tree = etree.parse(itemdirectoryPath)
root = tree.getroot()
for item in root.findall("{http://www.loc.gov/METS/}dmdSec"):
XPTR = "%s %s" % (XPTR, item.get("ID"))
XPTR = XPTR.replace(" ", "'", 1) + "'))"
newChild(dmdSec, "mdRef", text=None, sets=[("LABEL", LABEL), (xlinkBNS +"href", directoryPathSTR), ("MDTYPE", "OTHER"), ("LOCTYPE","OTHER"), ("OTHERLOCTYPE", "SYSTEM"), ("XPTR", XPTR)])
return (dmdSec, ID)
def createTechMD(fileUUID):
ret = etree.Element("techMD")
techMD = ret #newChild(amdSec, "digiprovMD")
#digiprovMD.set("ID", "digiprov-"+ os.path.basename(filename) + "-" + fileUUID)
global globalTechMDCounter
globalTechMDCounter += 1
techMD.set("ID", "techMD_"+ globalTechMDCounter.__str__())
mdWrap = newChild(techMD,"mdWrap")
mdWrap.set("MDTYPE", "PREMIS:OBJECT")
xmlData = newChild(mdWrap, "xmlData")
#premis = etree.SubElement( xmlData, "premis", nsmap={None: premisNS}, \
# attrib = { "{" + xsiNS + "}schemaLocation" : "info:lc/xmlns/premis-v2 http://www.loc.gov/standards/premis/premis.xsd" })
#premis.set("version", "2.0")
#premis = etree.SubElement( xmlData, "premis", attrib = {xsiBNS+"type": "premis:file"})
sql = "SELECT fileSize, checksum FROM Files WHERE fileUUID = '%s';" % (fileUUID)
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
fileSize = row[0].__str__()
checksum = row[1].__str__()
row = c.fetchone()
sqlLock.release()
#OBJECT
object = etree.SubElement(xmlData, "object", nsmap={None: premisNS})
object.set( xsiBNS+"type", "file")
object.set(xsiBNS+"schemaLocation", premisNS + " http://www.loc.gov/standards/premis/v2/premis-v2-1.xsd")
object.set("version", "2.1")
objectIdentifier = etree.SubElement(object, "objectIdentifier")
etree.SubElement(objectIdentifier, "objectIdentifierType").text = "UUID"
etree.SubElement(objectIdentifier, "objectIdentifierValue").text = fileUUID
#etree.SubElement(object, "objectCategory").text = "file"
objectCharacteristics = etree.SubElement(object, "objectCharacteristics")
etree.SubElement(objectCharacteristics, "compositionLevel").text = "0"
fixity = etree.SubElement(objectCharacteristics, "fixity")
etree.SubElement(fixity, "messageDigestAlgorithm").text = "sha256"
etree.SubElement(fixity, "messageDigest").text = checksum
etree.SubElement(objectCharacteristics, "size").text = fileSize
sql = "SELECT formatName, formatVersion, formatRegistryName, formatRegistryKey FROM FilesIDs WHERE fileUUID = '%s';" % (fileUUID)
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
if not row:
format = etree.SubElement(objectCharacteristics, "format")
formatDesignation = etree.SubElement(format, "formatDesignation")
etree.SubElement(formatDesignation, "formatName").text = "Unknown"
while row != None:
#print row
format = etree.SubElement(objectCharacteristics, "format")
#fileUUID = row[0]
formatDesignation = etree.SubElement(format, "formatDesignation")
etree.SubElement(formatDesignation, "formatName").text = row[0]
etree.SubElement(formatDesignation, "formatVersion").text = row[1]
formatRegistry = etree.SubElement(format, "formatRegistry")
etree.SubElement(formatRegistry, "formatRegistryName").text = row[2]
etree.SubElement(formatRegistry, "formatRegistryKey").text = row[3]
row = c.fetchone()
sqlLock.release()
objectCharacteristicsExtension = etree.SubElement(objectCharacteristics, "objectCharacteristicsExtension")
sql = "SELECT FilesFits.FITSxml FROM FilesFits WHERE fileUUID = '" + fileUUID + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
#if not row:
# print >>sys.stderr, "Error no fits.", fileUUID
parser = etree.XMLParser(remove_blank_text=True)
while row != None:
#fits = etree.fromstring(row[0])
fits = etree.XML(row[0], parser)
objectCharacteristicsExtension.append(fits)
row = c.fetchone()
sqlLock.release()
sql = "SELECT Files.originalLocation FROM Files WHERE Files.fileUUID = '" + fileUUID + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
if not row:
print >>sys.stderr, "Error no fits."
while row != None:
etree.SubElement(object, "originalName").text = escape(row[0])
row = c.fetchone()
sqlLock.release()
#Derivations
sql = "SELECT sourceFileUUID, derivedFileUUID, relatedEventUUID FROM Derivations WHERE sourceFileUUID = '" + fileUUID + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
relationship = etree.SubElement(object, "relationship")
etree.SubElement(relationship, "relationshipType").text = "derivation"
etree.SubElement(relationship, "relationshipSubType").text = "is source of"
relatedObjectIdentification = etree.SubElement(relationship, "relatedObjectIdentification")
etree.SubElement(relatedObjectIdentification, "relatedObjectIdentifierType").text = "UUID"
etree.SubElement(relatedObjectIdentification, "relatedObjectIdentifierValue").text = row[1]
relatedEventIdentification = etree.SubElement(relationship, "relatedEventIdentification")
etree.SubElement(relatedEventIdentification, "relatedEventIdentifierType").text = "UUID"
etree.SubElement(relatedEventIdentification, "relatedEventIdentifierValue").text = row[2]
row = c.fetchone()
sqlLock.release()
sql = "SELECT sourceFileUUID, derivedFileUUID, relatedEventUUID FROM Derivations WHERE derivedFileUUID = '" + fileUUID + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
relationship = etree.SubElement(object, "relationship")
etree.SubElement(relationship, "relationshipType").text = "derivation"
etree.SubElement(relationship, "relationshipSubType").text = "has source"
relatedObjectIdentification = etree.SubElement(relationship, "relatedObjectIdentification")
etree.SubElement(relatedObjectIdentification, "relatedObjectIdentifierType").text = "UUID"
etree.SubElement(relatedObjectIdentification, "relatedObjectIdentifierValue").text = row[0]
relatedEventIdentification = etree.SubElement(relationship, "relatedEventIdentification")
etree.SubElement(relatedEventIdentification, "relatedEventIdentifierType").text = "UUID"
etree.SubElement(relatedEventIdentification, "relatedEventIdentifierValue").text = row[2]
row = c.fetchone()
sqlLock.release()
return ret
def createDigiprovMD(fileUUID):
ret = []
#EVENTS
#| pk | fileUUID | eventIdentifierUUID | eventType | eventDateTime | eventDetail | eventOutcome | eventOutcomeDetailNote | linkingAgentIdentifier |
sql = "SELECT * FROM Events WHERE fileUUID = '" + fileUUID + "';"
rows = databaseInterface.queryAllSQL(sql)
for row in rows:
digiprovMD = etree.Element("digiprovMD")
ret.append(digiprovMD) #newChild(amdSec, "digiprovMD")
#digiprovMD.set("ID", "digiprov-"+ os.path.basename(filename) + "-" + fileUUID)
global globalDigiprovMDCounter
globalDigiprovMDCounter += 1
digiprovMD.set("ID", "digiprovMD_"+ globalDigiprovMDCounter.__str__())
mdWrap = newChild(digiprovMD,"mdWrap")
mdWrap.set("MDTYPE", "PREMIS:EVENT")
xmlData = newChild(mdWrap,"xmlData")
event = etree.SubElement(xmlData, "event", nsmap={None: premisNS})
event.set(xsiBNS+"schemaLocation", premisNS + " http://www.loc.gov/standards/premis/v2/premis-v2-1.xsd")
event.set("version", "2.1")
eventIdentifier = etree.SubElement(event, "eventIdentifier")
etree.SubElement(eventIdentifier, "eventIdentifierType").text = "UUID"
etree.SubElement(eventIdentifier, "eventIdentifierValue").text = row[2]
etree.SubElement(event, "eventType").text = row[3]
etree.SubElement(event, "eventDateTime").text = row[4].__str__().replace(" ", "T")
etree.SubElement(event, "eventDetail").text = escape(row[5])
eventOutcomeInformation = etree.SubElement(event, "eventOutcomeInformation")
etree.SubElement(eventOutcomeInformation, "eventOutcome").text = row[6]
eventOutcomeDetail = etree.SubElement(eventOutcomeInformation, "eventOutcomeDetail")
etree.SubElement(eventOutcomeDetail, "eventOutcomeDetailNote").text = escape(row[7])
#linkingAgentIdentifier
sql = """SELECT agentIdentifierType, agentIdentifierValue, agentName, agentType FROM Agents;"""
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
linkingAgentIdentifier = etree.SubElement(event, "linkingAgentIdentifier")
etree.SubElement(linkingAgentIdentifier, "linkingAgentIdentifierType").text = row[0]
etree.SubElement(linkingAgentIdentifier, "linkingAgentIdentifierValue").text = row[1]
row = c.fetchone()
sqlLock.release()
return ret
def createDigiprovMDAgents():
ret = []
#AGENTS
sql = """SELECT agentIdentifierType, agentIdentifierValue, agentName, agentType FROM Agents;"""
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
global globalDigiprovMDCounter
globalDigiprovMDCounter += 1
digiprovMD = etree.Element("digiprovMD")
digiprovMD.set("ID", "digiprovMD_"+ globalDigiprovMDCounter.__str__())
ret.append(digiprovMD) #newChild(amdSec, "digiprovMD")
mdWrap = newChild(digiprovMD,"mdWrap")
mdWrap.set("MDTYPE", "PREMIS:AGENT")
xmlData = newChild(mdWrap,"xmlData")
#agents = etree.SubElement(xmlData, "agents")
xmlData.append(createAgent(row[0], row[1], row[2], row[3]))
row = c.fetchone()
sqlLock.release()
return ret
def getAMDSec(fileUUID, filePath, use, type, id, transferUUID):
global globalAmdSecCounter
global globalRightsMDCounter
globalAmdSecCounter += 1
AMDID = "amdSec_%s" % (globalAmdSecCounter.__str__())
AMD = etree.Element("amdSec")
AMD.set("ID", AMDID)
ret = (AMD, AMDID)
#tech MD
#digiprob MD
AMD.append(createTechMD(fileUUID))
if use == "original":
metadataAppliesToList = [(fileUUID, FileMetadataAppliesToType), (fileGroupIdentifier, SIPMetadataAppliesToType), (transferUUID.__str__(), TransferMetadataAppliesToType)]
for a in archivematicaGetRights(metadataAppliesToList, fileUUID):
globalRightsMDCounter +=1
rightsMD = etree.SubElement(AMD, "rightsMD")
rightsMD.set("ID", "rightsMD_" + globalRightsMDCounter.__str__())
mdWrap = newChild(rightsMD,"mdWrap")
mdWrap.set("MDTYPE", "PREMIS:RIGHTS")
xmlData = newChild(mdWrap, "xmlData")
xmlData.append(a)
for a in createDigiprovMD(fileUUID):
AMD.append(a)
for a in createDigiprovMDAgents():
AMD.append(a)
return ret
#DMDID="dmdSec_01" for an object goes in here
#<file ID="file1-UUID" GROUPID="G1" DMDID="dmdSec_02" ADMID="amdSec_01">
def createFileSec(directoryPath, structMapDiv):
delayed = []
filesInThisDirectory = []
dspaceMetsDMDID = None
for item in os.listdir(directoryPath):
itemdirectoryPath = os.path.join(directoryPath, item)
if os.path.isdir(itemdirectoryPath):
delayed.append(item)
elif os.path.isfile(itemdirectoryPath):
#myuuid = uuid.uuid4()
myuuid=""
#directoryPathSTR = itemdirectoryPath.replace(baseDirectoryPath + "objects", "objects", 1)
directoryPathSTR = itemdirectoryPath.replace(baseDirectoryPath, baseDirectoryPathString, 1)
sql = """SELECT fileUUID, fileGrpUse, transferUUID FROM Files WHERE removedTime = 0 AND %s = '%s' AND Files.currentLocation = '%s';""" % (fileGroupType, fileGroupIdentifier, MySQLdb.escape_string(directoryPathSTR))
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
if row == None:
print >>sys.stderr, "No uuid for file: \"", directoryPathSTR, "\""
global globalErrorCount
globalErrorCount += 1
sqlLock.release()
continue
while row != None:
myuuid = row[0]
use = row[1]
transferUUID = row[2]
row = c.fetchone()
sqlLock.release()
filename = ''.join(quoteattr(item).split("\"")[1:-1])
directoryPathSTR = itemdirectoryPath.replace(baseDirectoryPath, "", 1)
#print filename, directoryPathSTR
FILEID="%s-%s" % (item, myuuid)
if FILEID[0].isdigit():
FILEID = "_" + FILEID
#<fptr FILEID="file1-UUID"/>
newChild(structMapDiv, "fptr", sets=[("FILEID",FILEID)])
GROUPID=""
if use == "original" or use == "submissionDocumentation":
GROUPID = "Group-%s" % (myuuid)
if use == "preservation":
sql = "SELECT * FROM Derivations WHERE derivedFileUUID = '" + myuuid + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
GROUPID = "Group-%s" % (row[1])
row = c.fetchone()
sqlLock.release()
elif use == "license" or use == "text/ocr" or use == "DSPACEMETS":
sql = """SELECT originalLocation FROM Files where fileUUID = '%s'""" % (myuuid)
originalLocation = databaseInterface.queryAllSQL(sql)[0][0]
sql = """SELECT fileUUID FROM Files WHERE removedTime = 0 AND %s = '%s' AND fileGrpUse = 'original' AND originalLocation LIKE '%s/%%'""" % (fileGroupType, fileGroupIdentifier, MySQLdb.escape_string(os.path.dirname(originalLocation)).replace("%", "%%"))
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
GROUPID = "Group-%s" % (row[0])
row = c.fetchone()
sqlLock.release()
elif use == "service":
fileFileIDPath = itemdirectoryPath.replace(baseDirectoryPath + "objects/service/", baseDirectoryPathString + "objects/")
objectNameExtensionIndex = fileFileIDPath.rfind(".")
fileFileIDPath = fileFileIDPath[:objectNameExtensionIndex + 1]
sql = """SELECT fileUUID FROM Files WHERE removedTime = 0 AND %s = '%s' AND fileGrpUse = 'original' AND currentLocation LIKE '%s%%'""" % (fileGroupType, fileGroupIdentifier, MySQLdb.escape_string(fileFileIDPath.replace("%", "%%")))
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
GROUPID = "Group-%s" % (row[0])
row = c.fetchone()
sqlLock.release()
if transferUUID:
sql = "SELECT type FROM Transfers WHERE transferUUID = '%s';" % (transferUUID)
rows = databaseInterface.queryAllSQL(sql)
if rows[0][0] == "Dspace1.7":
if use == "original":
print "original"
elif use == "DSPACEMETS":
use = "submissionDocumentation"
admidApplyTo = None
if GROUPID=="": #is an AIP identifier
GROUPID = myuuid
admidApplyTo = structMapDiv.getparent()
LABEL = "mets.xml-%s" % (GROUPID)
dmdSec, ID = createMDRefDMDSec(LABEL, itemdirectoryPath, directoryPathSTR)
dmdSecs.append(dmdSec)
if admidApplyTo != None:
admidApplyTo.set("ADMID", ID)
else:
dspaceMetsDMDID = ID
if GROUPID=="":
globalErrorCount += 1
print >>sys.stderr, "No groupID for file: \"", directoryPathSTR, "\""
if use not in globalFileGrps:
print >>sys.stderr, "Invalid use: \"", use, "\""
globalErrorCount += 1
else:
file = newChild(globalFileGrps[use], "file", sets=[("ID",FILEID), ("GROUPID",GROUPID)])
if use == "original":
filesInThisDirectory.append(file)
#<Flocat xlink:href="objects/file1-UUID" locType="other" otherLocType="system"/>
Flocat = newChild(file, "FLocat", sets=[(xlinkBNS +"href",directoryPathSTR), ("LOCTYPE","OTHER"), ("OTHERLOCTYPE", "SYSTEM")])
if includeAmdSec:
AMD, ADMID = getAMDSec(myuuid, directoryPathSTR, use, fileGroupType, fileGroupIdentifier, transferUUID)
global amdSecs
amdSecs.append(AMD)
file.set("ADMID", ADMID)
#fileI = etree.SubElement( structMapDiv, xlinkBNS + "fits", nsmap=NSMAP)
#filename = replace /tmp/"UUID" with /objects/
#fileI.set("ID", "file-" + item.__str__() + "-" + myuuid.__str__())
#fileI.set("ADMID", "digiprov-" + item.__str__() + "-" + myuuid.__str__())
#Flocat = newChild(fileI, "Flocat")
#Flocat.set(xlinkBNS + "href", directoryPathSTR )
#Flocat.set("locType", "other")
#Flocat.set("otherLocType", "system")
# structMap file
#div = newChild(structMapDiv, "div")
#fptr = newChild(div, "fptr")
#fptr.set("FILEID","file-" + item.__str__() + "-" + myuuid.__str__())
if dspaceMetsDMDID != None:
for file in filesInThisDirectory:
file.set("DMDID", dspaceMetsDMDID)
for item in delayed:
itemdirectoryPath = os.path.join(directoryPath, item)
createFileSec(itemdirectoryPath, newChild(structMapDiv, "div", sets=[("TYPE","directory"), ("LABEL",item)]))
if __name__ == '__main__':
while False: #used to stall the mcp and stop the client for testing this module
import time
time.sleep(10)
if False: #True: #insert sample dc for testing
sql = """ INSERT INTO Dublincore (metadataAppliesToType, metadataAppliesToidentifier, title, creator, subject, description, publisher, contributor, date, type, format, identifier, source, isPartOf, language, coverage, rights)
VALUES (1, '%s', "Je l'apprécititle3", "Je l'apprécicreator4", "Je l'apprécisubject5", "Je l'apprécidescription6", "Je l'apprécipublisher7", "Je l'apprécicontributor8", "Je l'apprécidate9", "Je l'apprécitype0", "Je l'appréciformat11", "Je l'appréciidentifier12", "Je l'apprécisource13", "Je l'appréciisPartOf14", "Je l'apprécilanguage15", "Je l'apprécicoverage16", "Je l'apprécirights17"); """ % (fileGroupIdentifier)
#VALUES (1, '%s', "title3", "creator4", "subject5", "description6", "publisher7", "contributor8", "date9", "type0", "format11", "identifier12", "source13", "isPartOf14", "language15", "coverage16", "rights17"); """ % (fileGroupIdentifier)
databaseInterface.runSQL(sql)
if not baseDirectoryPath.endswith('/'):
baseDirectoryPath += '/'
structMap = etree.Element("structMap")
structMap.set("TYPE", "physical")
structMapDiv = newChild(structMap, "div", sets=[("TYPE","directory"), ("LABEL","%s-%s" % (os.path.basename(baseDirectoryPath[:-1]), fileGroupIdentifier))])
#dmdSec, dmdSecID = createDublincoreDMDSec(SIP)
structMapDiv = newChild(structMapDiv, "div", sets=[("TYPE","directory"), ("LABEL","objects") ])
createFileSec(os.path.join(baseDirectoryPath, "objects"), structMapDiv)
fileSec = etree.Element( "fileSec")
for group in globalFileGrpsUses: #globalFileGrps.itervalues():
grp = globalFileGrps[group]
if len(grp) > 0:
fileSec.append(grp)
rootNSMap = {None: metsNS}
rootNSMap.update(NSMAP)
root = etree.Element( "mets", \
nsmap = rootNSMap, \
attrib = { "{" + xsiNS + "}schemaLocation" : "http://www.loc.gov/METS/ http://www.loc.gov/standards/mets/version18/mets.xsd" } )
dc = createDublincoreDMDSec(SIPMetadataAppliesToType, fileGroupIdentifier)
if dc != None:
(dmdSec, ID) = dc
structMapDiv.set("DMDID", ID)
root.append(dmdSec)
for dmdSec in dmdSecs:
root.append(dmdSec)
for amdSec in amdSecs:
root.append(amdSec)
root.append(fileSec)
root.append(structMap)
if False: #debug
print etree.tostring(root, pretty_print=True)
#<div TYPE="directory" LABEL="AIP1-UUID">
#<div TYPE="directory" LABEL="objects" DMDID="dmdSec_01">
#Recursive function for creating structmap and fileSec
tree = etree.ElementTree(root)
#tree.write(XMLFile)
tree.write(XMLFile, pretty_print=True, xml_declaration=True)
writeTestXMLFile = True
if writeTestXMLFile:
import cgi
fileName = XMLFile + ".validatorTester.html"
fileContents = """<html>
<body>
<form method="post" action="http://pim.fcla.edu/validate/results">
<label for="document">Enter XML Document:</label>
<br/>
<textarea id="directinput" rows="12" cols="76" name="document">%s</textarea>
<br/>
<br/>
<input type="submit" value="Validate" />
<br/>
</form>
</body>
</html>""" % (cgi.escape(etree.tostring(root, pretty_print=True, xml_declaration=True)))
f = open(fileName, 'w')
f.write(fileContents)
f.close()
exit(globalErrorCount)
Issue 862: ADMID in structMap should be DMDID (DSpace export
Autoconverted from SVN (revision:2179)
#!/usr/bin/python -OO
# -*- coding: utf-8 -*-
#
# This file is part of Archivematica.
#
# Copyright 2010-2011 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage Ingest
# @author Joseph Perry <joseph@artefactual.com>
# @version svn: $Id$
from archivematicaXMLNamesSpace import *
import lxml.etree as etree
from xml.sax.saxutils import quoteattr
import os
import sys
import MySQLdb
from archivematicaCreateMETSRights import archivematicaGetRights
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
import databaseInterface
from archivematicaFunctions import escape
from archivematicaFunctions import unicodeToStr
from archivematicaFunctions import strToUnicode
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", "--baseDirectoryPath", action="store", dest="baseDirectoryPath", default="")
parser.add_option("-b", "--baseDirectoryPathString", action="store", dest="baseDirectoryPathString", default="SIPDirectory") #transferDirectory/
parser.add_option("-f", "--fileGroupIdentifier", action="store", dest="fileGroupIdentifier", default="") #transferUUID/sipUUID
parser.add_option("-t", "--fileGroupType", action="store", dest="fileGroupType", default="sipUUID") #
parser.add_option("-x", "--xmlFile", action="store", dest="xmlFile", default="")
parser.add_option("-a", "--amdSec", action="store_true", dest="amdSec", default=False)
(opts, args) = parser.parse_args()
baseDirectoryPath = opts.baseDirectoryPath
XMLFile = opts.xmlFile
includeAmdSec = opts.amdSec
baseDirectoryPathString = "%%%s%%" % (opts.baseDirectoryPathString)
fileGroupIdentifier = opts.fileGroupIdentifier
fileGroupType = opts.fileGroupType
includeAmdSec = opts.amdSec
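# Example invocation (a sketch only; the script name, paths and UUID are
# hypothetical and serve purely to illustrate the options parsed above):
#   python archivematicaCreateMETS.py -s /path/to/mySIP/ -b SIPDirectory \
#       -f 11111111-1111-1111-1111-111111111111 -t sipUUID \
#       -x /path/to/mySIP/METS.xml -a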
#Global Variables
globalFileGrps = {}
globalFileGrpsUses = ["original", "submissionDocumentation", "preservation", "service", "access", "license", "text/ocr"]
for use in globalFileGrpsUses:
grp = etree.Element("fileGrp")
grp.set("USE", use)
globalFileGrps[use] = grp
##counters
#globalCounter = 0
global globalErrorCount
globalErrorCount = 0
global amdSecs
amdSecs = []
global dmdSecs
dmdSecs = []
global globalDmdSecCounter
globalDmdSecCounter = 0
global globalAmdSecCounter
globalAmdSecCounter = 0
global globalTechMDCounter
globalTechMDCounter = 0
global globalRightsMDCounter
globalRightsMDCounter = 0
global globalDigiprovMDCounter
globalDigiprovMDCounter = 0
#GROUPID="G1" -> GROUPID="Group-%object's UUID%"
##group of the object and its related access, license
#move to common
def newChild(parent, tag, text=None, tailText=None, sets=[]):
child = etree.Element(tag)
parent.append(child)
child.text = strToUnicode(text)
if tailText:
child.tail = strToUnicode(tailText)
for set in sets:
key, value = set
child.set(key, value)
return child
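# Example (illustrative values): newChild(div, "fptr", sets=[("FILEID", "file-1")])
# appends <fptr FILEID="file-1"/> under div and returns the new element.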
def createAgent(agentIdentifierType, agentIdentifierValue, agentName, agentType):
ret = etree.Element("agent")
agentIdentifier = etree.SubElement( ret, "agentIdentifier")
etree.SubElement( agentIdentifier, "agentIdentifierType").text = agentIdentifierType
etree.SubElement( agentIdentifier, "agentIdentifierValue").text = agentIdentifierValue
etree.SubElement( ret, "agentName").text = agentName
etree.SubElement( ret, "agentType").text = agentType
return ret
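# createAgent() returns a PREMIS agent element shaped like this
# (placeholders shown; the real values come from the Agents table):
#   <agent>
#     <agentIdentifier>
#       <agentIdentifierType>...</agentIdentifierType>
#       <agentIdentifierValue>...</agentIdentifierValue>
#     </agentIdentifier>
#     <agentName>...</agentName>
#     <agentType>...</agentType>
#   </agent>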
SIPMetadataAppliesToType = 1
TransferMetadataAppliesToType = 2
FileMetadataAppliesToType = 3
def getDublinCore(type_, id):
sql = """SELECT title, creator, subject, description, publisher, contributor, date, type, format, identifier, source, isPartOf, language, coverage, rights
FROM Dublincore WHERE metadataAppliesToType = %s AND metadataAppliesToidentifier = '%s';""" % \
(type_.__str__(), id.__str__())
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
if row == None:
sqlLock.release()
return None
ret = etree.Element( "dublincore", nsmap = {None: dcNS, "dcterms": dctermsNS} )
ret.set(xsiBNS+"schemaLocation", dcNS + " http://dublincore.org/schemas/xmls/qdc/dc.xsd " + dctermsNS + " http://dublincore.org/schemas/xmls/qdc/2008/02/11/dcterms.xsd")
dctermsElements= ["isPartOf"]
while row != None:
key = ["title", "creator", "subject", "description", "publisher", "contributor", "date", "type", "format", "identifier", "source", "isPartOf", "language", "coverage", "rights"]
#title, creator, subject, description, publisher, contributor, date, type, format, identifier, source, isPartOf, language, coverage, rights = row
#key.index("title") == title
i = 0
for term in key:
if row[i] != None:
txt = row[i]
else:
txt = ""
if term in dctermsElements:
etree.SubElement(ret, dctermsBNS + term).text = txt
else:
newChild(ret, term, text=txt)
i+=1
row = c.fetchone()
sqlLock.release()
return ret
def createDublincoreDMDSec(type, id):
dc = getDublinCore(type, id)
if dc == None:
return None
global globalDmdSecCounter
globalDmdSecCounter += 1
dmdSec = etree.Element("dmdSec")
ID = "dmdSec_" + globalDmdSecCounter.__str__()
dmdSec.set("ID", ID)
mdWrap = newChild(dmdSec, "mdWrap")
mdWrap.set("MDTYPE", "DC")
xmlData = newChild(mdWrap, "xmlData")
xmlData.append(dc)
return (dmdSec, ID)
def createMDRefDMDSec(LABEL, itemdirectoryPath, directoryPathSTR):
global globalDmdSecCounter
globalDmdSecCounter += 1
dmdSec = etree.Element("dmdSec")
ID = "dmdSec_" + globalDmdSecCounter.__str__()
dmdSec.set("ID", ID)
XPTR = "xpointer(id("
tree = etree.parse(itemdirectoryPath)
root = tree.getroot()
for item in root.findall("{http://www.loc.gov/METS/}dmdSec"):
XPTR = "%s %s" % (XPTR, item.get("ID"))
XPTR = XPTR.replace(" ", "'", 1) + "'))"
newChild(dmdSec, "mdRef", text=None, sets=[("LABEL", LABEL), (xlinkBNS +"href", directoryPathSTR), ("MDTYPE", "OTHER"), ("LOCTYPE","OTHER"), ("OTHERLOCTYPE", "SYSTEM"), ("XPTR", XPTR)])
return (dmdSec, ID)
def createTechMD(fileUUID):
ret = etree.Element("techMD")
techMD = ret #newChild(amdSec, "digiprovMD")
#digiprovMD.set("ID", "digiprov-"+ os.path.basename(filename) + "-" + fileUUID)
global globalTechMDCounter
globalTechMDCounter += 1
techMD.set("ID", "techMD_"+ globalTechMDCounter.__str__())
mdWrap = newChild(techMD,"mdWrap")
mdWrap.set("MDTYPE", "PREMIS:OBJECT")
xmlData = newChild(mdWrap, "xmlData")
#premis = etree.SubElement( xmlData, "premis", nsmap={None: premisNS}, \
# attrib = { "{" + xsiNS + "}schemaLocation" : "info:lc/xmlns/premis-v2 http://www.loc.gov/standards/premis/premis.xsd" })
#premis.set("version", "2.0")
#premis = etree.SubElement( xmlData, "premis", attrib = {xsiBNS+"type": "premis:file"})
sql = "SELECT fileSize, checksum FROM Files WHERE fileUUID = '%s';" % (fileUUID)
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
fileSize = row[0].__str__()
checksum = row[1].__str__()
row = c.fetchone()
sqlLock.release()
#OBJECT
object = etree.SubElement(xmlData, "object", nsmap={None: premisNS})
object.set( xsiBNS+"type", "file")
object.set(xsiBNS+"schemaLocation", premisNS + " http://www.loc.gov/standards/premis/v2/premis-v2-1.xsd")
object.set("version", "2.1")
objectIdentifier = etree.SubElement(object, "objectIdentifier")
etree.SubElement(objectIdentifier, "objectIdentifierType").text = "UUID"
etree.SubElement(objectIdentifier, "objectIdentifierValue").text = fileUUID
#etree.SubElement(object, "objectCategory").text = "file"
objectCharacteristics = etree.SubElement(object, "objectCharacteristics")
etree.SubElement(objectCharacteristics, "compositionLevel").text = "0"
fixity = etree.SubElement(objectCharacteristics, "fixity")
etree.SubElement(fixity, "messageDigestAlgorithm").text = "sha256"
etree.SubElement(fixity, "messageDigest").text = checksum
etree.SubElement(objectCharacteristics, "size").text = fileSize
sql = "SELECT formatName, formatVersion, formatRegistryName, formatRegistryKey FROM FilesIDs WHERE fileUUID = '%s';" % (fileUUID)
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
if not row:
format = etree.SubElement(objectCharacteristics, "format")
formatDesignation = etree.SubElement(format, "formatDesignation")
etree.SubElement(formatDesignation, "formatName").text = "Unknown"
while row != None:
#print row
format = etree.SubElement(objectCharacteristics, "format")
#fileUUID = row[0]
formatDesignation = etree.SubElement(format, "formatDesignation")
etree.SubElement(formatDesignation, "formatName").text = row[0]
etree.SubElement(formatDesignation, "formatVersion").text = row[1]
formatRegistry = etree.SubElement(format, "formatRegistry")
etree.SubElement(formatRegistry, "formatRegistryName").text = row[2]
etree.SubElement(formatRegistry, "formatRegistryKey").text = row[3]
row = c.fetchone()
sqlLock.release()
objectCharacteristicsExtension = etree.SubElement(objectCharacteristics, "objectCharacteristicsExtension")
sql = "SELECT FilesFits.FITSxml FROM FilesFits WHERE fileUUID = '" + fileUUID + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
#if not row:
# print >>sys.stderr, "Error no fits.", fileUUID
parser = etree.XMLParser(remove_blank_text=True)
while row != None:
#fits = etree.fromstring(row[0])
fits = etree.XML(row[0], parser)
objectCharacteristicsExtension.append(fits)
row = c.fetchone()
sqlLock.release()
sql = "SELECT Files.originalLocation FROM Files WHERE Files.fileUUID = '" + fileUUID + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
if not row:
print >>sys.stderr, "Error no fits."
while row != None:
etree.SubElement(object, "originalName").text = escape(row[0])
row = c.fetchone()
sqlLock.release()
#Derivations
sql = "SELECT sourceFileUUID, derivedFileUUID, relatedEventUUID FROM Derivations WHERE sourceFileUUID = '" + fileUUID + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
relationship = etree.SubElement(object, "relationship")
etree.SubElement(relationship, "relationshipType").text = "derivation"
etree.SubElement(relationship, "relationshipSubType").text = "is source of"
relatedObjectIdentification = etree.SubElement(relationship, "relatedObjectIdentification")
etree.SubElement(relatedObjectIdentification, "relatedObjectIdentifierType").text = "UUID"
etree.SubElement(relatedObjectIdentification, "relatedObjectIdentifierValue").text = row[1]
relatedEventIdentification = etree.SubElement(relationship, "relatedEventIdentification")
etree.SubElement(relatedEventIdentification, "relatedEventIdentifierType").text = "UUID"
etree.SubElement(relatedEventIdentification, "relatedEventIdentifierValue").text = row[2]
row = c.fetchone()
sqlLock.release()
sql = "SELECT sourceFileUUID, derivedFileUUID, relatedEventUUID FROM Derivations WHERE derivedFileUUID = '" + fileUUID + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
relationship = etree.SubElement(object, "relationship")
etree.SubElement(relationship, "relationshipType").text = "derivation"
etree.SubElement(relationship, "relationshipSubType").text = "has source"
relatedObjectIdentification = etree.SubElement(relationship, "relatedObjectIdentification")
etree.SubElement(relatedObjectIdentification, "relatedObjectIdentifierType").text = "UUID"
etree.SubElement(relatedObjectIdentification, "relatedObjectIdentifierValue").text = row[0]
relatedEventIdentification = etree.SubElement(relationship, "relatedEventIdentification")
etree.SubElement(relatedEventIdentification, "relatedEventIdentifierType").text = "UUID"
etree.SubElement(relatedEventIdentification, "relatedEventIdentifierValue").text = row[2]
row = c.fetchone()
sqlLock.release()
return ret
def createDigiprovMD(fileUUID):
ret = []
#EVENTS
#| pk | fileUUID | eventIdentifierUUID | eventType | eventDateTime | eventDetail | eventOutcome | eventOutcomeDetailNote | linkingAgentIdentifier |
sql = "SELECT * FROM Events WHERE fileUUID = '" + fileUUID + "';"
rows = databaseInterface.queryAllSQL(sql)
for row in rows:
digiprovMD = etree.Element("digiprovMD")
ret.append(digiprovMD) #newChild(amdSec, "digiprovMD")
#digiprovMD.set("ID", "digiprov-"+ os.path.basename(filename) + "-" + fileUUID)
global globalDigiprovMDCounter
globalDigiprovMDCounter += 1
digiprovMD.set("ID", "digiprovMD_"+ globalDigiprovMDCounter.__str__())
mdWrap = newChild(digiprovMD,"mdWrap")
mdWrap.set("MDTYPE", "PREMIS:EVENT")
xmlData = newChild(mdWrap,"xmlData")
event = etree.SubElement(xmlData, "event", nsmap={None: premisNS})
event.set(xsiBNS+"schemaLocation", premisNS + " http://www.loc.gov/standards/premis/v2/premis-v2-1.xsd")
event.set("version", "2.1")
eventIdentifier = etree.SubElement(event, "eventIdentifier")
etree.SubElement(eventIdentifier, "eventIdentifierType").text = "UUID"
etree.SubElement(eventIdentifier, "eventIdentifierValue").text = row[2]
etree.SubElement(event, "eventType").text = row[3]
etree.SubElement(event, "eventDateTime").text = row[4].__str__().replace(" ", "T")
etree.SubElement(event, "eventDetail").text = escape(row[5])
eventOutcomeInformation = etree.SubElement(event, "eventOutcomeInformation")
etree.SubElement(eventOutcomeInformation, "eventOutcome").text = row[6]
eventOutcomeDetail = etree.SubElement(eventOutcomeInformation, "eventOutcomeDetail")
etree.SubElement(eventOutcomeDetail, "eventOutcomeDetailNote").text = escape(row[7])
#linkingAgentIdentifier
sql = """SELECT agentIdentifierType, agentIdentifierValue, agentName, agentType FROM Agents;"""
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
linkingAgentIdentifier = etree.SubElement(event, "linkingAgentIdentifier")
etree.SubElement(linkingAgentIdentifier, "linkingAgentIdentifierType").text = row[0]
etree.SubElement(linkingAgentIdentifier, "linkingAgentIdentifierValue").text = row[1]
row = c.fetchone()
sqlLock.release()
return ret
def createDigiprovMDAgents():
ret = []
#AGENTS
sql = """SELECT agentIdentifierType, agentIdentifierValue, agentName, agentType FROM Agents;"""
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
global globalDigiprovMDCounter
globalDigiprovMDCounter += 1
digiprovMD = etree.Element("digiprovMD")
digiprovMD.set("ID", "digiprovMD_"+ globalDigiprovMDCounter.__str__())
ret.append(digiprovMD) #newChild(amdSec, "digiprovMD")
mdWrap = newChild(digiprovMD,"mdWrap")
mdWrap.set("MDTYPE", "PREMIS:AGENT")
xmlData = newChild(mdWrap,"xmlData")
#agents = etree.SubElement(xmlData, "agents")
xmlData.append(createAgent(row[0], row[1], row[2], row[3]))
row = c.fetchone()
sqlLock.release()
return ret
def getAMDSec(fileUUID, filePath, use, type, id, transferUUID):
global globalAmdSecCounter
global globalRightsMDCounter
globalAmdSecCounter += 1
AMDID = "amdSec_%s" % (globalAmdSecCounter.__str__())
AMD = etree.Element("amdSec")
AMD.set("ID", AMDID)
ret = (AMD, AMDID)
#tech MD
#digiprob MD
AMD.append(createTechMD(fileUUID))
if use == "original":
metadataAppliesToList = [(fileUUID, FileMetadataAppliesToType), (fileGroupIdentifier, SIPMetadataAppliesToType), (transferUUID.__str__(), TransferMetadataAppliesToType)]
for a in archivematicaGetRights(metadataAppliesToList, fileUUID):
globalRightsMDCounter +=1
rightsMD = etree.SubElement(AMD, "rightsMD")
rightsMD.set("ID", "rightsMD_" + globalRightsMDCounter.__str__())
mdWrap = newChild(rightsMD,"mdWrap")
mdWrap.set("MDTYPE", "PREMIS:RIGHTS")
xmlData = newChild(mdWrap, "xmlData")
xmlData.append(a)
for a in createDigiprovMD(fileUUID):
AMD.append(a)
for a in createDigiprovMDAgents():
AMD.append(a)
return ret
#DMDID="dmdSec_01" for an object goes in here
#<file ID="file1-UUID" GROUPID="G1" DMDID="dmdSec_02" ADMID="amdSec_01">
def createFileSec(directoryPath, structMapDiv):
delayed = []
filesInThisDirectory = []
dspaceMetsDMDID = None
for item in os.listdir(directoryPath):
itemdirectoryPath = os.path.join(directoryPath, item)
if os.path.isdir(itemdirectoryPath):
delayed.append(item)
elif os.path.isfile(itemdirectoryPath):
#myuuid = uuid.uuid4()
myuuid=""
#directoryPathSTR = itemdirectoryPath.replace(baseDirectoryPath + "objects", "objects", 1)
directoryPathSTR = itemdirectoryPath.replace(baseDirectoryPath, baseDirectoryPathString, 1)
sql = """SELECT fileUUID, fileGrpUse, transferUUID FROM Files WHERE removedTime = 0 AND %s = '%s' AND Files.currentLocation = '%s';""" % (fileGroupType, fileGroupIdentifier, MySQLdb.escape_string(directoryPathSTR))
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
if row == None:
print >>sys.stderr, "No uuid for file: \"", directoryPathSTR, "\""
global globalErrorCount
globalErrorCount += 1
sqlLock.release()
continue
while row != None:
myuuid = row[0]
use = row[1]
transferUUID = row[2]
row = c.fetchone()
sqlLock.release()
filename = ''.join(quoteattr(item).split("\"")[1:-1])
directoryPathSTR = itemdirectoryPath.replace(baseDirectoryPath, "", 1)
#print filename, directoryPathSTR
FILEID="%s-%s" % (item, myuuid)
if FILEID[0].isdigit():
FILEID = "_" + FILEID
#<fptr FILEID="file1-UUID"/>
newChild(structMapDiv, "fptr", sets=[("FILEID",FILEID)])
GROUPID=""
if use == "original" or use == "submissionDocumentation":
GROUPID = "Group-%s" % (myuuid)
if use == "preservation":
sql = "SELECT * FROM Derivations WHERE derivedFileUUID = '" + myuuid + "';"
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
GROUPID = "Group-%s" % (row[1])
row = c.fetchone()
sqlLock.release()
elif use == "license" or use == "text/ocr" or use == "DSPACEMETS":
sql = """SELECT originalLocation FROM Files where fileUUID = '%s'""" % (myuuid)
originalLocation = databaseInterface.queryAllSQL(sql)[0][0]
sql = """SELECT fileUUID FROM Files WHERE removedTime = 0 AND %s = '%s' AND fileGrpUse = 'original' AND originalLocation LIKE '%s/%%'""" % (fileGroupType, fileGroupIdentifier, MySQLdb.escape_string(os.path.dirname(originalLocation)).replace("%", "%%"))
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
GROUPID = "Group-%s" % (row[0])
row = c.fetchone()
sqlLock.release()
elif use == "service":
fileFileIDPath = itemdirectoryPath.replace(baseDirectoryPath + "objects/service/", baseDirectoryPathString + "objects/")
objectNameExtensionIndex = fileFileIDPath.rfind(".")
fileFileIDPath = fileFileIDPath[:objectNameExtensionIndex + 1]
sql = """SELECT fileUUID FROM Files WHERE removedTime = 0 AND %s = '%s' AND fileGrpUse = 'original' AND currentLocation LIKE '%s%%'""" % (fileGroupType, fileGroupIdentifier, MySQLdb.escape_string(fileFileIDPath.replace("%", "%%")))
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
GROUPID = "Group-%s" % (row[0])
row = c.fetchone()
sqlLock.release()
if transferUUID:
sql = "SELECT type FROM Transfers WHERE transferUUID = '%s';" % (transferUUID)
rows = databaseInterface.queryAllSQL(sql)
if rows[0][0] == "Dspace1.7":
if use == "original":
print "original"
elif use == "DSPACEMETS":
use = "submissionDocumentation"
admidApplyTo = None
if GROUPID=="": #is an AIP identifier
GROUPID = myuuid
admidApplyTo = structMapDiv.getparent()
LABEL = "mets.xml-%s" % (GROUPID)
dmdSec, ID = createMDRefDMDSec(LABEL, itemdirectoryPath, directoryPathSTR)
dmdSecs.append(dmdSec)
if admidApplyTo != None:
admidApplyTo.set("DMDID", ID)
else:
dspaceMetsDMDID = ID
if GROUPID=="":
globalErrorCount += 1
print >>sys.stderr, "No groupID for file: \"", directoryPathSTR, "\""
if use not in globalFileGrps:
print >>sys.stderr, "Invalid use: \"", use, "\""
globalErrorCount += 1
else:
file = newChild(globalFileGrps[use], "file", sets=[("ID",FILEID), ("GROUPID",GROUPID)])
if use == "original":
filesInThisDirectory.append(file)
#<Flocat xlink:href="objects/file1-UUID" locType="other" otherLocType="system"/>
Flocat = newChild(file, "FLocat", sets=[(xlinkBNS +"href",directoryPathSTR), ("LOCTYPE","OTHER"), ("OTHERLOCTYPE", "SYSTEM")])
if includeAmdSec:
AMD, ADMID = getAMDSec(myuuid, directoryPathSTR, use, fileGroupType, fileGroupIdentifier, transferUUID)
global amdSecs
amdSecs.append(AMD)
file.set("ADMID", ADMID)
#fileI = etree.SubElement( structMapDiv, xlinkBNS + "fits", nsmap=NSMAP)
#filename = replace /tmp/"UUID" with /objects/
#fileI.set("ID", "file-" + item.__str__() + "-" + myuuid.__str__())
#fileI.set("ADMID", "digiprov-" + item.__str__() + "-" + myuuid.__str__())
#Flocat = newChild(fileI, "Flocat")
#Flocat.set(xlinkBNS + "href", directoryPathSTR )
#Flocat.set("locType", "other")
#Flocat.set("otherLocType", "system")
# structMap file
#div = newChild(structMapDiv, "div")
#fptr = newChild(div, "fptr")
#fptr.set("FILEID","file-" + item.__str__() + "-" + myuuid.__str__())
if dspaceMetsDMDID != None:
for file in filesInThisDirectory:
file.set("DMDID", dspaceMetsDMDID)
for item in delayed:
itemdirectoryPath = os.path.join(directoryPath, item)
createFileSec(itemdirectoryPath, newChild(structMapDiv, "div", sets=[("TYPE","directory"), ("LABEL",item)]))
if __name__ == '__main__':
while False: #used to stall the mcp and stop the client for testing this module
import time
time.sleep(10)
if not baseDirectoryPath.endswith('/'):
baseDirectoryPath += '/'
structMap = etree.Element("structMap")
structMap.set("TYPE", "physical")
structMapDiv = newChild(structMap, "div", sets=[("TYPE","directory"), ("LABEL","%s-%s" % (os.path.basename(baseDirectoryPath[:-1]), fileGroupIdentifier))])
#dmdSec, dmdSecID = createDublincoreDMDSec(SIP)
structMapDiv = newChild(structMapDiv, "div", sets=[("TYPE","directory"), ("LABEL","objects") ])
createFileSec(os.path.join(baseDirectoryPath, "objects"), structMapDiv)
fileSec = etree.Element( "fileSec")
for group in globalFileGrpsUses: #globalFileGrps.itervalues():
grp = globalFileGrps[group]
if len(grp) > 0:
fileSec.append(grp)
rootNSMap = {None: metsNS}
rootNSMap.update(NSMAP)
root = etree.Element( "mets", \
nsmap = rootNSMap, \
attrib = { "{" + xsiNS + "}schemaLocation" : "http://www.loc.gov/METS/ http://www.loc.gov/standards/mets/version18/mets.xsd" } )
dc = createDublincoreDMDSec(SIPMetadataAppliesToType, fileGroupIdentifier)
if dc != None:
(dmdSec, ID) = dc
structMapDiv.set("DMDID", ID)
root.append(dmdSec)
for dmdSec in dmdSecs:
root.append(dmdSec)
for amdSec in amdSecs:
root.append(amdSec)
root.append(fileSec)
root.append(structMap)
if False: #debug
print etree.tostring(root, pretty_print=True)
#<div TYPE="directory" LABEL="AIP1-UUID">
#<div TYPE="directory" LABEL="objects" DMDID="dmdSec_01">
#Recursive function for creating structmap and fileSec
tree = etree.ElementTree(root)
#tree.write(XMLFile)
tree.write(XMLFile, pretty_print=True, xml_declaration=True)
writeTestXMLFile = True
if writeTestXMLFile:
import cgi
fileName = XMLFile + ".validatorTester.html"
fileContents = """<html>
<body>
<form method="post" action="http://pim.fcla.edu/validate/results">
<label for="document">Enter XML Document:</label>
<br/>
<textarea id="directinput" rows="12" cols="76" name="document">%s</textarea>
<br/>
<br/>
<input type="submit" value="Validate" />
<br/>
</form>
</body>
</html>""" % (cgi.escape(etree.tostring(root, pretty_print=True, xml_declaration=True)))
f = open(fileName, 'w')
f.write(fileContents)
f.close()
exit(globalErrorCount)
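For orientation, the sketch below shows the fileSec/structMap linkage this script builds: each <file> inside a fileGrp carries a FILEID, and the physical structMap points back at it through an <fptr>. It is a standalone illustration with made-up IDs (the real script also sets an xlink:href on FLocat plus GROUPID/ADMID/DMDID attributes), not part of the script itself.
import lxml.etree as etree
# Illustrative only: one "original" file referenced from the structMap via FILEID.
fileSec = etree.Element("fileSec")
fileGrp = etree.SubElement(fileSec, "fileGrp", USE="original")
fileEl = etree.SubElement(fileGrp, "file", ID="file-example-uuid", GROUPID="Group-example-uuid")
etree.SubElement(fileEl, "FLocat", LOCTYPE="OTHER", OTHERLOCTYPE="SYSTEM")
structMap = etree.Element("structMap", TYPE="physical")
objectsDiv = etree.SubElement(structMap, "div", TYPE="directory", LABEL="objects")
etree.SubElement(objectsDiv, "fptr", FILEID="file-example-uuid")
print etree.tostring(fileSec, pretty_print=True)
print etree.tostring(structMap, pretty_print=True)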
|
# -*- coding: utf-8 -*-
u"""
(c) Copyright 2014 Telefónica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefónica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefónica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
"""
import unittest
import logging
import sys
from seleniumtid import selenium_driver
from seleniumtid.utils import Utils
from seleniumtid.jira import change_all_jira_status
from seleniumtid.visual_test import VisualTest
from seleniumtid.config_driver import get_error_message_from_exception
class BasicTestCase(unittest.TestCase):
"""A class whose instances are api test cases."""
@classmethod
def get_subclass_name(cls):
return cls.__name__
def get_method_name(self):
# Split to remove the test suffix added by the ddt library
return self._testMethodName.split('___')[0]
def get_subclassmethod_name(self):
return self.__class__.__name__ + "." + self.get_method_name()
@classmethod
def tearDownClass(cls):
change_all_jira_status()
def setUp(self):
# Configure logger and properties
if not isinstance(self, SeleniumTestCase):
selenium_driver.configure_logger()
selenium_driver.configure_properties()
# Configure logger
self.logger = logging.getLogger(__name__)
self.logger.info("Running new test: {0}".format(self.get_subclassmethod_name()))
def tearDown(self):
# Check test result
result = sys.exc_info()[:2]
if result == (None, None):
self._test_passed = True
self.logger.info("The test '{0}' has passed".format(self.get_subclassmethod_name()))
else:
self._test_passed = False
error_message = get_error_message_from_exception(result[1])
self.logger.error("The test '{0}' has failed: {1}".format(self.get_subclassmethod_name(), error_message))
class SeleniumTestCase(BasicTestCase):
"""A class whose instances are Selenium test cases.
Attributes:
driver: webdriver instance
utils: test utils instance
remote_video_node: hostname of the remote node if it has enabled a video recorder
"""
_driver = None
_utils = None
remote_video_node = None
@property
def driver(self):
"""Get the Selenium driver
This property allows IDEs to autocomplete self.driver
:returns: Selenium driver
:rtype: selenium.webdriver.remote.webdriver.WebDriver
"""
return self._driver
@property
def utils(self):
"""Get the utils object
This property allows IDEs to autocomplete self.utils
:returns: utils object
:rtype: seleniumtid.utils.Utils
"""
return self._utils
@classmethod
def tearDownClass(cls):
# Call BasicTestCase tearDownClass
super(SeleniumTestCase, cls).tearDownClass()
# Stop driver
if SeleniumTestCase._driver:
class_name = cls.get_subclass_name()
cls._finalize_driver(class_name)
@classmethod
def _finalize_driver(cls, video_name, test_passed=True):
# Get session id to request the saved video
session_id = cls._driver.session_id
# Close browser and stop driver
cls._driver.quit()
cls._driver = None
SeleniumTestCase._driver = None
# Download saved video if video is enabled or if test fails
if cls.remote_video_node and (selenium_driver.config.getboolean_optional('Server', 'video_enabled')
or not test_passed):
video_name = video_name if test_passed else 'error_{}'.format(video_name)
cls._utils.download_remote_video(cls.remote_video_node, session_id, video_name)
def setUp(self):
# Create driver
if not SeleniumTestCase._driver:
SeleniumTestCase._driver = selenium_driver.connect()
SeleniumTestCase._utils = Utils(SeleniumTestCase._driver)
SeleniumTestCase.remote_video_node = SeleniumTestCase._utils.get_remote_video_node()
# Get common configuration of reusing driver
self.reuse_driver = selenium_driver.config.getboolean_optional('Common', 'reuse_driver')
# Set implicitly wait
self._utils.set_implicit_wait()
# Maximize browser
if selenium_driver.is_maximizable():
SeleniumTestCase._driver.maximize_window()
# Call BasicTestCase setUp
super(SeleniumTestCase, self).setUp()
def tearDown(self):
# Call BasicTestCase tearDown
super(SeleniumTestCase, self).tearDown()
# Capture screenshot on error
test_name = self.get_subclassmethod_name().replace('.', '_')
if not self._test_passed:
self._utils.capture_screenshot(test_name)
# Stop driver
if not self.reuse_driver:
SeleniumTestCase._finalize_driver(test_name, self._test_passed)
def assertScreenshot(self, element_or_selector, filename, threshold=0, exclude_elements=[]):
"""Assert that a screenshot of an element is the same as a screenshot on disk, within a given threshold.
:param element_or_selector: either a CSS/XPATH selector as a string or a WebElement object.
If None, a full screenshot is taken.
:param filename: the filename for the screenshot, which will be appended with ``.png``
:param threshold: the threshold for triggering a test failure
:param exclude_elements: list of CSS/XPATH selectors as a string or WebElement objects that must be excluded
from the assertion.
"""
file_suffix = self.get_method_name()
VisualTest().assertScreenshot(element_or_selector, filename, file_suffix, threshold, exclude_elements)
def assertFullScreenshot(self, filename, threshold=0, exclude_elements=[]):
"""Assert that a driver screenshot is the same as a screenshot on disk, within a given threshold.
:param filename: the filename for the screenshot, which will be appended with ``.png``
:param threshold: the threshold for triggering a test failure
:param exclude_elements: list of CSS/XPATH selectors as a string or WebElement objects that must be excluded
from the assertion.
"""
file_suffix = self.get_method_name()
VisualTest().assertScreenshot(None, filename, file_suffix, threshold, exclude_elements)
class AppiumTestCase(SeleniumTestCase):
"""A class whose instances are Appium test cases.
Attributes:
app_strings: dict with application strings
"""
app_strings = {}
@property
def driver(self):
"""Get the Appium driver
This property allows IDEs to autocomplete self.driver
:returns: Appium driver
:rtype: appium.webdriver.webdriver.WebDriver
"""
return self._driver
def setUp(self):
super(AppiumTestCase, self).setUp()
if not AppiumTestCase.app_strings:
AppiumTestCase.app_strings = self._driver.app_strings()
def tearDown(self):
# Call SeleniumTestCase tearDown
super(AppiumTestCase, self).tearDown()
# Remove app strings
if not self.reuse_driver:
AppiumTestCase.app_strings = None
@classmethod
def tearDownClass(cls):
# Call SeleniumTestCase tearDownClass
super(AppiumTestCase, cls).tearDownClass()
# Remove app strings
AppiumTestCase.app_strings = None
Fix app_strings error in mobile web tests
# -*- coding: utf-8 -*-
u"""
(c) Copyright 2014 Telefónica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefónica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefónica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
"""
import unittest
import logging
import sys
from seleniumtid import selenium_driver
from seleniumtid.utils import Utils
from seleniumtid.jira import change_all_jira_status
from seleniumtid.visual_test import VisualTest
from seleniumtid.config_driver import get_error_message_from_exception
class BasicTestCase(unittest.TestCase):
"""A class whose instances are api test cases."""
@classmethod
def get_subclass_name(cls):
return cls.__name__
def get_method_name(self):
# Split to remove the test suffix added by the ddt library
return self._testMethodName.split('___')[0]
def get_subclassmethod_name(self):
return self.__class__.__name__ + "." + self.get_method_name()
@classmethod
def tearDownClass(cls):
change_all_jira_status()
def setUp(self):
# Configure logger and properties
if not isinstance(self, SeleniumTestCase):
selenium_driver.configure_logger()
selenium_driver.configure_properties()
# Configure logger
self.logger = logging.getLogger(__name__)
self.logger.info("Running new test: {0}".format(self.get_subclassmethod_name()))
def tearDown(self):
# Check test result
result = sys.exc_info()[:2]
if result == (None, None):
self._test_passed = True
self.logger.info("The test '{0}' has passed".format(self.get_subclassmethod_name()))
else:
self._test_passed = False
error_message = get_error_message_from_exception(result[1])
self.logger.error("The test '{0}' has failed: {1}".format(self.get_subclassmethod_name(), error_message))
class SeleniumTestCase(BasicTestCase):
"""A class whose instances are Selenium test cases.
Attributes:
driver: webdriver instance
utils: test utils instance
remote_video_node: hostname of the remote node if it has enabled a video recorder
"""
_driver = None
_utils = None
remote_video_node = None
@property
def driver(self):
"""Get the Selenium driver
This property allows IDEs to autocomplete self.driver
:returns: Selenium driver
:rtype: selenium.webdriver.remote.webdriver.WebDriver
"""
return self._driver
@property
def utils(self):
"""Get the utils object
This property allows IDEs to autocomplete self.utils
:returns: utils object
:rtype: seleniumtid.utils.Utils
"""
return self._utils
@classmethod
def tearDownClass(cls):
# Call BasicTestCase tearDownClass
super(SeleniumTestCase, cls).tearDownClass()
# Stop driver
if SeleniumTestCase._driver:
class_name = cls.get_subclass_name()
cls._finalize_driver(class_name)
@classmethod
def _finalize_driver(cls, video_name, test_passed=True):
# Get session id to request the saved video
session_id = cls._driver.session_id
# Close browser and stop driver
cls._driver.quit()
cls._driver = None
SeleniumTestCase._driver = None
# Download saved video if video is enabled or if test fails
if cls.remote_video_node and (selenium_driver.config.getboolean_optional('Server', 'video_enabled')
or not test_passed):
video_name = video_name if test_passed else 'error_{}'.format(video_name)
cls._utils.download_remote_video(cls.remote_video_node, session_id, video_name)
def setUp(self):
# Create driver
if not SeleniumTestCase._driver:
SeleniumTestCase._driver = selenium_driver.connect()
SeleniumTestCase._utils = Utils(SeleniumTestCase._driver)
SeleniumTestCase.remote_video_node = SeleniumTestCase._utils.get_remote_video_node()
# Get common configuration of reusing driver
self.reuse_driver = selenium_driver.config.getboolean_optional('Common', 'reuse_driver')
# Set implicitly wait
self._utils.set_implicit_wait()
# Maximize browser
if selenium_driver.is_maximizable():
SeleniumTestCase._driver.maximize_window()
# Call BasicTestCase setUp
super(SeleniumTestCase, self).setUp()
def tearDown(self):
# Call BasicTestCase tearDown
super(SeleniumTestCase, self).tearDown()
# Capture screenshot on error
test_name = self.get_subclassmethod_name().replace('.', '_')
if not self._test_passed:
self._utils.capture_screenshot(test_name)
# Stop driver
if not self.reuse_driver:
SeleniumTestCase._finalize_driver(test_name, self._test_passed)
def assertScreenshot(self, element_or_selector, filename, threshold=0, exclude_elements=[]):
"""Assert that a screenshot of an element is the same as a screenshot on disk, within a given threshold.
:param element_or_selector: either a CSS/XPATH selector as a string or a WebElement object.
If None, a full screenshot is taken.
:param filename: the filename for the screenshot, which will be appended with ``.png``
:param threshold: the threshold for triggering a test failure
:param exclude_elements: list of CSS/XPATH selectors as a string or WebElement objects that must be excluded
from the assertion.
"""
file_suffix = self.get_method_name()
VisualTest().assertScreenshot(element_or_selector, filename, file_suffix, threshold, exclude_elements)
def assertFullScreenshot(self, filename, threshold=0, exclude_elements=[]):
"""Assert that a driver screenshot is the same as a screenshot on disk, within a given threshold.
:param filename: the filename for the screenshot, which will be appended with ``.png``
:param threshold: the threshold for triggering a test failure
:param exclude_elements: list of CSS/XPATH selectors as a string or WebElement objects that must be excluded
from the assertion.
"""
file_suffix = self.get_method_name()
VisualTest().assertScreenshot(None, filename, file_suffix, threshold, exclude_elements)
class AppiumTestCase(SeleniumTestCase):
"""A class whose instances are Appium test cases.
Attributes:
app_strings: dict with application strings
"""
app_strings = None
@property
def driver(self):
"""Get the Appium driver
This property allows IDEs to autocomplete self.driver
:returns: Appium driver
:rtype: appium.webdriver.webdriver.WebDriver
"""
return self._driver
def setUp(self):
super(AppiumTestCase, self).setUp()
if AppiumTestCase.app_strings is None and not selenium_driver.is_web_test():
AppiumTestCase.app_strings = self._driver.app_strings()
def tearDown(self):
# Call SeleniumTestCase tearDown
super(AppiumTestCase, self).tearDown()
# Remove app strings
if not self.reuse_driver:
AppiumTestCase.app_strings = None
@classmethod
def tearDownClass(cls):
# Call SeleniumTestCase tearDownClass
super(AppiumTestCase, cls).tearDownClass()
# Remove app strings
AppiumTestCase.app_strings = None
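A minimal usage sketch of these base classes; it assumes the module is importable as seleniumtid.test_cases (the import path is an assumption) and that the project's seleniumtid properties and driver configuration are already in place. The URL, CSS selector and screenshot name are invented for illustration.
from seleniumtid.test_cases import SeleniumTestCase  # import path assumed
class LoginFormTests(SeleniumTestCase):
    def test_login_form_layout(self):
        # self.driver and self.utils are created by SeleniumTestCase.setUp()
        self.driver.get('http://www.example.com/login')  # illustrative URL
        # compares a screenshot of the element against login_form.png
        self.assertScreenshot('#login-form', 'login_form')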
|
"""Provides device automations for NEW_NAME."""
from typing import List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_CONDITION,
CONF_DOMAIN,
CONF_TYPE,
CONF_DEVICE_ID,
CONF_ENTITY_ID,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from . import DOMAIN
# TODO specify your supported condition types.
CONDITION_TYPES = {"is_on", "is_off"}
CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(CONDITION_TYPES),
}
)
async def async_get_conditions(hass: HomeAssistant, device_id: str) -> List[str]:
"""List device conditions for NEW_NAME devices."""
registry = await entity_registry.async_get_registry(hass)
conditions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add conditions for each entity that belongs to this integration
# TODO add your own conditions.
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_on",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_off",
}
)
return conditions
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config_validation:
config = CONDITION_SCHEMA(config)
if config[CONF_TYPE] == "is_on":
state = STATE_ON
else:
state = STATE_OFF
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
return condition.state(hass, config[ATTR_ENTITY_ID], state)
return test_is_state
Fix typing for device condition scaffold (#27487)
"""Provides device automations for NEW_NAME."""
from typing import List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_CONDITION,
CONF_DOMAIN,
CONF_TYPE,
CONF_DEVICE_ID,
CONF_ENTITY_ID,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from . import DOMAIN
# TODO specify your supported condition types.
CONDITION_TYPES = {"is_on", "is_off"}
CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(CONDITION_TYPES),
}
)
async def async_get_conditions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device conditions for NEW_NAME devices."""
registry = await entity_registry.async_get_registry(hass)
conditions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add conditions for each entity that belongs to this integration
# TODO add your own conditions.
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_on",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_off",
}
)
return conditions
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config_validation:
config = CONDITION_SCHEMA(config)
if config[CONF_TYPE] == "is_on":
state = STATE_ON
else:
state = STATE_OFF
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
return condition.state(hass, config[ATTR_ENTITY_ID], state)
return test_is_state
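As a consumption sketch, a device condition produced by async_get_conditions (and accepted by CONDITION_SCHEMA) is a plain dict along these lines; the device and entity ids are invented, and "new_name" stands for the scaffolded integration domain:
# Illustrative condition config matching CONDITION_SCHEMA above.
example_condition = {
    "condition": "device",
    "domain": "new_name",            # the integration's DOMAIN constant
    "device_id": "abcdef123456",     # made-up device id
    "entity_id": "new_name.example",  # made-up entity id
    "type": "is_on",
}
# async_condition_from_config(example_condition, True) validates the dict and
# returns a checker that calls condition.state(hass, entity_id, STATE_ON).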
|
#!/usr/bin/python
import commands
import cv2
import fileinput
import fnmatch
import json
import math
from matplotlib import pyplot as plt
import numpy as np
import os.path
from progress.bar import Bar
import subprocess
import sys
import geojson
from getchar import find_getch
import Camera
import Image
import ImageList
import Matcher
import Placer
import Render
import transformations
class ProjectMgr():
def __init__(self, project_dir=None):
# directories
self.project_dir = None # project working directory
self.source_dir = None # original images
self.image_dir = None # working set of images
self.cam = Camera.Camera()
self.image_list = []
self.detector_params = { 'detector': 'SIFT', # { SIFT, SURF, ORB, Star }
'grid-detect': 1,
'sift-max-features': 2000,
'surf-hessian-threshold': 600,
'surf-noctaves': 4,
'orb-max-features': 2000,
'star-max-size': 16,
'star-response-threshold': 30,
'star-line-threshold-projected': 10,
'star-line-threshold-binarized': 8,
'star-suppress-nonmax-size': 5 }
self.matcher_params = { 'matcher': 'FLANN', # { FLANN or 'BF' }
'match-ratio': 0.75 }
self.ned_reference_lla = []
# the following member variables need to be reviewed/organized
self.ac3d_steps = 8
self.shutter_latency = 0.0
self.group_roll_bias = 0.0
self.group_pitch_bias = 0.0
self.group_yaw_bias = 0.0
#self.group_alt_bias = 0.0
self.k1 = 0.0
self.k2 = 0.0
#self.m = Matcher.Matcher()
self.placer = Placer.Placer()
self.render = Render.Render()
if project_dir != None:
self.load( project_dir )
# project_dir is a new folder for all derived files
def set_project_dir(self, project_dir, create_if_needed=True):
self.project_dir = project_dir
if not os.path.exists(self.project_dir):
if create_if_needed:
print "Notice: creating project directory =", self.project_dir
os.makedirs(self.project_dir)
else:
print "Error: project dir doesn't exist =", self.project_dir
return False
# and make children directories
self.image_dir = project_dir + "/" + "Images"
if not os.path.exists(self.image_dir):
if create_if_needed:
print "Notice: creating image directory =", self.image_dir
os.makedirs(self.image_dir)
else:
print "Error: image dir doesn't exist =", self.image_dir
return False
# all is good
return True
# source_dir is the folder containing all the raw/original images.
# The expected work flow is that we will import/scale all the
# original images into our project folder leaving the original
# image set completely untouched.
def set_source_dir(self, source_dir):
if source_dir == self.project_dir:
print "Error: image source and project dirs must be different."
return
if not os.path.exists(source_dir):
print "Error: image source path does not exist =", source_path
self.source_dir = source_dir
def save(self):
# create a project dictionary and write it out as json
if not os.path.exists(self.project_dir):
print "Error: project doesn't exist =", self.project_dir
return
dirs = {}
dirs['images-source'] = self.source_dir
project_dict = {}
project_dict['detector'] = self.detector_params
project_dict['matcher'] = self.matcher_params
project_dict['directories'] = dirs
project_dict['ned-reference-lla'] = self.ned_reference_lla
project_file = self.project_dir + "/Project.json"
try:
f = open(project_file, 'w')
json.dump(project_dict, f, indent=4, sort_keys=True)
f.close()
except IOError as e:
print "Save project(): I/O error({0}): {1}".format(e.errno, e.strerror)
return
except:
raise
# save camera configuration
self.cam.save(self.project_dir)
def load(self, project_dir, create_if_needed=True):
if not self.set_project_dir( project_dir ):
return
# load project configuration
project_file = self.project_dir + "/Project.json"
try:
f = open(project_file, 'r')
project_dict = json.load(f)
f.close()
if 'detector' in project_dict:
self.detector_params = project_dict['detector']
if 'matcher' in project_dict:
self.matcher_params = project_dict['matcher']
dirs = project_dict['directories']
self.source_dir = dirs['images-source']
self.ned_reference_lla = project_dict['ned-reference-lla']
except:
print "load error: " + str(sys.exc_info()[1])
print "Notice: unable to read =", project_file
print "Continuing with an empty project configuration"
# load camera configuration
self.cam.load(self.project_dir)
# import an image set into the project directory, possibly scaling them
# to a lower resolution for faster processing.
def import_images(self, scale=0.25, converter='imagemagick'):
if self.source_dir == None:
print "Error: source_dir not defined."
return
if self.image_dir == None:
print "Error: project's image_dir not defined."
return
if self.source_dir == self.image_dir:
print "Error: source and destination directories must be different."
return
if not os.path.exists(self.source_dir):
print "Error: source directory not found =", self.source_dir
return
if not os.path.exists(self.image_dir):
print "Error: destination directory not found =", self.image_dir
return
files = []
for file in os.listdir(self.source_dir):
if fnmatch.fnmatch(file, '*.jpg') or fnmatch.fnmatch(file, '*.JPG'):
files.append(file)
files.sort()
for file in files:
name_in = self.source_dir + "/" + file
name_out = self.image_dir + "/" + file
if converter == 'imagemagick':
command = "convert -resize %d%% %s %s" % ( int(scale*100.0), name_in, name_out )
print command
commands.getstatusoutput( command )
elif converter == 'opencv':
src = cv2.imread(name_in)
#method = cv2.INTER_AREA
method = cv2.INTER_LANCZOS4
dst = cv2.resize(src, (0,0), fx=scale, fy=scale,
interpolation=method)
cv2.imwrite(name_out, dst)
print "Scaling (%.1f%%) %s to %s" % ((scale*100.0), name_in, name_out)
else:
print "Error: unknown converter =", converter
def load_image_info(self):
file_list = []
for file in os.listdir(self.image_dir):
if fnmatch.fnmatch(file, '*.jpg') or fnmatch.fnmatch(file, '*.JPG'):
file_list.append(file)
file_list.sort()
# wipe image list (so we don't double load)
self.image_list = []
for file_name in file_list:
image = Image.Image(self.image_dir, file_name)
self.image_list.append( image )
# make sure our matcher gets a copy of the image list
#self.m.setImageList(self.image_list)
self.placer.setImageList(self.image_list)
self.render.setImageList(self.image_list)
def load_features(self):
bar = Bar('Loading keypoints and descriptors:',
max = len(self.image_list))
for image in self.image_list:
image.load_features()
image.load_descriptors()
bar.next()
bar.finish()
def load_matches(self):
bar = Bar('Loading keypoint (pair) matches:',
max = len(self.image_list))
for image in self.image_list:
image.load_matches()
bar.next()
bar.finish()
def save_images_meta(self):
for image in self.image_list:
image.save_meta()
def set_detector_params(self, dparams):
self.detector_params = dparams
def set_matcher_params(self, mparams):
self.matcher_params = mparams
def detect_features(self, force=True, show=False):
if not show:
bar = Bar('Detecting features:', max = len(self.image_list))
for image in self.image_list:
if force or len(image.kp_list) == 0 or image.des_list == None:
#print "detecting features and computing descriptors: " + image.name
if image.img_rgb == None:
image.load_rgb()
image.detect_features(self.detector_params)
image.save_features()
image.save_descriptors()
image.save_matches()
if show:
result = image.show_features()
if result == 27 or result == ord('q'):
break
if not show:
bar.next()
if not show:
bar.finish()
def show_features_image(self, image):
if image.img_rgb == None:
image.load_rgb()
result = image.show_features()
return result
def show_features_images(self, name=None):
for image in self.image_list:
result = self.show_features_image(image)
if result == 27 or result == ord('q'):
break
def findImageByName(self, name):
for i in self.image_list:
if i.name == name:
return i
return None
# compute a center reference location (lon, lat) for the group of
# images.
def compute_ned_reference_lla(self):
# requires images to have their location computed/loaded
lon_sum = 0.0
lat_sum = 0.0
for image in self.image_list:
lla, ypr, quat = image.get_aircraft_pose()
lon_sum += lla[1]
lat_sum += lla[0]
self.ned_reference_lla = [ lat_sum / len(self.image_list),
lon_sum / len(self.image_list),
0.0 ]
self.render.setRefCoord(self.ned_reference_lla)
# for each feature in each image, compute the undistorted pixel
# location (from the calibrated distortion parameters)
def undistort_keypoints(self):
print "Notice: undistort keypoints"
for image in self.image_list:
if len(image.kp_list) == 0:
continue
uv_raw = np.zeros((len(image.kp_list),1,2), dtype=np.float32)
for i, kp in enumerate(image.kp_list):
uv_raw[i][0] = (kp.pt[0], kp.pt[1])
dist_coeffs = np.array(self.cam.camera_dict['dist-coeffs'],
dtype=np.float32)
uv_new = cv2.undistortPoints(uv_raw, self.cam.K, dist_coeffs,
P=self.cam.K)
image.uv_list = []
for i, uv in enumerate(uv_new):
image.uv_list.append(uv_new[i][0])
#print " orig = %s undistort = %s" % (uv_raw[i][0], uv_new[i][0])
# for each uv in the provided uv list, apply the distortion
# formula to compute the original distorted value.
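# A sketch of the model used below (standard radial + tangential terms),
# with (x, y) the normalized undistorted coordinates and r^2 = x^2 + y^2:
#   x_d = x*(1 + k1*r^2 + k2*r^4 + k3*r^6) + 2*p1*x*y + p2*(r^2 + 2*x^2)
#   y_d = y*(1 + k1*r^2 + k2*r^4 + k3*r^6) + p1*(r^2 + 2*y^2) + 2*p2*x*y
# and the result is mapped back to pixels with fx, fy, cx, cy.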
def redistort(self, uv_list, K, dist_coeffs):
fx = K[0,0]
fy = K[1,1]
cx = K[0,2]
cy = K[1,2]
k1, k2, p1, p2, k3 = dist_coeffs
uv_distorted = []
for pt in uv_list:
x = (pt[0] - cx) / fx
y = (pt[1] - cy) / fy
# Compute radius^2
r2 = x**2 + y**2
r4, r6 = r2**2, r2**3
# Compute tangential distortion
dx = 2*p1*x*y + p2*(r2 + 2*x*x)
dy = p1*(r2 + 2*y*y) + 2*p2*x*y
# Compute radial factor
Lr = 1.0 + k1*r2 + k2*r4 + k3*r6
ud = Lr*x + dx
vd = Lr*y + dy
uv_distorted.append( [ud * fx + cx, vd * fy + cy] )
return uv_distorted
# project the list of (u, v) pixels from image space into camera
# space, remap that to a vector in ned space (for camera
# ypr=[0,0,0]), and then transform that by the camera pose; returns
# the vector from the camera, through the pixel, into ned space
def projectVectors(self, IK, quat, uv_list):
proj_list = []
for uv in uv_list:
v_lens = IK.dot( np.array([uv[0], uv[1], 1.0]) )
v_lens_norm = transformations.unit_vector( v_lens )
# remap lens space to ned body space (change this here, to
# switch to (u,v=0,0) in upper right hand corner of image
# to match kp coordinate system and be more standard? try not negating z/d)
v_body = np.array([ v_lens_norm[2], v_lens_norm[0], v_lens_norm[1] ])
# transform camera vector (in body reference frame) to ned
# reference frame
proj = transformations.quaternion_backTransform(quat, v_body)
#print "proj = ", proj
proj_list.append(proj)
return proj_list
# given a set of vectors in the ned frame, and a starting point.
# Find the ground intersection point. For any vectors which point into
# the sky, return just the original reference/starting point.
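# (NED convention: +z points down, so only vectors with v[2] > 0 are
# heading toward the ground and can produce an intersection.)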
def intersectVectorsWithGroundPlane(self, pose, ground_m, v_list):
pose_ned = pose['ned']
pt_list = []
for v in v_list:
# solve projection
p = pose_ned
if v[2] > 0.0:
d_proj = -(pose_ned[2] + ground_m)
factor = d_proj / v[2]
n_proj = v[0] * factor
e_proj = v[1] * factor
p = [ pose_ned[0] + n_proj, pose_ned[1] + e_proj, pose_ned[2] + d_proj ]
pt_list.append(p)
return pt_list
#
# Below this point all the code needs to be reviewed/refactored
#
def setWorldParams(self, ground_alt_m=0.0, shutter_latency=0.0,
yaw_bias=0.0, roll_bias=0.0, pitch_bias=0.0):
print "Setting ground=%.1f shutter=%.2f yaw=%.2f roll=%.2f pitch=%.2f"\
% (ground_alt_m, shutter_latency, yaw_bias, roll_bias, pitch_bias)
self.ground_alt_m = ground_alt_m
self.shutter_latency = shutter_latency
self.group_yaw_bias = yaw_bias
self.group_roll_bias = roll_bias
self.group_pitch_bias = pitch_bias
def genKeypointUsageMap(self):
# make the keypoint usage map (used so we don't have to
# project every keypoint every time)
print "Building the keypoint usage map... ",
for i1 in self.image_list:
i1.kp_usage = np.zeros(len(i1.kp_list), np.bool_)
for i, i1 in enumerate(self.image_list):
for j, pairs in enumerate(i1.match_list):
if len(pairs) == 0:
continue
if i == j:
continue
i2 = self.image_list[j]
print "%s vs %s" % (i1.name, i2.name)
for pair in pairs:
i1.kp_usage[pair[0]] = True
i2.kp_usage[pair[1]] = True
print "done."
def interpolateAircraftPositions(self, correlator, force=False,
weight=True):
# tag each image with the flight data parameters at the time
# the image was taken
for match in correlator.best_matchups:
pict, trig = correlator.get_match(match)
image = self.findImageByName(pict[2])
if image != None:
if force or (math.fabs(image.aircraft_lon) < 0.01 and math.fabs(image.aircraft_lat) < 0.01):
# only if we are forcing a new position
# calculation or the position is not already set
# from a save file.
t = trig[0] + self.shutter_latency
lon, lat, msl = correlator.get_position(t)
roll, pitch, yaw = correlator.get_attitude(t)
image.set_aircraft_pose( lon, lat, msl, roll, pitch, yaw )
if weight:
# presumes a pitch/roll distance of 10, 10 gives a
# zero weight
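# (e.g. roll = pitch = 10 gives w = 1 - 200/200 = 0, clamped up to 0.01 below)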
w = 1.0 - (roll*roll + pitch*pitch)/200.0
if w < 0.01:
w = 0.01
image.weight = w
else:
image.weight = 1.0
image.save_meta()
#print "%s roll=%.1f pitch=%.1f weight=%.2f" % (image.name, roll, pitch, image.weight)
def computeWeights(self, force=None):
# tag each image with the flight data parameters at the time
# the image was taken
for image in self.image_list:
roll = image.aircraft_roll + image.roll_bias
pitch = image.aircraft_pitch + image.pitch_bias
if force != None:
image.weight = force
else:
# presumes a pitch/roll distance of 10, 10 gives a
# zero weight
w = 1.0 - (roll*roll + pitch*pitch)/200.0
if w < 0.01:
w = 0.01
image.weight = w
image.save_meta()
#print "%s roll=%.1f pitch=%.1f weight=%.2f" % (image.name, roll, pitch, image.weight)
def computeConnections(self, force=None):
for image in self.image_list:
image.connections = 0
for pairs in image.match_list:
if len(pairs) >= self.m.min_pairs:
image.connections += 1
image.save_meta()
print "%s connections: %d" % (image.name, image.connections)
# deprecate this function .... or replace with a better one (or just
# use opencv)
#
# undistort x, y using a simple radial lens distortion model. (We
# call the original image values the 'distorted' values.) Input
# x,y are expected to be normalized (0.0 - 1.0) in image pixel
# space with 0.5 being the center of image (and hopefully the
# center of distortion.)
def doLensUndistort(self, aspect_ratio, xnorm, ynorm):
print "DEPRICATED..."
xd = (xnorm * 2.0 - 1.0) * aspect_ratio
yd = ynorm * 2.0 - 1.0
r = math.sqrt(xd*xd + yd*yd)
#print "ar=%.3f xd=%.3f yd=%.3f r=%.2f" % (aspect_ratio, xd, yd, r)
factor = 1.0 + self.k1 * r*r + self.k2 * r*r*r*r
xu = xd * factor
yu = yd * factor
xnorm_u = (xu / aspect_ratio + 1.0) / 2.0
ynorm_u = (yu + 1.0) / 2.0
#print " (%.3f %.3f) -> (%.3f %.3f)" % (xnorm, ynorm, xnorm_u, ynorm_u)
return xnorm_u, ynorm_u
def projectPoint2(self, image, q, pt, z_m):
horiz_mm, vert_mm, focal_len_mm = self.cam.get_lens_params()
h = image.height
w = image.width
print [h, w, self.cam.get_lens_params()]
ar = float(w)/float(h) # aspect ratio
# normalized pixel coordinates to [0.0, 1.0]
xnorm = pt[0] / float(w-1)
ynorm = pt[1] / float(h-1)
print "norm = %.4f %.4f" % (xnorm, ynorm)
# lens un-distortion
xnorm_u, ynorm_u = self.doLensUndistort(ar, xnorm, ynorm)
print "norm_u = %.4f %.4f" % (xnorm_u, ynorm_u)
# compute pixel coordinate in sensor coordinate space (mm
# units) with (0mm, 0mm) being the center of the image.
x_mm = (xnorm_u * 2.0 - 1.0) * (horiz_mm * 0.5)
y_mm = (ynorm_u * 2.0 - 1.0) * (vert_mm * 0.5)
print "x_mm = %.4f y_mm = %.4f" % ( x_mm, y_mm )
# the forward vector (out the nose when the aircraft is
# straight, level, and flying north) is (x=1.0, y=0.0, z=0.0).
# This vector will get projected to the camera center point,
# thus we have to remap the axes.
#camvec = [y_mm, x_mm, focal_len_mm]
camvec = [focal_len_mm, x_mm, y_mm]
print "camvec orig = ", camvec
camvec = transformations.unit_vector(camvec) # normalize
print "camvec = %.3f %.3f %.3f" % (camvec[0], camvec[1], camvec[2])
# transform camera vector (in body reference frame) to ned
# reference frame
ned = transformations.quaternion_backTransform(q, camvec)
print "q = %s ned = %s" % (str(q), str(ned))
# solve projection
if ned[2] < 0.0:
# no intersection
return [0.0, 0.0]
factor = z_m / ned[2]
#print "z_m = %s" % str(z_m)
x_proj = -ned[0] * factor
y_proj = -ned[1] * factor
#print "proj dist = %.2f" % math.sqrt(x_proj*x_proj + y_proj*y_proj)
return [x_proj, y_proj]
# project keypoints based on body reference system + body biases
# transformed by camera mounting + camera mounting biases
def projectImageKeypointsNative2(self, image, yaw_bias=0.0,
roll_bias=0.0, pitch_bias=0.0,
alt_bias=0.0):
if image.img == None:
image.load_rgb()
h = image.height
w = image.width
ar = float(w)/float(h) # aspect ratio
pose = self.computeCameraPoseFromAircraft(image)
#print "Computed new image pose for %s = %s" % (image.name, str(pose))
# save the computed camera pose
image.camera_yaw = pose[0]
image.camera_pitch = pose[1]
image.camera_roll = pose[2]
image.camera_x = pose[3]
image.camera_y = pose[4]
image.camera_z = pose[5]
image.save_meta()
(coord_list, corner_list, grid_list) = \
self.projectImageKeypointsNative3(image, pose, yaw_bias, roll_bias,
pitch_bias, alt_bias)
return coord_list, corner_list, grid_list
d2r = math.pi / 180.0
# project keypoints using the provided camera pose
# pose = (yaw_deg, pitch_deg, roll_deg, x_m, y_m, z_m)
def projectImageKeypointsNative3(self, image, pose,
yaw_bias=0.0, roll_bias=0.0,
pitch_bias=0.0, alt_bias=0.0,
all_keypoints=False):
#print "Project3 for %s" % image.name
if image.img == None:
image.load_rgb()
h = image.height
w = image.width
ar = float(w)/float(h) # aspect ratio
ned2cam = transformations.quaternion_from_euler((pose[0]+yaw_bias)*d2r,
(pose[1]+pitch_bias)*d2r,
(pose[2]+roll_bias)*d2r,
'rzyx')
x_m = pose[3]
y_m = pose[4]
z_m = pose[5] + alt_bias
#print "ref offset = %.2f %.2f" % (x_m, y_m)
coord_list = [None] * len(image.kp_list)
corner_list = []
grid_list = []
# project the paired keypoints into world space
for i, kp in enumerate(image.kp_list):
if not all_keypoints and not image.kp_usage[i]:
continue
# print "ned2cam = %s" % str(ned2cam)
proj = self.projectPoint2(image, ned2cam, kp.pt, z_m)
#print "project3: kp=%s proj=%s" %(str(kp.pt), str(proj))
coord_list[i] = [proj[1] + x_m, proj[0] + y_m]
#print "coord_list = %s" % str(coord_list)
# compute the corners (2x2 polygon grid) in image space
dx = image.width - 1
dy = image.height - 1
y = 0.0
for j in xrange(2):
x = 0.0
for i in xrange(2):
#print "corner %.2f %.2f" % (x, y)
proj = self.projectPoint2(image, ned2cam, [x, y], z_m)
corner_list.append( [proj[1] + x_m, proj[0] + y_m] )
x += dx
y += dy
# compute the ac3d polygon grid in image space
dx = image.width / float(self.ac3d_steps)
dy = image.height / float(self.ac3d_steps)
y = 0.0
for j in xrange(self.ac3d_steps+1):
x = 0.0
for i in xrange(self.ac3d_steps+1):
#print "grid %.2f %.2f" % (xnorm_u, ynorm_u)
proj = self.projectPoint2(image, ned2cam, [x, y], z_m)
grid_list.append( [proj[1] + x_m, proj[0] + y_m] )
x += dx
y += dy
return coord_list, corner_list, grid_list
def projectKeypoints(self, all_keypoints=False):
for image in self.image_list:
pose = (image.camera_yaw, image.camera_pitch, image.camera_roll,
image.camera_x, image.camera_y, image.camera_z)
# print "project from pose = %s" % str(pose)
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
all_keypoints=all_keypoints)
image.coord_list = coord_list
image.corner_list = corner_list
image.grid_list = grid_list
# test
# coord_list, corner_list, grid_list \
# = self.projectImageKeypointsNative2(image)
#print "orig corners = %s" % str(image.corner_list)
#print "new corners = %s" % str(corner_list)
def findImageRotate(self, i1, gain):
#self.findImageAffine(i1) # temp test
error_sum = 0.0
weight_sum = i1.weight # give ourselves an appropriate weight
for i, match in enumerate(i1.match_list):
if len(match) >= self.m.min_pairs:
i2 = self.image_list[i]
print "Rotating %s vs %s" % (i1.name, i2.name)
for pair in match:
# + 180 (camera is mounted backwards)
y1 = i1.yaw + i1.rotate + 180.0
y2 = i2.yaw + i2.rotate + 180.0
dy = y2 - y1
while dy < -180.0:
dy += 360.0;
while dy > 180.0:
dy -= 360.0
# angle is in opposite direction from yaw
#a1 = i1.yaw + i1.rotate + 180 + i1.kp_list[pair[0]].angle
#a2 = i2.yaw + i2.rotate + 180 + i2.kp_list[pair[1]].angle
a1 = i1.kp_list[pair[0]].angle
a2 = i2.kp_list[pair[1]].angle
da = a1 - a2
while da < -180.0:
da += 360.0;
while da > 180.0:
da -= 360.0
print "yaw diff = %.1f angle diff = %.1f" % (dy, da)
error = dy - da
while error < -180.0:
error += 360.0;
while error > 180.0:
error -= 360.0
error_sum += error * i2.weight
weight_sum += i2.weight
print str(pair)
print " i1: %.1f %.3f %.1f" % (i1.yaw, i1.kp_list[pair[0]].angle, a1)
print " i2: %.1f %.3f %.1f" % (i2.yaw, i2.kp_list[pair[1]].angle, a2)
print " error: %.1f weight: %.2f" % (error, i2.weight)
print
#self.showMatch(i1, i2, match)
update = 0.0
if weight_sum > 0.0:
update = error_sum / weight_sum
i1.rotate += update * gain
print "Rotate %s delta=%.2f = %.2f" % (i1.name, update, i1.rotate)
def rotateImages(self, gain=0.10):
for image in self.image_list:
self.findImageRotate(image, gain)
for image in self.image_list:
print "%s: yaw error = %.2f" % (image.name, image.rotate)
def findImagePairShift(self, i1, i2, match):
xerror_sum = 0.0
yerror_sum = 0.0
for pair in match:
c1 = i1.coord_list[pair[0]]
c2 = i2.coord_list[pair[1]]
dx = c2[0] - c1[0]
dy = c2[1] - c1[1]
xerror_sum += dx
yerror_sum += dy
# divide by pairs + 1 gives some weight to our own position
# (i.e. a zero rotate)
xshift = xerror_sum / len(match)
yshift = yerror_sum / len(match)
#print " %s -> %s = (%.2f %.2f)" % (i1.name, i2.name, xshift, yshift)
return (xshift, yshift)
def findImageShift(self, i1, gain=0.10, placing=False):
xerror_sum = 0.0
yerror_sum = 0.0
weight_sum = i1.weight # give ourselves an appropriate weight
for i, match in enumerate(i1.match_list):
if len(match) < self.m.min_pairs:
continue
i2 = self.image_list[i]
#if not i2.placed:
# continue
(xerror, yerror) = self.findImagePairShift( i1, i2, match )
xerror_sum += xerror * i2.weight
yerror_sum += yerror * i2.weight
weight_sum += i2.weight
xshift = xerror_sum / weight_sum
yshift = yerror_sum / weight_sum
print "Shift %s -> (%.2f %.2f)" % (i1.name, xshift, yshift)
#print " %s bias before (%.2f %.2f)" % (i1.name, i1.x_bias, i1.y_bias)
i1.x_bias += xshift * gain
i1.y_bias += yshift * gain
#print " %s bias after (%.2f %.2f)" % (i1.name, i1.x_bias, i1.y_bias)
i1.save_meta()
def shiftImages(self, gain=0.10):
for image in self.image_list:
self.findImageShift(image, gain)
# method="average": return the weighted average of the errors.
# method="stddev": return the weighted average of the stddev of the errors.
# method="max": return the max error of the subcomponents.
def groupError(self, method="average"):
#print "compute group error, method = %s" % method
if len(self.image_list):
error_sum = 0.0
weight_sum = 0.0
for i, image in enumerate(self.image_list):
e = 0.0
e = self.m.imageError(i, method=method)
#print "%s error = %.2f" % (image.name, e)
error_sum += e*e * image.weight
weight_sum += image.weight
return math.sqrt(error_sum / weight_sum)
else:
return 0.0
# zero all biases (if we want to start over with a from scratch fit)
def zeroImageBiases(self):
for image in self.image_list:
image.yaw_bias = 0.0
image.roll_bias = 0.0
image.pitch_bias = 0.0
image.alt_bias = 0.0
image.x_bias = 0.0
image.y_bias = 0.0
image.save_meta()
# try to fit individual images by manipulating various parameters
# and testing to see if that produces a better fit metric
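# The search is a simple coarse-to-fine 1-d scan: evaluate the error at
# 11 test values spanning +/- 5 steps around the current best value, keep
# the best, shrink the step size by 5x, and repeat for each refinement pass.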
def estimateParameter(self, i, ground_alt_m, method,
param="", start_value=0.0, step_size=1.0,
refinements=3):
image = self.image_list[i]
pose = (image.camera_yaw, image.camera_pitch, image.camera_roll,
image.camera_x, image.camera_y, image.camera_z)
#print "Estimate %s for %s" % (param, image.name)
var = False
if method == "average":
var = False
elif method == "stddev":
var = True
for k in xrange(refinements):
best_error = self.m.imageError(i, method=method)
best_value = start_value
test_value = start_value - 5*step_size
#print "start value = %.2f error = %.1f" % (best_value, best_error)
while test_value <= start_value + 5*step_size + (step_size*0.1):
coord_list = []
corner_list = []
grid_list = []
if param == "yaw":
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
yaw_bias=test_value)
elif param == "roll":
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
roll_bias=test_value)
elif param == "pitch":
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
pitch_bias=test_value)
elif param == "altitude":
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
alt_bias=test_value)
error = self.m.imageError(i, alt_coord_list=coord_list,
method=method)
#print "Test %s error @ %.2f = %.2f" % ( param, test_value, error )
if error < best_error:
best_error = error
best_value = test_value
#print " better value = %.2f, error = %.1f" % (best_value, best_error)
test_value += step_size
# update values for next iteration
start_value = best_value
step_size /= 5.0
return best_value, best_error
# try to fit individual images by manipulating various parameters
# and testing to see if that produces a better fit metric
def fitImage(self, i, method, gain):
# parameters to manipulate = yaw, roll, pitch
yaw_step = 2.0
roll_step = 1.0
pitch_step = 1.0
refinements = 4
image = self.image_list[i]
# start values should be zero because previous values are
# already included so we are computing a new offset from the
# past solution.
yaw, e = self.estimateParameter(i, self.ground_alt_m, method,
"yaw", start_value=0.0,
step_size=1.0, refinements=refinements)
roll, e = self.estimateParameter(i, self.ground_alt_m, method,
"roll", start_value=0.0,
step_size=1.0, refinements=refinements)
pitch, e = self.estimateParameter(i, self.ground_alt_m, method,
"pitch", start_value=0.0,
step_size=1.0,
refinements=refinements)
alt, e = self.estimateParameter(i, self.ground_alt_m, method,
"altitude", start_value=0.0,
step_size=2.0, refinements=refinements)
image.camera_yaw += yaw*gain
image.camera_roll += roll*gain
image.camera_pitch += pitch*gain
image.camera_z += alt*gain
coord_list = []
corner_list = []
grid_list = []
# but don't save the results so we don't bias future elements
# with moving previous elements
coord_list, corner_list, grid_list = self.projectImageKeypointsNative2(image)
error = self.m.imageError(i, alt_coord_list=coord_list, method=method)
if method == "average":
image.error = error
elif method == "stddev":
image.stddev = error
print "Fit %s (%s) is %.2f %.2f %.2f %.2f (avg=%.3f stddev=%.3f)" \
% (image.name, method,
image.camera_yaw, image.camera_roll, image.camera_pitch,
image.camera_z, image.error, image.stddev)
image.save_meta()
# try to fit individual images by manipulating various parameters
# and testing to see if that produces a better fit metric
def fitImageAffine3d(self, i, method, gain):
i1 = self.image_list[i]
angles_sum = [0.0, 0.0, 0.0]
weight_sum = i1.weight
for j, pairs in enumerate(i1.match_list):
if len(pairs) < self.m.min_pairs:
continue
i2 = self.image_list[j]
src = [[], [], []]
dst = [[], [], []]
for pair in pairs:
c1 = i1.coord_list[pair[0]]
c2 = i2.coord_list[pair[1]]
src[0].append(c1[0])
src[1].append(c1[1])
src[2].append(0.0)
dst[0].append(c2[0])
dst[1].append(c2[1])
dst[2].append(0.0)
Aff3D = transformations.superimposition_matrix(src, dst)
scale, shear, angles, trans, persp = transformations.decompose_matrix(Aff3D)
print "%s vs. %s" % (i1.name, i2.name)
#print " scale = %s" % str(scale)
#print " shear = %s" % str(shear)
print " angles = %s" % str(angles)
#print " trans = %s" % str(trans)
#print " persp = %s" % str(persp)
# this is all based around the assumption that our angle
# differences are relatively small
for k in range(3):
a = angles[k]
if a < -180.0:
a += 360.0
if a > 180.0:
a -= 360.0
angles_sum[k] += a
weight_sum += i2.weight
angles = [ angles_sum[0] / weight_sum,
angles_sum[1] / weight_sum,
angles_sum[2] / weight_sum ]
print "average angles = %s" % str(angles)
rad2deg = 180.0 / math.pi
i1.roll_bias += angles[0] * rad2deg * gain
i1.pitch_bias += angles[1] * rad2deg * gain
i1.yaw_bias += angles[2] * rad2deg * gain
coord_list = []
corner_list = []
grid_list = []
# but don't save the results so we don't bias future elements
# with moving previous elements
coord_list, corner_list, grid_list = self.projectImageKeypointsNative2(i1)
error = self.m.imageError(i, alt_coord_list=coord_list, method="average")
stddev = self.m.imageError(i, alt_coord_list=coord_list, method="stddev")
print "average error = %.3f" % error
print "average stddev = %.3f" % stddev
i1.save_meta()
def fitImagesIndividually(self, method, gain):
for i, image in enumerate(self.image_list):
self.fitImage(i, method, gain)
#self.fitImageAffine3d(i, method, gain)
def geotag_pictures( self, correlator, dir = ".", geotag_dir = "." ):
ground_sum = 0.0
ground_count = 0
print "master_time_offset = " + str(correlator.master_time_offset)
for match in correlator.best_matchups:
pict, trig = correlator.get_match(match)
trig_time = trig[0] + correlator.master_time_offset
pict_time = pict[0]
time_diff = trig_time - pict_time
#print str(match[0]) + " <=> " + str(match[1])
#print str(pict_time) + " <=> " + str(trig_time)
print pict[2] + " -> " + str(trig[2]) + ", " + str(trig[3]) + ": " + str(trig[4]) + " (" + str(time_diff) + ")"
agl_ft = trig[4]
lon_deg, lat_deg, msl = correlator.get_position( trig[0] )
msl_ft = msl / 0.3048
ground_sum += (msl_ft - agl_ft)
ground_count += 1
ground_agl_ft = ground_sum / ground_count
print " MSL: " + str( msl_ft ) + " AGL: " + str(agl_ft) + " Ground: " + str(ground_agl_ft)
# double check geotag dir exists and make it if not
if not os.path.exists(geotag_dir):
os.makedirs(geotag_dir)
# update a resized copy if needed
name_in = dir + "/" + pict[2]
name_out = geotag_dir + "/" + pict[2]
if not os.path.isfile( name_out ):
command = 'convert -geometry 684x456 ' + name_in + ' ' + name_out
#command = 'convert -geometry 512x512\! ' + name_in + ' ' + name_out
print command
commands.getstatusoutput( command )
# update the gps meta data
exif = pyexiv2.ImageMetadata(name_out)
exif.read()
#exif.set_gps_info(lat_deg, lon_deg, (msl_ft*0.3048))
altitude = msl_ft*0.3048
GPS = 'Exif.GPSInfo.GPS'
exif[GPS + 'AltitudeRef'] = '0' if altitude >= 0 else '1'
exif[GPS + 'Altitude'] = Fraction(altitude)
exif[GPS + 'Latitude'] = decimal_to_dms(lat_deg)
exif[GPS + 'LatitudeRef'] = 'N' if lat_deg >= 0 else 'S'
exif[GPS + 'Longitude'] = decimal_to_dms(lon_deg)
exif[GPS + 'LongitudeRef'] = 'E' if lon_deg >= 0 else 'W'
exif[GPS + 'MapDatum'] = 'WGS-84'
exif.write()
def fixup_timestamps( self, correlator, camera_time_error, geotag_dir = "." ):
for match in correlator.best_matchups:
pict, trig = correlator.get_match(match)
unixtime = pict[0]
name = geotag_dir + "/" + pict[2]
unixtime += camera_time_error
newdatetime = datetime.datetime.utcfromtimestamp(round(unixtime)).strftime('%Y:%m:%d %H:%M:%S')
exif = pyexiv2.ImageMetadata(name)
exif.read()
print "old: " + str(exif['Exif.Image.DateTime']) + " new: " + newdatetime
exif['Exif.Image.DateTime'] = newdatetime
exif.write()
def generate_aircraft_location_report(self):
for image in self.image_list:
print "%s\t%.10f\t%.10f\t%.2f" \
% (image.name, image.aircraft_lon, image.aircraft_lat,
image.aircraft_msl)
def draw_epilines(self, img1, img2, lines, pts1, pts2):
''' img1 - image on which we draw the epilines for the points in img2
lines - corresponding epilines '''
r,c,d = img1.shape
print img1.shape
for r,pt1,pt2 in zip(lines,pts1,pts2):
color = tuple(np.random.randint(0,255,3).tolist())
x0,y0 = map(int, [0, -r[2]/r[1] ])
x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
cv2.line(img1, (x0,y0), (x1,y1), color,1)
cv2.circle(img1,tuple(pt1),5,color,-1)
cv2.circle(img2,tuple(pt2),5,color,-1)
return img1,img2
def sfm_test(self):
for i, i1 in enumerate(self.image_list):
for j, pairs in enumerate(i1.match_list):
if i == j:
continue
if len(pairs) < 8:
# 8+ pairs are required to compute the fundamental matrix
continue
i2 = self.image_list[j]
pts1 = []
pts2 = []
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.kp_list[pair[1]].pt
pts1.append( p1 )
pts2.append( p2 )
pts1 = np.float32(pts1)
pts2 = np.float32(pts2)
print "pts1 = %s" % str(pts1)
print "pts2 = %s" % str(pts2)
F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
print "loading full res images ..."
img1 = i1.load_source_rgb(self.source_dir)
img2 = i2.load_source_rgb(self.source_dir)
# Find epilines corresponding to points in right image
# (second image) and drawing its lines on left image
lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2, F)
lines1 = lines1.reshape(-1,3)
img5,img6 = self.draw_epilines(img1,img2,lines1,pts1,pts2)
# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
lines2 = lines2.reshape(-1,3)
img3,img4 = self.draw_epilines(img2,img1,lines2,pts2,pts1)
plt.subplot(121),plt.imshow(img5)
plt.subplot(122),plt.imshow(img3)
plt.show()
# this really doesn't work right because the euler pose angles derived
# might be correct, but aren't all consistent apparently ... the back
# solver to extract angles from an arbitrary rotation matrix doesn't
# always seem to be consistent. (this probably should be deprecated at some point)
def fitImagesWithSolvePnP1(self):
for i, i1 in enumerate(self.image_list):
#print "sovelPNP() for %s" % i1.name
K = self.cam.get_K()
att_sum = [ [0.0, 0.0], [0.0, 0.0], [0.0, 0.0] ]
pos_sum = [0.0, 0.0, 0.0]
weight_sum = 0.0
for j, pairs in enumerate(i1.match_list):
if i == j:
continue
if len(pairs) < 8:
# we need at least 8 pairs to call solvePNP()
continue
i2 = self.image_list[j]
img_pts = []
obj_pts = []
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.coord_list[pair[1]]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
#print "img_pts = %s" % str(img_pts)
#print "obj_pts = %s" % str(obj_pts)
(result, rvec, tvec) = cv2.solvePnP(obj_pts, img_pts, K, None)
#print " result = %s, rvec = %s, tvec = %s" \
# % (result, rvec, tvec)
print " rvec = %.2f %.2f %.2f" % (rvec[0], rvec[1], rvec[2])
R, jac = cv2.Rodrigues(rvec)
#print " R =\n%s" % str(R)
# googled how to derive the position in object
# coordinates from solvePNP()
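# (solvePnP() returns R, t with p_cam = R*p_obj + t; the camera center C
# in object coordinates satisfies R*C + t = 0, hence C = -R^T * t, which
# is what the next line computes)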
pos = -np.matrix(R).T * np.matrix(tvec)
#print "solved pos = %s" % str(pos)
#print " pos[0] = %s" % str(pos[0])
#print " pos.item(0) = %s" % str(pos.item(0))
for k in range(0,3):
pos_sum[k] += pos.item(k)
#print " PNP pos = %s" % str(pos)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
# this will be tedious code ...
#Rconv[:3, 0] = R[:3, 2] # swap col 0 <=> col 2
#Rconv[:3, 2] = R[:3, 0]
#Rconv[1, :3] *= -1.0 # negate the middle row
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
#print "Rconv =\n%s" % str(Rconv)
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv, 'rzyx')
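# accumulate each angle as a (cos, sin) unit vector so the pairwise
# estimates can be averaged without wrap-around problems; the mean is
# recovered with atan2 further below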
att_sum[0][0] += math.cos(yaw)
att_sum[0][1] += math.sin(yaw)
att_sum[1][0] += math.cos(pitch)
att_sum[1][1] += math.sin(pitch)
att_sum[2][0] += math.cos(roll)
att_sum[2][1] += math.sin(roll)
weight_sum += i2.weight
deg2rad = math.pi / 180.0
print " pair euler = %.2f %.2f %.2f" % (yaw/deg2rad,
pitch/deg2rad,
roll/deg2rad)
#print " est = %.2f %.2f %.2f" % (i1.camera_yaw,
# i1.camera_pitch,
# i1.camera_roll)
Rcam = transformations.euler_matrix(i1.camera_yaw*deg2rad,
i1.camera_pitch*deg2rad,
i1.camera_roll*deg2rad,
'rzyx')
#print "solvePNP =\n%s" % str(Rconv)
#print "my FIT =\n%s" % str(Rcam)
v = np.array( [1.0, 0.0, 0.0] )
vh = np.array( [1.0, 0.0, 0.0, 1.0] )
#print " v = %s" % str(v)
#print " Rconv * v = %s" % str(np.dot(Rconv, v))
#print " Rcam * v = %s" % str(np.dot(Rcam, vh))
if weight_sum < 0.0001:
continue
i1.camera_x = pos_sum[0] / weight_sum
i1.camera_y = pos_sum[1] / weight_sum
i1.camera_z = pos_sum[2] / weight_sum
print "Camera pose for image %s:" % i1.name
print " PNP pos = %.2f %.2f %.2f" % (i1.camera_x,
i1.camera_y,
i1.camera_z)
yaw_avg = math.atan2(att_sum[0][1]/weight_sum,
att_sum[0][0]/weight_sum)
pitch_avg = math.atan2(att_sum[1][1]/weight_sum,
att_sum[1][0]/weight_sum)
roll_avg = math.atan2(att_sum[2][1]/weight_sum,
att_sum[2][0]/weight_sum)
i1.camera_yaw = yaw_avg / deg2rad
i1.camera_pitch = pitch_avg / deg2rad
i1.camera_roll = roll_avg / deg2rad
print " PNP att = %.2f %.2f %.2f" % (i1.camera_yaw,
i1.camera_pitch,
i1.camera_roll)
i1.save_meta()
# call solvePnP() on all the matching pairs from all the matching
# images simultaneously. This works, but inherently weights the
# fit much more towards the images with more matching pairs ... on
# the other hand, that may be kind of what we want because images
# with a few matches over a small area can grossly magnify any
# errors into the result of solvePnP().
def fitImagesWithSolvePnP2(self):
for i, i1 in enumerate(self.image_list):
#print "sovlePNP() for %s" % i1.name
K = self.cam.get_K()
img_pts = []
obj_pts = []
for j, pairs in enumerate(i1.match_list):
if i == j:
# include the match with ourselves ... we have
# self worth too!
for k, flag in enumerate(i1.kp_usage):
if flag:
p1 = i1.kp_list[k].pt
p2 = i1.coord_list[k]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
if len(pairs) < 8:
# we need at least 8 pairs to call solvePNP()
continue
i2 = self.image_list[j]
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.coord_list[pair[1]]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
# now call the solver if we have enough points
if len(img_pts) < 8:
continue
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
#print "img_pts = %s" % str(img_pts)
#print "obj_pts = %s" % str(obj_pts)
#(result, rvec, tvec) = cv2.solvePnP(obj_pts, img_pts, cam, None)
if hasattr(i1, 'rvec'):
(result, i1.rvec, i1.tvec) \
= cv2.solvePnP(obj_pts, img_pts, K, None,
i1.rvec, i1.tvec,
useExtrinsicGuess=True)
else:
# first time
(result, i1.rvec, i1.tvec) \
= cv2.solvePnP(obj_pts, img_pts, K, None)
#print " result = %s, rvec = %s, tvec = %s" \
# % (result, i1.rvec, i1.tvec)
# print " rvec = %.2f %.2f %.2f" % (i1.rvec[0], i1.rvec[1], i1.rvec[2])
R, jac = cv2.Rodrigues(i1.rvec)
#print " R =\n%s" % str(R)
# googled how to derive the position in object
# coordinates from solvePNP()
pos = -np.matrix(R).T * np.matrix(i1.tvec)
#print "solved pos = %s" % str(pos)
#print " pos[0] = %s" % str(pos[0])
#print " pos.item(0) = %s" % str(pos.item(0))
#print " PNP pos = %s" % str(pos)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
# this will be tedious code ...
#Rconv[:3, 0] = R[:3, 2] # swap col 0 <=> col 2
#Rconv[:3, 2] = R[:3, 0]
#Rconv[1, :3] *= -1.0 # negate the middle row
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
#print "Rconv =\n%s" % str(Rconv)
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv, 'rzyx')
deg2rad = math.pi / 180.0
#print " pair euler = %.2f %.2f %.2f" % (yaw/deg2rad,
# pitch/deg2rad,
# roll/deg2rad)
#print " est = %.2f %.2f %.2f" % (i1.camera_yaw,
# i1.camera_pitch,
# i1.camera_roll)
Rcam = transformations.euler_matrix(i1.camera_yaw*deg2rad,
i1.camera_pitch*deg2rad,
i1.camera_roll*deg2rad,
'rzyx')
print "Beg cam pose %s %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i1.camera_yaw, i1.camera_pitch, i1.camera_roll,
i1.camera_x, i1.camera_y, i1.camera_z)
i1.camera_yaw = yaw/deg2rad
i1.camera_pitch = pitch/deg2rad
i1.camera_roll = roll/deg2rad
i1.camera_x = pos.item(0)
i1.camera_y = pos.item(1)
i1.camera_z = pos.item(2)
i1.save_meta()
print "New cam pose %s %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i1.camera_yaw, i1.camera_pitch, i1.camera_roll,
i1.camera_x, i1.camera_y, i1.camera_z)
# find the pose estimate for each match individually and use that
# pose to project the keypoints. Then average all the keypoint
# projections together ... this weights image pairs equally and
# averaging points in cartesian space is much easier than trying
# to figure out how to average euler angles.
#
# Problem ... too many pairwise matches are unstable for
# solvePnP() because of clustered or linear data leading to a
# whole lot of nonsense
def fitImagesWithSolvePnP3(self):
for i, i1 in enumerate(self.image_list):
print "solvePnP() (3) for %s" % i1.name
if i1.connections == 0:
print " ... no connections, skipping ..."
continue
K = self.cam.get_K()
master_list = []
master_list.append(i1.coord_list) # weight ourselves in the mix
for j, pairs in enumerate(i1.match_list):
# include the match with ourselves ... we have self worth too!
#if i == j:
# continue
if len(pairs) < 8:
# we need at least 8 pairs to call solvePNP()
continue
i2 = self.image_list[j]
# assemble the data points for the solver
img_pts = []
obj_pts = []
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.coord_list[pair[1]]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
# now call the solver
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
#(result, rvec, tvec) \
# = cv2.solvePnP(obj_pts, img_pts, K, None)
(rvec, tvec, status) \
= cv2.solvePnPRansac(obj_pts, img_pts, K, None)
size = len(status)
inliers = np.sum(status)
if inliers < size:
print '%s vs %s: %d / %d inliers/matched' \
% (i1.name, i2.name, inliers, size)
status = self.m.showMatch(i1, i2, pairs, status)
delete_list = []
for k, flag in enumerate(status):
if not flag:
print " deleting: " + str(matches[k])
#match[i] = (-1, -1)
delete_list.append(pairs[k])
for pair in delete_list:
self.deletePair(i, j, pair)
#print " result = %s, rvec = %s, tvec = %s" \
# % (result, rvec, tvec)
# print " rvec = %.2f %.2f %.2f" % (rvec[0], rvec[1], rvec[2])
R, jac = cv2.Rodrigues(rvec)
#print " R =\n%s" % str(R)
# googled how to derive the position in object
# coordinates from solvePNP()
pos = -np.matrix(R).T * np.matrix(tvec)
#print "solved pos = %s" % str(pos)
#print " pos[0] = %s" % str(pos[0])
#print " pos.item(0) = %s" % str(pos.item(0))
#print " PNP pos = %s" % str(pos)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
#print "Rconv =\n%s" % str(Rconv)
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv,
'rzyx')
deg2rad = math.pi / 180.0
camera_pose = (yaw/deg2rad, pitch/deg2rad, roll/deg2rad,
pos.item(0), pos.item(1), pos.item(2))
# project out the image keypoints for this pair's
# estimated camera pose
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(i1, camera_pose)
#print "len(coord_list) = %d" % len(coord_list)
# save the results for averaging purposes
master_list.append(coord_list)
#print " pair euler = %.2f %.2f %.2f" % (yaw/deg2rad,
# pitch/deg2rad,
# roll/deg2rad)
#print " est = %.2f %.2f %.2f" % (i1.camera_yaw,
# i1.camera_pitch,
# i1.camera_roll)
#Rcam = transformations.euler_matrix(i1.camera_yaw*deg2rad,
# i1.camera_pitch*deg2rad,
# i1.camera_roll*deg2rad,
# 'rzyx')
#print "solvePNP =\n%s" % str(Rconv)
#print "my FIT =\n%s" % str(Rcam)
print " %s vs %s cam pose %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i2.name,
camera_pose[0], camera_pose[1], camera_pose[2],
camera_pose[3], camera_pose[4], camera_pose[5])
# find the average coordinate locations from the set of pair
# projections
coord_list = []
size = len(master_list[0]) # number of coordinates
#print "size = %d" % size
n = len(master_list) # number of projections
#print "n = %d" % n
for i in range(0, size):
#print "i = %d" % i
if not i1.kp_usage[i]:
coord_list.append(None)
continue
x_sum = 0.0
y_sum = 0.0
for list in master_list:
#print "len(list) = %d" % len(list)
x_sum += list[i][0]
y_sum += list[i][1]
x = x_sum / float(n)
y = y_sum / float(n)
coord_list.append( [x, y] )
# now finally call solvePnP() on the average of the projections
img_pts = []
obj_pts = []
for i in range(0, size):
if not i1.kp_usage[i]:
continue
img_pts.append( i1.kp_list[i].pt )
obj_pts.append( [coord_list[i][0], coord_list[i][1], 0.0] )
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
(result, rvec, tvec) = cv2.solvePnP(obj_pts, img_pts, K, None)
# and extract the average camera pose
R, jac = cv2.Rodrigues(rvec)
pos = -np.matrix(R).T * np.matrix(tvec)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv,
'rzyx')
deg2rad = math.pi / 180.0
print "Beg cam pose %s %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i1.camera_yaw, i1.camera_pitch, i1.camera_roll,
i1.camera_x, i1.camera_y, i1.camera_z)
i1.camera_yaw = yaw/deg2rad
i1.camera_pitch = pitch/deg2rad
i1.camera_roll = roll/deg2rad
i1.camera_x = pos.item(0)
i1.camera_y = pos.item(1)
i1.camera_z = pos.item(2)
i1.save_meta()
print "New cam pose %s %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i1.camera_yaw, i1.camera_pitch, i1.camera_roll,
i1.camera_x, i1.camera_y, i1.camera_z)
def triangulate_test(self):
for i, i1 in enumerate(self.image_list):
print "pnp for %s" % i1.name
K = self.cam.get_K()
att_sum = [ [0.0, 0.0], [0.0, 0.0], [0.0, 0.0] ]
pos_sum = [0.0, 0.0, 0.0]
weight_sum = 0.0
for j, pairs in enumerate(i1.match_list):
if i == j:
continue
if len(pairs) < 8:
# start with only well correlated pairs
continue
i2 = self.image_list[j]
R1, jac = cv2.Rodrigues(i1.rvec)
R2, jac = cv2.Rodrigues(i2.rvec)
def pnp_test(self):
for i, i1 in enumerate(self.image_list):
print "pnp for %s" % i1.name
K = self.cam.get_K()
att_sum = [ [0.0, 0.0], [0.0, 0.0], [0.0, 0.0] ]
pos_sum = [0.0, 0.0, 0.0]
weight_sum = 0.0
for j, pairs in enumerate(i1.match_list):
if i == j:
continue
if len(pairs) < 8:
# start with only well correlated pairs
continue
i2 = self.image_list[j]
img_pts = []
obj_pts = []
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.coord_list[pair[1]]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
#print "img_pts = %s" % str(img_pts)
#print "obj_pts = %s" % str(obj_pts)
(result, rvec, tvec) = cv2.solvePnP(obj_pts, img_pts, K, None)
print " result = %s, rvec = %s, tvec = %s" \
% (result, rvec, tvec)
R, jac = cv2.Rodrigues(rvec)
print " R =\n%s" % str(R)
# googled how to derive the position in object
# coordinates from solvePNP()
pos = -np.matrix(R).T * np.matrix(tvec)
for k in range(0,3):
pos_sum[k] += pos[k]
print " PNP pos = %s" % str(pos)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
# this will be tedious code ...
#Rconv[:3, 0] = R[:3, 2] # swap col 0 <=> col 2
#Rconv[:3, 2] = R[:3, 0]
#Rconv[1, :3] *= -1.0 # negate the middle row
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
print "Rconv =\n%s" % str(Rconv)
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv, 'rzyx')
att_sum[0][0] += math.cos(yaw)
att_sum[0][1] += math.sin(yaw)
att_sum[1][0] += math.cos(pitch)
att_sum[1][1] += math.sin(pitch)
att_sum[2][0] += math.cos(roll)
att_sum[2][1] += math.sin(roll)
weight_sum += i2.weight
deg2rad = math.pi / 180.0
print " euler = %.2f %.2f %.2f" % (yaw/deg2rad,
pitch/deg2rad,
roll/deg2rad)
print " est = %.2f %.2f %.2f" % (i1.camera_yaw,
i1.camera_pitch,
i1.camera_roll)
Rcam = transformations.euler_matrix(i1.camera_yaw*deg2rad,
i1.camera_pitch*deg2rad,
i1.camera_roll*deg2rad,
'rzyx')
print "solvePNP =\n%s" % str(Rconv)
print "my FIT =\n%s" % str(Rcam)
v = np.array( [1.0, 0.0, 0.0] )
vh = np.array( [1.0, 0.0, 0.0, 1.0] )
print " v = %s" % str(v)
print " Rconv * v = %s" % str(np.dot(Rconv, v))
print " Rcam * v = %s" % str(np.dot(Rcam, vh))
if weight_sum < 0.0001:
continue
print "Camera pose for image %s:" % i1.name
print " PNP pos = %.2f %.2f %.2f" % (pos_sum[0]/weight_sum,
pos_sum[1]/weight_sum,
pos_sum[2]/weight_sum)
print " Fit pos = %s" % str((i1.camera_x, i1.camera_y, i1.camera_z))
yaw_avg = math.atan2(att_sum[0][1]/weight_sum,
att_sum[0][0]/weight_sum)
pitch_avg = math.atan2(att_sum[1][1]/weight_sum,
att_sum[1][0]/weight_sum)
roll_avg = math.atan2(att_sum[2][1]/weight_sum,
att_sum[2][0]/weight_sum)
print " PNP att = %.2f %.2f %.2f" % ( yaw_avg / deg2rad,
pitch_avg / deg2rad,
roll_avg / deg2rad )
print " Fit att = %.2f %.2f %.2f" % (i1.camera_yaw,
i1.camera_pitch,
i1.camera_roll)
# should reset the width, height values in the keys files
def recomputeWidthHeight(self):
for image in self.image_list:
if image.img == None:
image.load_features()
image.load_rgb()
image.save_keys()
# write out the camera positions as geojson
def save_geojson(self, path="mymap", cm_per_pixel=15.0 ):
feature_list = []
if not os.path.exists(path):
os.makedirs(path)
for i, image in enumerate(self.image_list):
# camera point
cam = geojson.Point( (image.aircraft_lon, image.aircraft_lat) )
# coverage polys
geo_list = []
for pt in image.corner_list:
lon = self.render.x2lon(pt[0])
lat = self.render.y2lat(pt[1])
geo_list.append( (lon, lat) )
if len(geo_list) == 4:
tmp = geo_list[2]
geo_list[2] = geo_list[3]
geo_list[3] = tmp
poly = geojson.Polygon( [ geo_list ] )
# group
gc = geojson.GeometryCollection( [cam, poly] )
source = "%s/%s" % (self.source_dir, image.name)
work = "%s/%s" % (self.image_dir, image.name)
f = geojson.Feature(geometry=gc, id=i,
properties={"name": image.name,
"source": source,
"work": work})
feature_list.append( f )
fc = geojson.FeatureCollection( feature_list )
dump = geojson.dumps(fc)
print str(dump)
f = open( path + "/points.geojson", "w" )
f.write(dump)
f.close()
warped_dir = path + "/warped"
if not os.path.exists(warped_dir):
os.makedirs(warped_dir )
for i, image in enumerate(self.image_list):
print "rendering %s" % image.name
w, h, warp = \
self.render.drawImage(image,
source_dir=self.source_dir,
cm_per_pixel=cm_per_pixel,
keypoints=False,
bounds=None)
cv2.imwrite( warped_dir + "/" + image.name, warp )
Updated vector projection code ... trying to move towards a more
common formulation with OpenCV so I can better use solvePnP() and
triangulatePoints()
Former-commit-id: 434353f626dfa93fc4f10407ac0ae443d23c724b
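# A minimal sketch (illustrative names only, not code from this commit) of
# the OpenCV formulation the message above points at: solvePnP() yields a
# pose per image, and cv2.triangulatePoints() can then recover 3-d points
# from two such poses, e.g.:
#     P1 = K.dot(np.hstack([R1, t1]))      # 3x4 projection matrices
#     P2 = K.dot(np.hstack([R2, t2]))
#     pts4d = cv2.triangulatePoints(P1, P2, pts1.T, pts2.T)
#     pts3d = (pts4d[:3] / pts4d[3]).T     # dehomogenize to Nx3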
#!/usr/bin/python
import commands
import cv2
import fileinput
import fnmatch
import json
import math
from matplotlib import pyplot as plt
import numpy as np
import os.path
from progress.bar import Bar
import subprocess
import sys
import geojson
from getchar import find_getch
import Camera
import Image
import ImageList
import Matcher
import Placer
import Render
import transformations
class ProjectMgr():
def __init__(self, project_dir=None):
# directories
self.project_dir = None # project working directory
self.source_dir = None # original images
self.image_dir = None # working set of images
self.cam = Camera.Camera()
self.image_list = []
self.detector_params = { 'detector': 'SIFT', # { SIFT, SURF, ORB, Star }
'grid-detect': 1,
'sift-max-features': 2000,
'surf-hessian-threshold': 600,
'surf-noctaves': 4,
'orb-max-features': 2000,
'star-max-size': 16,
'star-response-threshold': 30,
'star-line-threshold-projected': 10,
'star-line-threshold-binarized': 8,
'star-suppress-nonmax-size': 5 }
self.matcher_params = { 'matcher': 'FLANN', # { FLANN or 'BF' }
'match-ratio': 0.75 }
self.ned_reference_lla = []
# the following member variables need to be reviewed/organized
self.ac3d_steps = 8
self.shutter_latency = 0.0
self.group_roll_bias = 0.0
self.group_pitch_bias = 0.0
self.group_yaw_bias = 0.0
#self.group_alt_bias = 0.0
self.k1 = 0.0
self.k2 = 0.0
#self.m = Matcher.Matcher()
self.placer = Placer.Placer()
self.render = Render.Render()
if project_dir != None:
self.load( project_dir )
# project_dir is a new folder for all derived files
def set_project_dir(self, project_dir, create_if_needed=True):
self.project_dir = project_dir
if not os.path.exists(self.project_dir):
if create_if_needed:
print "Notice: creating project directory =", self.project_dir
os.makedirs(self.project_dir)
else:
print "Error: project dir doesn't exist =", self.project_dir
return False
# and make children directories
self.image_dir = project_dir + "/" + "Images"
if not os.path.exists(self.image_dir):
if create_if_needed:
print "Notice: creating image directory =", self.image_dir
os.makedirs(self.image_dir)
else:
print "Error: image dir doesn't exist =", self.image_dir
return False
# all is good
return True
# source_dir is the folder containing all the raw/original images.
# The expected work flow is that we will import/scale all the
# original images into our project folder leaving the original
# image set completely untouched.
def set_source_dir(self, source_dir):
if source_dir == self.project_dir:
print "Error: image source and project dirs must be different."
return
if not os.path.exists(source_dir):
print "Error: image source path does not exist =", source_path
self.source_dir = source_dir
def save(self):
# create a project dictionary and write it out as json
if not os.path.exists(self.project_dir):
print "Error: project doesn't exist =", self.project_dir
return
dirs = {}
dirs['images-source'] = self.source_dir
project_dict = {}
project_dict['detector'] = self.detector_params
project_dict['matcher'] = self.matcher_params
project_dict['directories'] = dirs
project_dict['ned-reference-lla'] = self.ned_reference_lla
project_file = self.project_dir + "/Project.json"
try:
f = open(project_file, 'w')
json.dump(project_dict, f, indent=4, sort_keys=True)
f.close()
except IOError as e:
print "Save project(): I/O error({0}): {1}".format(e.errno, e.strerror)
return
except:
raise
# save camera configuration
self.cam.save(self.project_dir)
def load(self, project_dir, create_if_needed=True):
if not self.set_project_dir( project_dir ):
return
# load project configuration
project_file = self.project_dir + "/Project.json"
try:
f = open(project_file, 'r')
project_dict = json.load(f)
f.close()
if 'detector' in project_dict:
self.detector_params = project_dict['detector']
if 'matcher' in project_dict:
self.matcher_params = project_dict['matcher']
dirs = project_dict['directories']
self.source_dir = dirs['images-source']
self.ned_reference_lla = project_dict['ned-reference-lla']
except:
print "load error: " + str(sys.exc_info()[1])
print "Notice: unable to read =", project_file
print "Continuing with an empty project configuration"
# load camera configuration
self.cam.load(self.project_dir)
# import an image set into the project directory, possibly scaling them
# to a lower resolution for faster processing.
def import_images(self, scale=0.25, converter='imagemagick'):
if self.source_dir == None:
print "Error: source_dir not defined."
return
if self.image_dir == None:
print "Error: project's image_dir not defined."
return
if self.source_dir == self.image_dir:
print "Error: source and destination directories must be different."
return
if not os.path.exists(self.source_dir):
print "Error: source directory not found =", self.source_dir
return
if not os.path.exists(self.image_dir):
print "Error: destination directory not found =", self.image_dir
return
files = []
for file in os.listdir(self.source_dir):
if fnmatch.fnmatch(file, '*.jpg') or fnmatch.fnmatch(file, '*.JPG'):
files.append(file)
files.sort()
for file in files:
name_in = self.source_dir + "/" + file
name_out = self.image_dir + "/" + file
if converter == 'imagemagick':
command = "convert -resize %d%% %s %s" % ( int(scale*100.0), name_in, name_out )
print command
commands.getstatusoutput( command )
elif converter == 'opencv':
src = cv2.imread(name_in)
#method = cv2.INTER_AREA
method = cv2.INTER_LANCZOS4
dst = cv2.resize(src, (0,0), fx=scale, fy=scale,
interpolation=method)
cv2.imwrite(name_out, dst)
print "Scaling (%.1f%%) %s to %s" % ((scale*100.0), name_in, name_out)
else:
print "Error: unknown converter =", converter
def load_image_info(self):
file_list = []
for file in os.listdir(self.image_dir):
if fnmatch.fnmatch(file, '*.jpg') or fnmatch.fnmatch(file, '*.JPG'):
file_list.append(file)
file_list.sort()
# wipe image list (so we don't double load)
self.image_list = []
for file_name in file_list:
image = Image.Image(self.image_dir, file_name)
self.image_list.append( image )
# make sure our matcher gets a copy of the image list
#self.m.setImageList(self.image_list)
self.placer.setImageList(self.image_list)
self.render.setImageList(self.image_list)
def load_features(self):
bar = Bar('Loading keypoints and descriptors:',
max = len(self.image_list))
for image in self.image_list:
image.load_features()
image.load_descriptors()
bar.next()
bar.finish()
def load_matches(self):
bar = Bar('Loading keypoint (pair) matches:',
max = len(self.image_list))
for image in self.image_list:
image.load_matches()
bar.next()
bar.finish()
def save_images_meta(self):
for image in self.image_list:
image.save_meta()
def set_detector_params(self, dparams):
self.detector_params = dparams
def set_matcher_params(self, mparams):
self.matcher_params = mparams
def detect_features(self, force=True, show=False):
if not show:
bar = Bar('Detecting features:', max = len(self.image_list))
for image in self.image_list:
if force or len(image.kp_list) == 0 or image.des_list == None:
#print "detecting features and computing descriptors: " + image.name
if image.img_rgb == None:
image.load_rgb()
image.detect_features(self.detector_params)
image.save_features()
image.save_descriptors()
image.save_matches()
if show:
result = image.show_features()
if result == 27 or result == ord('q'):
break
if not show:
bar.next()
if not show:
bar.finish()
def show_features_image(self, image):
if image.img_rgb == None:
image.load_rgb()
result = image.show_features()
return result
def show_features_images(self, name=None):
for image in self.image_list:
result = self.show_features_image(image)
if result == 27 or result == ord('q'):
break
def findImageByName(self, name):
for i in self.image_list:
if i.name == name:
return i
return None
# compute a center reference location (lon, lat) for the group of
# images.
def compute_ned_reference_lla(self):
# requires images to have their location computed/loaded
lon_sum = 0.0
lat_sum = 0.0
for image in self.image_list:
lla, ypr, quat = image.get_aircraft_pose()
lon_sum += lla[1]
lat_sum += lla[0]
self.ned_reference_lla = [ lat_sum / len(self.image_list),
lon_sum / len(self.image_list),
0.0 ]
self.render.setRefCoord(self.ned_reference_lla)
# for each feature in each image, compute the undistorted pixel
# location (from the calibrated distortion parameters)
def undistort_keypoints(self):
print "Notice: undistort keypoints"
camw, camh = self.cam.get_image_params()
for image in self.image_list:
if len(image.kp_list) == 0:
continue
scale = float(image.width) / float(camw)
K = self.cam.get_K(scale)
uv_raw = np.zeros((len(image.kp_list),1,2), dtype=np.float32)
for i, kp in enumerate(image.kp_list):
uv_raw[i][0] = (kp.pt[0], kp.pt[1])
dist_coeffs = np.array(self.cam.camera_dict['dist-coeffs'],
dtype=np.float32)
uv_new = cv2.undistortPoints(uv_raw, K, dist_coeffs, P=K)
image.uv_list = []
for i, uv in enumerate(uv_new):
image.uv_list.append(uv_new[i][0])
#print " orig = %s undistort = %s" % (uv_raw[i][0], uv_new[i][0])
# for each uv in the provided uv list, apply the distortion
# formula to compute the original distorted value.
def redistort(self, uv_list, K, dist_coeffs):
fx = K[0,0]
fy = K[1,1]
cx = K[0,2]
cy = K[1,2]
k1, k2, p1, p2, k3 = dist_coeffs
uv_distorted = []
for pt in uv_list:
x = (pt[0] - cx) / fx
y = (pt[1] - cy) / fy
# Compute radius^2
r2 = x**2 + y**2
r4, r6 = r2**2, r2**3
# Compute tangential distortion
dx = 2*p1*x*y + p2*(r2 + 2*x*x)
dy = p1*(r2 + 2*y*y) + 2*p2*x*y
# Compute radial factor
Lr = 1.0 + k1*r2 + k2*r4 + k3*r6
ud = Lr*x + dx
vd = Lr*y + dy
uv_distorted.append( [ud * fx + cx, vd * fy + cy] )
return uv_distorted
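# Worked example of the formula above (illustrative numbers only): with
# fx = fy = 1000, cx = 640, cy = 360 and dist_coeffs = [-0.1, 0, 0, 0, 0],
# the undistorted pixel (740, 360) gives
#   x = 0.1, y = 0.0, r2 = 0.01, dx = dy = 0,
#   Lr = 1 + (-0.1)*0.01 = 0.999,
# so the re-distorted pixel is (0.0999*1000 + 640, 360) = (739.9, 360),
# i.e. pulled slightly toward the image center, as expected for a negative
# (barrel distortion) k1.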
# project the list of (u, v) pixels from image space into camera
# space, remap that to a vector in ned space (for camera
# ypr=[0,0,0], and then transform that by the camera pose, returns
# the vector from the camera, through the pixel, into ned space
def projectVectors(self, K, quat, uv_list):
IK = np.linalg.inv(K)
IR = transformations.quaternion_matrix(quat)[:3,:3]
# M is a transform to map the lens coordinate system (at zero
# roll/pitch/yaw) to the ned coordinate system (at zero
# roll/pitch/yaw). It is essentially a +90 pitch followed by
# +90 roll (or equivalently a +90 yaw followed by +90 pitch).
M = np.array( [[0, 0, 1], [1, 0, 0], [0, 1, 0]], dtype=float )
proj_list = []
for uv in uv_list:
uvh = np.array([uv[0], uv[1], 1.0])
proj = IR.dot(M).dot(IK).dot(uvh)
proj_norm = transformations.unit_vector(proj)
proj_list.append(proj_norm)
#d2r = math.pi / 180.0
#Rx = transformations.rotation_matrix(90*d2r, [1, 0, 0])
#Ry = transformations.rotation_matrix(90*d2r, [0, 1, 0])
#Rz = transformations.rotation_matrix(90*d2r, [0, 0, 1])
#print Rx.dot(Ry)
#print Ry.dot(Rz)
#for uv in uv_list:
# print "uv:", uv
# uvh = np.array([uv[0], uv[1], 1.0])
# print "cam vec=", transformations.unit_vector(IR.dot(IK).dot(uvh))
return proj_list
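# Usage sketch (illustrative; 'pg' stands for an instance of this class and
# w, h for the image dimensions, neither of which is defined here):
#
#   K = pg.cam.get_K()
#   quat = transformations.quaternion_from_euler(0.0, 0.0, 0.0, 'rzyx')
#   rays = pg.projectVectors(K, quat, [[(w-1)*0.5, (h-1)*0.5]])
#
# With an identity rotation the (approximate) center pixel maps to roughly
# [1, 0, 0]: IK sends (cx, cy) to the lens boresight [0, 0, 1] and M remaps
# that to the body-forward axis in ned space.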
# given a set of vectors in the ned frame, and a starting point.
# Find the ground intersection point. For any vectors which point into
# the sky, return just the original reference/starting point.
def intersectVectorsWithGroundPlane(self, pose, ground_m, v_list):
pose_ned = pose['ned']
pt_list = []
for v in v_list:
# solve projection
p = pose_ned
if v[2] > 0.0:
d_proj = -(pose_ned[2] + ground_m)
factor = d_proj / v[2]
n_proj = v[0] * factor
e_proj = v[1] * factor
p = [ pose_ned[0] + n_proj, pose_ned[1] + e_proj, pose_ned[2] + d_proj ]
pt_list.append(p)
return pt_list
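# Worked example (illustrative numbers): with pose['ned'] = [0, 0, -100]
# (100 m above the reference point, since ned 'down' is positive) and
# ground_m = 0, the ray v = [0.1, 0.0, 0.9] yields
#   d_proj = -(-100 + 0) = 100, factor = 100 / 0.9 = 111.1,
#   n_proj = 11.1, e_proj = 0.0,
# so the ground intersection is approximately [11.1, 0.0, 0.0].  Any ray
# with v[2] <= 0.0 (at or above the horizon) falls back to the camera
# position itself.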
#
# Below this point all the code needs to be reviewed/refactored
#
def setWorldParams(self, ground_alt_m=0.0, shutter_latency=0.0,
yaw_bias=0.0, roll_bias=0.0, pitch_bias=0.0):
print "Setting ground=%.1f shutter=%.2f yaw=%.2f roll=%.2f pitch=%.2f"\
% (ground_alt_m, shutter_latency, yaw_bias, roll_bias, pitch_bias)
self.ground_alt_m = ground_alt_m
self.shutter_latency = shutter_latency
self.group_yaw_bias = yaw_bias
self.group_roll_bias = roll_bias
self.group_pitch_bias = pitch_bias
def genKeypointUsageMap(self):
# make the keypoint usage map (used so we don't have to
# project every keypoint every time)
print "Building the keypoint usage map... ",
for i1 in self.image_list:
i1.kp_usage = np.zeros(len(i1.kp_list), np.bool_)
for i, i1 in enumerate(self.image_list):
for j, pairs in enumerate(i1.match_list):
if len(pairs) == 0:
continue
if i == j:
continue
i2 = self.image_list[j]
print "%s vs %s" % (i1.name, i2.name)
for pair in pairs:
i1.kp_usage[pair[0]] = True
i2.kp_usage[pair[1]] = True
print "done."
def interpolateAircraftPositions(self, correlator, force=False,
weight=True):
# tag each image with the flight data parameters at the time
# the image was taken
for match in correlator.best_matchups:
pict, trig = correlator.get_match(match)
image = self.findImageByName(pict[2])
if image != None:
if force or (math.fabs(image.aircraft_lon) < 0.01 and math.fabs(image.aircraft_lat) < 0.01):
# only if we are forcing a new position
# calculation or the position is not already set
# from a save file.
t = trig[0] + self.shutter_latency
lon, lat, msl = correlator.get_position(t)
roll, pitch, yaw = correlator.get_attitude(t)
image.set_aircraft_pose( lon, lat, msl, roll, pitch, yaw )
if weight:
# presumes a pitch/roll of (10, 10) degrees gives a
# zero weight
w = 1.0 - (roll*roll + pitch*pitch)/200.0
if w < 0.01:
w = 0.01
image.weight = w
else:
image.weight = 1.0
image.save_meta()
#print "%s roll=%.1f pitch=%.1f weight=%.2f" % (image.name, roll, pitch, image.weight)
def computeWeights(self, force=None):
# tag each image with the flight data parameters at the time
# the image was taken
for image in self.image_list:
roll = image.aircraft_roll + image.roll_bias
pitch = image.aircraft_pitch + image.pitch_bias
if force != None:
image.weight = force
else:
# presumes a pitch/roll of (10, 10) degrees gives a
# zero weight
w = 1.0 - (roll*roll + pitch*pitch)/200.0
if w < 0.01:
w = 0.01
image.weight = w
image.save_meta()
#print "%s roll=%.1f pitch=%.1f weight=%.2f" % (image.name, roll, pitch, image.weight)
def computeConnections(self, force=None):
for image in self.image_list:
image.connections = 0
for pairs in image.match_list:
if len(pairs) >= self.m.min_pairs:
image.connections += 1
image.save_meta()
print "%s connections: %d" % (image.name, image.connections)
# deprecate this function ... or replace it with a better one (or just
# use opencv)
#
# undistort x, y using a simple radial lens distortion model. (We
# call the original image values the 'distorted' values.) Input
# x, y are expected to be normalized (0.0 - 1.0) in image pixel
# space with 0.5 being the center of the image (and hopefully the
# center of distortion.)
def doLensUndistort(self, aspect_ratio, xnorm, ynorm):
print "DEPRICATED..."
xd = (xnorm * 2.0 - 1.0) * aspect_ratio
yd = ynorm * 2.0 - 1.0
r = math.sqrt(xd*xd + yd*yd)
#print "ar=%.3f xd=%.3f yd=%.3f r=%.2f" % (aspect_ratio, xd, yd, r)
factor = 1.0 + self.k1 * r*r + self.k2 * r*r*r*r
xu = xd * factor
yu = yd * factor
xnorm_u = (xu / aspect_ratio + 1.0) / 2.0
ynorm_u = (yu + 1.0) / 2.0
#print " (%.3f %.3f) -> (%.3f %.3f)" % (xnorm, ynorm, xnorm_u, ynorm_u)
return xnorm_u, ynorm_u
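# Worked example (illustrative; assumes self.k1 = -0.1, self.k2 = 0.0 and a
# 4:3 image so aspect_ratio = 4/3): the normalized point (0.875, 0.5) maps to
#   xd = (0.875*2 - 1) * 4/3 = 1.0, yd = 0.0, r = 1.0,
#   factor = 1 + (-0.1)*1.0 = 0.9, xu = 0.9, yu = 0.0,
#   xnorm_u = (0.9 / (4/3) + 1) / 2 = 0.8375, ynorm_u = 0.5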
def projectPoint2(self, image, q, pt, z_m):
horiz_mm, vert_mm, focal_len_mm = self.cam.get_lens_params()
h = image.height
w = image.width
print [h, w, self.cam.get_lens_params()]
ar = float(w)/float(h) # aspect ratio
# normalized pixel coordinates to [0.0, 1.0]
xnorm = pt[0] / float(w-1)
ynorm = pt[1] / float(h-1)
print "norm = %.4f %.4f" % (xnorm, ynorm)
# lens un-distortion
xnorm_u, ynorm_u = self.doLensUndistort(ar, xnorm, ynorm)
print "norm_u = %.4f %.4f" % (xnorm_u, ynorm_u)
# compute pixel coordinate in sensor coordinate space (mm
# units) with (0mm, 0mm) being the center of the image.
x_mm = (xnorm_u * 2.0 - 1.0) * (horiz_mm * 0.5)
y_mm = (ynorm_u * 2.0 - 1.0) * (vert_mm * 0.5)
print "x_mm = %.4f y_mm = %.4f" % ( x_mm, y_mm )
# the forward vector (out the nose when the aircraft is
# straight, level, and flying north) is (x=1.0, y=0.0, z=0.0).
# This vector will get projected to the camera center point,
# thus we have to remap the axes.
#camvec = [y_mm, x_mm, focal_len_mm]
camvec = [focal_len_mm, x_mm, y_mm]
print "camvec orig = ", camvec
camvec = transformations.unit_vector(camvec) # normalize
print "camvec = %.3f %.3f %.3f" % (camvec[0], camvec[1], camvec[2])
# transform camera vector (in body reference frame) to ned
# reference frame
ned = transformations.quaternion_backTransform(q, camvec)
print "q = %s ned = %s" % (str(q), str(ned))
# solve projection
if ned[2] < 0.0:
# no intersection
return [0.0, 0.0]
factor = z_m / ned[2]
#print "z_m = %s" % str(z_m)
x_proj = -ned[0] * factor
y_proj = -ned[1] * factor
#print "proj dist = %.2f" % math.sqrt(x_proj*x_proj + y_proj*y_proj)
return [x_proj, y_proj]
# project keypoints based on body reference system + body biases
# transformed by camera mounting + camera mounting biases
def projectImageKeypointsNative2(self, image, yaw_bias=0.0,
roll_bias=0.0, pitch_bias=0.0,
alt_bias=0.0):
if image.img == None:
image.load_rgb()
h = image.height
w = image.width
ar = float(w)/float(h) # aspect ratio
pose = self.computeCameraPoseFromAircraft(image)
#print "Computed new image pose for %s = %s" % (image.name, str(pose))
# save the computed camera pose
image.camera_yaw = pose[0]
image.camera_pitch = pose[1]
image.camera_roll = pose[2]
image.camera_x = pose[3]
image.camera_y = pose[4]
image.camera_z = pose[5]
image.save_meta()
(coord_list, corner_list, grid_list) = \
self.projectImageKeypointsNative3(image, pose, yaw_bias, roll_bias,
pitch_bias, alt_bias)
return coord_list, corner_list, grid_list
d2r = math.pi / 180.0
# project keypoints using the provided camera pose
# pose = (yaw_deg, pitch_deg, roll_deg, x_m, y_m, z_m)
def projectImageKeypointsNative3(self, image, pose,
yaw_bias=0.0, roll_bias=0.0,
pitch_bias=0.0, alt_bias=0.0,
all_keypoints=False):
#print "Project3 for %s" % image.name
if image.img == None:
image.load_rgb()
h = image.height
w = image.width
ar = float(w)/float(h) # aspect ratio
ned2cam = transformations.quaternion_from_euler((pose[0]+yaw_bias)*d2r,
(pose[1]+pitch_bias)*d2r,
(pose[2]+roll_bias)*d2r,
'rzyx')
x_m = pose[3]
y_m = pose[4]
z_m = pose[5] + alt_bias
#print "ref offset = %.2f %.2f" % (x_m, y_m)
coord_list = [None] * len(image.kp_list)
corner_list = []
grid_list = []
# project the paired keypoints into world space
for i, kp in enumerate(image.kp_list):
if not all_keypoints and not image.kp_usage[i]:
continue
# print "ned2cam = %s" % str(ned2cam)
proj = self.projectPoint2(image, ned2cam, kp.pt, z_m)
#print "project3: kp=%s proj=%s" %(str(kp.pt), str(proj))
coord_list[i] = [proj[1] + x_m, proj[0] + y_m]
#print "coord_list = %s" % str(coord_list)
# compute the corners (2x2 polygon grid) in image space
dx = image.width - 1
dy = image.height - 1
y = 0.0
for j in xrange(2):
x = 0.0
for i in xrange(2):
#print "corner %.2f %.2f" % (x, y)
proj = self.projectPoint2(image, ned2cam, [x, y], z_m)
corner_list.append( [proj[1] + x_m, proj[0] + y_m] )
x += dx
y += dy
# compute the ac3d polygon grid in image space
dx = image.width / float(self.ac3d_steps)
dy = image.height / float(self.ac3d_steps)
y = 0.0
for j in xrange(self.ac3d_steps+1):
x = 0.0
for i in xrange(self.ac3d_steps+1):
#print "grid %.2f %.2f" % (xnorm_u, ynorm_u)
proj = self.projectPoint2(image, ned2cam, [x, y], z_m)
grid_list.append( [proj[1] + x_m, proj[0] + y_m] )
x += dx
y += dy
return coord_list, corner_list, grid_list
def projectKeypoints(self, all_keypoints=False):
for image in self.image_list:
pose = (image.camera_yaw, image.camera_pitch, image.camera_roll,
image.camera_x, image.camera_y, image.camera_z)
# print "project from pose = %s" % str(pose)
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
all_keypoints=all_keypoints)
image.coord_list = coord_list
image.corner_list = corner_list
image.grid_list = grid_list
# test
# coord_list, corner_list, grid_list \
# = self.projectImageKeypointsNative2(image)
#print "orig corners = %s" % str(image.corner_list)
#print "new corners = %s" % str(corner_list)
def findImageRotate(self, i1, gain):
#self.findImageAffine(i1) # temp test
error_sum = 0.0
weight_sum = i1.weight # give ourselves an appropriate weight
for i, match in enumerate(i1.match_list):
if len(match) >= self.m.min_pairs:
i2 = self.image_list[i]
print "Rotating %s vs %s" % (i1.name, i2.name)
for pair in match:
# + 180 (camera is mounted backwards)
y1 = i1.yaw + i1.rotate + 180.0
y2 = i2.yaw + i2.rotate + 180.0
dy = y2 - y1
while dy < -180.0:
dy += 360.0
while dy > 180.0:
dy -= 360.0
# angle is in opposite direction from yaw
#a1 = i1.yaw + i1.rotate + 180 + i1.kp_list[pair[0]].angle
#a2 = i2.yaw + i2.rotate + 180 + i2.kp_list[pair[1]].angle
a1 = i1.kp_list[pair[0]].angle
a2 = i2.kp_list[pair[1]].angle
da = a1 - a2
while da < -180.0:
da += 360.0
while da > 180.0:
da -= 360.0
print "yaw diff = %.1f angle diff = %.1f" % (dy, da)
error = dy - da
while error < -180.0:
error += 360.0
while error > 180.0:
error -= 360.0
error_sum += error * i2.weight
weight_sum += i2.weight
print str(pair)
print " i1: %.1f %.3f %.1f" % (i1.yaw, i1.kp_list[pair[0]].angle, a1)
print " i2: %.1f %.3f %.1f" % (i2.yaw, i2.kp_list[pair[1]].angle, a2)
print " error: %.1f weight: %.2f" % (error, i2.weight)
print
#self.showMatch(i1, i2, match)
update = 0.0
if weight_sum > 0.0:
update = error_sum / weight_sum
i1.rotate += update * gain
print "Rotate %s delta=%.2f = %.2f" % (i1.name, update, i1.rotate)
def rotateImages(self, gain=0.10):
for image in self.image_list:
self.findImageRotate(image, gain)
for image in self.image_list:
print "%s: yaw error = %.2f" % (image.name, image.rotate)
def findImagePairShift(self, i1, i2, match):
xerror_sum = 0.0
yerror_sum = 0.0
for pair in match:
c1 = i1.coord_list[pair[0]]
c2 = i2.coord_list[pair[1]]
dx = c2[0] - c1[0]
dy = c2[1] - c1[1]
xerror_sum += dx
yerror_sum += dy
# divide by pairs + 1 gives some weight to our own position
# (i.e. a zero rotate)
xshift = xerror_sum / len(match)
yshift = yerror_sum / len(match)
#print " %s -> %s = (%.2f %.2f)" % (i1.name, i2.name, xshift, yshift)
return (xshift, yshift)
def findImageShift(self, i1, gain=0.10, placing=False):
xerror_sum = 0.0
yerror_sum = 0.0
weight_sum = i1.weight # give ourselves an appropriate weight
for i, match in enumerate(i1.match_list):
if len(match) < self.m.min_pairs:
continue
i2 = self.image_list[i]
#if not i2.placed:
# continue
(xerror, yerror) = self.findImagePairShift( i1, i2, match )
xerror_sum += xerror * i2.weight
yerror_sum += yerror * i2.weight
weight_sum += i2.weight
xshift = xerror_sum / weight_sum
yshift = yerror_sum / weight_sum
print "Shift %s -> (%.2f %.2f)" % (i1.name, xshift, yshift)
#print " %s bias before (%.2f %.2f)" % (i1.name, i1.x_bias, i1.y_bias)
i1.x_bias += xshift * gain
i1.y_bias += yshift * gain
#print " %s bias after (%.2f %.2f)" % (i1.name, i1.x_bias, i1.y_bias)
i1.save_meta()
def shiftImages(self, gain=0.10):
for image in self.image_list:
self.findImageShift(image, gain)
# method="average": return the weighted average of the errors.
# method="stddev": return the weighted average of the stddev of the errors.
# method="max": return the max error of the subcomponents.
def groupError(self, method="average"):
#print "compute group error, method = %s" % method
if len(self.image_list):
error_sum = 0.0
weight_sum = 0.0
for i, image in enumerate(self.image_list):
e = 0.0
e = self.m.imageError(i, method=method)
#print "%s error = %.2f" % (image.name, e)
error_sum += e*e * image.weight
weight_sum += image.weight
return math.sqrt(error_sum / weight_sum)
else:
return 0.0
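# In other words, groupError() returns the weighted RMS of the per-image
# errors:
#   sqrt( sum_i(w_i * e_i^2) / sum_i(w_i) )
# where e_i comes from self.m.imageError(i, method=method) and w_i is
# image.weight.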
# zero all biases (if we want to start over with a from scratch fit)
def zeroImageBiases(self):
for image in self.image_list:
image.yaw_bias = 0.0
image.roll_bias = 0.0
image.pitch_bias = 0.0
image.alt_bias = 0.0
image.x_bias = 0.0
image.y_bias = 0.0
image.save_meta()
# try to fit individual images by manipulating various parameters
# and testing to see if that produces a better fit metric
def estimateParameter(self, i, ground_alt_m, method,
param="", start_value=0.0, step_size=1.0,
refinements=3):
image = self.image_list[i]
pose = (image.camera_yaw, image.camera_pitch, image.camera_roll,
image.camera_x, image.camera_y, image.camera_z)
#print "Estimate %s for %s" % (param, image.name)
var = False
if method == "average":
var = False
elif method == "stddev":
var = True
for k in xrange(refinements):
best_error = self.m.imageError(i, method=method)
best_value = start_value
test_value = start_value - 5*step_size
#print "start value = %.2f error = %.1f" % (best_value, best_error)
while test_value <= start_value + 5*step_size + (step_size*0.1):
coord_list = []
corner_list = []
grid_list = []
if param == "yaw":
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
yaw_bias=test_value)
elif param == "roll":
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
roll_bias=test_value)
elif param == "pitch":
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
pitch_bias=test_value)
elif param == "altitude":
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(image, pose,
alt_bias=test_value)
error = self.m.imageError(i, alt_coord_list=coord_list,
method=method)
#print "Test %s error @ %.2f = %.2f" % ( param, test_value, error )
if error < best_error:
best_error = error
best_value = test_value
#print " better value = %.2f, error = %.1f" % (best_value, best_error)
test_value += step_size
# update values for next iteration
start_value = best_value
step_size /= 5.0
return best_value, best_error
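# Example of the search schedule above (illustrative): with start_value=0.0,
# step_size=1.0 and refinements=3, the first pass evaluates the 11 values
# -5, -4, ..., +5; the best one becomes the new start_value, the step shrinks
# to 0.2 (then 0.04), and the 11-point sweep repeats around it, so the final
# resolution is roughly the initial step_size / 25.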
# try to fit individual images by manipulating various parameters
# and testing to see if that produces a better fit metric
def fitImage(self, i, method, gain):
# parameters to manipulate = yaw, roll, pitch
yaw_step = 2.0
roll_step = 1.0
pitch_step = 1.0
refinements = 4
image = self.image_list[i]
# start values should be zero because previous values are
# already included so we are computing a new offset from the
# past solution.
yaw, e = self.estimateParameter(i, self.ground_alt_m, method,
"yaw", start_value=0.0,
step_size=1.0, refinements=refinements)
roll, e = self.estimateParameter(i, self.ground_alt_m, method,
"roll", start_value=0.0,
step_size=1.0, refinements=refinements)
pitch, e = self.estimateParameter(i, self.ground_alt_m, method,
"pitch", start_value=0.0,
step_size=1.0,
refinements=refinements)
alt, e = self.estimateParameter(i, self.ground_alt_m, method,
"altitude", start_value=0.0,
step_size=2.0, refinements=refinements)
image.camera_yaw += yaw*gain
image.camera_roll += roll*gain
image.camera_pitch += pitch*gain
image.camera_z += alt*gain
coord_list = []
corner_list = []
grid_list = []
# but don't save the results so we don't bias future elements
# with moving previous elements
coord_list, corner_list, grid_list = self.projectImageKeypointsNative2(image)
error = self.m.imageError(i, alt_coord_list=coord_list, method=method)
if method == "average":
image.error = error
elif method == "stddev":
image.stddev = error
print "Fit %s (%s) is %.2f %.2f %.2f %.2f (avg=%.3f stddev=%.3f)" \
% (image.name, method,
image.camera_yaw, image.camera_roll, image.camera_pitch,
image.camera_z, image.error, image.stddev)
image.save_meta()
# try to fit individual images by manipulating various parameters
# and testing to see if that produces a better fit metric
def fitImageAffine3d(self, i, method, gain):
i1 = self.image_list[i]
angles_sum = [0.0, 0.0, 0.0]
weight_sum = i1.weight
for j, pairs in enumerate(i1.match_list):
if len(pairs) < self.m.min_pairs:
continue
i2 = self.image_list[j]
src = [[], [], []]
dst = [[], [], []]
for pair in pairs:
c1 = i1.coord_list[pair[0]]
c2 = i2.coord_list[pair[1]]
src[0].append(c1[0])
src[1].append(c1[1])
src[2].append(0.0)
dst[0].append(c2[0])
dst[1].append(c2[1])
dst[2].append(0.0)
Aff3D = transformations.superimposition_matrix(src, dst)
scale, shear, angles, trans, persp = transformations.decompose_matrix(Aff3D)
print "%s vs. %s" % (i1.name, i2.name)
#print " scale = %s" % str(scale)
#print " shear = %s" % str(shear)
print " angles = %s" % str(angles)
#print " trans = %s" % str(trans)
#print " persp = %s" % str(persp)
# this is all based on the assumption that our angle
# differences are relatively small
for k in range(3):
a = angles[k]
if a < -180.0:
a += 360.0
if a > 180.0:
a -= 360.0
angles_sum[k] += a
weight_sum += i2.weight
angles = [ angles_sum[0] / weight_sum,
angles_sum[1] / weight_sum,
angles_sum[2] / weight_sum ]
print "average angles = %s" % str(angles)
rad2deg = 180.0 / math.pi
i1.roll_bias += angles[0] * rad2deg * gain
i1.pitch_bias += angles[1] * rad2deg * gain
i1.yaw_bias += angles[2] * rad2deg * gain
coord_list = []
corner_list = []
grid_list = []
# but don't save the results so we don't bias future elements
# with moving previous elements
coord_list, corner_list, grid_list = self.projectImageKeypointsNative2(i1)
error = self.m.imageError(i, alt_coord_list=coord_list, method="average")
stddev = self.m.imageError(i, alt_coord_list=coord_list, method="stddev")
print "average error = %.3f" % error
print "average stddev = %.3f" % stddev
i1.save_meta()
def fitImagesIndividually(self, method, gain):
for i, image in enumerate(self.image_list):
self.fitImage(i, method, gain)
#self.fitImageAffine3d(i, method, gain)
def geotag_pictures( self, correlator, dir = ".", geotag_dir = "." ):
ground_sum = 0.0
ground_count = 0
print "master_time_offset = " + str(correlator.master_time_offset)
for match in correlator.best_matchups:
pict, trig = correlator.get_match(match)
trig_time = trig[0] + correlator.master_time_offset
pict_time = pict[0]
time_diff = trig_time - pict_time
#print str(match[0]) + " <=> " + str(match[1])
#print str(pict_time) + " <=> " + str(trig_time)
print pict[2] + " -> " + str(trig[2]) + ", " + str(trig[3]) + ": " + str(trig[4]) + " (" + str(time_diff) + ")"
agl_ft = trig[4]
lon_deg, lat_deg, msl = correlator.get_position( trig[0] )
msl_ft = msl / 0.3048
ground_sum += (msl_ft - agl_ft)
ground_count += 1
ground_agl_ft = ground_sum / ground_count
print " MSL: " + str( msl_ft ) + " AGL: " + str(agl_ft) + " Ground: " + str(ground_agl_ft)
# double check geotag dir exists and make it if not
if not os.path.exists(geotag_dir):
os.makedirs(geotag_dir)
# update a resized copy if needed
name_in = dir + "/" + pict[2]
name_out = geotag_dir + "/" + pict[2]
if not os.path.isfile( name_out ):
command = 'convert -geometry 684x456 ' + name_in + ' ' + name_out
#command = 'convert -geometry 512x512\! ' + name_in + ' ' + name_out
print command
commands.getstatusoutput( command )
# update the gps meta data
exif = pyexiv2.ImageMetadata(name_out)
exif.read()
#exif.set_gps_info(lat_deg, lon_deg, (msl_ft*0.3048))
altitude = msl_ft*0.3048
GPS = 'Exif.GPSInfo.GPS'
exif[GPS + 'AltitudeRef'] = '0' if altitude >= 0 else '1'
exif[GPS + 'Altitude'] = Fraction(altitude)
exif[GPS + 'Latitude'] = decimal_to_dms(lat_deg)
exif[GPS + 'LatitudeRef'] = 'N' if lat_deg >= 0 else 'S'
exif[GPS + 'Longitude'] = decimal_to_dms(lon_deg)
exif[GPS + 'LongitudeRef'] = 'E' if lon_deg >= 0 else 'W'
exif[GPS + 'MapDatum'] = 'WGS-84'
exif.write()
def fixup_timestamps( self, correlator, camera_time_error, geotag_dir = "." ):
for match in correlator.best_matchups:
pict, trig = correlator.get_match(match)
unixtime = pict[0]
name = geotag_dir + "/" + pict[2]
unixtime += camera_time_error
newdatetime = datetime.datetime.utcfromtimestamp(round(unixtime)).strftime('%Y:%m:%d %H:%M:%S')
exif = pyexiv2.ImageMetadata(name)
exif.read()
print "old: " + str(exif['Exif.Image.DateTime']) + " new: " + newdatetime
exif['Exif.Image.DateTime'] = newdatetime
exif.write()
def generate_aircraft_location_report(self):
for image in self.image_list:
print "%s\t%.10f\t%.10f\t%.2f" \
% (image.name, image.aircraft_lon, image.aircraft_lat,
image.aircraft_msl)
def draw_epilines(self, img1, img2, lines, pts1, pts2):
''' img1 - image on which we draw the epilines for the points in img2
lines - corresponding epilines '''
r,c,d = img1.shape
print img1.shape
for r,pt1,pt2 in zip(lines,pts1,pts2):
color = tuple(np.random.randint(0,255,3).tolist())
x0,y0 = map(int, [0, -r[2]/r[1] ])
x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
cv2.line(img1, (x0,y0), (x1,y1), color,1)
cv2.circle(img1,tuple(pt1),5,color,-1)
cv2.circle(img2,tuple(pt2),5,color,-1)
return img1,img2
def sfm_test(self):
for i, i1 in enumerate(self.image_list):
for j, pairs in enumerate(i1.match_list):
if i == j:
continue
if len(pairs) < 8:
# 8+ pairs are required to compute the fundamental matrix
continue
i2 = self.image_list[j]
pts1 = []
pts2 = []
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.kp_list[pair[1]].pt
pts1.append( p1 )
pts2.append( p2 )
pts1 = np.float32(pts1)
pts2 = np.float32(pts2)
print "pts1 = %s" % str(pts1)
print "pts2 = %s" % str(pts2)
F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
print "loading full res images ..."
img1 = i1.load_source_rgb(self.source_dir)
img2 = i2.load_source_rgb(self.source_dir)
# Find epilines corresponding to points in right image
# (second image) and drawing its lines on left image
lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2, F)
lines1 = lines1.reshape(-1,3)
img5,img6 = self.draw_epilines(img1,img2,lines1,pts1,pts2)
# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
lines2 = lines2.reshape(-1,3)
img3,img4 = self.draw_epilines(img2,img1,lines2,pts2,pts1)
plt.subplot(121),plt.imshow(img5)
plt.subplot(122),plt.imshow(img3)
plt.show()
# this really doesn't work right because the euler pose angles derived
# might be correct, but apparently aren't all consistent ... the back
# solver that extracts angles from an arbitrary rotation matrix doesn't seem
# to always be consistent. (this probably should be deprecated at some point)
def fitImagesWithSolvePnP1(self):
for i, i1 in enumerate(self.image_list):
#print "sovelPNP() for %s" % i1.name
K = self.cam.get_K()
att_sum = [ [0.0, 0.0], [0.0, 0.0], [0.0, 0.0] ]
pos_sum = [0.0, 0.0, 0.0]
weight_sum = 0.0
for j, pairs in enumerate(i1.match_list):
if i == j:
continue
if len(pairs) < 8:
# we need at least 8 pairs to call solvePNP()
continue
i2 = self.image_list[j]
img_pts = []
obj_pts = []
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.coord_list[pair[1]]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
#print "img_pts = %s" % str(img_pts)
#print "obj_pts = %s" % str(obj_pts)
(result, rvec, tvec) = cv2.solvePnP(obj_pts, img_pts, K, None)
#print " result = %s, rvec = %s, tvec = %s" \
# % (result, rvec, tvec)
print " rvec = %.2f %.2f %.2f" % (rvec[0], rvec[1], rvec[2])
R, jac = cv2.Rodrigues(rvec)
#print " R =\n%s" % str(R)
# googled how to derive the position in object
# coordinates from solvePNP()
pos = -np.matrix(R).T * np.matrix(tvec)
#print "solved pos = %s" % str(pos)
#print " pos[0] = %s" % str(pos[0])
#print " pos.item(0) = %s" % str(pos.item(0))
for k in range(0,3):
pos_sum[k] += pos.item(k)
#print " PNP pos = %s" % str(pos)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
# this will be tedious code ...
#Rconv[:3, 0] = R[:3, 2] # swap col 0 <=> col 2
#Rconv[:3, 2] = R[:3, 0]
#Rconv[1, :3] *= -1.0 # negate the middle row
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
#print "Rconv =\n%s" % str(Rconv)
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv, 'rzyx')
att_sum[0][0] += math.cos(yaw)
att_sum[0][1] += math.sin(yaw)
att_sum[1][0] += math.cos(pitch)
att_sum[1][1] += math.sin(pitch)
att_sum[2][0] += math.cos(roll)
att_sum[2][1] += math.sin(roll)
weight_sum += i2.weight
deg2rad = math.pi / 180.0
print " pair euler = %.2f %.2f %.2f" % (yaw/deg2rad,
pitch/deg2rad,
roll/deg2rad)
#print " est = %.2f %.2f %.2f" % (i1.camera_yaw,
# i1.camera_pitch,
# i1.camera_roll)
Rcam = transformations.euler_matrix(i1.camera_yaw*deg2rad,
i1.camera_pitch*deg2rad,
i1.camera_roll*deg2rad,
'rzyx')
#print "solvePNP =\n%s" % str(Rconv)
#print "my FIT =\n%s" % str(Rcam)
v = np.array( [1.0, 0.0, 0.0] )
vh = np.array( [1.0, 0.0, 0.0, 1.0] )
#print " v = %s" % str(v)
#print " Rconv * v = %s" % str(np.dot(Rconv, v))
#print " Rcam * v = %s" % str(np.dot(Rcam, vh))
if weight_sum < 0.0001:
continue
i1.camera_x = pos_sum[0] / weight_sum
i1.camera_y = pos_sum[1] / weight_sum
i1.camera_z = pos_sum[2] / weight_sum
print "Camera pose for image %s:" % i1.name
print " PNP pos = %.2f %.2f %.2f" % (i1.camera_x,
i1.camera_y,
i1.camera_z)
yaw_avg = math.atan2(att_sum[0][1]/weight_sum,
att_sum[0][0]/weight_sum)
pitch_avg = math.atan2(att_sum[1][1]/weight_sum,
att_sum[1][0]/weight_sum)
roll_avg = math.atan2(att_sum[2][1]/weight_sum,
att_sum[2][0]/weight_sum)
i1.camera_yaw = yaw_avg / deg2rad
i1.camera_pitch = pitch_avg / deg2rad
i1.camera_roll = roll_avg / deg2rad
print " PNP att = %.2f %.2f %.2f" % (i1.camera_yaw,
i1.camera_pitch,
i1.camera_roll)
i1.save_meta()
# call solvePnP() on all the matching pairs from all the matching
# images simultaneously. This works, but inherently weights the
# fit much more towards the images with more matching pairs ... on
# the other hand, that may be kind of what we want because images
# with a few matches over a small area can grossly magnify any
# errors into the result of solvePnP().
def fitImagesWithSolvePnP2(self):
for i, i1 in enumerate(self.image_list):
#print "sovlePNP() for %s" % i1.name
K = self.cam.get_K()
img_pts = []
obj_pts = []
for j, pairs in enumerate(i1.match_list):
if i == j:
# include the match with ourselves ... we have
# self worth too!
for k, flag in enumerate(i1.kp_usage):
if flag:
p1 = i1.kp_list[k].pt
p2 = i1.coord_list[k]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
if len(pairs) < 8:
# we need at least 8 pairs to call solvePNP()
continue
i2 = self.image_list[j]
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.coord_list[pair[1]]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
# now call the solver if we have enough points
if len(img_pts) < 8:
continue
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
#print "img_pts = %s" % str(img_pts)
#print "obj_pts = %s" % str(obj_pts)
#(result, rvec, tvec) = cv2.solvePnP(obj_pts, img_pts, cam, None)
if hasattr(i1, 'rvec'):
(result, i1.rvec, i1.tvec) \
= cv2.solvePnP(obj_pts, img_pts, K, None,
i1.rvec, i1.tvec,
useExtrinsicGuess=True)
else:
# first time
(result, i1.rvec, i1.tvec) \
= cv2.solvePnP(obj_pts, img_pts, K, None)
#print " result = %s, rvec = %s, tvec = %s" \
# % (result, i1.rvec, i1.tvec)
# print " rvec = %.2f %.2f %.2f" % (i1.rvec[0], i1.rvec[1], i1.rvec[2])
R, jac = cv2.Rodrigues(i1.rvec)
#print " R =\n%s" % str(R)
# googled how to derive the position in object
# coordinates from solvePNP()
pos = -np.matrix(R).T * np.matrix(i1.tvec)
#print "solved pos = %s" % str(pos)
#print " pos[0] = %s" % str(pos[0])
#print " pos.item(0) = %s" % str(pos.item(0))
#print " PNP pos = %s" % str(pos)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
# this will be tedious code ...
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
#print "Rconv =\n%s" % str(Rconv)
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv, 'rzyx')
deg2rad = math.pi / 180.0
#print " pair euler = %.2f %.2f %.2f" % (yaw/deg2rad,
# pitch/deg2rad,
# roll/deg2rad)
#print " est = %.2f %.2f %.2f" % (i1.camera_yaw,
# i1.camera_pitch,
# i1.camera_roll)
Rcam = transformations.euler_matrix(i1.camera_yaw*deg2rad,
i1.camera_pitch*deg2rad,
i1.camera_roll*deg2rad,
'rzyx')
print "Beg cam pose %s %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i1.camera_yaw, i1.camera_pitch, i1.camera_roll,
i1.camera_x, i1.camera_y, i1.camera_z)
i1.camera_yaw = yaw/deg2rad
i1.camera_pitch = pitch/deg2rad
i1.camera_roll = roll/deg2rad
i1.camera_x = pos.item(0)
i1.camera_y = pos.item(1)
i1.camera_z = pos.item(2)
i1.save_meta()
print "New cam pose %s %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i1.camera_yaw, i1.camera_pitch, i1.camera_roll,
i1.camera_x, i1.camera_y, i1.camera_z)
# find the pose estimate for each match individually and use that
# pose to project the keypoints. Then average all the keypoint
# projections together ... this weights image pairs equally and
# averaging points in cartesian space is much easier than trying
# to figure out how to average euler angles.
#
# Problem ... too many pairwise matches are unstable for
# solvePnP() because of clustered or linear data leading to a
# whole lot of nonsense
def fitImagesWithSolvePnP3(self):
for i, i1 in enumerate(self.image_list):
print "solvePnP() (3) for %s" % i1.name
if i1.connections == 0:
print " ... no connections, skipping ..."
continue
K = self.cam.get_K()
master_list = []
master_list.append(i1.coord_list) # weight ourselves in the mix
for j, pairs in enumerate(i1.match_list):
# include the match with ourselves ... we have self worth too!
#if i == j:
# continue
if len(pairs) < 8:
# we need at least 8 pairs to call solvePNP()
continue
i2 = self.image_list[j]
# assemble the data points for the solver
img_pts = []
obj_pts = []
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.coord_list[pair[1]]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
# now call the solver
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
#(result, rvec, tvec) \
# = cv2.solvePnP(obj_pts, img_pts, K, None)
(rvec, tvec, status) \
= cv2.solvePnPRansac(obj_pts, img_pts, K, None)
size = len(status)
inliers = np.sum(status)
if inliers < size:
print '%s vs %s: %d / %d inliers/matched' \
% (i1.name, i2.name, inliers, size)
# show the outlier pairs and queue them for deletion
status = self.m.showMatch(i1, i2, pairs, status)
delete_list = []
for k, flag in enumerate(status):
if not flag:
print " deleting: " + str(pairs[k])
#match[i] = (-1, -1)
delete_list.append(pairs[k])
for pair in delete_list:
self.deletePair(i, j, pair)
#print " result = %s, rvec = %s, tvec = %s" \
# % (result, rvec, tvec)
# print " rvec = %.2f %.2f %.2f" % (rvec[0], rvec[1], rvec[2])
R, jac = cv2.Rodrigues(rvec)
#print " R =\n%s" % str(R)
# googled how to derive the position in object
# coordinates from solvePNP()
pos = -np.matrix(R).T * np.matrix(tvec)
#print "solved pos = %s" % str(pos)
#print " pos[0] = %s" % str(pos[0])
#print " pos.item(0) = %s" % str(pos.item(0))
#print " PNP pos = %s" % str(pos)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
#print "Rconv =\n%s" % str(Rconv)
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv,
'rzyx')
deg2rad = math.pi / 180.0
camera_pose = (yaw/deg2rad, pitch/deg2rad, roll/deg2rad,
pos.item(0), pos.item(1), pos.item(2))
# project out the image keypoints for this pair's
# estimated camera pose
coord_list, corner_list, grid_list \
= self.projectImageKeypointsNative3(i1, camera_pose)
#print "len(coord_list) = %d" % len(coord_list)
# save the results for averaging purposes
master_list.append(coord_list)
#print " pair euler = %.2f %.2f %.2f" % (yaw/deg2rad,
# pitch/deg2rad,
# roll/deg2rad)
#print " est = %.2f %.2f %.2f" % (i1.camera_yaw,
# i1.camera_pitch,
# i1.camera_roll)
#Rcam = transformations.euler_matrix(i1.camera_yaw*deg2rad,
# i1.camera_pitch*deg2rad,
# i1.camera_roll*deg2rad,
# 'rzyx')
#print "solvePNP =\n%s" % str(Rconv)
#print "my FIT =\n%s" % str(Rcam)
print " %s vs %s cam pose %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i2.name,
camera_pose[0], camera_pose[1], camera_pose[2],
camera_pose[3], camera_pose[4], camera_pose[5])
# find the average coordinate locations from the set of pair
# projections
coord_list = []
size = len(master_list[0]) # number of coordinates
#print "size = %d" % size
n = len(master_list) # number of projections
#print "n = %d" % n
for i in range(0, size):
#print "i = %d" % i
if not i1.kp_usage[i]:
coord_list.append(None)
continue
x_sum = 0.0
y_sum = 0.0
for list in master_list:
#print "len(list) = %d" % len(list)
x_sum += list[i][0]
y_sum += list[i][1]
x = x_sum / float(n)
y = y_sum / float(n)
coord_list.append( [x, y] )
# now finally call solvePnP() on the average of the projections
img_pts = []
obj_pts = []
for i in range(0, size):
if not i1.kp_usage[i]:
continue
img_pts.append( i1.kp_list[i].pt )
obj_pts.append( [coord_list[i][0], coord_list[i][1], 0.0] )
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
(result, rvec, tvec) = cv2.solvePnP(obj_pts, img_pts, K, None)
# and extract the average camera pose
R, jac = cv2.Rodrigues(rvec)
pos = -np.matrix(R).T * np.matrix(tvec)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv,
'rzyx')
deg2rad = math.pi / 180.0
print "Beg cam pose %s %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i1.camera_yaw, i1.camera_pitch, i1.camera_roll,
i1.camera_x, i1.camera_y, i1.camera_z)
i1.camera_yaw = yaw/deg2rad
i1.camera_pitch = pitch/deg2rad
i1.camera_roll = roll/deg2rad
i1.camera_x = pos.item(0)
i1.camera_y = pos.item(1)
i1.camera_z = pos.item(2)
i1.save_meta()
print "New cam pose %s %.2f %.2f %.2f %.2f %.2f %.2f" \
% (i1.name, i1.camera_yaw, i1.camera_pitch, i1.camera_roll,
i1.camera_x, i1.camera_y, i1.camera_z)
def triangulate_test(self):
for i, i1 in enumerate(self.image_list):
print "pnp for %s" % i1.name
K = self.cam.get_K()
att_sum = [ [0.0, 0.0], [0.0, 0.0], [0.0, 0.0] ]
pos_sum = [0.0, 0.0, 0.0]
weight_sum = 0.0
for j, pairs in enumerate(i1.match_list):
if i == j:
continue
if len(pairs) < 8:
# start with only well correlated pairs
continue
i2 = self.image_list[j]
R1, jac = cv2.Rodrigues(i1.rvec)
R2, jac = cv2.Rodrigues(i2.rvec)
def pnp_test(self):
for i, i1 in enumerate(self.image_list):
print "pnp for %s" % i1.name
K = self.cam.get_K()
att_sum = [ [0.0, 0.0], [0.0, 0.0], [0.0, 0.0] ]
pos_sum = [0.0, 0.0, 0.0]
weight_sum = 0.0
for j, pairs in enumerate(i1.match_list):
if i == j:
continue
if len(pairs) < 8:
# start with only well correlated pairs
continue
i2 = self.image_list[j]
img_pts = []
obj_pts = []
for pair in pairs:
p1 = i1.kp_list[pair[0]].pt
p2 = i2.coord_list[pair[1]]
img_pts.append( p1 )
obj_pts.append( [p2[0], p2[1], 0.0] )
img_pts = np.float32(img_pts)
obj_pts = np.float32(obj_pts)
#print "img_pts = %s" % str(img_pts)
#print "obj_pts = %s" % str(obj_pts)
(result, rvec, tvec) = cv2.solvePnP(obj_pts, img_pts, K, None)
print " result = %s, rvec = %s, tvec = %s" \
% (result, rvec, tvec)
R, jac = cv2.Rodrigues(rvec)
print " R =\n%s" % str(R)
# googled how to derive the position in object
# coordinates from solvePNP()
pos = -np.matrix(R).T * np.matrix(tvec)
for k in range(0,3):
pos_sum[k] += pos.item(k)
print " PNP pos = %s" % str(pos)
# Remap the R matrix to match our coordinate system
# (by inspection...)
# [ [a, b, c], [d, e, f], [g, h, i] ] =>
# [ [h, b, a], [g, -e, -d], [i, c, -f] ]
# this will be tedious code ...
#Rconv[:3, 0] = R[:3, 2] # swap col 0 <=> col 2
#Rconv[:3, 2] = R[:3, 0]
#Rconv[1, :3] *= -1.0 # negate the middle row
Rconv = R.copy()
Rconv[0,0] = R[2,1]
Rconv[0,1] = R[0,1]
Rconv[0,2] = R[0,0]
Rconv[1,0] = R[2,0]
Rconv[1,1] = -R[1,1]
Rconv[1,2] = -R[1,0]
Rconv[2,0] = R[2,2]
Rconv[2,1] = R[0,2]
Rconv[2,2] = -R[1,2]
print "Rconv =\n%s" % str(Rconv)
(yaw, pitch, roll) = transformations.euler_from_matrix(Rconv, 'rzyx')
att_sum[0][0] += math.cos(yaw)
att_sum[0][1] += math.sin(yaw)
att_sum[1][0] += math.cos(pitch)
att_sum[1][1] += math.sin(pitch)
att_sum[2][0] += math.cos(roll)
att_sum[2][1] += math.sin(roll)
weight_sum += i2.weight
deg2rad = math.pi / 180.0
print " euler = %.2f %.2f %.2f" % (yaw/deg2rad,
pitch/deg2rad,
roll/deg2rad)
print " est = %.2f %.2f %.2f" % (i1.camera_yaw,
i1.camera_pitch,
i1.camera_roll)
Rcam = transformations.euler_matrix(i1.camera_yaw*deg2rad,
i1.camera_pitch*deg2rad,
i1.camera_roll*deg2rad,
'rzyx')
print "solvePNP =\n%s" % str(Rconv)
print "my FIT =\n%s" % str(Rcam)
v = np.array( [1.0, 0.0, 0.0] )
vh = np.array( [1.0, 0.0, 0.0, 1.0] )
print " v = %s" % str(v)
print " Rconv * v = %s" % str(np.dot(Rconv, v))
print " Rcam * v = %s" % str(np.dot(Rcam, vh))
if weight_sum < 0.0001:
continue
print "Camera pose for image %s:" % i1.name
print " PNP pos = %.2f %.2f %.2f" % (pos_sum[0]/weight_sum,
pos_sum[1]/weight_sum,
pos_sum[2]/weight_sum)
print " Fit pos = %s" % str((i1.camera_x, i1.camera_y, i1.camera_z))
yaw_avg = math.atan2(att_sum[0][1]/weight_sum,
att_sum[0][0]/weight_sum)
pitch_avg = math.atan2(att_sum[1][1]/weight_sum,
att_sum[1][0]/weight_sum)
roll_avg = math.atan2(att_sum[2][1]/weight_sum,
att_sum[2][0]/weight_sum)
print " PNP att = %.2f %.2f %.2f" % ( yaw_avg / deg2rad,
pitch_avg / deg2rad,
roll_avg / deg2rad )
print " Fit att = %.2f %.2f %.2f" % (i1.camera_yaw,
i1.camera_pitch,
i1.camera_roll)
# should reset the width, height values in the keys files
def recomputeWidthHeight(self):
for image in self.image_list:
if image.img == None:
image.load_features()
image.load_rgb()
image.save_keys()
# write out the camera positions as geojson
def save_geojson(self, path="mymap", cm_per_pixel=15.0 ):
feature_list = []
if not os.path.exists(path):
os.makedirs(path)
for i, image in enumerate(self.image_list):
# camera point
cam = geojson.Point( (image.aircraft_lon, image.aircraft_lat) )
# coverage polys
geo_list = []
for pt in image.corner_list:
lon = self.render.x2lon(pt[0])
lat = self.render.y2lat(pt[1])
geo_list.append( (lon, lat) )
if len(geo_list) == 4:
tmp = geo_list[2]
geo_list[2] = geo_list[3]
geo_list[3] = tmp
poly = geojson.Polygon( [ geo_list ] )
# group
gc = geojson.GeometryCollection( [cam, poly] )
source = "%s/%s" % (self.source_dir, image.name)
work = "%s/%s" % (self.image_dir, image.name)
f = geojson.Feature(geometry=gc, id=i,
properties={"name": image.name,
"source": source,
"work": work})
feature_list.append( f )
fc = geojson.FeatureCollection( feature_list )
dump = geojson.dumps(fc)
print str(dump)
f = open( path + "/points.geojson", "w" )
f.write(dump)
f.close()
warped_dir = path + "/warped"
if not os.path.exists(warped_dir):
os.makedirs(warped_dir )
for i, image in enumerate(self.image_list):
print "rendering %s" % image.name
w, h, warp = \
self.render.drawImage(image,
source_dir=self.source_dir,
cm_per_pixel=cm_per_pixel,
keypoints=False,
bounds=None)
cv2.imwrite( warped_dir + "/" + image.name, warp )
# coding=utf-8 pylint: disable=too-many-lines
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import re
from enum import Enum
from ._generated.models import (
LanguageInput,
MultiLanguageInput,
)
from ._generated.v3_0 import models as _v3_0_models
from ._generated.v3_2_preview_2 import models as _v3_2_preview_models
from ._version import DEFAULT_API_VERSION
def _get_indices(relation):
return [int(s) for s in re.findall(r"\d+", relation)]
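# Example (reference format illustrative): for a relation reference such as
# "#/results/documents/0/entities/1", _get_indices() returns [0, 1] -- the
# document index followed by the entity index within that document.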
class DictMixin(object):
def __setitem__(self, key, item):
self.__dict__[key] = item
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return str(self)
def __len__(self):
return len(self.keys())
def __delitem__(self, key):
self.__dict__[key] = None
def __eq__(self, other):
"""Compare objects by comparing all attributes."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
"""Compare objects by comparing all attributes."""
return not self.__eq__(other)
def __contains__(self, key):
return key in self.__dict__
def __str__(self):
return str({k: v for k, v in self.__dict__.items() if not k.startswith("_")})
def has_key(self, k):
return k in self.__dict__
def update(self, *args, **kwargs):
return self.__dict__.update(*args, **kwargs)
def keys(self):
return [k for k in self.__dict__ if not k.startswith("_")]
def values(self):
return [v for k, v in self.__dict__.items() if not k.startswith("_")]
def items(self):
return [(k, v) for k, v in self.__dict__.items() if not k.startswith("_")]
def get(self, key, default=None):
if key in self.__dict__:
return self.__dict__[key]
return default
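# Usage sketch (illustrative): models deriving from DictMixin can be read
# either attribute-style or dict-style, e.g. for a DetectedLanguage 'lang':
#   lang.name == lang["name"]
#   "iso6391_name" in lang        # True
#   lang.keys()                   # public (non-underscore) attribute names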
class EntityAssociation(str, Enum):
"""Describes if the entity is the subject of the text or if it describes someone else."""
SUBJECT = "subject"
OTHER = "other"
class EntityCertainty(str, Enum):
"""Describes the entities certainty and polarity."""
POSITIVE = "positive"
POSITIVE_POSSIBLE = "positivePossible"
NEUTRAL_POSSIBLE = "neutralPossible"
NEGATIVE_POSSIBLE = "negativePossible"
NEGATIVE = "negative"
class EntityConditionality(str, Enum):
"""Describes any conditionality on the entity."""
HYPOTHETICAL = "hypothetical"
CONDITIONAL = "conditional"
class HealthcareEntityRelation(str, Enum):
"""Type of relation. Examples include: 'DosageOfMedication' or 'FrequencyOfMedication', etc."""
ABBREVIATION = "Abbreviation"
DIRECTION_OF_BODY_STRUCTURE = "DirectionOfBodyStructure"
DIRECTION_OF_CONDITION = "DirectionOfCondition"
DIRECTION_OF_EXAMINATION = "DirectionOfExamination"
DIRECTION_OF_TREATMENT = "DirectionOfTreatment"
DOSAGE_OF_MEDICATION = "DosageOfMedication"
FORM_OF_MEDICATION = "FormOfMedication"
FREQUENCY_OF_MEDICATION = "FrequencyOfMedication"
FREQUENCY_OF_TREATMENT = "FrequencyOfTreatment"
QUALIFIER_OF_CONDITION = "QualifierOfCondition"
RELATION_OF_EXAMINATION = "RelationOfExamination"
ROUTE_OF_MEDICATION = "RouteOfMedication"
TIME_OF_CONDITION = "TimeOfCondition"
TIME_OF_EVENT = "TimeOfEvent"
TIME_OF_EXAMINATION = "TimeOfExamination"
TIME_OF_MEDICATION = "TimeOfMedication"
TIME_OF_TREATMENT = "TimeOfTreatment"
UNIT_OF_CONDITION = "UnitOfCondition"
UNIT_OF_EXAMINATION = "UnitOfExamination"
VALUE_OF_CONDITION = "ValueOfCondition"
VALUE_OF_EXAMINATION = "ValueOfExamination"
class PiiEntityCategory(str, Enum):
"""Categories of Personally Identifiable Information (PII)."""
ABA_ROUTING_NUMBER = "ABARoutingNumber"
AR_NATIONAL_IDENTITY_NUMBER = "ARNationalIdentityNumber"
AU_BANK_ACCOUNT_NUMBER = "AUBankAccountNumber"
AU_DRIVERS_LICENSE_NUMBER = "AUDriversLicenseNumber"
AU_MEDICAL_ACCOUNT_NUMBER = "AUMedicalAccountNumber"
AU_PASSPORT_NUMBER = "AUPassportNumber"
AU_TAX_FILE_NUMBER = "AUTaxFileNumber"
AU_BUSINESS_NUMBER = "AUBusinessNumber"
AU_COMPANY_NUMBER = "AUCompanyNumber"
AT_IDENTITY_CARD = "ATIdentityCard"
AT_TAX_IDENTIFICATION_NUMBER = "ATTaxIdentificationNumber"
AT_VALUE_ADDED_TAX_NUMBER = "ATValueAddedTaxNumber"
AZURE_DOCUMENT_DB_AUTH_KEY = "AzureDocumentDBAuthKey"
AZURE_IAAS_DATABASE_CONNECTION_AND_SQL_STRING = (
"AzureIAASDatabaseConnectionAndSQLString"
)
AZURE_IO_T_CONNECTION_STRING = "AzureIoTConnectionString"
AZURE_PUBLISH_SETTING_PASSWORD = "AzurePublishSettingPassword"
AZURE_REDIS_CACHE_STRING = "AzureRedisCacheString"
AZURE_SAS = "AzureSAS"
AZURE_SERVICE_BUS_STRING = "AzureServiceBusString"
AZURE_STORAGE_ACCOUNT_KEY = "AzureStorageAccountKey"
AZURE_STORAGE_ACCOUNT_GENERIC = "AzureStorageAccountGeneric"
BE_NATIONAL_NUMBER = "BENationalNumber"
BE_NATIONAL_NUMBER_V2 = "BENationalNumberV2"
BE_VALUE_ADDED_TAX_NUMBER = "BEValueAddedTaxNumber"
BRCPF_NUMBER = "BRCPFNumber"
BR_LEGAL_ENTITY_NUMBER = "BRLegalEntityNumber"
BR_NATIONAL_IDRG = "BRNationalIDRG"
BG_UNIFORM_CIVIL_NUMBER = "BGUniformCivilNumber"
CA_BANK_ACCOUNT_NUMBER = "CABankAccountNumber"
CA_DRIVERS_LICENSE_NUMBER = "CADriversLicenseNumber"
CA_HEALTH_SERVICE_NUMBER = "CAHealthServiceNumber"
CA_PASSPORT_NUMBER = "CAPassportNumber"
CA_PERSONAL_HEALTH_IDENTIFICATION = "CAPersonalHealthIdentification"
CA_SOCIAL_INSURANCE_NUMBER = "CASocialInsuranceNumber"
CL_IDENTITY_CARD_NUMBER = "CLIdentityCardNumber"
CN_RESIDENT_IDENTITY_CARD_NUMBER = "CNResidentIdentityCardNumber"
CREDIT_CARD_NUMBER = "CreditCardNumber"
HR_IDENTITY_CARD_NUMBER = "HRIdentityCardNumber"
HR_NATIONAL_ID_NUMBER = "HRNationalIDNumber"
HR_PERSONAL_IDENTIFICATION_NUMBER = "HRPersonalIdentificationNumber"
HR_PERSONAL_IDENTIFICATION_OIB_NUMBER_V2 = "HRPersonalIdentificationOIBNumberV2"
CY_IDENTITY_CARD = "CYIdentityCard"
CY_TAX_IDENTIFICATION_NUMBER = "CYTaxIdentificationNumber"
CZ_PERSONAL_IDENTITY_NUMBER = "CZPersonalIdentityNumber"
CZ_PERSONAL_IDENTITY_V2 = "CZPersonalIdentityV2"
DK_PERSONAL_IDENTIFICATION_NUMBER = "DKPersonalIdentificationNumber"
DK_PERSONAL_IDENTIFICATION_V2 = "DKPersonalIdentificationV2"
DRUG_ENFORCEMENT_AGENCY_NUMBER = "DrugEnforcementAgencyNumber"
EE_PERSONAL_IDENTIFICATION_CODE = "EEPersonalIdentificationCode"
EU_DEBIT_CARD_NUMBER = "EUDebitCardNumber"
EU_DRIVERS_LICENSE_NUMBER = "EUDriversLicenseNumber"
EUGPS_COORDINATES = "EUGPSCoordinates"
EU_NATIONAL_IDENTIFICATION_NUMBER = "EUNationalIdentificationNumber"
EU_PASSPORT_NUMBER = "EUPassportNumber"
EU_SOCIAL_SECURITY_NUMBER = "EUSocialSecurityNumber"
EU_TAX_IDENTIFICATION_NUMBER = "EUTaxIdentificationNumber"
FI_EUROPEAN_HEALTH_NUMBER = "FIEuropeanHealthNumber"
FI_NATIONAL_ID = "FINationalID"
FI_NATIONAL_IDV2 = "FINationalIDV2"
FI_PASSPORT_NUMBER = "FIPassportNumber"
FR_DRIVERS_LICENSE_NUMBER = "FRDriversLicenseNumber"
FR_HEALTH_INSURANCE_NUMBER = "FRHealthInsuranceNumber"
FR_NATIONAL_ID = "FRNationalID"
FR_PASSPORT_NUMBER = "FRPassportNumber"
FR_SOCIAL_SECURITY_NUMBER = "FRSocialSecurityNumber"
FR_TAX_IDENTIFICATION_NUMBER = "FRTaxIdentificationNumber"
FR_VALUE_ADDED_TAX_NUMBER = "FRValueAddedTaxNumber"
DE_DRIVERS_LICENSE_NUMBER = "DEDriversLicenseNumber"
DE_PASSPORT_NUMBER = "DEPassportNumber"
DE_IDENTITY_CARD_NUMBER = "DEIdentityCardNumber"
DE_TAX_IDENTIFICATION_NUMBER = "DETaxIdentificationNumber"
DE_VALUE_ADDED_NUMBER = "DEValueAddedNumber"
GR_NATIONAL_ID_CARD = "GRNationalIDCard"
GR_NATIONAL_IDV2 = "GRNationalIDV2"
GR_TAX_IDENTIFICATION_NUMBER = "GRTaxIdentificationNumber"
HK_IDENTITY_CARD_NUMBER = "HKIdentityCardNumber"
HU_VALUE_ADDED_NUMBER = "HUValueAddedNumber"
HU_PERSONAL_IDENTIFICATION_NUMBER = "HUPersonalIdentificationNumber"
HU_TAX_IDENTIFICATION_NUMBER = "HUTaxIdentificationNumber"
IN_PERMANENT_ACCOUNT = "INPermanentAccount"
IN_UNIQUE_IDENTIFICATION_NUMBER = "INUniqueIdentificationNumber"
ID_IDENTITY_CARD_NUMBER = "IDIdentityCardNumber"
INTERNATIONAL_BANKING_ACCOUNT_NUMBER = "InternationalBankingAccountNumber"
IE_PERSONAL_PUBLIC_SERVICE_NUMBER = "IEPersonalPublicServiceNumber"
IE_PERSONAL_PUBLIC_SERVICE_NUMBER_V2 = "IEPersonalPublicServiceNumberV2"
IL_BANK_ACCOUNT_NUMBER = "ILBankAccountNumber"
IL_NATIONAL_ID = "ILNationalID"
IT_DRIVERS_LICENSE_NUMBER = "ITDriversLicenseNumber"
IT_FISCAL_CODE = "ITFiscalCode"
IT_VALUE_ADDED_TAX_NUMBER = "ITValueAddedTaxNumber"
JP_BANK_ACCOUNT_NUMBER = "JPBankAccountNumber"
JP_DRIVERS_LICENSE_NUMBER = "JPDriversLicenseNumber"
JP_PASSPORT_NUMBER = "JPPassportNumber"
JP_RESIDENT_REGISTRATION_NUMBER = "JPResidentRegistrationNumber"
JP_SOCIAL_INSURANCE_NUMBER = "JPSocialInsuranceNumber"
JP_MY_NUMBER_CORPORATE = "JPMyNumberCorporate"
JP_MY_NUMBER_PERSONAL = "JPMyNumberPersonal"
JP_RESIDENCE_CARD_NUMBER = "JPResidenceCardNumber"
LV_PERSONAL_CODE = "LVPersonalCode"
LT_PERSONAL_CODE = "LTPersonalCode"
LU_NATIONAL_IDENTIFICATION_NUMBER_NATURAL = "LUNationalIdentificationNumberNatural"
LU_NATIONAL_IDENTIFICATION_NUMBER_NON_NATURAL = (
"LUNationalIdentificationNumberNonNatural"
)
MY_IDENTITY_CARD_NUMBER = "MYIdentityCardNumber"
MT_IDENTITY_CARD_NUMBER = "MTIdentityCardNumber"
MT_TAX_ID_NUMBER = "MTTaxIDNumber"
NL_CITIZENS_SERVICE_NUMBER = "NLCitizensServiceNumber"
NL_CITIZENS_SERVICE_NUMBER_V2 = "NLCitizensServiceNumberV2"
NL_TAX_IDENTIFICATION_NUMBER = "NLTaxIdentificationNumber"
NL_VALUE_ADDED_TAX_NUMBER = "NLValueAddedTaxNumber"
NZ_BANK_ACCOUNT_NUMBER = "NZBankAccountNumber"
NZ_DRIVERS_LICENSE_NUMBER = "NZDriversLicenseNumber"
NZ_INLAND_REVENUE_NUMBER = "NZInlandRevenueNumber"
NZ_MINISTRY_OF_HEALTH_NUMBER = "NZMinistryOfHealthNumber"
NZ_SOCIAL_WELFARE_NUMBER = "NZSocialWelfareNumber"
NO_IDENTITY_NUMBER = "NOIdentityNumber"
PH_UNIFIED_MULTI_PURPOSE_ID_NUMBER = "PHUnifiedMultiPurposeIDNumber"
PL_IDENTITY_CARD = "PLIdentityCard"
PL_NATIONAL_ID = "PLNationalID"
PL_NATIONAL_IDV2 = "PLNationalIDV2"
PL_PASSPORT_NUMBER = "PLPassportNumber"
PL_TAX_IDENTIFICATION_NUMBER = "PLTaxIdentificationNumber"
PLREGON_NUMBER = "PLREGONNumber"
PT_CITIZEN_CARD_NUMBER = "PTCitizenCardNumber"
PT_CITIZEN_CARD_NUMBER_V2 = "PTCitizenCardNumberV2"
PT_TAX_IDENTIFICATION_NUMBER = "PTTaxIdentificationNumber"
RO_PERSONAL_NUMERICAL_CODE = "ROPersonalNumericalCode"
RU_PASSPORT_NUMBER_DOMESTIC = "RUPassportNumberDomestic"
RU_PASSPORT_NUMBER_INTERNATIONAL = "RUPassportNumberInternational"
SA_NATIONAL_ID = "SANationalID"
SG_NATIONAL_REGISTRATION_IDENTITY_CARD_NUMBER = (
"SGNationalRegistrationIdentityCardNumber"
)
SK_PERSONAL_NUMBER = "SKPersonalNumber"
SI_TAX_IDENTIFICATION_NUMBER = "SITaxIdentificationNumber"
SI_UNIQUE_MASTER_CITIZEN_NUMBER = "SIUniqueMasterCitizenNumber"
ZA_IDENTIFICATION_NUMBER = "ZAIdentificationNumber"
KR_RESIDENT_REGISTRATION_NUMBER = "KRResidentRegistrationNumber"
ESDNI = "ESDNI"
ES_SOCIAL_SECURITY_NUMBER = "ESSocialSecurityNumber"
ES_TAX_IDENTIFICATION_NUMBER = "ESTaxIdentificationNumber"
SQL_SERVER_CONNECTION_STRING = "SQLServerConnectionString"
SE_NATIONAL_ID = "SENationalID"
SE_NATIONAL_IDV2 = "SENationalIDV2"
SE_PASSPORT_NUMBER = "SEPassportNumber"
SE_TAX_IDENTIFICATION_NUMBER = "SETaxIdentificationNumber"
SWIFT_CODE = "SWIFTCode"
CH_SOCIAL_SECURITY_NUMBER = "CHSocialSecurityNumber"
TW_NATIONAL_ID = "TWNationalID"
TW_PASSPORT_NUMBER = "TWPassportNumber"
TW_RESIDENT_CERTIFICATE = "TWResidentCertificate"
TH_POPULATION_IDENTIFICATION_CODE = "THPopulationIdentificationCode"
TR_NATIONAL_IDENTIFICATION_NUMBER = "TRNationalIdentificationNumber"
UK_DRIVERS_LICENSE_NUMBER = "UKDriversLicenseNumber"
UK_ELECTORAL_ROLL_NUMBER = "UKElectoralRollNumber"
UK_NATIONAL_HEALTH_NUMBER = "UKNationalHealthNumber"
UK_NATIONAL_INSURANCE_NUMBER = "UKNationalInsuranceNumber"
UK_UNIQUE_TAXPAYER_NUMBER = "UKUniqueTaxpayerNumber"
USUK_PASSPORT_NUMBER = "USUKPassportNumber"
US_BANK_ACCOUNT_NUMBER = "USBankAccountNumber"
US_DRIVERS_LICENSE_NUMBER = "USDriversLicenseNumber"
US_INDIVIDUAL_TAXPAYER_IDENTIFICATION = "USIndividualTaxpayerIdentification"
US_SOCIAL_SECURITY_NUMBER = "USSocialSecurityNumber"
UA_PASSPORT_NUMBER_DOMESTIC = "UAPassportNumberDomestic"
UA_PASSPORT_NUMBER_INTERNATIONAL = "UAPassportNumberInternational"
ORGANIZATION = "Organization"
EMAIL = "Email"
URL = "URL"
AGE = "Age"
PHONE_NUMBER = "PhoneNumber"
IP_ADDRESS = "IPAddress"
DATE = "Date"
PERSON = "Person"
ADDRESS = "Address"
ALL = "All"
DEFAULT = "Default"
class HealthcareEntityCategory(str, Enum):
"""Healthcare Entity Category."""
BODY_STRUCTURE = "BodyStructure"
AGE = "Age"
GENDER = "Gender"
EXAMINATION_NAME = "ExaminationName"
DATE = "Date"
DIRECTION = "Direction"
FREQUENCY = "Frequency"
MEASUREMENT_VALUE = "MeasurementValue"
MEASUREMENT_UNIT = "MeasurementUnit"
RELATIONAL_OPERATOR = "RelationalOperator"
TIME = "Time"
GENE_OR_PROTEIN = "GeneOrProtein"
VARIANT = "Variant"
ADMINISTRATIVE_EVENT = "AdministrativeEvent"
CARE_ENVIRONMENT = "CareEnvironment"
HEALTHCARE_PROFESSION = "HealthcareProfession"
DIAGNOSIS = "Diagnosis"
SYMPTOM_OR_SIGN = "SymptomOrSign"
CONDITION_QUALIFIER = "ConditionQualifier"
MEDICATION_CLASS = "MedicationClass"
MEDICATION_NAME = "MedicationName"
DOSAGE = "Dosage"
MEDICATION_FORM = "MedicationForm"
MEDICATION_ROUTE = "MedicationRoute"
FAMILY_RELATION = "FamilyRelation"
TREATMENT_NAME = "TreatmentName"
class PiiEntityDomain(str, Enum):
"""The different domains of PII entities that users can filter by"""
PROTECTED_HEALTH_INFORMATION = (
"phi" # See https://aka.ms/tanerpii for more information.
)
class DetectedLanguage(DictMixin):
"""DetectedLanguage contains the predicted language found in text,
its confidence score, and its ISO 639-1 representation.
:ivar name: Long name of a detected language (e.g. English,
French).
:vartype name: str
:ivar iso6391_name: A two letter representation of the detected
language according to the ISO 639-1 standard (e.g. en, fr).
:vartype iso6391_name: str
:ivar confidence_score: A confidence score between 0 and 1. Scores close
to 1 indicate 100% certainty that the identified language is correct.
:vartype confidence_score: float
"""
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
self.iso6391_name = kwargs.get("iso6391_name", None)
self.confidence_score = kwargs.get("confidence_score", None)
@classmethod
def _from_generated(cls, language):
return cls(
name=language.name,
iso6391_name=language.iso6391_name,
confidence_score=language.confidence_score,
)
def __repr__(self):
return "DetectedLanguage(name={}, iso6391_name={}, confidence_score={})".format(
self.name, self.iso6391_name, self.confidence_score
)[:1024]
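# Illustrative sketch (not part of the SDK): constructing a DetectedLanguage by hand with
# hypothetical values; in practice instances come back from the service via
# _from_generated, and repr() output is capped at 1024 characters above.
_example_detected_language = DetectedLanguage(
    name="English", iso6391_name="en", confidence_score=0.99
)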
class RecognizeEntitiesResult(DictMixin):
"""RecognizeEntitiesResult is a result object which contains
the recognized entities from a particular document.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar entities: Recognized entities in the document.
:vartype entities:
list[~azure.ai.textanalytics.CategorizedEntity]
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a RecognizeEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "RecognizeEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})".format(
self.id,
repr(self.entities),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
class RecognizePiiEntitiesResult(DictMixin):
"""RecognizePiiEntitiesResult is a result object which contains
the recognized Personally Identifiable Information (PII) entities
from a particular document.
:ivar str id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:ivar entities: Recognized PII entities in the document.
:vartype entities:
list[~azure.ai.textanalytics.PiiEntity]
:ivar str redacted_text: Returns the text of the input document with all of the PII information
redacted out.
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a RecognizePiiEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.redacted_text = kwargs.get("redacted_text", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return (
"RecognizePiiEntitiesResult(id={}, entities={}, redacted_text={}, warnings={}, "
"statistics={}, is_error={})".format(
self.id,
repr(self.entities),
self.redacted_text,
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[:1024]
)
class AnalyzeHealthcareEntitiesResult(DictMixin):
"""
AnalyzeHealthcareEntitiesResult contains the Healthcare entities from a
particular document.
:ivar str id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:ivar entities: Identified Healthcare entities in the document, i.e. in
the document "The subject took ibuprofen", "ibuprofen" is an identified entity
from the document.
:vartype entities:
list[~azure.ai.textanalytics.HealthcareEntity]
:ivar entity_relations: Identified Healthcare relations between entities. For example, in the
document "The subject took 100mg of ibuprofen", we would identify the relationship
between the dosage of 100mg and the medication ibuprofen.
:vartype entity_relations: list[~azure.ai.textanalytics.HealthcareRelation]
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If show_stats=true was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of an AnalyzeHealthcareEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.entity_relations = kwargs.get("entity_relations", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
@classmethod
def _from_generated(cls, healthcare_result):
entities = [
HealthcareEntity._from_generated(e) # pylint: disable=protected-access
for e in healthcare_result.entities
]
relations = [
HealthcareRelation._from_generated( # pylint: disable=protected-access
r, entities
)
for r in healthcare_result.relations
]
return cls(
id=healthcare_result.id,
entities=entities,
entity_relations=relations,
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in healthcare_result.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
healthcare_result.statistics
),
)
def __repr__(self):
return (
"AnalyzeHealthcareEntitiesResult(id={}, entities={}, entity_relations={}, warnings={}, "
"statistics={}, is_error={})".format(
self.id,
repr(self.entities),
repr(self.entity_relations),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[:1024]
)
class HealthcareRelation(DictMixin):
"""HealthcareRelation is a result object which represents a relation detected in a document.
Every HealthcareRelation is an entity graph of a certain relation type,
where all entities are connected and have specific roles within the relation context.
:ivar relation_type: The type of relation, i.e. the relationship between "100mg" and
"ibuprofen" in the document "The subject took 100 mg of ibuprofen" is "DosageOfMedication".
Possible values found in :class:`~azure.ai.textanalytics.HealthcareEntityRelation`
:vartype relation_type: str
:ivar roles: The roles present in this relation. I.e., in the document
"The subject took 100 mg of ibuprofen", the present roles are "Dosage" and "Medication".
:vartype roles: list[~azure.ai.textanalytics.HealthcareRelationRole]
"""
def __init__(self, **kwargs):
self.relation_type = kwargs.get("relation_type")
self.roles = kwargs.get("roles")
@classmethod
def _from_generated(cls, healthcare_relation_result, entities):
roles = [
HealthcareRelationRole._from_generated( # pylint: disable=protected-access
r, entities
)
for r in healthcare_relation_result.entities
]
return cls(
relation_type=healthcare_relation_result.relation_type,
roles=roles,
)
def __repr__(self):
return "HealthcareRelation(relation_type={}, roles={})".format(
self.relation_type,
repr(self.roles),
)[:1024]
class HealthcareRelationRole(DictMixin):
"""A model representing a role in a relation.
For example, in "The subject took 100 mg of ibuprofen",
"100 mg" is a dosage entity fulfilling the role "Dosage"
in the extracted relation "DosageOfMedication".
:ivar name: The role of the entity in the relationship. I.e., in the relation
"The subject took 100 mg of ibuprofen", the dosage entity "100 mg" has role
"Dosage".
:vartype name: str
:ivar entity: The entity that is present in the relationship. For example, in
"The subject took 100 mg of ibuprofen", this property holds the dosage entity
of "100 mg".
:vartype entity: ~azure.ai.textanalytics.HealthcareEntity
"""
def __init__(self, **kwargs):
self.name = kwargs.get("name")
self.entity = kwargs.get("entity")
@staticmethod
def _get_entity(healthcare_role_result, entities):
nums = _get_indices(healthcare_role_result.ref)
entity_index = nums[
1
] # first num parsed from index is document #, second is entity index
return entities[entity_index]
@classmethod
def _from_generated(cls, healthcare_role_result, entities):
return cls(
name=healthcare_role_result.role,
entity=HealthcareRelationRole._get_entity(healthcare_role_result, entities),
)
def __repr__(self):
return "HealthcareRelationRole(name={}, entity={})".format(
self.name, repr(self.entity)
)
class DetectLanguageResult(DictMixin):
"""DetectLanguageResult is a result object which contains
the detected language of a particular document.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar primary_language: The primary language detected in the document.
:vartype primary_language: ~azure.ai.textanalytics.DetectedLanguage
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a DetectLanguageResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.primary_language = kwargs.get("primary_language", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return (
"DetectLanguageResult(id={}, primary_language={}, warnings={}, statistics={}, "
"is_error={})".format(
self.id,
repr(self.primary_language),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[:1024]
)
class CategorizedEntity(DictMixin):
"""CategorizedEntity contains information about a particular
entity found in text.
:ivar text: Entity text as appears in the request.
:vartype text: str
:ivar category: Entity category, such as Person/Location/Org/SSN etc
:vartype category: str
:ivar subcategory: Entity subcategory, such as Age/Year/TimeRange etc
:vartype subcategory: str
:ivar int length: The entity text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints
by default.
:ivar int offset: The entity text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
:ivar confidence_score: Confidence score between 0 and 1 of the extracted
entity.
:vartype confidence_score: float
.. versionadded:: v3.1
The *offset* and *length* properties.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.category = kwargs.get("category", None)
self.subcategory = kwargs.get("subcategory", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.confidence_score = kwargs.get("confidence_score", None)
@classmethod
def _from_generated(cls, entity):
offset = entity.offset
length = entity.length
if isinstance(entity, _v3_0_models.Entity):
# we do not return offset for v3.0 since
# the correct encoding was not introduced for v3.0
offset = None
length = None
return cls(
text=entity.text,
category=entity.category,
subcategory=entity.subcategory,
length=length,
offset=offset,
confidence_score=entity.confidence_score,
)
def __repr__(self):
return (
"CategorizedEntity(text={}, category={}, subcategory={}, "
"length={}, offset={}, confidence_score={})".format(
self.text,
self.category,
self.subcategory,
self.length,
self.offset,
self.confidence_score,
)[:1024]
)
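# Illustrative sketch (not part of the SDK): for plain ASCII text the default
# UnicodeCodePoint string_index_type means offset/length line up with Python string
# slicing; the document and entity below are hypothetical.
_example_document = "Bill Gates founded Microsoft."
_example_entity = CategorizedEntity(
    text="Bill Gates", category="Person", offset=0, length=10, confidence_score=0.95
)
assert _example_document[
    _example_entity.offset : _example_entity.offset + _example_entity.length
] == _example_entity.text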
class PiiEntity(DictMixin):
"""PiiEntity contains information about a Personally Identifiable
Information (PII) entity found in text.
:ivar str text: Entity text as appears in the request.
:ivar str category: Entity category, such as Financial Account
Identification/Social Security Number/Phone Number, etc.
:ivar str subcategory: Entity subcategory, such as Credit Card/EU
Phone number/ABA Routing Numbers, etc.
:ivar int length: The PII entity text length. This value depends on the value
of the `string_index_type` parameter specified in the original request, which
is UnicodeCodePoints by default.
:ivar int offset: The PII entity text offset from the start of the document.
This value depends on the value of the `string_index_type` parameter specified
in the original request, which is UnicodeCodePoints by default.
:ivar float confidence_score: Confidence score between 0 and 1 of the extracted
entity.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.category = kwargs.get("category", None)
self.subcategory = kwargs.get("subcategory", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.confidence_score = kwargs.get("confidence_score", None)
@classmethod
def _from_generated(cls, entity):
return cls(
text=entity.text,
category=entity.category,
subcategory=entity.subcategory,
length=entity.length,
offset=entity.offset,
confidence_score=entity.confidence_score,
)
def __repr__(self):
return (
"PiiEntity(text={}, category={}, subcategory={}, length={}, "
"offset={}, confidence_score={})".format(
self.text,
self.category,
self.subcategory,
self.length,
self.offset,
self.confidence_score,
)[:1024]
)
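# Illustrative sketch (not part of the SDK): a hypothetical PII entity paired with the
# redacted form of its source document, showing how redacted_text masks the entity span.
_example_pii_entity = PiiEntity(
    text="859-98-0987",
    category="USSocialSecurityNumber",
    offset=7,
    length=11,
    confidence_score=0.65,
)
_example_pii_result = RecognizePiiEntitiesResult(
    id="1",
    entities=[_example_pii_entity],
    redacted_text="SSN is ***********",
)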
class HealthcareEntity(DictMixin):
"""HealthcareEntity contains information about a Healthcare entity found in text.
:ivar str text: Entity text as appears in the document.
:ivar str normalized_text: Optional. Normalized version of the raw `text` we extract
from the document. Not all `text` will have a normalized version.
:ivar str category: Entity category, see the :class:`~azure.ai.textanalytics.HealthcareEntityCategory`
type for possible healthcare entity categories.
:ivar str subcategory: Entity subcategory.
:ivar assertion: Contains various assertions about this entity. For example, if
an entity is a diagnosis, is this diagnosis 'conditional' on a symptom?
Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated'
with another diagnosis?
:vartype assertion: ~azure.ai.textanalytics.HealthcareEntityAssertion
:ivar int length: The entity text length. This value depends on the value
of the `string_index_type` parameter specified in the original request, which is
UnicodeCodePoints by default.
:ivar int offset: The entity text offset from the start of the document.
This value depends on the value of the `string_index_type` parameter specified
in the original request, which is UnicodeCodePoints by default.
:ivar float confidence_score: Confidence score between 0 and 1 of the extracted
entity.
:ivar data_sources: A collection of entity references in known data sources.
:vartype data_sources: list[~azure.ai.textanalytics.HealthcareEntityDataSource]
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.normalized_text = kwargs.get("normalized_text", None)
self.category = kwargs.get("category", None)
self.subcategory = kwargs.get("subcategory", None)
self.assertion = kwargs.get("assertion", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.confidence_score = kwargs.get("confidence_score", None)
self.data_sources = kwargs.get("data_sources", [])
@classmethod
def _from_generated(cls, healthcare_entity):
assertion = None
try:
if healthcare_entity.assertion:
assertion = HealthcareEntityAssertion._from_generated( # pylint: disable=protected-access
healthcare_entity.assertion
)
except AttributeError:
assertion = None
return cls(
text=healthcare_entity.text,
normalized_text=healthcare_entity.name,
category=healthcare_entity.category,
subcategory=healthcare_entity.subcategory,
assertion=assertion,
length=healthcare_entity.length,
offset=healthcare_entity.offset,
confidence_score=healthcare_entity.confidence_score,
data_sources=[
HealthcareEntityDataSource(entity_id=l.id, name=l.data_source)
for l in healthcare_entity.links
]
if healthcare_entity.links
else None,
)
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return (
"HealthcareEntity(text={}, normalized_text={}, category={}, subcategory={}, assertion={}, length={}, "
"offset={}, confidence_score={}, data_sources={})".format(
self.text,
self.normalized_text,
self.category,
self.subcategory,
repr(self.assertion),
self.length,
self.offset,
self.confidence_score,
repr(self.data_sources),
)[:1024]
)
class HealthcareEntityAssertion(DictMixin):
"""Contains various assertions about a `HealthcareEntity`.
For example, if an entity is a diagnosis, is this diagnosis 'conditional' on a symptom?
Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated'
with another diagnosis?
:ivar str conditionality: Describes whether the healthcare entity it's on is conditional on another entity.
For example, "If the patient has a fever, he has pneumonia", the diagnosis of pneumonia
is 'conditional' on whether the patient has a fever. Possible values are "hypothetical" and
"conditional".
:ivar str certainty: Describes how certain the healthcare entity it's on is. For example,
in "The patient may have a fever", the fever entity is not 100% certain, but is instead
"positivePossible". Possible values are "positive", "positivePossible", "neutralPossible",
"negativePossible", and "negative".
:ivar str association: Describes whether the healthcare entity it's on is the subject of the document, or
if this entity describes someone else in the document. For example, in "The subject's mother has
a fever", the "fever" entity is not associated with the subject themselves, but with the subject's
mother. Possible values are "subject" and "other".
"""
def __init__(self, **kwargs):
self.conditionality = kwargs.get("conditionality", None)
self.certainty = kwargs.get("certainty", None)
self.association = kwargs.get("association", None)
@classmethod
def _from_generated(cls, healthcare_assertion):
return cls(
conditionality=healthcare_assertion.conditionality,
certainty=healthcare_assertion.certainty,
association=healthcare_assertion.association,
)
def __repr__(self):
return "HealthcareEntityAssertion(conditionality={}, certainty={}, association={})".format(
self.conditionality, self.certainty, self.association
)
class HealthcareEntityDataSource(DictMixin):
"""
HealthcareEntityDataSource contains information representing an entity reference in a known data source.
:ivar str entity_id: ID of the entity in the given source catalog.
:ivar str name: The name of the entity catalog from where the entity was identified, such as UMLS, CHV, MSH, etc.
"""
def __init__(self, **kwargs):
self.entity_id = kwargs.get("entity_id", None)
self.name = kwargs.get("name", None)
def __repr__(self):
return "HealthcareEntityDataSource(entity_id={}, name={})".format(
self.entity_id, self.name
)[:1024]
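# Illustrative sketch (not part of the SDK): a hand-assembled HealthcareEntity for the
# text "The patient may have a fever", with a hypothetical assertion and a UMLS data
# source reference such as the service might attach.
_example_healthcare_entity = HealthcareEntity(
    text="fever",
    normalized_text="Fever",
    category="SymptomOrSign",
    assertion=HealthcareEntityAssertion(certainty="positivePossible", association="subject"),
    offset=23,
    length=5,
    confidence_score=0.9,
    data_sources=[HealthcareEntityDataSource(entity_id="C0015967", name="UMLS")],
)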
class TextAnalyticsError(DictMixin):
"""TextAnalyticsError contains the error code, message, and
other details that explain why the batch or individual document
failed to be processed by the service.
:ivar code: Error code. Possible values include:
'invalidRequest', 'invalidArgument', 'internalServerError',
'serviceUnavailable', 'invalidParameterValue', 'invalidRequestBodyFormat',
'emptyRequest', 'missingInputRecords', 'invalidDocument', 'modelVersionIncorrect',
'invalidDocumentBatch', 'unsupportedLanguageCode', 'invalidCountryHint'
:vartype code: str
:ivar message: Error message.
:vartype message: str
:ivar target: Error target.
:vartype target: str
"""
def __init__(self, **kwargs):
self.code = kwargs.get("code", None)
self.message = kwargs.get("message", None)
self.target = kwargs.get("target", None)
@classmethod
def _from_generated(cls, err):
if err.innererror:
return cls(
code=err.innererror.code,
message=err.innererror.message,
target=err.innererror.target,
)
return cls(code=err.code, message=err.message, target=err.target)
def __repr__(self):
return "TextAnalyticsError(code={}, message={}, target={})".format(
self.code, self.message, self.target
)[:1024]
class TextAnalyticsWarning(DictMixin):
"""TextAnalyticsWarning contains the warning code and message that explains why
the response has a warning.
:ivar code: Warning code. Possible values include: 'LongWordsInDocument',
'DocumentTruncated'.
:vartype code: str
:ivar message: Warning message.
:vartype message: str
"""
def __init__(self, **kwargs):
self.code = kwargs.get("code", None)
self.message = kwargs.get("message", None)
@classmethod
def _from_generated(cls, warning):
return cls(
code=warning.code,
message=warning.message,
)
def __repr__(self):
return "TextAnalyticsWarning(code={}, message={})".format(
self.code, self.message
)[:1024]
class ExtractKeyPhrasesResult(DictMixin):
"""ExtractKeyPhrasesResult is a result object which contains
the key phrases found in a particular document.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar key_phrases: A list of representative words or phrases.
The number of key phrases returned is proportional to the number of words
in the input document.
:vartype key_phrases: list[str]
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of an ExtractKeyPhrasesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.key_phrases = kwargs.get("key_phrases", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "ExtractKeyPhrasesResult(id={}, key_phrases={}, warnings={}, statistics={}, is_error={})".format(
self.id,
self.key_phrases,
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
class RecognizeLinkedEntitiesResult(DictMixin):
"""RecognizeLinkedEntitiesResult is a result object which contains
links to a well-known knowledge base, such as Wikipedia or Bing.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar entities: Recognized well-known entities in the document.
:vartype entities:
list[~azure.ai.textanalytics.LinkedEntity]
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a RecognizeLinkedEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "RecognizeLinkedEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})".format(
self.id,
repr(self.entities),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
class AnalyzeSentimentResult(DictMixin):
"""AnalyzeSentimentResult is a result object which contains
the overall predicted sentiment and confidence scores for your document
and a per-sentence sentiment prediction with scores.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar sentiment: Predicted sentiment for document (Negative,
Neutral, Positive, or Mixed). Possible values include: 'positive',
'neutral', 'negative', 'mixed'
:vartype sentiment: str
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar confidence_scores: Document level sentiment confidence
scores between 0 and 1 for each sentiment label.
:vartype confidence_scores:
~azure.ai.textanalytics.SentimentConfidenceScores
:ivar sentences: Sentence level sentiment analysis.
:vartype sentences:
list[~azure.ai.textanalytics.SentenceSentiment]
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of an AnalyzeSentimentResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.sentiment = kwargs.get("sentiment", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.confidence_scores = kwargs.get("confidence_scores", None)
self.sentences = kwargs.get("sentences", None)
self.is_error = False
def __repr__(self):
return (
"AnalyzeSentimentResult(id={}, sentiment={}, warnings={}, statistics={}, confidence_scores={}, "
"sentences={}, is_error={})".format(
self.id,
self.sentiment,
repr(self.warnings),
repr(self.statistics),
repr(self.confidence_scores),
repr(self.sentences),
self.is_error,
)[:1024]
)
class TextDocumentStatistics(DictMixin):
"""TextDocumentStatistics contains information about
the document payload.
:ivar character_count: Number of text elements recognized in
the document.
:vartype character_count: int
:ivar transaction_count: Number of transactions for the document.
:vartype transaction_count: int
"""
def __init__(self, **kwargs):
self.character_count = kwargs.get("character_count", None)
self.transaction_count = kwargs.get("transaction_count", None)
@classmethod
def _from_generated(cls, stats):
if stats is None:
return None
return cls(
character_count=stats.characters_count,
transaction_count=stats.transactions_count,
)
def __repr__(self):
return (
"TextDocumentStatistics(character_count={}, transaction_count={})".format(
self.character_count, self.transaction_count
)[:1024]
)
class DocumentError(DictMixin):
"""DocumentError is an error object which represents an error on
the individual document.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar error: The document error.
:vartype error: ~azure.ai.textanalytics.TextAnalyticsError
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always True for an instance of a DocumentError.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.error = kwargs.get("error", None)
self.is_error = True
def __getattr__(self, attr):
result_set = set()
result_set.update(
RecognizeEntitiesResult().keys()
+ RecognizePiiEntitiesResult().keys()
+ DetectLanguageResult().keys()
+ RecognizeLinkedEntitiesResult().keys()
+ AnalyzeSentimentResult().keys()
+ ExtractKeyPhrasesResult().keys()
)
result_attrs = result_set.difference(DocumentError().keys())
if attr in result_attrs:
raise AttributeError(
"'DocumentError' object has no attribute '{}'. The service was unable to process this document:\n"
"Document Id: {}\nError: {} - {}\n".format(
attr, self.id, self.error.code, self.error.message
)
)
raise AttributeError(
"'DocumentError' object has no attribute '{}'".format(attr)
)
@classmethod
def _from_generated(cls, doc_err):
return cls(
id=doc_err.id,
error=TextAnalyticsError._from_generated( # pylint: disable=protected-access
doc_err.error
),
is_error=True,
)
def __repr__(self):
return "DocumentError(id={}, error={}, is_error={})".format(
self.id, repr(self.error), self.is_error
)[:1024]
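# Illustrative sketch (not part of the SDK): is_error lets callers split a mixed list of
# per-document results and DocumentError items without isinstance checks. The batch
# below is assembled by hand with hypothetical values.
_example_batch = [
    ExtractKeyPhrasesResult(id="1", key_phrases=["text analytics", "key phrases"]),
    DocumentError(
        id="2",
        error=TextAnalyticsError(code="invalidDocument", message="Document text is empty."),
    ),
]
_succeeded = [doc for doc in _example_batch if not doc.is_error]
_failed = [doc for doc in _example_batch if doc.is_error]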
class DetectLanguageInput(LanguageInput):
"""The input document to be analyzed for detecting language.
:keyword str id: Unique, non-empty document identifier.
:keyword str text: The input text to process.
:keyword str country_hint: A country hint to help better detect
the language of the text. Accepts two letter country codes
specified by ISO 3166-1 alpha-2. Defaults to "US". Pass
in the string "none" to not use a country_hint.
:ivar id: Required. Unique, non-empty document identifier.
:vartype id: str
:ivar text: Required. The input text to process.
:vartype text: str
:ivar country_hint: A country hint to help better detect
the language of the text. Accepts two letter country codes
specified by ISO 3166-1 alpha-2. Defaults to "US". Pass
in the string "none" to not use a country_hint.
:vartype country_hint: str
"""
def __init__(self, **kwargs):
super(DetectLanguageInput, self).__init__(**kwargs)
self.id = kwargs.get("id", None)
self.text = kwargs.get("text", None)
self.country_hint = kwargs.get("country_hint", None)
def __repr__(self):
return "DetectLanguageInput(id={}, text={}, country_hint={})".format(
self.id, self.text, self.country_hint
)[:1024]
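# Illustrative sketch (not part of the SDK): two hypothetical inputs for language
# detection; the second passes the string "none" to disable the country hint.
_example_language_inputs = [
    DetectLanguageInput(id="1", text="Este documento está en español.", country_hint="ES"),
    DetectLanguageInput(id="2", text="No hint for this one.", country_hint="none"),
]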
class LinkedEntity(DictMixin):
"""LinkedEntity contains a link to the well-known recognized
entity in text. The link comes from a data source like Wikipedia
or Bing. It additionally includes all of the matches of this
entity found in the document.
:ivar name: Entity Linking formal name.
:vartype name: str
:ivar matches: List of instances this entity appears in the text.
:vartype matches:
list[~azure.ai.textanalytics.LinkedEntityMatch]
:ivar language: Language used in the data source.
:vartype language: str
:ivar data_source_entity_id: Unique identifier of the recognized entity from the data
source.
:vartype data_source_entity_id: str
:ivar url: URL to the entity's page from the data source.
:vartype url: str
:ivar data_source: Data source used to extract entity linking,
such as Wiki/Bing etc.
:vartype data_source: str
:ivar str bing_entity_search_api_id: Bing Entity Search unique identifier of the recognized entity.
Use in conjunction with the Bing Entity Search SDK to fetch additional relevant information.
.. versionadded:: v3.1
The *bing_entity_search_api_id* property.
"""
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
self.matches = kwargs.get("matches", None)
self.language = kwargs.get("language", None)
self.data_source_entity_id = kwargs.get("data_source_entity_id", None)
self.url = kwargs.get("url", None)
self.data_source = kwargs.get("data_source", None)
self.bing_entity_search_api_id = kwargs.get("bing_entity_search_api_id", None)
@classmethod
def _from_generated(cls, entity):
bing_entity_search_api_id = (
entity.bing_id if hasattr(entity, "bing_id") else None
)
return cls(
name=entity.name,
matches=[
LinkedEntityMatch._from_generated(e) # pylint: disable=protected-access
for e in entity.matches
],
language=entity.language,
data_source_entity_id=entity.id,
url=entity.url,
data_source=entity.data_source,
bing_entity_search_api_id=bing_entity_search_api_id,
)
def __repr__(self):
return (
"LinkedEntity(name={}, matches={}, language={}, data_source_entity_id={}, url={}, "
"data_source={}, bing_entity_search_api_id={})".format(
self.name,
repr(self.matches),
self.language,
self.data_source_entity_id,
self.url,
self.data_source,
self.bing_entity_search_api_id,
)[:1024]
)
class LinkedEntityMatch(DictMixin):
"""A match for the linked entity found in text. Provides
the confidence score of the prediction and where the entity
was found in the text.
:ivar confidence_score: If a well-known item is recognized, a
decimal number denoting the confidence level between 0 and 1 will be
returned.
:vartype confidence_score: float
:ivar text: Entity text as appears in the request.
:ivar int length: The linked entity match text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints by default.
:ivar int offset: The linked entity match text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
:vartype text: str
.. versionadded:: v3.1
The *offset* and *length* properties.
"""
def __init__(self, **kwargs):
self.confidence_score = kwargs.get("confidence_score", None)
self.text = kwargs.get("text", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
@classmethod
def _from_generated(cls, match):
offset = match.offset
length = match.length
if isinstance(match, _v3_0_models.Match):
# we do not return offset for v3.0 since
# the correct encoding was not introduced for v3.0
offset = None
length = None
return cls(
confidence_score=match.confidence_score,
text=match.text,
length=length,
offset=offset,
)
def __repr__(self):
return "LinkedEntityMatch(confidence_score={}, text={}, length={}, offset={})".format(
self.confidence_score, self.text, self.length, self.offset
)[
:1024
]
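# Illustrative sketch (not part of the SDK): a hand-built LinkedEntity pointing at a
# Wikipedia article, with a single in-document match; all values are hypothetical.
_example_linked_entity = LinkedEntity(
    name="Seattle",
    matches=[LinkedEntityMatch(confidence_score=0.92, text="Seattle", offset=0, length=7)],
    language="en",
    data_source_entity_id="Seattle",
    url="https://en.wikipedia.org/wiki/Seattle",
    data_source="Wikipedia",
)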
class TextDocumentInput(DictMixin, MultiLanguageInput):
"""The input document to be analyzed by the service.
:keyword str id: Unique, non-empty document identifier.
:keyword str text: The input text to process.
:keyword str language: This is the 2 letter ISO 639-1 representation
of a language. For example, use "en" for English; "es" for Spanish etc. If
not set, uses "en" for English as default.
:ivar id: Required. Unique, non-empty document identifier.
:vartype id: str
:ivar text: Required. The input text to process.
:vartype text: str
:ivar language: This is the 2 letter ISO 639-1 representation
of a language. For example, use "en" for English; "es" for Spanish etc. If
not set, uses "en" for English as default.
:vartype language: str
"""
def __init__(self, **kwargs):
super(TextDocumentInput, self).__init__(**kwargs)
self.id = kwargs.get("id", None)
self.text = kwargs.get("text", None)
self.language = kwargs.get("language", None)
def __repr__(self):
return "TextDocumentInput(id={}, text={}, language={})".format(
self.id, self.text, self.language
)[:1024]
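# Illustrative sketch (not part of the SDK): a hypothetical batch of documents; the first
# omits language and therefore defaults to "en" on the service side.
_example_documents = [
    TextDocumentInput(id="1", text="The food was delicious."),
    TextDocumentInput(id="2", text="La comida estaba deliciosa.", language="es"),
]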
class TextDocumentBatchStatistics(DictMixin):
"""TextDocumentBatchStatistics contains information about the
request payload. Note: This object is not returned
in the response and needs to be retrieved by a response hook.
:ivar document_count: Number of documents submitted in the request.
:vartype document_count: int
:ivar valid_document_count: Number of valid documents. This
excludes empty, over-size limit or non-supported languages documents.
:vartype valid_document_count: int
:ivar erroneous_document_count: Number of invalid documents.
This includes empty, over-size limit or non-supported languages documents.
:vartype erroneous_document_count: int
:ivar transaction_count: Number of transactions for the request.
:vartype transaction_count: long
"""
def __init__(self, **kwargs):
self.document_count = kwargs.get("document_count", None)
self.valid_document_count = kwargs.get("valid_document_count", None)
self.erroneous_document_count = kwargs.get("erroneous_document_count", None)
self.transaction_count = kwargs.get("transaction_count", None)
@classmethod
def _from_generated(cls, statistics):
if statistics is None:
return None
return cls(
document_count=statistics["documentsCount"],
valid_document_count=statistics["validDocumentsCount"],
erroneous_document_count=statistics["erroneousDocumentsCount"],
transaction_count=statistics["transactionsCount"],
)
def __repr__(self):
return (
"TextDocumentBatchStatistics(document_count={}, valid_document_count={}, erroneous_document_count={}, "
"transaction_count={})".format(
self.document_count,
self.valid_document_count,
self.erroneous_document_count,
self.transaction_count,
)[:1024]
)
class SentenceSentiment(DictMixin):
"""SentenceSentiment contains the predicted sentiment and
confidence scores for each individual sentence in the document.
:ivar text: The sentence text.
:vartype text: str
:ivar sentiment: The predicted Sentiment for the sentence.
Possible values include: 'positive', 'neutral', 'negative'
:vartype sentiment: str
:ivar confidence_scores: The sentiment confidence score between 0
and 1 for the sentence for all labels.
:vartype confidence_scores:
~azure.ai.textanalytics.SentimentConfidenceScores
:ivar int length: The sentence text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints
by default.
:ivar int offset: The sentence text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
:ivar mined_opinions: The list of opinions mined from this sentence.
For example in the sentence "The food is good, but the service is bad", we would
mine the two opinions "food is good" and "service is bad". Only returned
if `show_opinion_mining` is set to True in the call to `analyze_sentiment` and
api version is v3.1 and up.
:vartype mined_opinions:
list[~azure.ai.textanalytics.MinedOpinion]
.. versionadded:: v3.1
The *offset*, *length*, and *mined_opinions* properties.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.sentiment = kwargs.get("sentiment", None)
self.confidence_scores = kwargs.get("confidence_scores", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.mined_opinions = kwargs.get("mined_opinions", None)
@classmethod
def _from_generated(cls, sentence, results, sentiment):
offset = sentence.offset
length = sentence.length
if isinstance(sentence, _v3_0_models.SentenceSentiment):
# we do not return offset for v3.0 since
# the correct encoding was not introduced for v3.0
offset = None
length = None
if hasattr(sentence, "targets"):
mined_opinions = (
[
MinedOpinion._from_generated( # pylint: disable=protected-access
target, results, sentiment
)
for target in sentence.targets
]
if sentence.targets
else []
)
else:
mined_opinions = None
return cls(
text=sentence.text,
sentiment=sentence.sentiment,
confidence_scores=SentimentConfidenceScores._from_generated( # pylint: disable=protected-access
sentence.confidence_scores
),
length=length,
offset=offset,
mined_opinions=mined_opinions,
)
def __repr__(self):
return (
"SentenceSentiment(text={}, sentiment={}, confidence_scores={}, "
"length={}, offset={}, mined_opinions={})".format(
self.text,
self.sentiment,
repr(self.confidence_scores),
self.length,
self.offset,
repr(self.mined_opinions),
)[:1024]
)
class MinedOpinion(DictMixin):
"""A mined opinion object represents an opinion we've extracted from a sentence.
It consists of both a target that these opinions are about, and the assessments
representing the opinion.
:ivar target: The target of an opinion about a product/service.
:vartype target: ~azure.ai.textanalytics.TargetSentiment
:ivar assessments: The assessments representing the opinion of the target.
:vartype assessments: list[~azure.ai.textanalytics.AssessmentSentiment]
"""
def __init__(self, **kwargs):
self.target = kwargs.get("target", None)
self.assessments = kwargs.get("assessments", None)
@staticmethod
def _get_assessments(
relations, results, sentiment
): # pylint: disable=unused-argument
if not relations:
return []
assessment_relations = [
r.ref for r in relations if r.relation_type == "assessment"
]
assessments = []
for assessment_relation in assessment_relations:
nums = _get_indices(assessment_relation)
sentence_index = nums[1]
assessment_index = nums[2]
assessments.append(
sentiment.sentences[sentence_index].assessments[assessment_index]
)
return assessments
@classmethod
def _from_generated(cls, target, results, sentiment):
return cls(
target=TargetSentiment._from_generated( # pylint: disable=protected-access
target
),
assessments=[
AssessmentSentiment._from_generated( # pylint: disable=protected-access
assessment
)
for assessment in cls._get_assessments(
target.relations, results, sentiment
)
],
)
def __repr__(self):
return "MinedOpinion(target={}, assessments={})".format(
repr(self.target), repr(self.assessments)
)[:1024]
class TargetSentiment(DictMixin):
"""TargetSentiment contains the predicted sentiment,
confidence scores and other information about a key component of a product/service.
For example, in "The food at Hotel Foo is good", "food" is a key component of
"Hotel Foo".
:ivar str text: The text value of the target.
:ivar str sentiment: The predicted Sentiment for the target. Possible values
include 'positive', 'mixed', and 'negative'.
:ivar confidence_scores: The sentiment confidence score between 0
and 1 for the target for 'positive' and 'negative' labels. Its score
for 'neutral' will always be 0.
:vartype confidence_scores:
~azure.ai.textanalytics.SentimentConfidenceScores
:ivar int length: The target text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints
by default.
:ivar int offset: The target text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.sentiment = kwargs.get("sentiment", None)
self.confidence_scores = kwargs.get("confidence_scores", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
@classmethod
def _from_generated(cls, target):
return cls(
text=target.text,
sentiment=target.sentiment,
confidence_scores=SentimentConfidenceScores._from_generated( # pylint: disable=protected-access
target.confidence_scores
),
length=target.length,
offset=target.offset,
)
def __repr__(self):
return (
"TargetSentiment(text={}, sentiment={}, confidence_scores={}, "
"length={}, offset={})".format(
self.text,
self.sentiment,
repr(self.confidence_scores),
self.length,
self.offset,
)[:1024]
)
class AssessmentSentiment(DictMixin):
"""AssessmentSentiment contains the predicted sentiment,
confidence scores and other information about an assessment given about
a particular target. For example, in the sentence "The food is good", the assessment
of the target 'food' is 'good'.
:ivar str text: The assessment text.
:ivar str sentiment: The predicted Sentiment for the assessment. Possible values
include 'positive', 'mixed', and 'negative'.
:ivar confidence_scores: The sentiment confidence score between 0
and 1 for the assessment for 'positive' and 'negative' labels. Its score
for 'neutral' will always be 0.
:vartype confidence_scores:
~azure.ai.textanalytics.SentimentConfidenceScores
:ivar int length: The assessment text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints
by default.
:ivar int offset: The assessment text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
:ivar bool is_negated: Whether the value of the assessment is negated. For example, in
"The food is not good", the assessment "good" is negated.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.sentiment = kwargs.get("sentiment", None)
self.confidence_scores = kwargs.get("confidence_scores", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.is_negated = kwargs.get("is_negated", None)
@classmethod
def _from_generated(cls, assessment):
return cls(
text=assessment.text,
sentiment=assessment.sentiment,
confidence_scores=SentimentConfidenceScores._from_generated( # pylint: disable=protected-access
assessment.confidence_scores
),
length=assessment.length,
offset=assessment.offset,
is_negated=assessment.is_negated,
)
def __repr__(self):
return (
"AssessmentSentiment(text={}, sentiment={}, confidence_scores={}, length={}, offset={}, "
"is_negated={})".format(
self.text,
self.sentiment,
repr(self.confidence_scores),
self.length,
self.offset,
self.is_negated,
)[:1024]
)
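# Illustrative sketch (not part of the SDK): the mined-opinion structure for the sentence
# "The food is good" assembled by hand: a "food" target with one "good" assessment.
_example_opinion = MinedOpinion(
    target=TargetSentiment(text="food", sentiment="positive", offset=4, length=4),
    assessments=[
        AssessmentSentiment(text="good", sentiment="positive", offset=12, length=4, is_negated=False)
    ],
)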
class SentimentConfidenceScores(DictMixin):
"""The confidence scores (Softmax scores) between 0 and 1.
Higher values indicate higher confidence.
:ivar positive: Positive score.
:vartype positive: float
:ivar neutral: Neutral score.
:vartype neutral: float
:ivar negative: Negative score.
:vartype negative: float
"""
def __init__(self, **kwargs):
self.positive = kwargs.get("positive", 0.0)
self.neutral = kwargs.get("neutral", 0.0)
self.negative = kwargs.get("negative", 0.0)
@classmethod
def _from_generated(cls, score):
return cls(
positive=score.positive,
neutral=score.neutral if hasattr(score, "neutral") else 0.0,
negative=score.negative,
)
def __repr__(self):
return "SentimentConfidenceScores(positive={}, neutral={}, negative={})".format(
self.positive, self.neutral, self.negative
)[:1024]
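# Illustrative sketch (not part of the SDK): the three softmax-style scores are expected
# to sum to roughly 1; the values below are hypothetical.
_example_scores = SentimentConfidenceScores(positive=0.82, neutral=0.13, negative=0.05)
assert abs(_example_scores.positive + _example_scores.neutral + _example_scores.negative - 1.0) < 1e-6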
class _AnalyzeActionsType(str, Enum):
"""The type of action that was applied to the documents"""
RECOGNIZE_ENTITIES = "recognize_entities" #: Entities Recognition action.
RECOGNIZE_PII_ENTITIES = (
"recognize_pii_entities" #: PII Entities Recognition action.
)
EXTRACT_KEY_PHRASES = "extract_key_phrases" #: Key Phrase Extraction action.
RECOGNIZE_LINKED_ENTITIES = (
"recognize_linked_entities" #: Linked Entities Recognition action.
)
ANALYZE_SENTIMENT = "analyze_sentiment" #: Sentiment Analysis action.
EXTRACT_SUMMARY = "extract_summary"
RECOGNIZE_CUSTOM_ENTITIES = "recognize_custom_entities"
SINGLE_CATEGORY_CLASSIFY = "single_category_classify"
MULTI_CATEGORY_CLASSIFY = "multi_category_classify"
class ActionPointerKind(str, Enum):
RECOGNIZE_ENTITIES = "entityRecognitionTasks"
RECOGNIZE_PII_ENTITIES = "entityRecognitionPiiTasks"
EXTRACT_KEY_PHRASES = "keyPhraseExtractionTasks"
RECOGNIZE_LINKED_ENTITIES = "entityLinkingTasks"
ANALYZE_SENTIMENT = "sentimentAnalysisTasks"
EXTRACT_SUMMARY = "extractiveSummarizationTasks"
RECOGNIZE_CUSTOM_ENTITIES = "customEntityRecognitionTasks"
SINGLE_CATEGORY_CLASSIFY = "customSingleClassificationTasks"
MULTI_CATEGORY_CLASSIFY = "customMultiClassificationTasks"
class RecognizeEntitiesAction(DictMixin):
"""RecognizeEntitiesAction encapsulates the parameters for starting a long-running Entities Recognition operation.
If you just want to recognize entities in a list of documents, and not perform multiple
long running actions on the input of documents, call method `recognize_entities` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
self.disable_service_logs = kwargs.get("disable_service_logs", False)
def __repr__(self, **kwargs):
return "RecognizeEntitiesAction(model_version={}, string_index_type={}, disable_service_logs={})".format(
self.model_version, self.string_index_type, self.disable_service_logs
)[
:1024
]
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.EntitiesTask(
parameters=models.EntitiesTaskParameters(
model_version=self.model_version,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
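# Illustrative sketch (not part of the SDK): an action object only records analysis
# parameters; it is intended to be handed to the client's long-running
# begin_analyze_actions call together with the documents to process.
_example_entities_action = RecognizeEntitiesAction(
    model_version="latest", string_index_type="UnicodeCodePoint"
)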
class AnalyzeSentimentAction(DictMixin):
"""AnalyzeSentimentAction encapsulates the parameters for starting a long-running
Sentiment Analysis operation.
If you just want to analyze sentiment in a list of documents, and not perform multiple
long running actions on the input of documents, call method `analyze_sentiment` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword bool show_opinion_mining: Whether to mine the opinions of a sentence and conduct more
granular analysis around the aspects of a product or service (also known as
aspect-based sentiment analysis). If set to true, the returned
:class:`~azure.ai.textanalytics.SentenceSentiment` objects
will have property `mined_opinions` containing the result of this analysis.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar bool show_opinion_mining: Whether to mine the opinions of a sentence and conduct more
granular analysis around the aspects of a product or service (also known as
aspect-based sentiment analysis). If set to true, the returned
:class:`~azure.ai.textanalytics.SentenceSentiment` objects
will have property `mined_opinions` containing the result of this analysis.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.show_opinion_mining = kwargs.get("show_opinion_mining", False)
self.string_index_type = kwargs.get("string_index_type", None)
self.disable_service_logs = kwargs.get("disable_service_logs", False)
def __repr__(self, **kwargs):
return (
"AnalyzeSentimentAction(model_version={}, show_opinion_mining={}, string_index_type={}, "
"disable_service_logs={})".format(
self.model_version,
self.show_opinion_mining,
self.string_index_type,
self.disable_service_logs,
)[:1024]
)
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.SentimentAnalysisTask(
parameters=models.SentimentAnalysisTaskParameters(
model_version=self.model_version,
opinion_mining=self.show_opinion_mining,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
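# Illustrative sketch (not part of the SDK): opting into opinion mining so that the
# SentenceSentiment results produced by this action carry mined_opinions.
_example_sentiment_action = AnalyzeSentimentAction(show_opinion_mining=True)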
class RecognizePiiEntitiesAction(DictMixin):
"""RecognizePiiEntitiesAction encapsulates the parameters for starting a long-running PII
Entities Recognition operation.
If you just want to recognize pii entities in a list of documents, and not perform multiple
long running actions on the input of documents, call method `recognize_pii_entities` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword str domain_filter: An optional string to set the PII domain to include only a
subset of the PII entity categories. Possible values include 'phi' or None.
:keyword categories_filter: Instead of filtering over all PII entity categories, you can pass in a list of
the specific PII entity categories you want to filter out. For example, if you only want to filter out
U.S. social security numbers in a document, you can pass in
`[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg.
:paramtype categories_filter: list[~azure.ai.textanalytics.PiiEntityCategory]
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: Defaults to true, meaning that Text Analytics will not log your
input text on the service side for troubleshooting. If set to False, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar str domain_filter: An optional string to set the PII domain to include only a
subset of the PII entity categories. Possible values include 'phi' or None.
:ivar categories_filter: Instead of filtering over all PII entity categories, you can pass in a list of
the specific PII entity categories you want to filter out. For example, if you only want to filter out
U.S. social security numbers in a document, you can pass in
`[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg.
:vartype categories_filter: list[~azure.ai.textanalytics.PiiEntityCategory]
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: Defaults to true, meaning that Text Analytics will not log your
input text on the service side for troubleshooting. If set to False, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.domain_filter = kwargs.get("domain_filter", None)
self.categories_filter = kwargs.get("categories_filter", None)
self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
self.disable_service_logs = kwargs.get("disable_service_logs", True)
def __repr__(self):
return (
"RecognizePiiEntitiesAction(model_version={}, domain_filter={}, categories_filter={}, "
"string_index_type={}, disable_service_logs={}".format(
self.model_version,
self.domain_filter,
self.categories_filter,
self.string_index_type,
self.disable_service_logs,
)[:1024]
)
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.PiiTask(
parameters=models.PiiTaskParameters(
model_version=self.model_version,
domain=self.domain_filter,
pii_categories=self.categories_filter,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
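# Illustrative sketch only: a PII action narrowed to the 'phi' domain and to a single
# entity category. PiiEntityCategory is the enum referenced in the docstring above;
# the rest is plain keyword wiring and carries no service call.
def _example_recognize_pii_entities_action():
    return RecognizePiiEntitiesAction(
        domain_filter="phi",  # restrict to protected health information categories
        categories_filter=[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER],
    )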
class ExtractKeyPhrasesAction(DictMixin):
"""ExtractKeyPhrasesAction encapsulates the parameters for starting a long-running key phrase
extraction operation.
If you just want to extract key phrases from a list of documents, and not perform multiple
long running actions on the input of documents, call method `extract_key_phrases` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.disable_service_logs = kwargs.get("disable_service_logs", False)
def __repr__(self):
return (
"ExtractKeyPhrasesAction(model_version={}, disable_service_logs={})".format(
self.model_version, self.disable_service_logs
)[:1024]
)
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.KeyPhrasesTask(
parameters=models.KeyPhrasesTaskParameters(
model_version=self.model_version,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class RecognizeLinkedEntitiesAction(DictMixin):
"""RecognizeLinkedEntitiesAction encapsulates the parameters for starting a long-running Linked Entities
Recognition operation.
If you just want to recognize linked entities in a list of documents, and not perform multiple
long running actions on the input of documents, call method `recognize_linked_entities` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
self.disable_service_logs = kwargs.get("disable_service_logs", False)
def __repr__(self):
return (
"RecognizeLinkedEntitiesAction(model_version={}, string_index_type={}), "
"disable_service_logs={}".format(
self.model_version, self.string_index_type, self.disable_service_logs
)[:1024]
)
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.EntityLinkingTask(
parameters=models.EntityLinkingTaskParameters(
model_version=self.model_version,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class ExtractSummaryAction(DictMixin):
"""ExtractSummaryAction encapsulates the parameters for starting a long-running Extractive Text
Summarization operation. For a conceptual discussion of extractive summarization, see the service documentation:
https://docs.microsoft.com/azure/cognitive-services/text-analytics/how-tos/extractive-summarization
:keyword str model_version: The model version to use for the analysis.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:keyword int max_sentence_count: Maximum number of sentences to return. Defaults to 3.
:keyword str order_by: Possible values include: "Offset", "Rank". Default value: "Offset".
:ivar str model_version: The model version to use for the analysis.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar int max_sentence_count: Maximum number of sentences to return. Defaults to 3.
:ivar str order_by: Possible values include: "Offset", "Rank". Default value: "Offset".
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
self.disable_service_logs = kwargs.get("disable_service_logs", False)
self.max_sentence_count = kwargs.get("max_sentence_count", 3)
self.order_by = kwargs.get("order_by", "Offset")
def __repr__(self):
return (
"ExtractSummaryAction(model_version={}, string_index_type={}, disable_service_logs={}, "
"max_sentence_count={}, order_by={})".format(
self.model_version,
self.string_index_type,
self.disable_service_logs,
self.max_sentence_count,
self.order_by,
)[:1024]
)
def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
return _v3_2_preview_models.ExtractiveSummarizationTask(
parameters=_v3_2_preview_models.ExtractiveSummarizationTaskParameters(
model_version=self.model_version,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
sentence_count=self.max_sentence_count,
sort_by=self.order_by,
),
task_name=task_id
)
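# Illustrative sketch only: an extractive-summarization action that asks for up to
# five sentences ordered by rank score rather than by document offset.
def _example_extract_summary_action():
    return ExtractSummaryAction(
        max_sentence_count=5,  # return at most five summary sentences
        order_by="Rank",       # order sentences by rank score instead of offset
    )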
class ExtractSummaryResult(DictMixin):
"""ExtractSummaryResult is a result object which contains
the extractive text summarization from a particular document.
:ivar str id: Unique, non-empty document identifier.
:ivar sentences: A ranked list of sentences representing the extracted summary.
:vartype sentences: list[~azure.ai.textanalytics.SummarySentence]
:ivar warnings: Warnings encountered while processing document.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of an ExtractSummaryResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.sentences = kwargs.get("sentences", None)
self.warnings = kwargs.get("warnings", None)
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "ExtractSummaryResult(id={}, sentences={}, warnings={}, statistics={}, is_error={})".format(
self.id,
repr(self.sentences),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
@classmethod
def _from_generated(cls, summary):
return cls(
id=summary.id,
sentences=[
SummarySentence._from_generated( # pylint: disable=protected-access
sentence
)
for sentence in summary.sentences
],
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in summary.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
summary.statistics
),
)
class SummarySentence(DictMixin):
"""Represents a single sentence from the extractive text summarization.
:ivar str text: The extracted sentence text.
:ivar float rank_score: A float value representing the relevance of the sentence within
the summary. Higher values indicate higher importance.
:ivar int offset: The sentence offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoint by default.
:ivar int length: The length of the sentence. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoint
by default.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.rank_score = kwargs.get("rank_score", None)
self.offset = kwargs.get("offset", None)
self.length = kwargs.get("length", None)
def __repr__(self):
return "SummarySentence(text={}, rank_score={}, offset={}, length={})".format(
self.text,
self.rank_score,
self.offset,
self.length,
)[:1024]
@classmethod
def _from_generated(cls, sentence):
return cls(
text=sentence.text,
rank_score=sentence.rank_score,
offset=sentence.offset,
length=sentence.length,
)
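# Illustrative sketch only: given an ExtractSummaryResult (see above), rebuild a
# plain-text summary. Sentences are re-sorted by offset because order_by="Rank"
# returns them in rank order, not document order.
def _example_summary_text(result):
    ordered = sorted(result.sentences, key=lambda sentence: sentence.offset)
    return " ".join(sentence.text for sentence in ordered)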
class RecognizeCustomEntitiesAction(DictMixin):
"""RecognizeCustomEntitiesAction encapsulates the parameters for starting a long-running custom entity
recognition operation. To train a model to recognize your custom entities, see
https://aka.ms/azsdk/textanalytics/customentityrecognition
:param str project_name: Required. This field indicates the project name for the model.
:param str deployment_name: Required. This field indicates the deployment name for the model.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str project_name: This field indicates the project name for the model.
:ivar str deployment_name: This field indicates the deployment name for the model.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(
self,
project_name,
deployment_name,
**kwargs
):
self.project_name = project_name
self.deployment_name = deployment_name
self.disable_service_logs = kwargs.get('disable_service_logs', None)
self.string_index_type = kwargs.get('string_index_type', None)
def __repr__(self):
return "RecognizeCustomEntitiesAction(project_name={}, deployment_name={}, disable_service_logs={}, " \
"string_index_type={})".format(
self.project_name,
self.deployment_name,
self.disable_service_logs,
self.string_index_type,
)[:1024]
def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
return _v3_2_preview_models.CustomEntitiesTask(
parameters=_v3_2_preview_models.CustomEntitiesTaskParameters(
project_name=self.project_name,
deployment_name=self.deployment_name,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
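# Illustrative sketch only: custom entity recognition requires the project and
# deployment names of a trained custom model; the names below are placeholders,
# not real resources.
def _example_recognize_custom_entities_action():
    return RecognizeCustomEntitiesAction(
        project_name="<my-project>",        # placeholder project name
        deployment_name="<my-deployment>",  # placeholder deployment name
    )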
class RecognizeCustomEntitiesResult(DictMixin):
"""RecognizeCustomEntitiesResult is a result object which contains
the custom recognized entities from a particular document.
:ivar str id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:ivar entities: Recognized custom entities in the document.
:vartype entities:
list[~azure.ai.textanalytics.CategorizedEntity]
:ivar warnings: Warnings encountered while processing document.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a RecognizeCustomEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "RecognizeCustomEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})".format(
self.id,
repr(self.entities),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
@classmethod
def _from_generated(cls, result):
return cls(
id=result.id,
entities=[
CategorizedEntity._from_generated(e) # pylint: disable=protected-access
for e in result.entities
],
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in result.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
result.statistics
),
)
class MultiCategoryClassifyAction(DictMixin):
"""MultiCategoryClassifyAction encapsulates the parameters for starting a long-running custom multi category
classification operation. To train a model to classify your documents, see
https://aka.ms/azsdk/textanalytics/customfunctionalities
:param str project_name: Required. This field indicates the project name for the model.
:param str deployment_name: Required. This field indicates the deployment name for the model.
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str project_name: This field indicates the project name for the model.
:ivar str deployment_name: This field indicates the deployment name for the model.
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(
self,
project_name,
deployment_name,
**kwargs
):
self.project_name = project_name
self.deployment_name = deployment_name
self.disable_service_logs = kwargs.get('disable_service_logs', None)
def __repr__(self):
return "MultiCategoryClassifyAction(project_name={}, deployment_name={}, " \
"disable_service_logs={})".format(
self.project_name,
self.deployment_name,
self.disable_service_logs,
)[:1024]
def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
return _v3_2_preview_models.CustomMultiClassificationTask(
parameters=_v3_2_preview_models.CustomMultiClassificationTaskParameters(
project_name=self.project_name,
deployment_name=self.deployment_name,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class MultiCategoryClassifyResult(DictMixin):
"""MultiCategoryClassifyResult is a result object which contains
the classifications for a particular document.
:ivar str id: Unique, non-empty document identifier.
:ivar classifications: Recognized classification results in the document.
:vartype classifications: list[~azure.ai.textanalytics.ClassificationCategory]
:ivar warnings: Warnings encountered while processing document.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a MultiCategoryClassifyResult.
"""
def __init__(
self,
**kwargs
):
self.id = kwargs.get('id', None)
self.classifications = kwargs.get('classifications', None)
self.warnings = kwargs.get('warnings', [])
self.statistics = kwargs.get('statistics', None)
self.is_error = False
def __repr__(self):
return "MultiCategoryClassifyResult(id={}, classifications={}, warnings={}, statistics={}, " \
"is_error={})".format(
self.id,
repr(self.classifications),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
@classmethod
def _from_generated(cls, result):
return cls(
id=result.id,
classifications=[
ClassificationCategory._from_generated(e) # pylint: disable=protected-access
for e in result.classifications
],
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in result.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
result.statistics
),
)
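# Illustrative sketch only: pull (category, confidence_score) pairs out of a
# MultiCategoryClassifyResult, preserving the order returned by the service.
def _example_classification_pairs(result):
    return [(c.category, c.confidence_score) for c in result.classifications]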
class SingleCategoryClassifyAction(DictMixin):
"""SingleCategoryClassifyAction encapsulates the parameters for starting a long-running custom single category
classification operation. To train a model to classify your documents, see
https://aka.ms/azsdk/textanalytics/customfunctionalities
:param str project_name: Required. This field indicates the project name for the model.
:param str deployment_name: Required. This field indicates the deployment name for the model.
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str project_name: This field indicates the project name for the model.
:ivar str deployment_name: This field indicates the deployment name for the model.
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(
self,
project_name,
deployment_name,
**kwargs
):
self.project_name = project_name
self.deployment_name = deployment_name
self.disable_service_logs = kwargs.get('disable_service_logs', None)
def __repr__(self):
return "SingleCategoryClassifyAction(project_name={}, deployment_name={}, " \
"disable_service_logs={})".format(
self.project_name,
self.deployment_name,
self.disable_service_logs,
)[:1024]
def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
return _v3_2_preview_models.CustomSingleClassificationTask(
parameters=_v3_2_preview_models.CustomSingleClassificationTaskParameters(
project_name=self.project_name,
deployment_name=self.deployment_name,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class SingleCategoryClassifyResult(DictMixin):
"""SingleCategoryClassifyResult is a result object which contains
the classification for a particular document.
:ivar str id: Unique, non-empty document identifier.
:ivar classification: Recognized classification results in the document.
:vartype classification: ~azure.ai.textanalytics.ClassificationCategory
:ivar warnings: Warnings encountered while processing document.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a SingleCategoryClassifyResult.
"""
def __init__(
self,
**kwargs
):
self.id = kwargs.get('id', None)
self.classification = kwargs.get('classification', None)
self.warnings = kwargs.get('warnings', [])
self.statistics = kwargs.get('statistics', None)
self.is_error = False
def __repr__(self):
return "SingleCategoryClassifyResult(id={}, classification={}, warnings={}, statistics={}, " \
"is_error={})".format(
self.id,
repr(self.classification),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
@classmethod
def _from_generated(cls, result):
return cls(
id=result.id,
classification=
ClassificationCategory._from_generated(result.classification), # pylint: disable=protected-access
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in result.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
result.statistics
),
)
class ClassificationCategory(DictMixin):
"""ClassificationCategory represents a classification of the input document.
:ivar str category: Classification type.
:ivar float confidence_score: Confidence score between 0 and 1 of the recognized classification.
"""
def __init__(
self,
**kwargs
):
self.category = kwargs.get('category', None)
self.confidence_score = kwargs.get('confidence_score', None)
def __repr__(self):
return "ClassificationCategory(category={}, confidence_score={})".format(
self.category,
self.confidence_score,
)[:1024]
@classmethod
def _from_generated(cls, result):
return cls(
category=result.category,
confidence_score=result.confidence_score
)
[textanalytics] custom model docstrings edits (#21498)
* update model docstrings
* fix
# coding=utf-8 pylint: disable=too-many-lines
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import re
from enum import Enum
from ._generated.models import (
LanguageInput,
MultiLanguageInput,
)
from ._generated.v3_0 import models as _v3_0_models
from ._generated.v3_2_preview_2 import models as _v3_2_preview_models
from ._version import DEFAULT_API_VERSION
def _get_indices(relation):
return [int(s) for s in re.findall(r"\d+", relation)]
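# Note on behavior (illustrative): _get_indices pulls every integer, in order, out of a
# JSON-pointer-style reference string. Given the relation reference layout described in
# HealthcareRelationRole._get_entity below, a reference such as
# "#/results/documents/0/entities/1" (format assumed here for illustration) yields
# [0, 1], i.e. document index 0 and entity index 1.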
class DictMixin(object):
def __setitem__(self, key, item):
self.__dict__[key] = item
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return str(self)
def __len__(self):
return len(self.keys())
def __delitem__(self, key):
self.__dict__[key] = None
def __eq__(self, other):
"""Compare objects by comparing all attributes."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
"""Compare objects by comparing all attributes."""
return not self.__eq__(other)
def __contains__(self, key):
return key in self.__dict__
def __str__(self):
return str({k: v for k, v in self.__dict__.items() if not k.startswith("_")})
def has_key(self, k):
return k in self.__dict__
def update(self, *args, **kwargs):
return self.__dict__.update(*args, **kwargs)
def keys(self):
return [k for k in self.__dict__ if not k.startswith("_")]
def values(self):
return [v for k, v in self.__dict__.items() if not k.startswith("_")]
def items(self):
return [(k, v) for k, v in self.__dict__.items() if not k.startswith("_")]
def get(self, key, default=None):
if key in self.__dict__:
return self.__dict__[key]
return default
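# Illustrative note: every model below derives from DictMixin, so instances support
# dictionary-style reads in addition to attribute access. Sketch only, using the
# DetectedLanguage model defined later in this module:
#
#     lang = DetectedLanguage(name="English", iso6391_name="en", confidence_score=0.99)
#     lang["iso6391_name"]          # "en"
#     sorted(lang.keys())           # ["confidence_score", "iso6391_name", "name"]
#     lang.get("missing", "n/a")    # "n/a"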
class EntityAssociation(str, Enum):
"""Describes if the entity is the subject of the text or if it describes someone else."""
SUBJECT = "subject"
OTHER = "other"
class EntityCertainty(str, Enum):
"""Describes the entities certainty and polarity."""
POSITIVE = "positive"
POSITIVE_POSSIBLE = "positivePossible"
NEUTRAL_POSSIBLE = "neutralPossible"
NEGATIVE_POSSIBLE = "negativePossible"
NEGATIVE = "negative"
class EntityConditionality(str, Enum):
"""Describes any conditionality on the entity."""
HYPOTHETICAL = "hypothetical"
CONDITIONAL = "conditional"
class HealthcareEntityRelation(str, Enum):
"""Type of relation. Examples include: 'DosageOfMedication' or 'FrequencyOfMedication', etc."""
ABBREVIATION = "Abbreviation"
DIRECTION_OF_BODY_STRUCTURE = "DirectionOfBodyStructure"
DIRECTION_OF_CONDITION = "DirectionOfCondition"
DIRECTION_OF_EXAMINATION = "DirectionOfExamination"
DIRECTION_OF_TREATMENT = "DirectionOfTreatment"
DOSAGE_OF_MEDICATION = "DosageOfMedication"
FORM_OF_MEDICATION = "FormOfMedication"
FREQUENCY_OF_MEDICATION = "FrequencyOfMedication"
FREQUENCY_OF_TREATMENT = "FrequencyOfTreatment"
QUALIFIER_OF_CONDITION = "QualifierOfCondition"
RELATION_OF_EXAMINATION = "RelationOfExamination"
ROUTE_OF_MEDICATION = "RouteOfMedication"
TIME_OF_CONDITION = "TimeOfCondition"
TIME_OF_EVENT = "TimeOfEvent"
TIME_OF_EXAMINATION = "TimeOfExamination"
TIME_OF_MEDICATION = "TimeOfMedication"
TIME_OF_TREATMENT = "TimeOfTreatment"
UNIT_OF_CONDITION = "UnitOfCondition"
UNIT_OF_EXAMINATION = "UnitOfExamination"
VALUE_OF_CONDITION = "ValueOfCondition"
VALUE_OF_EXAMINATION = "ValueOfExamination"
class PiiEntityCategory(str, Enum):
"""Categories of Personally Identifiable Information (PII)."""
ABA_ROUTING_NUMBER = "ABARoutingNumber"
AR_NATIONAL_IDENTITY_NUMBER = "ARNationalIdentityNumber"
AU_BANK_ACCOUNT_NUMBER = "AUBankAccountNumber"
AU_DRIVERS_LICENSE_NUMBER = "AUDriversLicenseNumber"
AU_MEDICAL_ACCOUNT_NUMBER = "AUMedicalAccountNumber"
AU_PASSPORT_NUMBER = "AUPassportNumber"
AU_TAX_FILE_NUMBER = "AUTaxFileNumber"
AU_BUSINESS_NUMBER = "AUBusinessNumber"
AU_COMPANY_NUMBER = "AUCompanyNumber"
AT_IDENTITY_CARD = "ATIdentityCard"
AT_TAX_IDENTIFICATION_NUMBER = "ATTaxIdentificationNumber"
AT_VALUE_ADDED_TAX_NUMBER = "ATValueAddedTaxNumber"
AZURE_DOCUMENT_DB_AUTH_KEY = "AzureDocumentDBAuthKey"
AZURE_IAAS_DATABASE_CONNECTION_AND_SQL_STRING = (
"AzureIAASDatabaseConnectionAndSQLString"
)
AZURE_IO_T_CONNECTION_STRING = "AzureIoTConnectionString"
AZURE_PUBLISH_SETTING_PASSWORD = "AzurePublishSettingPassword"
AZURE_REDIS_CACHE_STRING = "AzureRedisCacheString"
AZURE_SAS = "AzureSAS"
AZURE_SERVICE_BUS_STRING = "AzureServiceBusString"
AZURE_STORAGE_ACCOUNT_KEY = "AzureStorageAccountKey"
AZURE_STORAGE_ACCOUNT_GENERIC = "AzureStorageAccountGeneric"
BE_NATIONAL_NUMBER = "BENationalNumber"
BE_NATIONAL_NUMBER_V2 = "BENationalNumberV2"
BE_VALUE_ADDED_TAX_NUMBER = "BEValueAddedTaxNumber"
BRCPF_NUMBER = "BRCPFNumber"
BR_LEGAL_ENTITY_NUMBER = "BRLegalEntityNumber"
BR_NATIONAL_IDRG = "BRNationalIDRG"
BG_UNIFORM_CIVIL_NUMBER = "BGUniformCivilNumber"
CA_BANK_ACCOUNT_NUMBER = "CABankAccountNumber"
CA_DRIVERS_LICENSE_NUMBER = "CADriversLicenseNumber"
CA_HEALTH_SERVICE_NUMBER = "CAHealthServiceNumber"
CA_PASSPORT_NUMBER = "CAPassportNumber"
CA_PERSONAL_HEALTH_IDENTIFICATION = "CAPersonalHealthIdentification"
CA_SOCIAL_INSURANCE_NUMBER = "CASocialInsuranceNumber"
CL_IDENTITY_CARD_NUMBER = "CLIdentityCardNumber"
CN_RESIDENT_IDENTITY_CARD_NUMBER = "CNResidentIdentityCardNumber"
CREDIT_CARD_NUMBER = "CreditCardNumber"
HR_IDENTITY_CARD_NUMBER = "HRIdentityCardNumber"
HR_NATIONAL_ID_NUMBER = "HRNationalIDNumber"
HR_PERSONAL_IDENTIFICATION_NUMBER = "HRPersonalIdentificationNumber"
HR_PERSONAL_IDENTIFICATION_OIB_NUMBER_V2 = "HRPersonalIdentificationOIBNumberV2"
CY_IDENTITY_CARD = "CYIdentityCard"
CY_TAX_IDENTIFICATION_NUMBER = "CYTaxIdentificationNumber"
CZ_PERSONAL_IDENTITY_NUMBER = "CZPersonalIdentityNumber"
CZ_PERSONAL_IDENTITY_V2 = "CZPersonalIdentityV2"
DK_PERSONAL_IDENTIFICATION_NUMBER = "DKPersonalIdentificationNumber"
DK_PERSONAL_IDENTIFICATION_V2 = "DKPersonalIdentificationV2"
DRUG_ENFORCEMENT_AGENCY_NUMBER = "DrugEnforcementAgencyNumber"
EE_PERSONAL_IDENTIFICATION_CODE = "EEPersonalIdentificationCode"
EU_DEBIT_CARD_NUMBER = "EUDebitCardNumber"
EU_DRIVERS_LICENSE_NUMBER = "EUDriversLicenseNumber"
EUGPS_COORDINATES = "EUGPSCoordinates"
EU_NATIONAL_IDENTIFICATION_NUMBER = "EUNationalIdentificationNumber"
EU_PASSPORT_NUMBER = "EUPassportNumber"
EU_SOCIAL_SECURITY_NUMBER = "EUSocialSecurityNumber"
EU_TAX_IDENTIFICATION_NUMBER = "EUTaxIdentificationNumber"
FI_EUROPEAN_HEALTH_NUMBER = "FIEuropeanHealthNumber"
FI_NATIONAL_ID = "FINationalID"
FI_NATIONAL_IDV2 = "FINationalIDV2"
FI_PASSPORT_NUMBER = "FIPassportNumber"
FR_DRIVERS_LICENSE_NUMBER = "FRDriversLicenseNumber"
FR_HEALTH_INSURANCE_NUMBER = "FRHealthInsuranceNumber"
FR_NATIONAL_ID = "FRNationalID"
FR_PASSPORT_NUMBER = "FRPassportNumber"
FR_SOCIAL_SECURITY_NUMBER = "FRSocialSecurityNumber"
FR_TAX_IDENTIFICATION_NUMBER = "FRTaxIdentificationNumber"
FR_VALUE_ADDED_TAX_NUMBER = "FRValueAddedTaxNumber"
DE_DRIVERS_LICENSE_NUMBER = "DEDriversLicenseNumber"
DE_PASSPORT_NUMBER = "DEPassportNumber"
DE_IDENTITY_CARD_NUMBER = "DEIdentityCardNumber"
DE_TAX_IDENTIFICATION_NUMBER = "DETaxIdentificationNumber"
DE_VALUE_ADDED_NUMBER = "DEValueAddedNumber"
GR_NATIONAL_ID_CARD = "GRNationalIDCard"
GR_NATIONAL_IDV2 = "GRNationalIDV2"
GR_TAX_IDENTIFICATION_NUMBER = "GRTaxIdentificationNumber"
HK_IDENTITY_CARD_NUMBER = "HKIdentityCardNumber"
HU_VALUE_ADDED_NUMBER = "HUValueAddedNumber"
HU_PERSONAL_IDENTIFICATION_NUMBER = "HUPersonalIdentificationNumber"
HU_TAX_IDENTIFICATION_NUMBER = "HUTaxIdentificationNumber"
IN_PERMANENT_ACCOUNT = "INPermanentAccount"
IN_UNIQUE_IDENTIFICATION_NUMBER = "INUniqueIdentificationNumber"
ID_IDENTITY_CARD_NUMBER = "IDIdentityCardNumber"
INTERNATIONAL_BANKING_ACCOUNT_NUMBER = "InternationalBankingAccountNumber"
IE_PERSONAL_PUBLIC_SERVICE_NUMBER = "IEPersonalPublicServiceNumber"
IE_PERSONAL_PUBLIC_SERVICE_NUMBER_V2 = "IEPersonalPublicServiceNumberV2"
IL_BANK_ACCOUNT_NUMBER = "ILBankAccountNumber"
IL_NATIONAL_ID = "ILNationalID"
IT_DRIVERS_LICENSE_NUMBER = "ITDriversLicenseNumber"
IT_FISCAL_CODE = "ITFiscalCode"
IT_VALUE_ADDED_TAX_NUMBER = "ITValueAddedTaxNumber"
JP_BANK_ACCOUNT_NUMBER = "JPBankAccountNumber"
JP_DRIVERS_LICENSE_NUMBER = "JPDriversLicenseNumber"
JP_PASSPORT_NUMBER = "JPPassportNumber"
JP_RESIDENT_REGISTRATION_NUMBER = "JPResidentRegistrationNumber"
JP_SOCIAL_INSURANCE_NUMBER = "JPSocialInsuranceNumber"
JP_MY_NUMBER_CORPORATE = "JPMyNumberCorporate"
JP_MY_NUMBER_PERSONAL = "JPMyNumberPersonal"
JP_RESIDENCE_CARD_NUMBER = "JPResidenceCardNumber"
LV_PERSONAL_CODE = "LVPersonalCode"
LT_PERSONAL_CODE = "LTPersonalCode"
LU_NATIONAL_IDENTIFICATION_NUMBER_NATURAL = "LUNationalIdentificationNumberNatural"
LU_NATIONAL_IDENTIFICATION_NUMBER_NON_NATURAL = (
"LUNationalIdentificationNumberNonNatural"
)
MY_IDENTITY_CARD_NUMBER = "MYIdentityCardNumber"
MT_IDENTITY_CARD_NUMBER = "MTIdentityCardNumber"
MT_TAX_ID_NUMBER = "MTTaxIDNumber"
NL_CITIZENS_SERVICE_NUMBER = "NLCitizensServiceNumber"
NL_CITIZENS_SERVICE_NUMBER_V2 = "NLCitizensServiceNumberV2"
NL_TAX_IDENTIFICATION_NUMBER = "NLTaxIdentificationNumber"
NL_VALUE_ADDED_TAX_NUMBER = "NLValueAddedTaxNumber"
NZ_BANK_ACCOUNT_NUMBER = "NZBankAccountNumber"
NZ_DRIVERS_LICENSE_NUMBER = "NZDriversLicenseNumber"
NZ_INLAND_REVENUE_NUMBER = "NZInlandRevenueNumber"
NZ_MINISTRY_OF_HEALTH_NUMBER = "NZMinistryOfHealthNumber"
NZ_SOCIAL_WELFARE_NUMBER = "NZSocialWelfareNumber"
NO_IDENTITY_NUMBER = "NOIdentityNumber"
PH_UNIFIED_MULTI_PURPOSE_ID_NUMBER = "PHUnifiedMultiPurposeIDNumber"
PL_IDENTITY_CARD = "PLIdentityCard"
PL_NATIONAL_ID = "PLNationalID"
PL_NATIONAL_IDV2 = "PLNationalIDV2"
PL_PASSPORT_NUMBER = "PLPassportNumber"
PL_TAX_IDENTIFICATION_NUMBER = "PLTaxIdentificationNumber"
PLREGON_NUMBER = "PLREGONNumber"
PT_CITIZEN_CARD_NUMBER = "PTCitizenCardNumber"
PT_CITIZEN_CARD_NUMBER_V2 = "PTCitizenCardNumberV2"
PT_TAX_IDENTIFICATION_NUMBER = "PTTaxIdentificationNumber"
RO_PERSONAL_NUMERICAL_CODE = "ROPersonalNumericalCode"
RU_PASSPORT_NUMBER_DOMESTIC = "RUPassportNumberDomestic"
RU_PASSPORT_NUMBER_INTERNATIONAL = "RUPassportNumberInternational"
SA_NATIONAL_ID = "SANationalID"
SG_NATIONAL_REGISTRATION_IDENTITY_CARD_NUMBER = (
"SGNationalRegistrationIdentityCardNumber"
)
SK_PERSONAL_NUMBER = "SKPersonalNumber"
SI_TAX_IDENTIFICATION_NUMBER = "SITaxIdentificationNumber"
SI_UNIQUE_MASTER_CITIZEN_NUMBER = "SIUniqueMasterCitizenNumber"
ZA_IDENTIFICATION_NUMBER = "ZAIdentificationNumber"
KR_RESIDENT_REGISTRATION_NUMBER = "KRResidentRegistrationNumber"
ESDNI = "ESDNI"
ES_SOCIAL_SECURITY_NUMBER = "ESSocialSecurityNumber"
ES_TAX_IDENTIFICATION_NUMBER = "ESTaxIdentificationNumber"
SQL_SERVER_CONNECTION_STRING = "SQLServerConnectionString"
SE_NATIONAL_ID = "SENationalID"
SE_NATIONAL_IDV2 = "SENationalIDV2"
SE_PASSPORT_NUMBER = "SEPassportNumber"
SE_TAX_IDENTIFICATION_NUMBER = "SETaxIdentificationNumber"
SWIFT_CODE = "SWIFTCode"
CH_SOCIAL_SECURITY_NUMBER = "CHSocialSecurityNumber"
TW_NATIONAL_ID = "TWNationalID"
TW_PASSPORT_NUMBER = "TWPassportNumber"
TW_RESIDENT_CERTIFICATE = "TWResidentCertificate"
TH_POPULATION_IDENTIFICATION_CODE = "THPopulationIdentificationCode"
TR_NATIONAL_IDENTIFICATION_NUMBER = "TRNationalIdentificationNumber"
UK_DRIVERS_LICENSE_NUMBER = "UKDriversLicenseNumber"
UK_ELECTORAL_ROLL_NUMBER = "UKElectoralRollNumber"
UK_NATIONAL_HEALTH_NUMBER = "UKNationalHealthNumber"
UK_NATIONAL_INSURANCE_NUMBER = "UKNationalInsuranceNumber"
UK_UNIQUE_TAXPAYER_NUMBER = "UKUniqueTaxpayerNumber"
USUK_PASSPORT_NUMBER = "USUKPassportNumber"
US_BANK_ACCOUNT_NUMBER = "USBankAccountNumber"
US_DRIVERS_LICENSE_NUMBER = "USDriversLicenseNumber"
US_INDIVIDUAL_TAXPAYER_IDENTIFICATION = "USIndividualTaxpayerIdentification"
US_SOCIAL_SECURITY_NUMBER = "USSocialSecurityNumber"
UA_PASSPORT_NUMBER_DOMESTIC = "UAPassportNumberDomestic"
UA_PASSPORT_NUMBER_INTERNATIONAL = "UAPassportNumberInternational"
ORGANIZATION = "Organization"
EMAIL = "Email"
URL = "URL"
AGE = "Age"
PHONE_NUMBER = "PhoneNumber"
IP_ADDRESS = "IPAddress"
DATE = "Date"
PERSON = "Person"
ADDRESS = "Address"
ALL = "All"
DEFAULT = "Default"
class HealthcareEntityCategory(str, Enum):
"""Healthcare Entity Category."""
BODY_STRUCTURE = "BodyStructure"
AGE = "Age"
GENDER = "Gender"
EXAMINATION_NAME = "ExaminationName"
DATE = "Date"
DIRECTION = "Direction"
FREQUENCY = "Frequency"
MEASUREMENT_VALUE = "MeasurementValue"
MEASUREMENT_UNIT = "MeasurementUnit"
RELATIONAL_OPERATOR = "RelationalOperator"
TIME = "Time"
GENE_OR_PROTEIN = "GeneOrProtein"
VARIANT = "Variant"
ADMINISTRATIVE_EVENT = "AdministrativeEvent"
CARE_ENVIRONMENT = "CareEnvironment"
HEALTHCARE_PROFESSION = "HealthcareProfession"
DIAGNOSIS = "Diagnosis"
SYMPTOM_OR_SIGN = "SymptomOrSign"
CONDITION_QUALIFIER = "ConditionQualifier"
MEDICATION_CLASS = "MedicationClass"
MEDICATION_NAME = "MedicationName"
DOSAGE = "Dosage"
MEDICATION_FORM = "MedicationForm"
MEDICATION_ROUTE = "MedicationRoute"
FAMILY_RELATION = "FamilyRelation"
TREATMENT_NAME = "TreatmentName"
class PiiEntityDomain(str, Enum):
"""The different domains of PII entities that users can filter by"""
PROTECTED_HEALTH_INFORMATION = (
"phi" # See https://aka.ms/tanerpii for more information.
)
class DetectedLanguage(DictMixin):
"""DetectedLanguage contains the predicted language found in text,
its confidence score, and its ISO 639-1 representation.
:ivar name: Long name of a detected language (e.g. English,
French).
:vartype name: str
:ivar iso6391_name: A two letter representation of the detected
language according to the ISO 639-1 standard (e.g. en, fr).
:vartype iso6391_name: str
:ivar confidence_score: A confidence score between 0 and 1. Scores close
to 1 indicate 100% certainty that the identified language is true.
:vartype confidence_score: float
"""
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
self.iso6391_name = kwargs.get("iso6391_name", None)
self.confidence_score = kwargs.get("confidence_score", None)
@classmethod
def _from_generated(cls, language):
return cls(
name=language.name,
iso6391_name=language.iso6391_name,
confidence_score=language.confidence_score,
)
def __repr__(self):
return "DetectedLanguage(name={}, iso6391_name={}, confidence_score={})".format(
self.name, self.iso6391_name, self.confidence_score
)[:1024]
class RecognizeEntitiesResult(DictMixin):
"""RecognizeEntitiesResult is a result object which contains
the recognized entities from a particular document.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar entities: Recognized entities in the document.
:vartype entities:
list[~azure.ai.textanalytics.CategorizedEntity]
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a RecognizeEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "RecognizeEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})".format(
self.id,
repr(self.entities),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
class RecognizePiiEntitiesResult(DictMixin):
"""RecognizePiiEntitiesResult is a result object which contains
the recognized Personally Identifiable Information (PII) entities
from a particular document.
:ivar str id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:ivar entities: Recognized PII entities in the document.
:vartype entities:
list[~azure.ai.textanalytics.PiiEntity]
:ivar str redacted_text: Returns the text of the input document with all of the PII information
redacted out.
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a RecognizePiiEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.redacted_text = kwargs.get("redacted_text", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return (
"RecognizePiiEntitiesResult(id={}, entities={}, redacted_text={}, warnings={}, "
"statistics={}, is_error={})".format(
self.id,
repr(self.entities),
self.redacted_text,
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[:1024]
)
class AnalyzeHealthcareEntitiesResult(DictMixin):
"""
AnalyzeHealthcareEntitiesResult contains the Healthcare entities from a
particular document.
:ivar str id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:ivar entities: Identified Healthcare entities in the document, i.e. in
the document "The subject took ibuprofen", "ibuprofen" is an identified entity
from the document.
:vartype entities:
list[~azure.ai.textanalytics.HealthcareEntity]
:ivar entity_relations: Identified Healthcare relations between entities. For example, in the
document "The subject took 100mg of ibuprofen", we would identify the relationship
between the dosage of 100mg and the medication ibuprofen.
:vartype entity_relations: list[~azure.ai.textanalytics.HealthcareRelation]
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If show_stats=true was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of an AnalyzeHealthcareEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.entity_relations = kwargs.get("entity_relations", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
@classmethod
def _from_generated(cls, healthcare_result):
entities = [
HealthcareEntity._from_generated(e) # pylint: disable=protected-access
for e in healthcare_result.entities
]
relations = [
HealthcareRelation._from_generated( # pylint: disable=protected-access
r, entities
)
for r in healthcare_result.relations
]
return cls(
id=healthcare_result.id,
entities=entities,
entity_relations=relations,
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in healthcare_result.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
healthcare_result.statistics
),
)
def __repr__(self):
return (
"AnalyzeHealthcareEntitiesResult(id={}, entities={}, entity_relations={}, warnings={}, "
"statistics={}, is_error={})".format(
self.id,
repr(self.entities),
repr(self.entity_relations),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[:1024]
)
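# Illustrative sketch only: walk the relations of an AnalyzeHealthcareEntitiesResult
# and collect, per relation type, the (role name, entity text) pairs filling each role.
# Relies on the HealthcareRelation and HealthcareRelationRole models defined below.
def _example_relation_summary(result):
    return [
        (relation.relation_type, [(role.name, role.entity.text) for role in relation.roles])
        for relation in result.entity_relations
    ]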
class HealthcareRelation(DictMixin):
"""HealthcareRelation is a result object which represents a relation detected in a document.
Every HealthcareRelation is an entity graph of a certain relation type,
where all entities are connected and have specific roles within the relation context.
:ivar relation_type: The type of relation, i.e. the relationship between "100mg" and
"ibuprofen" in the document "The subject took 100 mg of ibuprofen" is "DosageOfMedication".
Possible values found in :class:`~azure.ai.textanalytics.HealthcareEntityRelation`
:vartype relation_type: str
:ivar roles: The roles present in this relation. I.e., in the document
"The subject took 100 mg of ibuprofen", the present roles are "Dosage" and "Medication".
:vartype roles: list[~azure.ai.textanalytics.HealthcareRelationRole]
"""
def __init__(self, **kwargs):
self.relation_type = kwargs.get("relation_type")
self.roles = kwargs.get("roles")
@classmethod
def _from_generated(cls, healthcare_relation_result, entities):
roles = [
HealthcareRelationRole._from_generated( # pylint: disable=protected-access
r, entities
)
for r in healthcare_relation_result.entities
]
return cls(
relation_type=healthcare_relation_result.relation_type,
roles=roles,
)
def __repr__(self):
return "HealthcareRelation(relation_type={}, roles={})".format(
self.relation_type,
repr(self.roles),
)[:1024]
class HealthcareRelationRole(DictMixin):
"""A model representing a role in a relation.
For example, in "The subject took 100 mg of ibuprofen",
"100 mg" is a dosage entity fulfilling the role "Dosage"
in the extracted relation "DosageOfMedication".
:ivar name: The role of the entity in the relationship. I.e., in the relation
"The subject took 100 mg of ibuprofen", the dosage entity "100 mg" has role
"Dosage".
:vartype name: str
:ivar entity: The entity that is present in the relationship. For example, in
"The subject took 100 mg of ibuprofen", this property holds the dosage entity
of "100 mg".
:vartype entity: ~azure.ai.textanalytics.HealthcareEntity
"""
def __init__(self, **kwargs):
self.name = kwargs.get("name")
self.entity = kwargs.get("entity")
@staticmethod
def _get_entity(healthcare_role_result, entities):
nums = _get_indices(healthcare_role_result.ref)
entity_index = nums[
1
] # first num parsed from index is document #, second is entity index
return entities[entity_index]
@classmethod
def _from_generated(cls, healthcare_role_result, entities):
return cls(
name=healthcare_role_result.role,
entity=HealthcareRelationRole._get_entity(healthcare_role_result, entities),
)
def __repr__(self):
return "HealthcareRelationRole(name={}, entity={})".format(
self.name, repr(self.entity)
)
class DetectLanguageResult(DictMixin):
"""DetectLanguageResult is a result object which contains
the detected language of a particular document.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar primary_language: The primary language detected in the document.
:vartype primary_language: ~azure.ai.textanalytics.DetectedLanguage
:ivar warnings: Warnings encountered while processing document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a DetectLanguageResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.primary_language = kwargs.get("primary_language", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return (
"DetectLanguageResult(id={}, primary_language={}, warnings={}, statistics={}, "
"is_error={})".format(
self.id,
repr(self.primary_language),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[:1024]
)
class CategorizedEntity(DictMixin):
"""CategorizedEntity contains information about a particular
entity found in text.
:ivar text: Entity text as appears in the request.
:vartype text: str
:ivar category: Entity category, such as Person/Location/Org/SSN, etc.
:vartype category: str
:ivar subcategory: Entity subcategory, such as Age/Year/TimeRange, etc.
:vartype subcategory: str
:ivar int length: The entity text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints
by default.
:ivar int offset: The entity text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
:ivar confidence_score: Confidence score between 0 and 1 of the extracted
entity.
:vartype confidence_score: float
.. versionadded:: v3.1
The *offset* and *length* properties.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.category = kwargs.get("category", None)
self.subcategory = kwargs.get("subcategory", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.confidence_score = kwargs.get("confidence_score", None)
@classmethod
def _from_generated(cls, entity):
offset = entity.offset
length = entity.length
if isinstance(entity, _v3_0_models.Entity):
# we do not return offset for v3.0 since
# the correct encoding was not introduced for v3.0
offset = None
length = None
return cls(
text=entity.text,
category=entity.category,
subcategory=entity.subcategory,
length=length,
offset=offset,
confidence_score=entity.confidence_score,
)
def __repr__(self):
return (
"CategorizedEntity(text={}, category={}, subcategory={}, "
"length={}, offset={}, confidence_score={})".format(
self.text,
self.category,
self.subcategory,
self.length,
self.offset,
self.confidence_score,
)[:1024]
)
class PiiEntity(DictMixin):
"""PiiEntity contains information about a Personally Identifiable
Information (PII) entity found in text.
:ivar str text: Entity text as appears in the request.
:ivar str category: Entity category, such as Financial Account
Identification/Social Security Number/Phone Number, etc.
:ivar str subcategory: Entity subcategory, such as Credit Card/EU
Phone number/ABA Routing Numbers, etc.
:ivar int length: The PII entity text length. This value depends on the value
of the `string_index_type` parameter specified in the original request, which
is UnicodeCodePoints by default.
:ivar int offset: The PII entity text offset from the start of the document.
This value depends on the value of the `string_index_type` parameter specified
in the original request, which is UnicodeCodePoints by default.
:ivar float confidence_score: Confidence score between 0 and 1 of the extracted
entity.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.category = kwargs.get("category", None)
self.subcategory = kwargs.get("subcategory", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.confidence_score = kwargs.get("confidence_score", None)
@classmethod
def _from_generated(cls, entity):
return cls(
text=entity.text,
category=entity.category,
subcategory=entity.subcategory,
length=entity.length,
offset=entity.offset,
confidence_score=entity.confidence_score,
)
def __repr__(self):
return (
"PiiEntity(text={}, category={}, subcategory={}, length={}, "
"offset={}, confidence_score={})".format(
self.text,
self.category,
self.subcategory,
self.length,
self.offset,
self.confidence_score,
)[:1024]
)
class HealthcareEntity(DictMixin):
"""HealthcareEntity contains information about a Healthcare entity found in text.
:ivar str text: Entity text as it appears in the document.
:ivar str normalized_text: Optional. Normalized version of the raw `text` we extract
from the document. Not all `text` will have a normalized version.
:ivar str category: Entity category, see the :class:`~azure.ai.textanalytics.HealthcareEntityCategory`
type for possible healthcare entity categories.
:ivar str subcategory: Entity subcategory.
:ivar assertion: Contains various assertions about this entity. For example, if
an entity is a diagnosis, is this diagnosis 'conditional' on a symptom?
Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated'
with another diagnosis?
:vartype assertion: ~azure.ai.textanalytics.HealthcareEntityAssertion
:ivar int length: The entity text length. This value depends on the value
of the `string_index_type` parameter specified in the original request, which is
UnicodeCodePoints by default.
:ivar int offset: The entity text offset from the start of the document.
This value depends on the value of the `string_index_type` parameter specified
in the original request, which is UnicodeCodePoints by default.
:ivar float confidence_score: Confidence score between 0 and 1 of the extracted
entity.
:ivar data_sources: A collection of entity references in known data sources.
:vartype data_sources: list[~azure.ai.textanalytics.HealthcareEntityDataSource]
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.normalized_text = kwargs.get("normalized_text", None)
self.category = kwargs.get("category", None)
self.subcategory = kwargs.get("subcategory", None)
self.assertion = kwargs.get("assertion", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.confidence_score = kwargs.get("confidence_score", None)
self.data_sources = kwargs.get("data_sources", [])
@classmethod
def _from_generated(cls, healthcare_entity):
assertion = None
try:
if healthcare_entity.assertion:
assertion = HealthcareEntityAssertion._from_generated( # pylint: disable=protected-access
healthcare_entity.assertion
)
except AttributeError:
assertion = None
return cls(
text=healthcare_entity.text,
normalized_text=healthcare_entity.name,
category=healthcare_entity.category,
subcategory=healthcare_entity.subcategory,
assertion=assertion,
length=healthcare_entity.length,
offset=healthcare_entity.offset,
confidence_score=healthcare_entity.confidence_score,
data_sources=[
HealthcareEntityDataSource(entity_id=l.id, name=l.data_source)
for l in healthcare_entity.links
]
if healthcare_entity.links
else None,
)
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return (
"HealthcareEntity(text={}, normalized_text={}, category={}, subcategory={}, assertion={}, length={}, "
"offset={}, confidence_score={}, data_sources={})".format(
self.text,
self.normalized_text,
self.category,
self.subcategory,
repr(self.assertion),
self.length,
self.offset,
self.confidence_score,
repr(self.data_sources),
)[:1024]
)
class HealthcareEntityAssertion(DictMixin):
"""Contains various assertions about a `HealthcareEntity`.
For example, if an entity is a diagnosis, is this diagnosis 'conditional' on a symptom?
Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated'
with another diagnosis?
:ivar str conditionality: Describes whether the healthcare entity this assertion is
attached to is conditional on another entity. For example, in "If the patient has a fever,
he has pneumonia", the diagnosis of pneumonia is 'conditional' on whether the patient has
a fever. Possible values are "hypothetical" and "conditional".
:ivar str certainty: Describes how certain the model is about the healthcare entity this
assertion is attached to. For example, in "The patient may have a fever", the fever entity
is not 100% certain, but is instead "positivePossible". Possible values are "positive",
"positivePossible", "neutralPossible", "negativePossible", and "negative".
:ivar str association: Describes whether the healthcare entity this assertion is attached to
is the subject of the document, or whether it describes someone else in the document. For
example, in "The subject's mother has a fever", the "fever" entity is not associated with
the subject themselves, but with the subject's mother. Possible values are "subject" and "other".
"""
def __init__(self, **kwargs):
self.conditionality = kwargs.get("conditionality", None)
self.certainty = kwargs.get("certainty", None)
self.association = kwargs.get("association", None)
@classmethod
def _from_generated(cls, healthcare_assertion):
return cls(
conditionality=healthcare_assertion.conditionality,
certainty=healthcare_assertion.certainty,
association=healthcare_assertion.association,
)
def __repr__(self):
return "HealthcareEntityAssertion(conditionality={}, certainty={}, association={})".format(
self.conditionality, self.certainty, self.association
)
class HealthcareEntityDataSource(DictMixin):
"""
HealthcareEntityDataSource contains information representing an entity reference in a known data source.
:ivar str entity_id: ID of the entity in the given source catalog.
:ivar str name: The name of the entity catalog from where the entity was identified, such as UMLS, CHV, MSH, etc.
"""
def __init__(self, **kwargs):
self.entity_id = kwargs.get("entity_id", None)
self.name = kwargs.get("name", None)
def __repr__(self):
return "HealthcareEntityDataSource(entity_id={}, name={})".format(
self.entity_id, self.name
)[:1024]
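# Editor's note: illustrative sketch, not part of the SDK source. It shows how a
# HealthcareEntity carries its references to external catalogs as a list of
# HealthcareEntityDataSource objects (see the two classes above). The entity
# text, category and catalog identifier below are placeholders, not real
# service output.
def _example_healthcare_entity():
    entity = HealthcareEntity(
        text="hypertension",
        normalized_text="Hypertensive disease",
        category="Diagnosis",
        confidence_score=1.0,
        data_sources=[
            HealthcareEntityDataSource(entity_id="C0000000", name="UMLS"),
        ],
    )
    # Each data source names the catalog (e.g. UMLS) and the entity's id in it.
    return [(source.name, source.entity_id) for source in entity.data_sources]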
class TextAnalyticsError(DictMixin):
"""TextAnalyticsError contains the error code, message, and
other details that explain why the batch or individual document
failed to be processed by the service.
:ivar code: Error code. Possible values include:
'invalidRequest', 'invalidArgument', 'internalServerError',
'serviceUnavailable', 'invalidParameterValue', 'invalidRequestBodyFormat',
'emptyRequest', 'missingInputRecords', 'invalidDocument', 'modelVersionIncorrect',
'invalidDocumentBatch', 'unsupportedLanguageCode', 'invalidCountryHint'
:vartype code: str
:ivar message: Error message.
:vartype message: str
:ivar target: Error target.
:vartype target: str
"""
def __init__(self, **kwargs):
self.code = kwargs.get("code", None)
self.message = kwargs.get("message", None)
self.target = kwargs.get("target", None)
@classmethod
def _from_generated(cls, err):
if err.innererror:
return cls(
code=err.innererror.code,
message=err.innererror.message,
target=err.innererror.target,
)
return cls(code=err.code, message=err.message, target=err.target)
def __repr__(self):
return "TextAnalyticsError(code={}, message={}, target={})".format(
self.code, self.message, self.target
)[:1024]
class TextAnalyticsWarning(DictMixin):
"""TextAnalyticsWarning contains the warning code and message that explains why
the response has a warning.
:ivar code: Warning code. Possible values include: 'LongWordsInDocument',
'DocumentTruncated'.
:vartype code: str
:ivar message: Warning message.
:vartype message: str
"""
def __init__(self, **kwargs):
self.code = kwargs.get("code", None)
self.message = kwargs.get("message", None)
@classmethod
def _from_generated(cls, warning):
return cls(
code=warning.code,
message=warning.message,
)
def __repr__(self):
return "TextAnalyticsWarning(code={}, message={})".format(
self.code, self.message
)[:1024]
class ExtractKeyPhrasesResult(DictMixin):
"""ExtractKeyPhrasesResult is a result object which contains
the key phrases found in a particular document.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar key_phrases: A list of representative words or phrases.
The number of key phrases returned is proportional to the number of words
in the input document.
:vartype key_phrases: list[str]
:ivar warnings: Warnings encountered while processing the document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request, this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of an ExtractKeyPhrasesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.key_phrases = kwargs.get("key_phrases", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "ExtractKeyPhrasesResult(id={}, key_phrases={}, warnings={}, statistics={}, is_error={})".format(
self.id,
self.key_phrases,
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
class RecognizeLinkedEntitiesResult(DictMixin):
"""RecognizeLinkedEntitiesResult is a result object which contains
links to a well-known knowledge base, such as Wikipedia or Bing.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar entities: Recognized well-known entities in the document.
:vartype entities:
list[~azure.ai.textanalytics.LinkedEntity]
:ivar warnings: Warnings encountered while processing the document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request, this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a RecognizeLinkedEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "RecognizeLinkedEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})".format(
self.id,
repr(self.entities),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
class AnalyzeSentimentResult(DictMixin):
"""AnalyzeSentimentResult is a result object which contains
the overall predicted sentiment and confidence scores for your document
and a per-sentence sentiment prediction with scores.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar sentiment: Predicted sentiment for document (Negative,
Neutral, Positive, or Mixed). Possible values include: 'positive',
'neutral', 'negative', 'mixed'
:vartype sentiment: str
:ivar warnings: Warnings encountered while processing the document. Results will still be returned
if there are warnings, but they may not be fully accurate.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request, this
field will contain information about the document payload.
:vartype statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:ivar confidence_scores: Document level sentiment confidence
scores between 0 and 1 for each sentiment label.
:vartype confidence_scores:
~azure.ai.textanalytics.SentimentConfidenceScores
:ivar sentences: Sentence level sentiment analysis.
:vartype sentences:
list[~azure.ai.textanalytics.SentenceSentiment]
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of an AnalyzeSentimentResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.sentiment = kwargs.get("sentiment", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.confidence_scores = kwargs.get("confidence_scores", None)
self.sentences = kwargs.get("sentences", None)
self.is_error = False
def __repr__(self):
return (
"AnalyzeSentimentResult(id={}, sentiment={}, warnings={}, statistics={}, confidence_scores={}, "
"sentences={}, is_error={})".format(
self.id,
self.sentiment,
repr(self.warnings),
repr(self.statistics),
repr(self.confidence_scores),
repr(self.sentences),
self.is_error,
)[:1024]
)
class TextDocumentStatistics(DictMixin):
"""TextDocumentStatistics contains information about
the document payload.
:ivar character_count: Number of text elements recognized in
the document.
:vartype character_count: int
:ivar transaction_count: Number of transactions for the document.
:vartype transaction_count: int
"""
def __init__(self, **kwargs):
self.character_count = kwargs.get("character_count", None)
self.transaction_count = kwargs.get("transaction_count", None)
@classmethod
def _from_generated(cls, stats):
if stats is None:
return None
return cls(
character_count=stats.characters_count,
transaction_count=stats.transactions_count,
)
def __repr__(self):
return (
"TextDocumentStatistics(character_count={}, transaction_count={})".format(
self.character_count, self.transaction_count
)[:1024]
)
class DocumentError(DictMixin):
"""DocumentError is an error object which represents an error on
the individual document.
:ivar id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:vartype id: str
:ivar error: The document error.
:vartype error: ~azure.ai.textanalytics.TextAnalyticsError
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always True for an instance of a DocumentError.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.error = kwargs.get("error", None)
self.is_error = True
def __getattr__(self, attr):
result_set = set()
result_set.update(
RecognizeEntitiesResult().keys()
+ RecognizePiiEntitiesResult().keys()
+ DetectLanguageResult().keys()
+ RecognizeLinkedEntitiesResult().keys()
+ AnalyzeSentimentResult().keys()
+ ExtractKeyPhrasesResult().keys()
)
result_attrs = result_set.difference(DocumentError().keys())
if attr in result_attrs:
raise AttributeError(
"'DocumentError' object has no attribute '{}'. The service was unable to process this document:\n"
"Document Id: {}\nError: {} - {}\n".format(
attr, self.id, self.error.code, self.error.message
)
)
raise AttributeError(
"'DocumentError' object has no attribute '{}'".format(attr)
)
@classmethod
def _from_generated(cls, doc_err):
return cls(
id=doc_err.id,
error=TextAnalyticsError._from_generated( # pylint: disable=protected-access
doc_err.error
),
is_error=True,
)
def __repr__(self):
return "DocumentError(id={}, error={}, is_error={})".format(
self.id, repr(self.error), self.is_error
)[:1024]
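# Editor's note: illustrative sketch, not part of the SDK source. The result
# classes above all expose `is_error` so that successful results and
# DocumentError items can be told apart while iterating over a mixed response
# list in document order. The objects below are hand-built stand-ins for what
# the service operations return.
def _example_split_errors():
    response = [
        DetectLanguageResult(id="1", primary_language=None),
        DocumentError(
            id="2",
            error=TextAnalyticsError(code="invalidDocument", message="Empty document."),
        ),
    ]
    successes = [doc for doc in response if not doc.is_error]
    errors = [doc for doc in response if doc.is_error]
    return successes, errors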
class DetectLanguageInput(LanguageInput):
"""The input document to be analyzed for detecting language.
:keyword str id: Unique, non-empty document identifier.
:keyword str text: The input text to process.
:keyword str country_hint: A country hint to help better detect
the language of the text. Accepts two letter country codes
specified by ISO 3166-1 alpha-2. Defaults to "US". Pass
in the string "none" to not use a country_hint.
:ivar id: Required. Unique, non-empty document identifier.
:vartype id: str
:ivar text: Required. The input text to process.
:vartype text: str
:ivar country_hint: A country hint to help better detect
the language of the text. Accepts two letter country codes
specified by ISO 3166-1 alpha-2. Defaults to "US". Pass
in the string "none" to not use a country_hint.
:vartype country_hint: str
"""
def __init__(self, **kwargs):
super(DetectLanguageInput, self).__init__(**kwargs)
self.id = kwargs.get("id", None)
self.text = kwargs.get("text", None)
self.country_hint = kwargs.get("country_hint", None)
def __repr__(self):
return "DetectLanguageInput(id={}, text={}, country_hint={})".format(
self.id, self.text, self.country_hint
)[:1024]
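# Editor's note: illustrative sketch, not part of the SDK source. It shows the
# two ways a country hint is typically supplied on DetectLanguageInput: an
# ISO 3166-1 alpha-2 code, or the literal string "none" to disable the hint
# (see the docstring above). Ids and text are placeholders.
def _example_detect_language_inputs():
    hinted = DetectLanguageInput(
        id="1", text="Este es un documento escrito en español.", country_hint="ES"
    )
    unhinted = DetectLanguageInput(
        id="2", text="This document has no country hint.", country_hint="none"
    )
    return [hinted, unhinted]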
class LinkedEntity(DictMixin):
"""LinkedEntity contains a link to the well-known recognized
entity in text. The link comes from a data source like Wikipedia
or Bing. It additionally includes all of the matches of this
entity found in the document.
:ivar name: Entity Linking formal name.
:vartype name: str
:ivar matches: List of instances this entity appears in the text.
:vartype matches:
list[~azure.ai.textanalytics.LinkedEntityMatch]
:ivar language: Language used in the data source.
:vartype language: str
:ivar data_source_entity_id: Unique identifier of the recognized entity from the data
source.
:vartype data_source_entity_id: str
:ivar url: URL to the entity's page from the data source.
:vartype url: str
:ivar data_source: Data source used to extract entity linking,
such as Wiki/Bing etc.
:vartype data_source: str
:ivar str bing_entity_search_api_id: Bing Entity Search unique identifier of the recognized entity.
Use in conjunction with the Bing Entity Search SDK to fetch additional relevant information.
.. versionadded:: v3.1
The *bing_entity_search_api_id* property.
"""
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
self.matches = kwargs.get("matches", None)
self.language = kwargs.get("language", None)
self.data_source_entity_id = kwargs.get("data_source_entity_id", None)
self.url = kwargs.get("url", None)
self.data_source = kwargs.get("data_source", None)
self.bing_entity_search_api_id = kwargs.get("bing_entity_search_api_id", None)
@classmethod
def _from_generated(cls, entity):
bing_entity_search_api_id = (
entity.bing_id if hasattr(entity, "bing_id") else None
)
return cls(
name=entity.name,
matches=[
LinkedEntityMatch._from_generated(e) # pylint: disable=protected-access
for e in entity.matches
],
language=entity.language,
data_source_entity_id=entity.id,
url=entity.url,
data_source=entity.data_source,
bing_entity_search_api_id=bing_entity_search_api_id,
)
def __repr__(self):
return (
"LinkedEntity(name={}, matches={}, language={}, data_source_entity_id={}, url={}, "
"data_source={}, bing_entity_search_api_id={})".format(
self.name,
repr(self.matches),
self.language,
self.data_source_entity_id,
self.url,
self.data_source,
self.bing_entity_search_api_id,
)[:1024]
)
class LinkedEntityMatch(DictMixin):
"""A match for the linked entity found in text. Provides
the confidence score of the prediction and where the entity
was found in the text.
:ivar confidence_score: If a well-known item is recognized, a
decimal number denoting the confidence level between 0 and 1 will be
returned.
:vartype confidence_score: float
:ivar text: Entity text as it appears in the request.
:vartype text: str
:ivar int length: The linked entity match text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints by default.
:ivar int offset: The linked entity match text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
.. versionadded:: v3.1
The *offset* and *length* properties.
"""
def __init__(self, **kwargs):
self.confidence_score = kwargs.get("confidence_score", None)
self.text = kwargs.get("text", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
@classmethod
def _from_generated(cls, match):
offset = match.offset
length = match.length
if isinstance(match, _v3_0_models.Match):
# we do not return offset for v3.0 since
# the correct encoding was not introduced for v3.0
offset = None
length = None
return cls(
confidence_score=match.confidence_score,
text=match.text,
length=length,
offset=offset,
)
def __repr__(self):
return "LinkedEntityMatch(confidence_score={}, text={}, length={}, offset={})".format(
self.confidence_score, self.text, self.length, self.offset
)[
:1024
]
class TextDocumentInput(DictMixin, MultiLanguageInput):
"""The input document to be analyzed by the service.
:keyword str id: Unique, non-empty document identifier.
:keyword str text: The input text to process.
:keyword str language: This is the 2 letter ISO 639-1 representation
of a language. For example, use "en" for English; "es" for Spanish etc. If
not set, uses "en" for English as default.
:ivar id: Required. Unique, non-empty document identifier.
:vartype id: str
:ivar text: Required. The input text to process.
:vartype text: str
:ivar language: This is the 2 letter ISO 639-1 representation
of a language. For example, use "en" for English; "es" for Spanish etc. If
not set, uses "en" for English as default.
:vartype language: str
"""
def __init__(self, **kwargs):
super(TextDocumentInput, self).__init__(**kwargs)
self.id = kwargs.get("id", None)
self.text = kwargs.get("text", None)
self.language = kwargs.get("language", None)
def __repr__(self):
return "TextDocumentInput(id={}, text={}, language={})".format(
self.id, self.text, self.language
)[:1024]
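# Editor's note: illustrative sketch, not part of the SDK source. TextDocumentInput
# is the per-document input shape for most operations; `language` is a 2-letter
# ISO 639-1 code and falls back to English ("en") when omitted, per the docstring
# above. The ids and texts are placeholders.
def _example_text_document_inputs():
    batch = [
        TextDocumentInput(id="1", text="The restaurant had great food.", language="en"),
        TextDocumentInput(id="2", text="La comida del restaurante era excelente.", language="es"),
    ]
    return batch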
class TextDocumentBatchStatistics(DictMixin):
"""TextDocumentBatchStatistics contains information about the
request payload. Note: This object is not returned
in the response and needs to be retrieved by a response hook.
:ivar document_count: Number of documents submitted in the request.
:vartype document_count: int
:ivar valid_document_count: Number of valid documents. This
excludes documents that are empty, over the size limit, or in unsupported languages.
:vartype valid_document_count: int
:ivar erroneous_document_count: Number of invalid documents.
This includes documents that are empty, over the size limit, or in unsupported languages.
:vartype erroneous_document_count: int
:ivar transaction_count: Number of transactions for the request.
:vartype transaction_count: int
"""
def __init__(self, **kwargs):
self.document_count = kwargs.get("document_count", None)
self.valid_document_count = kwargs.get("valid_document_count", None)
self.erroneous_document_count = kwargs.get("erroneous_document_count", None)
self.transaction_count = kwargs.get("transaction_count", None)
@classmethod
def _from_generated(cls, statistics):
if statistics is None:
return None
return cls(
document_count=statistics["documentsCount"],
valid_document_count=statistics["validDocumentsCount"],
erroneous_document_count=statistics["erroneousDocumentsCount"],
transaction_count=statistics["transactionsCount"],
)
def __repr__(self):
return (
"TextDocumentBatchStatistics(document_count={}, valid_document_count={}, erroneous_document_count={}, "
"transaction_count={})".format(
self.document_count,
self.valid_document_count,
self.erroneous_document_count,
self.transaction_count,
)[:1024]
)
class SentenceSentiment(DictMixin):
"""SentenceSentiment contains the predicted sentiment and
confidence scores for each individual sentence in the document.
:ivar text: The sentence text.
:vartype text: str
:ivar sentiment: The predicted Sentiment for the sentence.
Possible values include: 'positive', 'neutral', 'negative'
:vartype sentiment: str
:ivar confidence_scores: The sentiment confidence score between 0
and 1 for the sentence for all labels.
:vartype confidence_scores:
~azure.ai.textanalytics.SentimentConfidenceScores
:ivar int length: The sentence text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints
by default.
:ivar int offset: The sentence text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
:ivar mined_opinions: The list of opinions mined from this sentence.
For example, in the sentence "The food is good, but the service is bad", we would
mine the two opinions "food is good" and "service is bad". Only returned
if `show_opinion_mining` is set to True in the call to `analyze_sentiment` and
the API version is v3.1 or newer.
:vartype mined_opinions:
list[~azure.ai.textanalytics.MinedOpinion]
.. versionadded:: v3.1
The *offset*, *length*, and *mined_opinions* properties.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.sentiment = kwargs.get("sentiment", None)
self.confidence_scores = kwargs.get("confidence_scores", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.mined_opinions = kwargs.get("mined_opinions", None)
@classmethod
def _from_generated(cls, sentence, results, sentiment):
offset = sentence.offset
length = sentence.length
if isinstance(sentence, _v3_0_models.SentenceSentiment):
# we do not return offset for v3.0 since
# the correct encoding was not introduced for v3.0
offset = None
length = None
if hasattr(sentence, "targets"):
mined_opinions = (
[
MinedOpinion._from_generated( # pylint: disable=protected-access
target, results, sentiment
)
for target in sentence.targets
]
if sentence.targets
else []
)
else:
mined_opinions = None
return cls(
text=sentence.text,
sentiment=sentence.sentiment,
confidence_scores=SentimentConfidenceScores._from_generated( # pylint: disable=protected-access
sentence.confidence_scores
),
length=length,
offset=offset,
mined_opinions=mined_opinions,
)
def __repr__(self):
return (
"SentenceSentiment(text={}, sentiment={}, confidence_scores={}, "
"length={}, offset={}, mined_opinions={})".format(
self.text,
self.sentiment,
repr(self.confidence_scores),
self.length,
self.offset,
repr(self.mined_opinions),
)[:1024]
)
class MinedOpinion(DictMixin):
"""A mined opinion object represents an opinion we've extracted from a sentence.
It consists of both a target that these opinions are about, and the assessments
representing the opinion.
:ivar target: The target of an opinion about a product/service.
:vartype target: ~azure.ai.textanalytics.TargetSentiment
:ivar assessments: The assessments representing the opinion of the target.
:vartype assessments: list[~azure.ai.textanalytics.AssessmentSentiment]
"""
def __init__(self, **kwargs):
self.target = kwargs.get("target", None)
self.assessments = kwargs.get("assessments", None)
@staticmethod
def _get_assessments(
relations, results, sentiment
): # pylint: disable=unused-argument
if not relations:
return []
assessment_relations = [
r.ref for r in relations if r.relation_type == "assessment"
]
assessments = []
for assessment_relation in assessment_relations:
nums = _get_indices(assessment_relation)
sentence_index = nums[1]
assessment_index = nums[2]
assessments.append(
sentiment.sentences[sentence_index].assessments[assessment_index]
)
return assessments
@classmethod
def _from_generated(cls, target, results, sentiment):
return cls(
target=TargetSentiment._from_generated( # pylint: disable=protected-access
target
),
assessments=[
AssessmentSentiment._from_generated( # pylint: disable=protected-access
assessment
)
for assessment in cls._get_assessments(
target.relations, results, sentiment
)
],
)
def __repr__(self):
return "MinedOpinion(target={}, assessments={})".format(
repr(self.target), repr(self.assessments)
)[:1024]
class TargetSentiment(DictMixin):
"""TargetSentiment contains the predicted sentiment,
confidence scores and other information about a key component of a product/service.
For example in "The food at Hotel Foo is good", "food" is an key component of
"Hotel Foo".
:ivar str text: The text value of the target.
:ivar str sentiment: The predicted Sentiment for the target. Possible values
include 'positive', 'mixed', and 'negative'.
:ivar confidence_scores: The sentiment confidence score between 0
and 1 for the target for 'positive' and 'negative' labels. Its score
for 'neutral' will always be 0.
:vartype confidence_scores:
~azure.ai.textanalytics.SentimentConfidenceScores
:ivar int length: The target text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints
by default.
:ivar int offset: The target text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.sentiment = kwargs.get("sentiment", None)
self.confidence_scores = kwargs.get("confidence_scores", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
@classmethod
def _from_generated(cls, target):
return cls(
text=target.text,
sentiment=target.sentiment,
confidence_scores=SentimentConfidenceScores._from_generated( # pylint: disable=protected-access
target.confidence_scores
),
length=target.length,
offset=target.offset,
)
def __repr__(self):
return (
"TargetSentiment(text={}, sentiment={}, confidence_scores={}, "
"length={}, offset={})".format(
self.text,
self.sentiment,
repr(self.confidence_scores),
self.length,
self.offset,
)[:1024]
)
class AssessmentSentiment(DictMixin):
"""AssessmentSentiment contains the predicted sentiment,
confidence scores and other information about an assessment given about
a particular target. For example, in the sentence "The food is good", the assessment
of the target 'food' is 'good'.
:ivar str text: The assessment text.
:ivar str sentiment: The predicted Sentiment for the assessment. Possible values
include 'positive', 'mixed', and 'negative'.
:ivar confidence_scores: The sentiment confidence score between 0
and 1 for the assessment for 'positive' and 'negative' labels. Its score
for 'neutral' will always be 0.
:vartype confidence_scores:
~azure.ai.textanalytics.SentimentConfidenceScores
:ivar int length: The assessment text length. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoints
by default.
:ivar int offset: The assessment text offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoints by default.
:ivar bool is_negated: Whether the value of the assessment is negated. For example, in
"The food is not good", the assessment "good" is negated.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.sentiment = kwargs.get("sentiment", None)
self.confidence_scores = kwargs.get("confidence_scores", None)
self.length = kwargs.get("length", None)
self.offset = kwargs.get("offset", None)
self.is_negated = kwargs.get("is_negated", None)
@classmethod
def _from_generated(cls, assessment):
return cls(
text=assessment.text,
sentiment=assessment.sentiment,
confidence_scores=SentimentConfidenceScores._from_generated( # pylint: disable=protected-access
assessment.confidence_scores
),
length=assessment.length,
offset=assessment.offset,
is_negated=assessment.is_negated,
)
def __repr__(self):
return (
"AssessmentSentiment(text={}, sentiment={}, confidence_scores={}, length={}, offset={}, "
"is_negated={})".format(
self.text,
self.sentiment,
repr(self.confidence_scores),
self.length,
self.offset,
self.is_negated,
)[:1024]
)
class SentimentConfidenceScores(DictMixin):
"""The confidence scores (Softmax scores) between 0 and 1.
Higher values indicate higher confidence.
:ivar positive: Positive score.
:vartype positive: float
:ivar neutral: Neutral score.
:vartype neutral: float
:ivar negative: Negative score.
:vartype negative: float
"""
def __init__(self, **kwargs):
self.positive = kwargs.get("positive", 0.0)
self.neutral = kwargs.get("neutral", 0.0)
self.negative = kwargs.get("negative", 0.0)
@classmethod
def _from_generated(cls, score):
return cls(
positive=score.positive,
neutral=score.neutral if hasattr(score, "neutral") else 0.0,
negative=score.negative,
)
def __repr__(self):
return "SentimentConfidenceScores(positive={}, neutral={}, negative={})".format(
self.positive, self.neutral, self.negative
)[:1024]
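# Editor's note: illustrative sketch, not part of the SDK source. It wires the
# sentiment model classes above together the way the service nests them:
# AnalyzeSentimentResult -> SentenceSentiment -> MinedOpinion, where each
# MinedOpinion pairs a TargetSentiment with its AssessmentSentiment objects and
# every level carries SentimentConfidenceScores. All values are made up.
def _example_sentiment_nesting():
    scores = SentimentConfidenceScores(positive=0.05, neutral=0.05, negative=0.9)
    assessment = AssessmentSentiment(
        text="bad", sentiment="negative", confidence_scores=scores, is_negated=False
    )
    target = TargetSentiment(text="service", sentiment="negative", confidence_scores=scores)
    opinion = MinedOpinion(target=target, assessments=[assessment])
    sentence = SentenceSentiment(
        text="The service is bad.",
        sentiment="negative",
        confidence_scores=scores,
        mined_opinions=[opinion],
    )
    result = AnalyzeSentimentResult(
        id="1", sentiment="negative", confidence_scores=scores, sentences=[sentence]
    )
    # Walk the nesting: document -> sentences -> mined opinions -> target/assessments.
    return [
        (s.text, o.target.text, [a.text for a in o.assessments])
        for s in result.sentences
        for o in (s.mined_opinions or [])
    ]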
class _AnalyzeActionsType(str, Enum):
"""The type of action that was applied to the documents"""
RECOGNIZE_ENTITIES = "recognize_entities" #: Entities Recognition action.
RECOGNIZE_PII_ENTITIES = (
"recognize_pii_entities" #: PII Entities Recognition action.
)
EXTRACT_KEY_PHRASES = "extract_key_phrases" #: Key Phrase Extraction action.
RECOGNIZE_LINKED_ENTITIES = (
"recognize_linked_entities" #: Linked Entities Recognition action.
)
ANALYZE_SENTIMENT = "analyze_sentiment" #: Sentiment Analysis action.
EXTRACT_SUMMARY = "extract_summary"
RECOGNIZE_CUSTOM_ENTITIES = "recognize_custom_entities"
SINGLE_CATEGORY_CLASSIFY = "single_category_classify"
MULTI_CATEGORY_CLASSIFY = "multi_category_classify"
class ActionPointerKind(str, Enum):
RECOGNIZE_ENTITIES = "entityRecognitionTasks"
RECOGNIZE_PII_ENTITIES = "entityRecognitionPiiTasks"
EXTRACT_KEY_PHRASES = "keyPhraseExtractionTasks"
RECOGNIZE_LINKED_ENTITIES = "entityLinkingTasks"
ANALYZE_SENTIMENT = "sentimentAnalysisTasks"
EXTRACT_SUMMARY = "extractiveSummarizationTasks"
RECOGNIZE_CUSTOM_ENTITIES = "customEntityRecognitionTasks"
SINGLE_CATEGORY_CLASSIFY = "customSingleClassificationTasks"
MULTI_CATEGORY_CLASSIFY = "customMultiClassificationTasks"
class RecognizeEntitiesAction(DictMixin):
"""RecognizeEntitiesAction encapsulates the parameters for starting a long-running Entities Recognition operation.
If you just want to recognize entities in a list of documents, and not perform multiple
long running actions on the input of documents, call method `recognize_entities` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
self.disable_service_logs = kwargs.get("disable_service_logs", False)
def __repr__(self):
return "RecognizeEntitiesAction(model_version={}, string_index_type={}, disable_service_logs={})".format(
self.model_version, self.string_index_type, self.disable_service_logs
)[
:1024
]
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.EntitiesTask(
parameters=models.EntitiesTaskParameters(
model_version=self.model_version,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
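# Editor's note: illustrative sketch, not part of the SDK source. Action objects
# such as RecognizeEntitiesAction only bundle per-task parameters; they are later
# handed to the client's long-running "analyze actions" operation. The sketch
# shows the defaults documented above and an explicit string_index_type override.
def _example_recognize_entities_action():
    default_action = RecognizeEntitiesAction()
    utf16_action = RecognizeEntitiesAction(string_index_type="Utf16CodePoint")
    # Defaults per __init__ above: "latest" model and Python-friendly offsets.
    assert default_action.model_version == "latest"
    assert default_action.string_index_type == "UnicodeCodePoint"
    return default_action, utf16_action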
class AnalyzeSentimentAction(DictMixin):
"""AnalyzeSentimentAction encapsulates the parameters for starting a long-running
Sentiment Analysis operation.
If you just want to analyze sentiment in a list of documents, and not perform multiple
long running actions on the input of documents, call method `analyze_sentiment` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword bool show_opinion_mining: Whether to mine the opinions of a sentence and conduct more
granular analysis around the aspects of a product or service (also known as
aspect-based sentiment analysis). If set to true, the returned
:class:`~azure.ai.textanalytics.SentenceSentiment` objects
will have property `mined_opinions` containing the result of this analysis.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar bool show_opinion_mining: Whether to mine the opinions of a sentence and conduct more
granular analysis around the aspects of a product or service (also known as
aspect-based sentiment analysis). If set to true, the returned
:class:`~azure.ai.textanalytics.SentenceSentiment` objects
will have property `mined_opinions` containing the result of this analysis.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.show_opinion_mining = kwargs.get("show_opinion_mining", False)
self.string_index_type = kwargs.get("string_index_type", None)
self.disable_service_logs = kwargs.get("disable_service_logs", False)
def __repr__(self):
return (
"AnalyzeSentimentAction(model_version={}, show_opinion_mining={}, string_index_type={}, "
"disable_service_logs={})".format(
self.model_version,
self.show_opinion_mining,
self.string_index_type,
self.disable_service_logs,
)[:1024]
)
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.SentimentAnalysisTask(
parameters=models.SentimentAnalysisTaskParameters(
model_version=self.model_version,
opinion_mining=self.show_opinion_mining,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class RecognizePiiEntitiesAction(DictMixin):
"""RecognizePiiEntitiesAction encapsulates the parameters for starting a long-running PII
Entities Recognition operation.
If you just want to recognize pii entities in a list of documents, and not perform multiple
long running actions on the input of documents, call method `recognize_pii_entities` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword str domain_filter: An optional string to set the PII domain to include only a
subset of the PII entity categories. Possible values include 'phi' or None.
:keyword categories_filter: Instead of filtering over all PII entity categories, you can pass in a list of
the specific PII entity categories you want to filter out. For example, if you only want to filter out
U.S. social security numbers in a document, you can pass in
`[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg.
:paramtype categories_filter: list[~azure.ai.textanalytics.PiiEntityCategory]
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: Defaults to true, meaning that Text Analytics will not log your
input text on the service side for troubleshooting. If set to False, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar str domain_filter: An optional string to set the PII domain to include only a
subset of the PII entity categories. Possible values include 'phi' or None.
:ivar categories_filter: Instead of filtering over all PII entity categories, you can pass in a list of
the specific PII entity categories you want to filter out. For example, if you only want to filter out
U.S. social security numbers in a document, you can pass in
`[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg.
:vartype categories_filter: list[~azure.ai.textanalytics.PiiEntityCategory]
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: Defaults to true, meaning that Text Analytics will not log your
input text on the service side for troubleshooting. If set to False, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.domain_filter = kwargs.get("domain_filter", None)
self.categories_filter = kwargs.get("categories_filter", None)
self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
self.disable_service_logs = kwargs.get("disable_service_logs", True)
def __repr__(self):
return (
"RecognizePiiEntitiesAction(model_version={}, domain_filter={}, categories_filter={}, "
"string_index_type={}, disable_service_logs={})".format(
self.model_version,
self.domain_filter,
self.categories_filter,
self.string_index_type,
self.disable_service_logs,
)[:1024]
)
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.PiiTask(
parameters=models.PiiTaskParameters(
model_version=self.model_version,
domain=self.domain_filter,
pii_categories=self.categories_filter,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class ExtractKeyPhrasesAction(DictMixin):
"""ExtractKeyPhrasesAction encapsulates the parameters for starting a long-running key phrase
extraction operation.
If you just want to extract key phrases from a list of documents, and not perform multiple
long running actions on the input of documents, call method `extract_key_phrases` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.disable_service_logs = kwargs.get("disable_service_logs", False)
def __repr__(self):
return (
"ExtractKeyPhrasesAction(model_version={}, disable_service_logs={})".format(
self.model_version, self.disable_service_logs
)[:1024]
)
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.KeyPhrasesTask(
parameters=models.KeyPhrasesTaskParameters(
model_version=self.model_version,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class RecognizeLinkedEntitiesAction(DictMixin):
"""RecognizeLinkedEntitiesAction encapsulates the parameters for starting a long-running Linked Entities
Recognition operation.
If you just want to recognize linked entities in a list of documents, and not perform multiple
long running actions on the input of documents, call method `recognize_linked_entities` instead
of interfacing with this model.
:keyword str model_version: The model version to use for the analysis.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str model_version: The model version to use for the analysis.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
self.disable_service_logs = kwargs.get("disable_service_logs", False)
def __repr__(self):
return (
"RecognizeLinkedEntitiesAction(model_version={}, string_index_type={}, "
"disable_service_logs={})".format(
self.model_version, self.string_index_type, self.disable_service_logs
)[:1024]
)
def _to_generated(self, api_version, task_id):
if api_version == DEFAULT_API_VERSION:
from ._generated.v3_2_preview_2 import models
else:
from ._generated.v3_1 import models
return models.EntityLinkingTask(
parameters=models.EntityLinkingTaskParameters(
model_version=self.model_version,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class ExtractSummaryAction(DictMixin):
"""ExtractSummaryAction encapsulates the parameters for starting a long-running Extractive Text
Summarization operation. For a conceptual discussion of extractive summarization, see the service documentation:
https://docs.microsoft.com/azure/cognitive-services/text-analytics/how-tos/extractive-summarization
:keyword str model_version: The model version to use for the analysis.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:keyword int max_sentence_count: Maximum number of sentences to return. Defaults to 3.
:keyword str order_by: Possible values include: "Offset", "Rank". Default value: "Offset".
:ivar str model_version: The model version to use for the analysis.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar int max_sentence_count: Maximum number of sentences to return. Defaults to 3.
:ivar str order_by: Possible values include: "Offset", "Rank". Default value: "Offset".
"""
def __init__(self, **kwargs):
self.model_version = kwargs.get("model_version", "latest")
self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
self.disable_service_logs = kwargs.get("disable_service_logs", False)
self.max_sentence_count = kwargs.get("max_sentence_count", 3)
self.order_by = kwargs.get("order_by", "Offset")
def __repr__(self):
return (
"ExtractSummaryAction(model_version={}, string_index_type={}, disable_service_logs={}, "
"max_sentence_count={}, order_by={})".format(
self.model_version,
self.string_index_type,
self.disable_service_logs,
self.max_sentence_count,
self.order_by,
)[:1024]
)
def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
return _v3_2_preview_models.ExtractiveSummarizationTask(
parameters=_v3_2_preview_models.ExtractiveSummarizationTaskParameters(
model_version=self.model_version,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
sentence_count=self.max_sentence_count,
sort_by=self.order_by,
),
task_name=task_id
)
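# Editor's note: illustrative sketch, not part of the SDK source. It shows the
# extractive-summarization knobs documented above: max_sentence_count caps how
# many sentences come back, and order_by chooses between document order
# ("Offset") and importance order ("Rank").
def _example_extract_summary_action():
    action = ExtractSummaryAction(max_sentence_count=5, order_by="Rank")
    assert action.max_sentence_count == 5
    assert action.order_by == "Rank"
    return action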
class ExtractSummaryResult(DictMixin):
"""ExtractSummaryResult is a result object which contains
the extractive text summarization from a particular document.
:ivar str id: Unique, non-empty document identifier.
:ivar sentences: A ranked list of sentences representing the extracted summary.
:vartype sentences: list[~azure.ai.textanalytics.SummarySentence]
:ivar warnings: Warnings encountered while processing the document.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request, this
field will contain information about the document payload.
:vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of an ExtractSummaryResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.sentences = kwargs.get("sentences", None)
self.warnings = kwargs.get("warnings", None)
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "ExtractSummaryResult(id={}, sentences={}, warnings={}, statistics={}, is_error={})".format(
self.id,
repr(self.sentences),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
@classmethod
def _from_generated(cls, summary):
return cls(
id=summary.id,
sentences=[
SummarySentence._from_generated( # pylint: disable=protected-access
sentence
)
for sentence in summary.sentences
],
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in summary.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
summary.statistics
),
)
class SummarySentence(DictMixin):
"""Represents a single sentence from the extractive text summarization.
:ivar str text: The extracted sentence text.
:ivar float rank_score: A float value representing the relevance of the sentence within
the summary. Higher values indicate higher importance.
:ivar int offset: The sentence offset from the start of the document.
The value depends on the value of the `string_index_type` parameter
set in the original request, which is UnicodeCodePoint by default.
:ivar int length: The length of the sentence. This value depends on the value of the
`string_index_type` parameter set in the original request, which is UnicodeCodePoint
by default.
"""
def __init__(self, **kwargs):
self.text = kwargs.get("text", None)
self.rank_score = kwargs.get("rank_score", None)
self.offset = kwargs.get("offset", None)
self.length = kwargs.get("length", None)
def __repr__(self):
return "SummarySentence(text={}, rank_score={}, offset={}, length={})".format(
self.text,
self.rank_score,
self.offset,
self.length,
)[:1024]
@classmethod
def _from_generated(cls, sentence):
return cls(
text=sentence.text,
rank_score=sentence.rank_score,
offset=sentence.offset,
length=sentence.length,
)
class RecognizeCustomEntitiesAction(DictMixin):
"""RecognizeCustomEntitiesAction encapsulates the parameters for starting a long-running custom entity
recognition operation. For information on regional support of custom features and how to train a model to
recognize custom entities, see https://aka.ms/azsdk/textanalytics/customentityrecognition
:param str project_name: Required. This field indicates the project name for the model.
:param str deployment_name: This field indicates the deployment name for the model.
:keyword str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str project_name: This field indicates the project name for the model.
:ivar str deployment_name: This field indicates the deployment name for the model.
:ivar str string_index_type: Specifies the method used to interpret string offsets.
`UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
see https://aka.ms/text-analytics-offsets
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(
self,
project_name,
deployment_name,
**kwargs
):
self.project_name = project_name
self.deployment_name = deployment_name
self.disable_service_logs = kwargs.get('disable_service_logs', None)
self.string_index_type = kwargs.get('string_index_type', None)
def __repr__(self):
return "RecognizeCustomEntitiesAction(project_name={}, deployment_name={}, disable_service_logs={}, " \
"string_index_type={})".format(
self.project_name,
self.deployment_name,
self.disable_service_logs,
self.string_index_type,
)[:1024]
def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
return _v3_2_preview_models.CustomEntitiesTask(
parameters=_v3_2_preview_models.CustomEntitiesTaskParameters(
project_name=self.project_name,
deployment_name=self.deployment_name,
string_index_type=self.string_index_type,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
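# Usage sketch (assumption-laden, not part of this module): the custom actions in
# this file all follow the same pattern -- construct the action with the project
# and deployment names of a trained custom model and submit it through
# begin_analyze_actions. "<project-name>" and "<deployment-name>" are placeholders,
# and `client` is assumed to be a TextAnalyticsClient as in the sketch above.
#
# poller = client.begin_analyze_actions(
#     ["<document text>"],
#     actions=[RecognizeCustomEntitiesAction("<project-name>", "<deployment-name>")],
# )
# for document_results in poller.result():
#     for action_result in document_results:
#         if not action_result.is_error:
#             for entity in action_result.entities:
#                 print(entity.text, entity.category)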
class RecognizeCustomEntitiesResult(DictMixin):
"""RecognizeCustomEntitiesResult is a result object which contains
the custom recognized entities from a particular document.
:ivar str id: Unique, non-empty document identifier that matches the
document id that was passed in with the request. If not specified
in the request, an id is assigned for the document.
:ivar entities: Recognized custom entities in the document.
:vartype entities:
list[~azure.ai.textanalytics.CategorizedEntity]
:ivar warnings: Warnings encountered while processing document.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a RecognizeCustomEntitiesResult.
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.entities = kwargs.get("entities", None)
self.warnings = kwargs.get("warnings", [])
self.statistics = kwargs.get("statistics", None)
self.is_error = False
def __repr__(self):
return "RecognizeCustomEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})".format(
self.id,
repr(self.entities),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
@classmethod
def _from_generated(cls, result):
return cls(
id=result.id,
entities=[
CategorizedEntity._from_generated(e) # pylint: disable=protected-access
for e in result.entities
],
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in result.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
result.statistics
),
)
class MultiCategoryClassifyAction(DictMixin):
"""MultiCategoryClassifyAction encapsulates the parameters for starting a long-running custom multi category
classification operation. For information on regional support of custom features and how to train a model to
classify your documents, see https://aka.ms/azsdk/textanalytics/customfunctionalities
:param str project_name: Required. This field indicates the project name for the model.
:param str deployment_name: Required. This field indicates the deployment name for the model.
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str project_name: This field indicates the project name for the model.
:ivar str deployment_name: This field indicates the deployment name for the model.
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true,
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(
self,
project_name,
deployment_name,
**kwargs
):
self.project_name = project_name
self.deployment_name = deployment_name
self.disable_service_logs = kwargs.get('disable_service_logs', None)
def __repr__(self):
return "MultiCategoryClassifyAction(project_name={}, deployment_name={}, " \
"disable_service_logs={})".format(
self.project_name,
self.deployment_name,
self.disable_service_logs,
)[:1024]
def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
return _v3_2_preview_models.CustomMultiClassificationTask(
parameters=_v3_2_preview_models.CustomMultiClassificationTaskParameters(
project_name=self.project_name,
deployment_name=self.deployment_name,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class MultiCategoryClassifyResult(DictMixin):
"""MultiCategoryClassifyResult is a result object which contains
the classifications for a particular document.
:ivar str id: Unique, non-empty document identifier.
:ivar classifications: Recognized classification results in the document.
:vartype classifications: list[~azure.ai.textanalytics.ClassificationCategory]
:ivar warnings: Warnings encountered while processing document.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a MultiCategoryClassifyResult.
"""
def __init__(
self,
**kwargs
):
self.id = kwargs.get('id', None)
self.classifications = kwargs.get('classifications', None)
self.warnings = kwargs.get('warnings', [])
self.statistics = kwargs.get('statistics', None)
self.is_error = False
def __repr__(self):
return "MultiCategoryClassifyResult(id={}, classifications={}, warnings={}, statistics={}, " \
"is_error={})".format(
self.id,
repr(self.classifications),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
@classmethod
def _from_generated(cls, result):
return cls(
id=result.id,
classifications=[
ClassificationCategory._from_generated(e) # pylint: disable=protected-access
for e in result.classifications
],
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in result.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
result.statistics
),
)
class SingleCategoryClassifyAction(DictMixin):
"""SingleCategoryClassifyAction encapsulates the parameters for starting a long-running custom single category
classification operation. For information on regional support of custom features and how to train a model to
classify your documents, see https://aka.ms/azsdk/textanalytics/customfunctionalities
:param str project_name: Required. This field indicates the project name for the model.
:param str deployment_name: Required. This field indicates the deployment name for the model.
:keyword bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
:ivar str project_name: This field indicates the project name for the model.
:ivar str deployment_name: This field indicates the deployment name for the model.
:ivar bool disable_service_logs: If set to true, you opt-out of having your text input
logged on the service side for troubleshooting. By default, Text Analytics logs your
input text for 48 hours, solely to allow for troubleshooting issues in providing you with
the Text Analytics natural language processing functions. Setting this parameter to true
disables input logging and may limit our ability to remediate issues that occur. Please see
Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
additional details, and Microsoft Responsible AI principles at
https://www.microsoft.com/ai/responsible-ai.
"""
def __init__(
self,
project_name,
deployment_name,
**kwargs
):
self.project_name = project_name
self.deployment_name = deployment_name
self.disable_service_logs = kwargs.get('disable_service_logs', None)
def __repr__(self):
return "SingleCategoryClassifyAction(project_name={}, deployment_name={}, " \
"disable_service_logs={})".format(
self.project_name,
self.deployment_name,
self.disable_service_logs,
)[:1024]
def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
return _v3_2_preview_models.CustomSingleClassificationTask(
parameters=_v3_2_preview_models.CustomSingleClassificationTaskParameters(
project_name=self.project_name,
deployment_name=self.deployment_name,
logging_opt_out=self.disable_service_logs,
),
task_name=task_id
)
class SingleCategoryClassifyResult(DictMixin):
"""SingleCategoryClassifyResult is a result object which contains
the classification for a particular document.
:ivar str id: Unique, non-empty document identifier.
:ivar classification: Recognized classification results in the document.
:vartype classification: ~azure.ai.textanalytics.ClassificationCategory
:ivar warnings: Warnings encountered while processing document.
:vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
:ivar statistics: If `show_stats=True` was specified in the request this
field will contain information about the document payload.
:vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
:ivar bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a SingleCategoryClassifyResult.
"""
def __init__(
self,
**kwargs
):
self.id = kwargs.get('id', None)
self.classification = kwargs.get('classification', None)
self.warnings = kwargs.get('warnings', [])
self.statistics = kwargs.get('statistics', None)
self.is_error = False
def __repr__(self):
return "SingleCategoryClassifyResult(id={}, classification={}, warnings={}, statistics={}, " \
"is_error={})".format(
self.id,
repr(self.classification),
repr(self.warnings),
repr(self.statistics),
self.is_error,
)[
:1024
]
@classmethod
def _from_generated(cls, result):
return cls(
id=result.id,
classification=
ClassificationCategory._from_generated(result.classification), # pylint: disable=protected-access
warnings=[
TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
w
)
for w in result.warnings
],
statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
result.statistics
),
)
class ClassificationCategory(DictMixin):
"""ClassificationCategory represents a classification of the input document.
:ivar str category: Custom classification category for the document.
:ivar float confidence_score: Confidence score between 0 and 1 of the recognized classification.
"""
def __init__(
self,
**kwargs
):
self.category = kwargs.get('category', None)
self.confidence_score = kwargs.get('confidence_score', None)
def __repr__(self):
return "ClassificationCategory(category={}, confidence_score={})".format(
self.category,
self.confidence_score,
)[:1024]
@classmethod
def _from_generated(cls, result):
return cls(
category=result.category,
confidence_score=result.confidence_score
)
#!/usr/bin/python
import cv2
import fileinput
import fnmatch
import fractions
import itertools
import json
import math
from matplotlib import pyplot as plt
import numpy as np
import os.path
from progress.bar import Bar
import scipy.interpolate
import subprocess
import sys
import time
import geojson
from props import root, getNode
import props_json
from getchar import find_getch
import Camera
import Image
import ImageList
import Matcher
import Render
import transformations
class ProjectMgr():
def __init__(self, project_dir, create=False):
self.project_dir = project_dir
self.cam = Camera.Camera()
self.image_list = []
self.matcher_params = { 'matcher': 'FLANN', # { FLANN or 'BF' }
'match-ratio': 0.75,
'filter': 'fundamental',
'image-fuzz': 40,
'feature-fuzz': 20 }
# the following member variables need to be reviewed/organized
self.ac3d_steps = 8
self.render = Render.Render()
self.dir_node = getNode('/config/directories', True)
self.load( create )
def set_defaults(self):
self.cam.set_defaults() # camera defaults
# project_dir is a new folder for all derived files
def validate_project_dir(self, create_if_needed=True):
if not os.path.exists(self.project_dir):
if create_if_needed:
print("Notice: creating project directory:", self.project_dir)
os.makedirs(self.project_dir)
else:
print("Error: project dir doesn't exist: ", self.project_dir)
return False
# and make children directories
meta_dir = os.path.join(self.project_dir, 'meta')
if not os.path.exists(meta_dir):
if create_if_needed:
print("Notice: creating meta directory:", meta_dir)
os.makedirs(meta_dir)
else:
print("Error: image dir doesn't exist:", meta_dir)
return False
# all is good
return True
# source_dir is the folder containing all the raw/original images.
# The expected work flow is that we will import/scale all the
# original images into our project folder leaving the original
# image set completely untouched.
def set_image_sources(self, image_dirs):
for i, dir in enumerate(image_dirs):
if dir == self.project_dir:
print("Error: image source and project dirs must be different.")
return
if not os.path.exists(dir):
print("Error: image source path does not exist:", dir)
self.dir_node.setStringEnum('image_sources', i, dir)
def save(self):
# create a project dictionary and write it out as json
if not os.path.exists(self.project_dir):
print("Error: project doesn't exist:", self.project_dir)
return
project_file = os.path.join(self.project_dir, "config.json")
config_node = getNode("/config", True)
props_json.save(project_file, config_node)
def load(self, create=True):
if not self.validate_project_dir():
return
# load project configuration
result = False
project_file = os.path.join(self.project_dir, "config.json")
config_node = getNode("/config", True)
if os.path.isfile(project_file):
if props_json.load(project_file, config_node):
# fixme:
# if 'matcher' in project_dict:
# self.matcher_params = project_dict['matcher']
# root.pretty_print()
result = True
else:
print("Notice: unable to load: ", project_file)
else:
print("Notice: project configuration doesn't exist:", project_file)
if not result and create:
print("Continuing with an empty project configuration")
self.set_defaults()
elif not result:
print("aborting...")
quit()
#root.pretty_print()
def load_images_info(self):
# load image meta info
result = False
meta_dir = os.path.join(self.project_dir, 'meta')
images_node = getNode("/images", True)
for file in os.listdir(meta_dir):
if fnmatch.fnmatch(file, '*.json'):
name, ext = os.path.splitext(file)
image_node = images_node.getChild(name, True)
props_json.load(os.path.join(meta_dir, file), image_node)
# images_node.pretty_print()
# wipe image list (so we don't double load)
self.image_list = []
for name in images_node.getChildren():
image = Image.Image(meta_dir, name)
self.image_list.append( image )
# make sure our matcher gets a copy of the image list
self.render.setImageList(self.image_list)
def load_features(self, descriptors=False):
if descriptors:
msg = 'Loading keypoints and descriptors:'
else:
msg = 'Loading keypoints:'
bar = Bar(msg, max = len(self.image_list))
for image in self.image_list:
image.load_features()
if descriptors:
image.load_descriptors()
bar.next()
bar.finish()
def load_match_pairs(self, extra_verbose=True):
if extra_verbose:
print("")
print("ProjectMgr.load_match_pairs():")
print("Notice: this routine is depricated for most purposes, unless")
print("resetting the match state of the system back to the original")
print("set of found matches.")
time.sleep(2)
bar = Bar('Loading keypoint (pair) matches:',
max = len(self.image_list))
for image in self.image_list:
image.load_matches()
bar.next()
bar.finish()
# generate a n x n structure of image vs. image pair matches and
# return it
def generate_match_pairs(self, matches_direct):
# generate skeleton structure
result = []
for i, i1 in enumerate(self.image_list):
matches = []
for j, i2 in enumerate(self.image_list):
matches.append( [] )
result.append(matches)
# fill in the structure (a match = ned point followed by
# image/feat-index, ...)
for k, match in enumerate(matches_direct):
#print match
for p1 in match[1:]:
for p2 in match[1:]:
if p1 == p2:
pass
#print 'skip self match'
else:
#print p1, 'vs', p2
i = p1[0]; j = p2[0]
result[i][j].append( [p1[1], p2[1], k] )
#for i, i1 in enumerate(self.image_list):
# for j, i2 in enumerate(self.image_list):
# print 'a:', self.image_list[i].match_list[j]
# print 'b:', result[i][j]
return result
def save_images_info(self):
# create a project dictionary and write it out as json
if not os.path.exists(self.project_dir):
print("Error: project doesn't exist:", self.project_dir)
return
meta_dir = os.path.join(self.project_dir, 'meta')
images_node = getNode("/images", True)
for name in images_node.getChildren():
image_node = images_node.getChild(name, True)
image_path = os.path.join(meta_dir, name + '.json')
props_json.save(image_path, image_node)
def set_matcher_params(self, mparams):
self.matcher_params = mparams
def detect_features(self, scale, show=False):
if not show:
bar = Bar('Detecting features:', max = len(self.image_list))
for image in self.image_list:
#print "detecting features and computing descriptors: " + image.name
rgb = image.load_rgb()
image.detect_features(rgb, scale)
image.save_features()
image.save_descriptors()
image.save_matches()
if show:
result = image.show_features()
if result == 27 or result == ord('q'):
break
if not show:
bar.next()
if not show:
bar.finish()
self.save_images_info()
def show_features_image(self, image):
result = image.show_features()
return result
def show_features_images(self, name=None):
for image in self.image_list:
result = self.show_features_image(image)
if result == 27 or result == ord('q'):
break
def findImageByName(self, name):
for i in self.image_list:
if i.name == name:
return i
return None
def findIndexByName(self, name):
for i, img in enumerate(self.image_list):
if img.name == name:
return i
return None
# compute a center reference location (lon, lat) for the group of
# images.
def compute_ned_reference_lla(self):
# requires images to have their location computed/loaded
lon_sum = 0.0
lat_sum = 0.0
count = 0
images_node = getNode("/images", True)
for name in images_node.getChildren():
image_node = images_node.getChild(name, True)
pose_node = image_node.getChild('aircraft_pose', True)
if pose_node.hasChild('lon_deg') and pose_node.hasChild('lat_deg'):
lon_sum += pose_node.getFloat('lon_deg')
lat_sum += pose_node.getFloat('lat_deg')
count += 1
ned_node = getNode('/config/ned_reference', True)
ned_node.setFloat('lat_deg', lat_sum / count)
ned_node.setFloat('lon_deg', lon_sum / count)
ned_node.setFloat('alt_m', 0.0)
def undistort_uvlist(self, image, uv_orig):
if len(uv_orig) == 0:
return []
# camera parameters
dist_coeffs = np.array(self.cam.get_dist_coeffs())
K = self.cam.get_K()
# assemble the points in the proper format
uv_raw = np.zeros((len(uv_orig),1,2), dtype=np.float32)
for i, kp in enumerate(uv_orig):
uv_raw[i][0] = (kp[0], kp[1])
# do the actual undistort
uv_new = cv2.undistortPoints(uv_raw, K, dist_coeffs, P=K)
# return the results in an easier format
result = []
for i, uv in enumerate(uv_new):
result.append(uv_new[i][0])
#print " orig = %s undistort = %s" % (uv_raw[i][0], uv_new[i][0]
return result
# for each feature in each image, compute the undistorted pixel
# location (from the calibrated distortion parameters)
def undistort_keypoints(self, optimized=False):
bar = Bar('Undistorting keypoints:', max = len(self.image_list))
for image in self.image_list:
if len(image.kp_list) == 0:
continue
K = self.cam.get_K(optimized)
uv_raw = np.zeros((len(image.kp_list),1,2), dtype=np.float32)
for i, kp in enumerate(image.kp_list):
uv_raw[i][0] = (kp.pt[0], kp.pt[1])
dist_coeffs = self.cam.get_dist_coeffs(optimized)
uv_new = cv2.undistortPoints(uv_raw, K, np.array(dist_coeffs), P=K)
image.uv_list = []
for i, uv in enumerate(uv_new):
image.uv_list.append(uv_new[i][0])
# print(" orig = %s undistort = %s" % (uv_raw[i][0], uv_new[i][0]))
bar.next()
bar.finish()
# for each uv in the provided uv list, apply the distortion
# formula to compute the original distorted value.
def redistort(self, uv_list, K, dist_coeffs):
fx = K[0,0]
fy = K[1,1]
cx = K[0,2]
cy = K[1,2]
k1, k2, p1, p2, k3 = dist_coeffs
uv_distorted = []
for pt in uv_list:
x = (pt[0] - cx) / fx
y = (pt[1] - cy) / fy
# Compute radius^2
r2 = x**2 + y**2
r4, r6 = r2**2, r2**3
# Compute tangential distortion
dx = 2*p1*x*y + p2*(r2 + 2*x*x)
dy = p1*(r2 + 2*y*y) + 2*p2*x*y
# Compute radial factor
Lr = 1.0 + k1*r2 + k2*r4 + k3*r6
ud = Lr*x + dx
vd = Lr*y + dy
uv_distorted.append( [ud * fx + cx, vd * fy + cy] )
return uv_distorted
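# Round-trip sanity check (illustrative sketch only; the camera matrix and
# distortion coefficients below are made-up values, not project defaults):
# cv2.undistortPoints maps a distorted pixel to its undistorted location, and
# redistort() should map it back to (approximately) the original pixel.
#
# K = np.array([[1000.0, 0.0, 640.0],
#               [0.0, 1000.0, 360.0],
#               [0.0, 0.0, 1.0]])
# dist = np.array([-0.1, 0.01, 0.0, 0.0, 0.0])   # k1, k2, p1, p2, k3
# raw = np.array([[[700.0, 400.0]]], dtype=np.float32)
# und = cv2.undistortPoints(raw, K, dist, P=K)[0][0]
# back = ProjectMgr.redistort(None, [und], K, dist)[0]
# # back should be close to the original [700.0, 400.0]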
def compute_kp_usage(self, all=False):
print("Determining feature usage in matching pairs...")
# but they may have different scaling or other attributes important
# during feature matching
if all:
for image in self.image_list:
image.kp_used = np.ones(len(image.kp_list), np.bool_)
else:
for image in self.image_list:
image.kp_used = np.zeros(len(image.kp_list), np.bool_)
for i1 in self.image_list:
for j, matches in enumerate(i1.match_list):
i2 = self.image_list[j]
for k, pair in enumerate(matches):
i1.kp_used[ pair[0] ] = True
i2.kp_used[ pair[1] ] = True
def compute_kp_usage_new(self, matches_direct):
print("Determining feature usage in matching pairs...")
for image in self.image_list:
image.kp_used = np.zeros(len(image.kp_list), np.bool_)
for match in matches_direct:
for p in match[1:]:
image = self.image_list[ p[0] ]
image.kp_used[ p[1] ] = True
# project the list of (u, v) pixels from image space into camera
# space, remap that to a vector in ned space (for camera
# ypr=[0,0,0], and then transform that by the camera pose, returns
# the vector from the camera, through the pixel, into ned space
def projectVectors(self, IK, body2ned, cam2body, uv_list):
proj_list = []
for uv in uv_list:
uvh = np.array([uv[0], uv[1], 1.0])
proj = body2ned.dot(cam2body).dot(IK).dot(uvh)
proj_norm = transformations.unit_vector(proj)
proj_list.append(proj_norm)
#for uv in uv_list:
# print "uv:", uv
# uvh = np.array([uv[0], uv[1], 1.0])
# print "cam vec=", transformations.unit_vector(IR.dot(IK).dot(uvh))
return proj_list
# project the (u, v) pixels for the specified image using the current
# sba pose and write them to image.vec_list
def projectVectorsImageSBA(self, IK, image):
vec_list = []
body2ned = image.get_body2ned_sba()
cam2body = image.get_cam2body()
for uv in image.uv_list:
uvh = np.array([uv[0], uv[1], 1.0])
proj = body2ned.dot(cam2body).dot(IK).dot(uvh)
proj_norm = transformations.unit_vector(proj)
vec_list.append(proj_norm)
return vec_list
# given a set of vectors in the ned frame, and a starting point.
# Find the ground intersection point. For any vectors which point into
# the sky, return just the original reference/starting point.
def intersectVectorsWithGroundPlane(self, pose_ned, ground_m, v_list):
pt_list = []
for v in v_list:
# solve projection
p = pose_ned
if v[2] > 0.0:
d_proj = -(pose_ned[2] + ground_m)
factor = d_proj / v[2]
n_proj = v[0] * factor
e_proj = v[1] * factor
p = [ pose_ned[0] + n_proj, pose_ned[1] + e_proj, pose_ned[2] + d_proj ]
pt_list.append(p)
return pt_list
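# Worked example (values are illustrative): a camera at NED position
# [0, 0, -100] (100 m above the NED origin), ground_m = 0, and a unit vector
# pointing straight down, v = [0, 0, 1]:
#   d_proj = -(-100 + 0) = 100, factor = 100 / 1 = 100
#   p = [0 + 0, 0 + 0, -100 + 100] = [0, 0, 0]
# i.e. the ray hits the ground plane directly below the camera. A vector with
# v[2] <= 0 points at or above the horizon and just returns pose_ned unchanged.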
def polyval2d(self, x, y, m):
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
z = np.zeros_like(x)
for a, (i,j) in zip(m, ij):
z += a * x**i * y**j
return z
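# Example (illustrative): for an order-1 surface the coefficient vector m has
# length 4 and, given the (i, j) ordering produced by itertools.product above,
#   polyval2d(x, y, [c00, c01, c10, c11]) == c00 + c01*y + c10*x + c11*x*y
# so e.g. polyval2d(2.0, 3.0, [1.0, 0.5, 0.25, 0.1]) == 1 + 1.5 + 0.5 + 0.6 == 3.6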
def intersectVectorWithPoly(self, pose_ned, v, m):
pass
# given a set of vectors in the ned frame, and a starting point.
# Find the intersection points with the given 2d polynomial. For
# any vectors which point into the sky, return just the original
# reference/starting point.
def intersectVectorsWithPoly(self, pose_ned, m, v_list):
pt_list = []
for v in v_list:
p = self.intersectVectorWithPoly(pose_ned, m, v.flatten())
pt_list.append(p)
return pt_list
# build an interpolation table for 'fast' projection of keypoints
# into 3d world space
#
# 1. make a grid (i.e. 8x8) of uv coordinates covering the whole image
# 2. undistort these uv coordinates
# 3. project them into vectors
# 4. intersect them with the srtm terrain to get ned coordinates
# 5. use linearndinterpolator ... g = scipy.interpolate.LinearNDInterpolator([[0,0],[1,0],[0,1],[1,1]], [[0,4,8],[1,3,2],[2,2,-4],[4,1,0]])
# with origin uv vs. 3d location to build a table
# 6. interpolate original uv coordinates to 3d locations
def fastProjectKeypointsTo3d(self, sss):
bar = Bar('Projecting keypoints to 3d:',
max = len(self.image_list))
for image in self.image_list:
K = self.cam.get_K()
IK = np.linalg.inv(K)
# build a regular grid of uv coordinates
w, h = image.get_size()
steps = 32
u_grid = np.linspace(0, w-1, steps+1)
v_grid = np.linspace(0, h-1, steps+1)
uv_raw = []
for u in u_grid:
for v in v_grid:
uv_raw.append( [u,v] )
# undistort the grid of points
uv_grid = self.undistort_uvlist(image, uv_raw)
# filter crazy values which can happen out at the very fringes
half_width = w * 0.5
half_height = h * 0.5
uv_filt = []
for p in uv_grid:
if p[0] < -half_width or p[0] > w + half_width:
print("rejecting width outlier:", p)
continue
if p[1] < -half_height or p[1] > h + half_height:
print("rejecting height outlier:", p)
continue
uv_filt.append(p)
# project the grid out into vectors
body2ned = image.get_body2ned() # IR
# M is a transform to map the lens coordinate system (at
# zero roll/pitch/yaw to the ned coordinate system at zero
# roll/pitch/yaw). It is essentially a +90 pitch followed
# by +90 roll (or equivalently a +90 yaw followed by +90
# pitch.)
cam2body = image.get_cam2body()
vec_list = self.projectVectors(IK, body2ned, cam2body, uv_filt)
# intersect the vectors with the surface to find the 3d points
ned, ypr, quat = image.get_camera_pose()
coord_list = sss.interpolate_vectors(ned, vec_list)
# filter the coordinate list for bad interpolation
coord_filt = []
for i in reversed(range(len(coord_list))):
if np.isnan(coord_list[i][0]):
print("rejecting ground interpolation fault:", uv_filt[i])
coord_list.pop(i)
uv_filt.pop(i)
# build the multidimensional interpolator that relates
# undistorted uv coordinates to their 3d location. Note we
# could also relate the original raw/distorted points to
# their 3d locations and interpolate from the raw uv's,
# but we already have a convenient list of undistorted uv
# points.
g = scipy.interpolate.LinearNDInterpolator(uv_filt, coord_list)
# interpolate all the keypoints now to approximate their
# 3d locations
image.coord_list = []
for i, uv in enumerate(image.uv_list):
if image.kp_used[i]:
coord = g(uv)
# coord[0] is the 3 element vector
if not np.isnan(coord[0][0]):
image.coord_list.append(coord[0])
else:
print("nan alert!")
print("a feature is too close to an edge and undistorting puts it in a weird place.")
print(" uv:", uv, "coord:", coord)
print(" orig:", image.kp_list[i].pt)
#or append zeros which would be a hack until
#figuring out the root cause of the problem
#... if it isn't wrong image dimensions in the
#.info file...
#
image.coord_list.append(np.zeros(3)*np.nan)
else:
image.coord_list.append(np.zeros(3)*np.nan)
bar.next()
bar.finish()
def fastProjectKeypointsToGround(self, ground_m, cam_dict=None):
bar = Bar('Projecting keypoints to 3d:',
max = len(self.image_list))
for image in self.image_list:
K = self.cam.get_K()
IK = np.linalg.inv(K)
# project the grid out into vectors
if cam_dict is None:
body2ned = image.get_body2ned() # IR
else:
body2ned = image.rvec_to_body2ned(cam_dict[image.name]['rvec'])
# M is a transform to map the lens coordinate system (at
# zero roll/pitch/yaw to the ned coordinate system at zero
# roll/pitch/yaw). It is essentially a +90 pitch followed
# by +90 roll (or equivalently a +90 yaw followed by +90
# pitch.)
cam2body = image.get_cam2body()
vec_list = self.projectVectors(IK, body2ned, cam2body, image.uv_list)
# intersect the vectors with the surface to find the 3d points
if cam_dict is None:
pose = image.camera_pose
else:
pose = cam_dict[image.name]
pts_ned = self.intersectVectorsWithGroundPlane(pose['ned'],
ground_m, vec_list)
image.coord_list = pts_ned
bar.next()
bar.finish()
Lessen redundant work in one spot.
Verbosity++ in another spot.
#!/usr/bin/python
import cv2
import fileinput
import fnmatch
import fractions
import itertools
import json
import math
from matplotlib import pyplot as plt
import numpy as np
import os.path
from progress.bar import Bar
import scipy.interpolate
import subprocess
import sys
import time
import geojson
from props import root, getNode
import props_json
from getchar import find_getch
import Camera
import Image
import ImageList
import Matcher
import Render
import transformations
class ProjectMgr():
def __init__(self, project_dir, create=False):
self.project_dir = project_dir
self.cam = Camera.Camera()
self.image_list = []
self.matcher_params = { 'matcher': 'FLANN', # { FLANN or 'BF' }
'match-ratio': 0.75,
'filter': 'fundamental',
'image-fuzz': 40,
'feature-fuzz': 20 }
# the following member variables need to be reviewed/organized
self.ac3d_steps = 8
self.render = Render.Render()
self.dir_node = getNode('/config/directories', True)
self.load( create )
def set_defaults(self):
self.cam.set_defaults() # camera defaults
# project_dir is a new folder for all derived files
def validate_project_dir(self, create_if_needed=True):
if not os.path.exists(self.project_dir):
if create_if_needed:
print("Notice: creating project directory:", self.project_dir)
os.makedirs(self.project_dir)
else:
print("Error: project dir doesn't exist: ", self.project_dir)
return False
# and make children directories
meta_dir = os.path.join(self.project_dir, 'meta')
if not os.path.exists(meta_dir):
if create_if_needed:
print("Notice: creating meta directory:", meta_dir)
os.makedirs(meta_dir)
else:
print("Error: image dir doesn't exist:", meta_dir)
return False
# all is good
return True
# source_dir is the folder containing all the raw/original images.
# The expected work flow is that we will import/scale all the
# original images into our project folder leaving the original
# image set completely untouched.
def set_image_sources(self, image_dirs):
for i, dir in enumerate(image_dirs):
if dir == self.project_dir:
print("Error: image source and project dirs must be different.")
return
if not os.path.exists(dir):
print("Error: image source path does not exist:", dir)
self.dir_node.setStringEnum('image_sources', i, dir)
def save(self):
# create a project dictionary and write it out as json
if not os.path.exists(self.project_dir):
print("Error: project doesn't exist:", self.project_dir)
return
project_file = os.path.join(self.project_dir, "config.json")
config_node = getNode("/config", True)
props_json.save(project_file, config_node)
def load(self, create=True):
if not self.validate_project_dir():
return
# load project configuration
result = False
project_file = os.path.join(self.project_dir, "config.json")
config_node = getNode("/config", True)
if os.path.isfile(project_file):
if props_json.load(project_file, config_node):
# fixme:
# if 'matcher' in project_dict:
# self.matcher_params = project_dict['matcher']
# root.pretty_print()
result = True
else:
print("Notice: unable to load: ", project_file)
else:
print("Notice: project configuration doesn't exist:", project_file)
if not result and create:
print("Continuing with an empty project configuration")
self.set_defaults()
elif not result:
print("aborting...")
quit()
#root.pretty_print()
def load_images_info(self):
# load image meta info
result = False
meta_dir = os.path.join(self.project_dir, 'meta')
images_node = getNode("/images", True)
for file in os.listdir(meta_dir):
if fnmatch.fnmatch(file, '*.json'):
name, ext = os.path.splitext(file)
image_node = images_node.getChild(name, True)
props_json.load(os.path.join(meta_dir, file), image_node)
# images_node.pretty_print()
# wipe image list (so we don't double load)
self.image_list = []
for name in images_node.getChildren():
image = Image.Image(meta_dir, name)
self.image_list.append( image )
# make sure our matcher gets a copy of the image list
self.render.setImageList(self.image_list)
def load_features(self, descriptors=False):
if descriptors:
msg = 'Loading keypoints and descriptors:'
else:
msg = 'Loading keypoints:'
bar = Bar(msg, max = len(self.image_list))
for image in self.image_list:
image.load_features()
if descriptors:
image.load_descriptors()
bar.next()
bar.finish()
def load_match_pairs(self, extra_verbose=True):
if extra_verbose:
print("")
print("ProjectMgr.load_match_pairs():")
print("Notice: this routine is depricated for most purposes, unless")
print("resetting the match state of the system back to the original")
print("set of found matches.")
time.sleep(2)
bar = Bar('Loading keypoint (pair) matches:',
max = len(self.image_list))
for image in self.image_list:
image.load_matches()
bar.next()
bar.finish()
# generate a n x n structure of image vs. image pair matches and
# return it
def generate_match_pairs(self, matches_direct):
# generate skeleton structure
result = []
for i, i1 in enumerate(self.image_list):
matches = []
for j, i2 in enumerate(self.image_list):
matches.append( [] )
result.append(matches)
# fill in the structure (a match = ned point followed by
# image/feat-index, ...)
for k, match in enumerate(matches_direct):
#print match
for p1 in match[1:]:
for p2 in match[1:]:
if p1 == p2:
pass
#print 'skip self match'
else:
#print p1, 'vs', p2
i = p1[0]; j = p2[0]
result[i][j].append( [p1[1], p2[1], k] )
#for i, i1 in enumerate(self.image_list):
# for j, i2 in enumerate(self.image_list):
# print 'a:', self.image_list[i].match_list[j]
# print 'b:', result[i][j]
return result
def save_images_info(self):
# create a project dictionary and write it out as json
if not os.path.exists(self.project_dir):
print("Error: project doesn't exist:", self.project_dir)
return
meta_dir = os.path.join(self.project_dir, 'meta')
images_node = getNode("/images", True)
for name in images_node.getChildren():
image_node = images_node.getChild(name, True)
image_path = os.path.join(meta_dir, name + '.json')
props_json.save(image_path, image_node)
def set_matcher_params(self, mparams):
self.matcher_params = mparams
def detect_features(self, scale, show=False):
if not show:
bar = Bar('Detecting features:', max = len(self.image_list))
for image in self.image_list:
#print "detecting features and computing descriptors: " + image.name
rgb = image.load_rgb()
image.detect_features(rgb, scale)
image.save_features()
image.save_descriptors()
image.save_matches()
if show:
result = image.show_features()
if result == 27 or result == ord('q'):
break
if not show:
bar.next()
if not show:
bar.finish()
self.save_images_info()
def show_features_image(self, image):
result = image.show_features()
return result
def show_features_images(self, name=None):
for image in self.image_list:
result = self.show_features_image(image)
if result == 27 or result == ord('q'):
break
def findImageByName(self, name):
for i in self.image_list:
if i.name == name:
return i
return None
def findIndexByName(self, name):
for i, img in enumerate(self.image_list):
if img.name == name:
return i
return None
# compute a center reference location (lon, lat) for the group of
# images.
def compute_ned_reference_lla(self):
# requires images to have their location computed/loaded
lon_sum = 0.0
lat_sum = 0.0
count = 0
images_node = getNode("/images", True)
for name in images_node.getChildren():
image_node = images_node.getChild(name, True)
pose_node = image_node.getChild('aircraft_pose', True)
if pose_node.hasChild('lon_deg') and pose_node.hasChild('lat_deg'):
lon_sum += pose_node.getFloat('lon_deg')
lat_sum += pose_node.getFloat('lat_deg')
count += 1
ned_node = getNode('/config/ned_reference', True)
ned_node.setFloat('lat_deg', lat_sum / count)
ned_node.setFloat('lon_deg', lon_sum / count)
ned_node.setFloat('alt_m', 0.0)
def undistort_uvlist(self, image, uv_orig):
if len(uv_orig) == 0:
return []
# camera parameters
dist_coeffs = np.array(self.cam.get_dist_coeffs())
K = self.cam.get_K()
# assemble the points in the proper format
uv_raw = np.zeros((len(uv_orig),1,2), dtype=np.float32)
for i, kp in enumerate(uv_orig):
uv_raw[i][0] = (kp[0], kp[1])
# do the actual undistort
uv_new = cv2.undistortPoints(uv_raw, K, dist_coeffs, P=K)
# return the results in an easier format
result = []
for i, uv in enumerate(uv_new):
result.append(uv_new[i][0])
#print " orig = %s undistort = %s" % (uv_raw[i][0], uv_new[i][0]
return result
# for each feature in each image, compute the undistorted pixel
# location (from the calibrated distortion parameters)
def undistort_keypoints(self, optimized=False):
bar = Bar('Undistorting keypoints:', max = len(self.image_list))
for image in self.image_list:
if len(image.kp_list) == 0:
continue
K = self.cam.get_K(optimized)
uv_raw = np.zeros((len(image.kp_list),1,2), dtype=np.float32)
for i, kp in enumerate(image.kp_list):
uv_raw[i][0] = (kp.pt[0], kp.pt[1])
dist_coeffs = self.cam.get_dist_coeffs(optimized)
uv_new = cv2.undistortPoints(uv_raw, K, np.array(dist_coeffs), P=K)
image.uv_list = []
for i, uv in enumerate(uv_new):
image.uv_list.append(uv_new[i][0])
# print(" orig = %s undistort = %s" % (uv_raw[i][0], uv_new[i][0]))
bar.next()
bar.finish()
# for each uv in the provided uv list, apply the distortion
# formula to compute the original distorted value.
def redistort(self, uv_list, K, dist_coeffs):
fx = K[0,0]
fy = K[1,1]
cx = K[0,2]
cy = K[1,2]
k1, k2, p1, p2, k3 = dist_coeffs
uv_distorted = []
for pt in uv_list:
x = (pt[0] - cx) / fx
y = (pt[1] - cy) / fy
# Compute radius^2
r2 = x**2 + y**2
r4, r6 = r2**2, r2**3
# Compute tangential distortion
dx = 2*p1*x*y + p2*(r2 + 2*x*x)
dy = p1*(r2 + 2*y*y) + 2*p2*x*y
# Compute radial factor
Lr = 1.0 + k1*r2 + k2*r4 + k3*r6
ud = Lr*x + dx
vd = Lr*y + dy
uv_distorted.append( [ud * fx + cx, vd * fy + cy] )
return uv_distorted
def compute_kp_usage(self, all=False):
print("Determining feature usage in matching pairs...")
# but they may have different scaling or other attributes important
# during feature matching
if all:
for image in self.image_list:
image.kp_used = np.ones(len(image.kp_list), np.bool_)
else:
for image in self.image_list:
image.kp_used = np.zeros(len(image.kp_list), np.bool_)
for i1 in self.image_list:
for j, matches in enumerate(i1.match_list):
i2 = self.image_list[j]
for k, pair in enumerate(matches):
i1.kp_used[ pair[0] ] = True
i2.kp_used[ pair[1] ] = True
def compute_kp_usage_new(self, matches_direct):
print("Determining feature usage in matching pairs...")
for image in self.image_list:
image.kp_used = np.zeros(len(image.kp_list), np.bool_)
for match in matches_direct:
for p in match[1:]:
image = self.image_list[ p[0] ]
image.kp_used[ p[1] ] = True
# project the list of (u, v) pixels from image space into camera
# space, remap that to a vector in ned space (for camera
# ypr=[0,0,0], and then transform that by the camera pose, returns
# the vector from the camera, through the pixel, into ned space
def projectVectors(self, IK, body2ned, cam2body, uv_list):
proj_list = []
for uv in uv_list:
uvh = np.array([uv[0], uv[1], 1.0])
proj = body2ned.dot(cam2body).dot(IK).dot(uvh)
proj_norm = transformations.unit_vector(proj)
proj_list.append(proj_norm)
#for uv in uv_list:
# print "uv:", uv
# uvh = np.array([uv[0], uv[1], 1.0])
# print "cam vec=", transformations.unit_vector(IR.dot(IK).dot(uvh))
return proj_list
# project the (u, v) pixels for the specified image using the current
# sba pose and write them to image.vec_list
def projectVectorsImageSBA(self, IK, image):
vec_list = []
body2ned = image.get_body2ned_sba()
cam2body = image.get_cam2body()
for uv in image.uv_list:
uvh = np.array([uv[0], uv[1], 1.0])
proj = body2ned.dot(cam2body).dot(IK).dot(uvh)
proj_norm = transformations.unit_vector(proj)
vec_list.append(proj_norm)
return vec_list
# given a set of vectors in the ned frame, and a starting point.
# Find the ground intersection point. For any vectors which point into
# the sky, return just the original reference/starting point.
def intersectVectorsWithGroundPlane(self, pose_ned, ground_m, v_list):
pt_list = []
for v in v_list:
# solve projection
p = pose_ned
if v[2] > 0.0:
d_proj = -(pose_ned[2] + ground_m)
factor = d_proj / v[2]
n_proj = v[0] * factor
e_proj = v[1] * factor
p = [ pose_ned[0] + n_proj, pose_ned[1] + e_proj, pose_ned[2] + d_proj ]
pt_list.append(p)
return pt_list
def polyval2d(self, x, y, m):
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
z = np.zeros_like(x)
for a, (i,j) in zip(m, ij):
z += a * x**i * y**j
return z
def intersectVectorWithPoly(self, pose_ned, v, m):
pass
# given a set of vectors in the ned frame, and a starting point.
# Find the intersection points with the given 2d polynomial. For
# any vectors which point into the sky, return just the original
# reference/starting point.
def intersectVectorsWithPoly(self, pose_ned, m, v_list):
pt_list = []
for v in v_list:
p = self.intersectVectorWithPoly(pose_ned, m, v.flatten())
pt_list.append(p)
return pt_list
# build an interpolation table for 'fast' projection of keypoints
# into 3d world space
#
# 1. make a grid (i.e. 8x8) of uv coordinates covering the whole image
# 2. undistort these uv coordinates
# 3. project them into vectors
# 4. intersect them with the srtm terrain to get ned coordinates
# 5. use linearndinterpolator ... g = scipy.interpolate.LinearNDInterpolator([[0,0],[1,0],[0,1],[1,1]], [[0,4,8],[1,3,2],[2,2,-4],[4,1,0]])
# with origin uv vs. 3d location to build a table
# 6. interpolate original uv coordinates to 3d locations
def fastProjectKeypointsTo3d(self, sss):
bar = Bar('Projecting keypoints to 3d:',
max = len(self.image_list))
K = self.cam.get_K()
IK = np.linalg.inv(K)
for image in self.image_list:
# build a regular grid of uv coordinates
w, h = image.get_size()
steps = 32
u_grid = np.linspace(0, w-1, steps+1)
v_grid = np.linspace(0, h-1, steps+1)
uv_raw = []
for u in u_grid:
for v in v_grid:
uv_raw.append( [u,v] )
# undistort the grid of points
uv_grid = self.undistort_uvlist(image, uv_raw)
# filter crazy values which can happen out at the very fringes
half_width = w * 0.5
half_height = h * 0.5
uv_filt = []
for i, p in enumerate(uv_grid):
if p[0] < -half_width or p[0] > w + half_width:
print("rejecting width outlier:", p, '(', uv_raw[i], ')')
continue
if p[1] < -half_height or p[1] > h + half_height:
print("rejecting height outlier:", p, '(', uv_raw[i], ')')
continue
uv_filt.append(p)
print('raw pts:', len(uv_raw), 'undist pts:', len(uv_filt))
# project the grid out into vectors
body2ned = image.get_body2ned() # IR
# M is a transform to map the lens coordinate system (at
# zero roll/pitch/yaw to the ned coordinate system at zero
# roll/pitch/yaw). It is essentially a +90 pitch followed
# by +90 roll (or equivalently a +90 yaw followed by +90
# pitch.)
cam2body = image.get_cam2body()
vec_list = self.projectVectors(IK, body2ned, cam2body, uv_filt)
# intersect the vectors with the surface to find the 3d points
ned, ypr, quat = image.get_camera_pose()
coord_list = sss.interpolate_vectors(ned, vec_list)
# filter the coordinate list for bad interpolation
coord_filt = []
for i in reversed(range(len(coord_list))):
if np.isnan(coord_list[i][0]):
print("rejecting ground interpolation fault:", uv_filt[i])
coord_list.pop(i)
uv_filt.pop(i)
# build the multidimensional interpolator that relates
# undistorted uv coordinates to their 3d location. Note we
# could also relate the original raw/distorted points to
# their 3d locations and interpolate from the raw uv's,
# but we already have a convenient list of undistorted uv
# points.
g = scipy.interpolate.LinearNDInterpolator(uv_filt, coord_list)
# interpolate all the keypoints now to approximate their
# 3d locations
image.coord_list = []
for i, uv in enumerate(image.uv_list):
if image.kp_used[i]:
coord = g(uv)
# coord[0] is the 3 element vector
if not np.isnan(coord[0][0]):
image.coord_list.append(coord[0])
else:
print("nan alert!")
print("a feature is too close to an edge and undistorting puts it in a weird place.")
print(" uv:", uv, "coord:", coord)
print(" orig:", image.kp_list[i].pt)
#or append zeros which would be a hack until
#figuring out the root cause of the problem
#... if it isn't wrong image dimensions in the
#.info file...
#
image.coord_list.append(np.zeros(3)*np.nan)
else:
image.coord_list.append(np.zeros(3)*np.nan)
bar.next()
bar.finish()
def fastProjectKeypointsToGround(self, ground_m, cam_dict=None):
bar = Bar('Projecting keypoints to 3d:',
max = len(self.image_list))
for image in self.image_list:
K = self.cam.get_K()
IK = np.linalg.inv(K)
# project the grid out into vectors
if cam_dict is None:
body2ned = image.get_body2ned() # IR
else:
body2ned = image.rvec_to_body2ned(cam_dict[image.name]['rvec'])
# M is a transform to map the lens coordinate system (at
# zero roll/pitch/yaw to the ned coordinate system at zero
# roll/pitch/yaw). It is essentially a +90 pitch followed
# by +90 roll (or equivalently a +90 yaw followed by +90
# pitch.)
cam2body = image.get_cam2body()
vec_list = self.projectVectors(IK, body2ned, cam2body, image.uv_list)
# intersect the vectors with the surface to find the 3d points
if cam_dict is None:
pose = image.camera_pose
else:
pose = cam_dict[image.name]
pts_ned = self.intersectVectorsWithGroundPlane(pose['ned'],
ground_m, vec_list)
image.coord_list = pts_ned
bar.next()
bar.finish()
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module visualizes PCollection data.
For internal use only; no backwards-compatibility guarantees.
Only works with Python 3.5+.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import base64
import datetime
import logging
from datetime import timedelta
from dateutil import tz
from apache_beam import pvalue
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import pipeline_instrument as instr
from apache_beam.runners.interactive.utils import elements_to_df
from apache_beam.runners.interactive.utils import obfuscate
from apache_beam.runners.interactive.utils import to_element_list
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import IntervalWindow
try:
from IPython import get_ipython # pylint: disable=import-error
from IPython.core.display import HTML # pylint: disable=import-error
from IPython.core.display import Javascript # pylint: disable=import-error
from IPython.core.display import display # pylint: disable=import-error
from IPython.core.display import display_javascript # pylint: disable=import-error
from IPython.core.display import update_display # pylint: disable=import-error
from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator # pylint: disable=import-error
from timeloop import Timeloop # pylint: disable=import-error
if get_ipython():
_pcoll_visualization_ready = True
else:
_pcoll_visualization_ready = False
except ImportError:
_pcoll_visualization_ready = False
_LOGGER = logging.getLogger(__name__)
_CSS = """
<style>
.p-Widget.jp-OutputPrompt.jp-OutputArea-prompt:empty {{
padding: 0;
border: 0;
}}
.p-Widget.jp-RenderedJavaScript.jp-mod-trusted.jp-OutputArea-output:empty {{
padding: 0;
border: 0;
}}
</style>"""
_DIVE_SCRIPT_TEMPLATE = """
try {{
document.querySelector("#{display_id}").data = {jsonstr};
}} catch (e) {{
// NOOP when the user has cleared the output from the notebook.
}}"""
_DIVE_HTML_TEMPLATE = _CSS + """
<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"></script>
<link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/1.0.0/facets-dist/facets-jupyter.html">
<facets-dive sprite-image-width="{sprite_size}" sprite-image-height="{sprite_size}" id="{display_id}" height="600"></facets-dive>
<script>
document.querySelector("#{display_id}").data = {jsonstr};
</script>"""
_OVERVIEW_SCRIPT_TEMPLATE = """
try {{
document.querySelector("#{display_id}").protoInput = "{protostr}";
}} catch (e) {{
// NOOP when the user has cleared the output from the notebook.
}}"""
_OVERVIEW_HTML_TEMPLATE = _CSS + """
<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"></script>
<link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/1.0.0/facets-dist/facets-jupyter.html">
<facets-overview id="{display_id}"></facets-overview>
<script>
document.querySelector("#{display_id}").protoInput = "{protostr}";
</script>"""
_DATATABLE_INITIALIZATION_CONFIG = """
bAutoWidth: false,
columns: {columns},
destroy: true,
responsive: true,
columnDefs: [
{{
targets: "_all",
className: "dt-left"
}},
{{
"targets": 0,
"width": "10px",
"title": ""
}}
]"""
_DATAFRAME_SCRIPT_TEMPLATE = """
var dt;
if ($.fn.dataTable.isDataTable("#{table_id}")) {{
dt = $("#{table_id}").dataTable();
}} else if ($("#{table_id}_wrapper").length == 0) {{
dt = $("#{table_id}").dataTable({{
""" + _DATATABLE_INITIALIZATION_CONFIG + """
}});
}} else {{
return;
}}
dt.api()
.clear()
.rows.add({data_as_rows})
.draw('full-hold');"""
_DATAFRAME_PAGINATION_TEMPLATE = _CSS + """
<link rel="stylesheet" href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.min.css">
<table id="{table_id}" class="display"></table>
<script>
{script_in_jquery_with_datatable}
</script>"""
def visualize(
pcoll,
dynamic_plotting_interval=None,
include_window_info=False,
display_facets=False):
"""Visualizes the data of a given PCollection. Optionally enables dynamic
plotting with interval in seconds if the PCollection is being produced by a
running pipeline or the pipeline is streaming indefinitely. The function
always returns immediately and is asynchronous when dynamic plotting is on.
If dynamic plotting is enabled, the visualization is updated continuously until
the pipeline producing the PCollection is in an end state. The visualization
would be anchored to the notebook cell output area. The function
asynchronously returns a handle to the visualization job immediately. The user
could manually do::
# In one notebook cell, enable dynamic plotting every 1 second:
handle = visualize(pcoll, dynamic_plotting_interval=1)
# Visualization anchored to the cell's output area.
# In a different cell:
handle.stop()
# Will stop the dynamic plotting of the above visualization manually.
# Otherwise, dynamic plotting ends when pipeline is not running anymore.
If dynamic_plotting is not enabled (by default), None is returned.
If include_window_info is True, the data will include window information,
which consists of the event timestamps, windows, and pane info.
If display_facets is True, the facets widgets will be rendered. Otherwise, the
facets widgets will not be rendered.
The function is experimental. For internal use only; no
backwards-compatibility guarantees.
"""
if not _pcoll_visualization_ready:
return None
pv = PCollectionVisualization(
pcoll,
include_window_info=include_window_info,
display_facets=display_facets)
if ie.current_env().is_in_notebook:
pv.display()
else:
pv.display_plain_text()
# We don't want to do dynamic plotting if there is no notebook frontend.
return None
if dynamic_plotting_interval:
# Disables the verbose logging from timeloop.
logging.getLogger('timeloop').disabled = True
tl = Timeloop()
def dynamic_plotting(pcoll, pv, tl, include_window_info, display_facets):
@tl.job(interval=timedelta(seconds=dynamic_plotting_interval))
def continuous_update_display(): # pylint: disable=unused-variable
# Always creates a new PCollectionVisualization instance when the
# PCollection materialization is being updated and dynamic
# plotting is in-process.
# PCollectionVisualization created at this level doesn't need dynamic
# plotting interval information when instantiated because it's already
# in dynamic plotting logic.
updated_pv = PCollectionVisualization(
pcoll,
include_window_info=include_window_info,
display_facets=display_facets)
updated_pv.display(updating_pv=pv)
if ie.current_env().is_terminated(pcoll.pipeline):
try:
tl.stop()
except RuntimeError:
# The job can only be stopped once. Ignore excessive stops.
pass
tl.start()
return tl
return dynamic_plotting(pcoll, pv, tl, include_window_info, display_facets)
return None
class PCollectionVisualization(object):
"""A visualization of a PCollection.
The class relies on creating a PipelineInstrument w/o actual instrument to
access current interactive environment for materialized PCollection data at
the moment of self instantiation through cache.
"""
def __init__(self, pcoll, include_window_info=False, display_facets=False):
assert _pcoll_visualization_ready, (
'Dependencies for PCollection visualization are not available. Please '
'use `pip install apache-beam[interactive]` to install necessary '
'dependencies and make sure that you are executing code in an '
'interactive environment such as a Jupyter notebook.')
assert isinstance(
pcoll,
pvalue.PCollection), ('pcoll should be apache_beam.pvalue.PCollection')
self._pcoll = pcoll
# This allows us to access cache key and other meta data about the pipeline
# whether it's the pipeline defined in user code or a copy of that pipeline.
# Thus, this module doesn't need any other user input but the PCollection
# variable to be visualized. It then automatically figures out the pipeline
# definition, materialized data and the pipeline result for the execution
# even if the user never assigned or waited on the result explicitly.
# With only the constructor of PipelineInstrument, no interactivity-related
# pre-processing or instrumentation is triggered, for performance reasons.
self._pin = instr.PipelineInstrument(pcoll.pipeline)
# Variable name as the title for element value in the rendered data table.
self._pcoll_var = self._pin.cacheable_var_by_pcoll_id(
self._pin.pcolls_to_pcoll_id.get(str(pcoll), None))
if not self._pcoll_var:
self._pcoll_var = 'Value'
self._cache_key = self._pin.cache_key(self._pcoll)
obfuscated_id = obfuscate(self._cache_key, id(self))
self._dive_display_id = 'facets_dive_{}'.format(obfuscated_id)
self._overview_display_id = 'facets_overview_{}'.format(obfuscated_id)
self._df_display_id = 'df_{}'.format(obfuscated_id)
self._include_window_info = include_window_info
self._display_facets = display_facets
self._is_datatable_empty = True
def display_plain_text(self):
"""Displays a head sample of the normalized PCollection data.
This function is used when the ipython kernel is not connected to a
notebook frontend, such as when running ipython in a terminal or in unit tests.
It's a visualization in a terminal-like UI, not a function to retrieve data
for programmatic usage.
"""
# Double check if the dependency is ready in case someone mistakenly uses
# the function.
if _pcoll_visualization_ready:
data = self._to_dataframe()
# Displays a data-table with at most 25 entries from the head.
data_sample = data.head(25)
display(data_sample)
def display(self, updating_pv=None):
"""Displays the visualization through IPython.
Args:
updating_pv: A PCollectionVisualization object. When provided, the
display_id of each visualization part will inherit from the initial
display of updating_pv and only update that visualization web element
instead of creating new ones.
The visualization has 3 parts: facets-dive, facets-overview and paginated
data table. Each part is assigned an auto-generated unique display id
(the uniqueness is guaranteed throughout the lifespan of the PCollection
variable).
"""
# Ensures that dive, overview and table render the same data because the
# materialized PCollection data might be updated continuously.
data = self._to_dataframe()
# Prefix integer column names with the PCollection variable name when visualizing.
data.columns = [
self._pcoll_var + '.' +
str(column) if isinstance(column, int) else column
for column in data.columns
]
# String-ify the dictionaries for display because elements of type dict
# cannot be ordered.
data = data.applymap(lambda x: str(x) if isinstance(x, dict) else x)
if updating_pv:
# Only updates when data is not empty. Otherwise, consider it a bad
# iteration and noop since there is nothing to be updated.
if data.empty:
_LOGGER.debug('Skip a visualization update due to empty data.')
else:
self._display_dataframe(data.copy(deep=True), updating_pv)
if self._display_facets:
self._display_dive(data.copy(deep=True), updating_pv)
self._display_overview(data.copy(deep=True), updating_pv)
else:
self._display_dataframe(data.copy(deep=True))
if self._display_facets:
self._display_dive(data.copy(deep=True))
self._display_overview(data.copy(deep=True))
def _display_dive(self, data, update=None):
sprite_size = 32 if len(data.index) > 50000 else 64
format_window_info_in_dataframe(data)
jsonstr = data.to_json(orient='records', default_handler=str)
if update:
script = _DIVE_SCRIPT_TEMPLATE.format(
display_id=update._dive_display_id, jsonstr=jsonstr)
display_javascript(Javascript(script))
else:
html = _DIVE_HTML_TEMPLATE.format(
display_id=self._dive_display_id,
jsonstr=jsonstr,
sprite_size=sprite_size)
display(HTML(html))
def _display_overview(self, data, update=None):
if (not data.empty and self._include_window_info and
all(column in data.columns
for column in ('event_time', 'windows', 'pane_info'))):
data = data.drop(['event_time', 'windows', 'pane_info'], axis=1)
# GFSG expects all column names to be strings.
data.columns = data.columns.astype(str)
gfsg = GenericFeatureStatisticsGenerator()
proto = gfsg.ProtoFromDataFrames([{'name': 'data', 'table': data}])
protostr = base64.b64encode(proto.SerializeToString()).decode('utf-8')
if update:
script = _OVERVIEW_SCRIPT_TEMPLATE.format(
display_id=update._overview_display_id, protostr=protostr)
display_javascript(Javascript(script))
else:
html = _OVERVIEW_HTML_TEMPLATE.format(
display_id=self._overview_display_id, protostr=protostr)
display(HTML(html))
def _display_dataframe(self, data, update=None):
table_id = 'table_{}'.format(
update._df_display_id if update else self._df_display_id)
columns = [{
'title': ''
}] + [{
'title': str(column)
} for column in data.columns]
format_window_info_in_dataframe(data)
# Convert the dataframe into rows, each row looks like
# [column_1_val, column_2_val, ...].
rows = data.applymap(lambda x: str(x)).to_dict('split')['data']
# Convert each row into dict where keys are column index in the datatable
# to be rendered and values are data from the dataframe. Column index 0 is
# left out to hold the int index (not part of the data) from dataframe.
# Each row becomes: {1: column_1_val, 2: column_2_val, ...}.
rows = [{k + 1: v for k, v in enumerate(row)} for row in rows]
# Add the dataframe int index (used as default ordering column) to datatable
# column index 0 (will be rendered as the first column).
# Each row becomes:
# {1: column_1_val, 2: column_2_val, ..., 0: int_index_in_dataframe}.
for k, row in enumerate(rows):
row[0] = k
script = _DATAFRAME_SCRIPT_TEMPLATE.format(
table_id=table_id, columns=columns, data_as_rows=rows)
script_in_jquery_with_datatable = ie._JQUERY_WITH_DATATABLE_TEMPLATE.format(
customized_script=script)
# Dynamically load data into the existing datatable if not empty.
if update and not update._is_datatable_empty:
display_javascript(Javascript(script_in_jquery_with_datatable))
else:
html = _DATAFRAME_PAGINATION_TEMPLATE.format(
table_id=table_id,
script_in_jquery_with_datatable=script_in_jquery_with_datatable)
if update:
if not data.empty:
# Re-initialize a datatable to replace the existing empty datatable.
update_display(HTML(html), display_id=update._df_display_id)
update._is_datatable_empty = False
else:
# Initialize a datatable for the first time rendering.
display(HTML(html), display_id=self._df_display_id)
if not data.empty:
self._is_datatable_empty = False
def _to_dataframe(self):
results = []
cache_manager = ie.current_env().cache_manager()
if cache_manager.exists('full', self._cache_key):
coder = cache_manager.load_pcoder('full', self._cache_key)
reader, _ = cache_manager.read('full', self._cache_key)
results = list(to_element_list(reader, coder, include_window_info=True))
return elements_to_df(results, self._include_window_info)
def format_window_info_in_dataframe(data):
if 'event_time' in data.columns:
data['event_time'] = data['event_time'].apply(event_time_formatter)
if 'windows' in data.columns:
data['windows'] = data['windows'].apply(windows_formatter)
if 'pane_info' in data.columns:
data['pane_info'] = data['pane_info'].apply(pane_info_formatter)
def event_time_formatter(event_time_us):
options = ie.current_env().options
to_tz = options.display_timezone
try:
return (
datetime.datetime.utcfromtimestamp(event_time_us / 1000000).replace(
tzinfo=tz.tzutc()).astimezone(to_tz).strftime(
options.display_timestamp_format))
except ValueError:
if event_time_us < 0:
return 'Min Timestamp'
return 'Max Timestamp'
def windows_formatter(windows):
result = []
for w in windows:
if isinstance(w, GlobalWindow):
result.append(str(w))
elif isinstance(w, IntervalWindow):
# First get the duration in terms of hours, minutes, seconds, and
# micros.
duration = w.end.micros - w.start.micros
duration_secs = duration // 1000000
hours, remainder = divmod(duration_secs, 3600)
minutes, seconds = divmod(remainder, 60)
micros = (duration - duration_secs * 1000000) % 1000000
# Construct the duration string. Try and write the string in such a
# way that minimizes the amount of characters written.
duration = ''
if hours:
duration += '{}h '.format(hours)
if minutes or (hours and seconds):
duration += '{}m '.format(minutes)
if seconds:
if micros:
duration += '{}.{:06}s'.format(seconds, micros)
else:
duration += '{}s'.format(seconds)
start = event_time_formatter(w.start.micros)
result.append('{} ({})'.format(start, duration))
return ','.join(result)
def pane_info_formatter(pane_info):
from apache_beam.utils.windowed_value import PaneInfo
from apache_beam.utils.windowed_value import PaneInfoTiming
assert isinstance(pane_info, PaneInfo)
result = 'Pane {}'.format(pane_info.index)
timing_info = '{}{}'.format(
'Final ' if pane_info.is_last else '',
PaneInfoTiming.to_string(pane_info.timing).lower().capitalize() if
pane_info.timing in (PaneInfoTiming.EARLY, PaneInfoTiming.LATE) else '')
if timing_info:
result += ': ' + timing_info
return result
add display:block to datatable so that the columns can have varying widths based on their contents.
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module visualizes PCollection data.
For internal use only; no backwards-compatibility guarantees.
Only works with Python 3.5+.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import base64
import datetime
import logging
from datetime import timedelta
from dateutil import tz
from apache_beam import pvalue
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import pipeline_instrument as instr
from apache_beam.runners.interactive.utils import elements_to_df
from apache_beam.runners.interactive.utils import obfuscate
from apache_beam.runners.interactive.utils import to_element_list
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import IntervalWindow
try:
from IPython import get_ipython # pylint: disable=import-error
from IPython.core.display import HTML # pylint: disable=import-error
from IPython.core.display import Javascript # pylint: disable=import-error
from IPython.core.display import display # pylint: disable=import-error
from IPython.core.display import display_javascript # pylint: disable=import-error
from IPython.core.display import update_display # pylint: disable=import-error
from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator # pylint: disable=import-error
from timeloop import Timeloop # pylint: disable=import-error
if get_ipython():
_pcoll_visualization_ready = True
else:
_pcoll_visualization_ready = False
except ImportError:
_pcoll_visualization_ready = False
_LOGGER = logging.getLogger(__name__)
_CSS = """
<style>
.p-Widget.jp-OutputPrompt.jp-OutputArea-prompt:empty {{
padding: 0;
border: 0;
}}
.p-Widget.jp-RenderedJavaScript.jp-mod-trusted.jp-OutputArea-output:empty {{
padding: 0;
border: 0;
}}
</style>"""
_DIVE_SCRIPT_TEMPLATE = """
try {{
document.querySelector("#{display_id}").data = {jsonstr};
}} catch (e) {{
// NOOP when the user has cleared the output from the notebook.
}}"""
_DIVE_HTML_TEMPLATE = _CSS + """
<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"></script>
<link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/1.0.0/facets-dist/facets-jupyter.html">
<facets-dive sprite-image-width="{sprite_size}" sprite-image-height="{sprite_size}" id="{display_id}" height="600"></facets-dive>
<script>
document.querySelector("#{display_id}").data = {jsonstr};
</script>"""
_OVERVIEW_SCRIPT_TEMPLATE = """
try {{
document.querySelector("#{display_id}").protoInput = "{protostr}";
}} catch (e) {{
// NOOP when the user has cleared the output from the notebook.
}}"""
_OVERVIEW_HTML_TEMPLATE = _CSS + """
<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"></script>
<link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/1.0.0/facets-dist/facets-jupyter.html">
<facets-overview id="{display_id}"></facets-overview>
<script>
document.querySelector("#{display_id}").protoInput = "{protostr}";
</script>"""
_DATATABLE_INITIALIZATION_CONFIG = """
bAutoWidth: false,
columns: {columns},
destroy: true,
responsive: true,
columnDefs: [
{{
targets: "_all",
className: "dt-left"
}},
{{
"targets": 0,
"width": "10px",
"title": ""
}}
]"""
_DATAFRAME_SCRIPT_TEMPLATE = """
var dt;
if ($.fn.dataTable.isDataTable("#{table_id}")) {{
dt = $("#{table_id}").dataTable();
}} else if ($("#{table_id}_wrapper").length == 0) {{
dt = $("#{table_id}").dataTable({{
""" + _DATATABLE_INITIALIZATION_CONFIG + """
}});
}} else {{
return;
}}
dt.api()
.clear()
.rows.add({data_as_rows})
.draw('full-hold');"""
_DATAFRAME_PAGINATION_TEMPLATE = _CSS + """
<link rel="stylesheet" href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.min.css">
<table id="{table_id}" class="display" style="display:block"></table>
<script>
{script_in_jquery_with_datatable}
</script>"""
def visualize(
pcoll,
dynamic_plotting_interval=None,
include_window_info=False,
display_facets=False):
"""Visualizes the data of a given PCollection. Optionally enables dynamic
plotting with interval in seconds if the PCollection is being produced by a
running pipeline or the pipeline is streaming indefinitely. The function
always returns immediately and is asynchronous when dynamic plotting is on.
If dynamic plotting is enabled, the visualization is updated continuously until
the pipeline producing the PCollection is in an end state. The visualization
would be anchored to the notebook cell output area. The function
asynchronously returns a handle to the visualization job immediately. The user
could manually do::
# In one notebook cell, enable dynamic plotting every 1 second:
handle = visualize(pcoll, dynamic_plotting_interval=1)
# Visualization anchored to the cell's output area.
# In a different cell:
handle.stop()
# Will stop the dynamic plotting of the above visualization manually.
# Otherwise, dynamic plotting ends when pipeline is not running anymore.
If dynamic_plotting is not enabled (by default), None is returned.
If include_window_info is True, the data will include window information,
which consists of the event timestamps, windows, and pane info.
If display_facets is True, the facets widgets will be rendered. Otherwise, the
facets widgets will not be rendered.
The function is experimental. For internal use only; no
backwards-compatibility guarantees.
"""
if not _pcoll_visualization_ready:
return None
pv = PCollectionVisualization(
pcoll,
include_window_info=include_window_info,
display_facets=display_facets)
if ie.current_env().is_in_notebook:
pv.display()
else:
pv.display_plain_text()
# We don't want to do dynamic plotting if there is no notebook frontend.
return None
if dynamic_plotting_interval:
# Disables the verbose logging from timeloop.
logging.getLogger('timeloop').disabled = True
tl = Timeloop()
def dynamic_plotting(pcoll, pv, tl, include_window_info, display_facets):
@tl.job(interval=timedelta(seconds=dynamic_plotting_interval))
def continuous_update_display(): # pylint: disable=unused-variable
# Always creates a new PCollectionVisualization instance when the
# PCollection materialization is being updated and dynamic
# plotting is in-process.
# PCollectionVisualization created at this level doesn't need dynamic
# plotting interval information when instantiated because it's already
# in dynamic plotting logic.
updated_pv = PCollectionVisualization(
pcoll,
include_window_info=include_window_info,
display_facets=display_facets)
updated_pv.display(updating_pv=pv)
if ie.current_env().is_terminated(pcoll.pipeline):
try:
tl.stop()
except RuntimeError:
# The job can only be stopped once. Ignore excessive stops.
pass
tl.start()
return tl
return dynamic_plotting(pcoll, pv, tl, include_window_info, display_facets)
return None
class PCollectionVisualization(object):
"""A visualization of a PCollection.
The class relies on creating a PipelineInstrument w/o actual instrument to
access current interactive environment for materialized PCollection data at
the moment of self instantiation through cache.
"""
def __init__(self, pcoll, include_window_info=False, display_facets=False):
assert _pcoll_visualization_ready, (
'Dependencies for PCollection visualization are not available. Please '
'use `pip install apache-beam[interactive]` to install necessary '
'dependencies and make sure that you are executing code in an '
'interactive environment such as a Jupyter notebook.')
assert isinstance(
pcoll,
pvalue.PCollection), ('pcoll should be apache_beam.pvalue.PCollection')
self._pcoll = pcoll
# This allows us to access cache key and other meta data about the pipeline
# whether it's the pipeline defined in user code or a copy of that pipeline.
# Thus, this module doesn't need any other user input but the PCollection
# variable to be visualized. It then automatically figures out the pipeline
# definition, materialized data and the pipeline result for the execution
# even if the user never assigned or waited on the result explicitly.
# With only the constructor of PipelineInstrument, no interactivity-related
# pre-processing or instrumentation is triggered, for performance reasons.
self._pin = instr.PipelineInstrument(pcoll.pipeline)
# Variable name as the title for element value in the rendered data table.
self._pcoll_var = self._pin.cacheable_var_by_pcoll_id(
self._pin.pcolls_to_pcoll_id.get(str(pcoll), None))
if not self._pcoll_var:
self._pcoll_var = 'Value'
self._cache_key = self._pin.cache_key(self._pcoll)
obfuscated_id = obfuscate(self._cache_key, id(self))
self._dive_display_id = 'facets_dive_{}'.format(obfuscated_id)
self._overview_display_id = 'facets_overview_{}'.format(obfuscated_id)
self._df_display_id = 'df_{}'.format(obfuscated_id)
self._include_window_info = include_window_info
self._display_facets = display_facets
self._is_datatable_empty = True
def display_plain_text(self):
"""Displays a head sample of the normalized PCollection data.
This function is used when the ipython kernel is not connected to a
notebook frontend, such as when running ipython in a terminal or in unit tests.
It's a visualization in a terminal-like UI, not a function to retrieve data
for programmatic usage.
"""
# Double check if the dependency is ready in case someone mistakenly uses
# the function.
if _pcoll_visualization_ready:
data = self._to_dataframe()
# Displays a data-table with at most 25 entries from the head.
data_sample = data.head(25)
display(data_sample)
def display(self, updating_pv=None):
"""Displays the visualization through IPython.
Args:
updating_pv: A PCollectionVisualization object. When provided, the
display_id of each visualization part will inherit from the initial
display of updating_pv and only update that visualization web element
instead of creating new ones.
The visualization has 3 parts: facets-dive, facets-overview and paginated
data table. Each part is assigned an auto-generated unique display id
(the uniqueness is guaranteed throughout the lifespan of the PCollection
variable).
"""
# Ensures that dive, overview and table render the same data because the
# materialized PCollection data might be updated continuously.
data = self._to_dataframe()
# Prefix integer column names with the PCollection variable name when visualizing.
data.columns = [
self._pcoll_var + '.' +
str(column) if isinstance(column, int) else column
for column in data.columns
]
# String-ify the dictionaries for display because elements of type dict
# cannot be ordered.
data = data.applymap(lambda x: str(x) if isinstance(x, dict) else x)
if updating_pv:
# Only updates when data is not empty. Otherwise, consider it a bad
# iteration and noop since there is nothing to be updated.
if data.empty:
_LOGGER.debug('Skip a visualization update due to empty data.')
else:
self._display_dataframe(data.copy(deep=True), updating_pv)
if self._display_facets:
self._display_dive(data.copy(deep=True), updating_pv)
self._display_overview(data.copy(deep=True), updating_pv)
else:
self._display_dataframe(data.copy(deep=True))
if self._display_facets:
self._display_dive(data.copy(deep=True))
self._display_overview(data.copy(deep=True))
def _display_dive(self, data, update=None):
sprite_size = 32 if len(data.index) > 50000 else 64
format_window_info_in_dataframe(data)
jsonstr = data.to_json(orient='records', default_handler=str)
if update:
script = _DIVE_SCRIPT_TEMPLATE.format(
display_id=update._dive_display_id, jsonstr=jsonstr)
display_javascript(Javascript(script))
else:
html = _DIVE_HTML_TEMPLATE.format(
display_id=self._dive_display_id,
jsonstr=jsonstr,
sprite_size=sprite_size)
display(HTML(html))
def _display_overview(self, data, update=None):
if (not data.empty and self._include_window_info and
all(column in data.columns
for column in ('event_time', 'windows', 'pane_info'))):
data = data.drop(['event_time', 'windows', 'pane_info'], axis=1)
# GFSG expects all column names to be strings.
data.columns = data.columns.astype(str)
gfsg = GenericFeatureStatisticsGenerator()
proto = gfsg.ProtoFromDataFrames([{'name': 'data', 'table': data}])
protostr = base64.b64encode(proto.SerializeToString()).decode('utf-8')
if update:
script = _OVERVIEW_SCRIPT_TEMPLATE.format(
display_id=update._overview_display_id, protostr=protostr)
display_javascript(Javascript(script))
else:
html = _OVERVIEW_HTML_TEMPLATE.format(
display_id=self._overview_display_id, protostr=protostr)
display(HTML(html))
def _display_dataframe(self, data, update=None):
table_id = 'table_{}'.format(
update._df_display_id if update else self._df_display_id)
columns = [{
'title': ''
}] + [{
'title': str(column)
} for column in data.columns]
format_window_info_in_dataframe(data)
# Convert the dataframe into rows, each row looks like
# [column_1_val, column_2_val, ...].
rows = data.applymap(lambda x: str(x)).to_dict('split')['data']
# Convert each row into dict where keys are column index in the datatable
# to be rendered and values are data from the dataframe. Column index 0 is
# left out to hold the int index (not part of the data) from dataframe.
# Each row becomes: {1: column_1_val, 2: column_2_val, ...}.
rows = [{k + 1: v for k, v in enumerate(row)} for row in rows]
# Add the dataframe int index (used as default ordering column) to datatable
# column index 0 (will be rendered as the first column).
# Each row becomes:
# {1: column_1_val, 2: column_2_val, ..., 0: int_index_in_dataframe}.
for k, row in enumerate(rows):
row[0] = k
script = _DATAFRAME_SCRIPT_TEMPLATE.format(
table_id=table_id, columns=columns, data_as_rows=rows)
script_in_jquery_with_datatable = ie._JQUERY_WITH_DATATABLE_TEMPLATE.format(
customized_script=script)
# Dynamically load data into the existing datatable if not empty.
if update and not update._is_datatable_empty:
display_javascript(Javascript(script_in_jquery_with_datatable))
else:
html = _DATAFRAME_PAGINATION_TEMPLATE.format(
table_id=table_id,
script_in_jquery_with_datatable=script_in_jquery_with_datatable)
if update:
if not data.empty:
# Re-initialize a datatable to replace the existing empty datatable.
update_display(HTML(html), display_id=update._df_display_id)
update._is_datatable_empty = False
else:
# Initialize a datatable for the first time rendering.
display(HTML(html), display_id=self._df_display_id)
if not data.empty:
self._is_datatable_empty = False
def _to_dataframe(self):
results = []
cache_manager = ie.current_env().cache_manager()
if cache_manager.exists('full', self._cache_key):
coder = cache_manager.load_pcoder('full', self._cache_key)
reader, _ = cache_manager.read('full', self._cache_key)
results = list(to_element_list(reader, coder, include_window_info=True))
return elements_to_df(results, self._include_window_info)
def format_window_info_in_dataframe(data):
if 'event_time' in data.columns:
data['event_time'] = data['event_time'].apply(event_time_formatter)
if 'windows' in data.columns:
data['windows'] = data['windows'].apply(windows_formatter)
if 'pane_info' in data.columns:
data['pane_info'] = data['pane_info'].apply(pane_info_formatter)
def event_time_formatter(event_time_us):
options = ie.current_env().options
to_tz = options.display_timezone
try:
return (
datetime.datetime.utcfromtimestamp(event_time_us / 1000000).replace(
tzinfo=tz.tzutc()).astimezone(to_tz).strftime(
options.display_timestamp_format))
except ValueError:
if event_time_us < 0:
return 'Min Timestamp'
return 'Max Timestamp'
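# Illustrative note (not part of the original module; values below are made up):
# event_time_formatter() takes an event time in microseconds since the Unix epoch and
# renders it in the configured display timezone and timestamp format, e.g.
#   event_time_formatter(1500000000 * 1000000)
#   # -> '2017-07-14 02:40:00' (exact text depends on options.display_timestamp_format)
# Timestamps outside the representable datetime range fall back to the strings
# 'Min Timestamp' (negative values) or 'Max Timestamp'.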
def windows_formatter(windows):
result = []
for w in windows:
if isinstance(w, GlobalWindow):
result.append(str(w))
elif isinstance(w, IntervalWindow):
# First get the duration in terms of hours, minutes, seconds, and
# micros.
duration = w.end.micros - w.start.micros
duration_secs = duration // 1000000
hours, remainder = divmod(duration_secs, 3600)
minutes, seconds = divmod(remainder, 60)
micros = (duration - duration_secs * 1000000) % 1000000
# Construct the duration string. Try and write the string in such a
# way that minimizes the amount of characters written.
duration = ''
if hours:
duration += '{}h '.format(hours)
if minutes or (hours and seconds):
duration += '{}m '.format(minutes)
if seconds:
if micros:
duration += '{}.{:06}s'.format(seconds, micros)
else:
duration += '{}s'.format(seconds)
start = event_time_formatter(w.start.micros)
result.append('{} ({})'.format(start, duration))
return ','.join(result)
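# Illustrative note (not part of the original module; values below are made up):
# for an IntervalWindow the formatter emits '<formatted start> (<duration>)', where the
# duration string drops zero-valued parts. A hypothetical window spanning 3661 seconds
# (and no leftover micros) would be rendered roughly as
#   '2017-07-14 02:40:00 (1h 1m 1s)'
# while a GlobalWindow is rendered via str(w).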
def pane_info_formatter(pane_info):
from apache_beam.utils.windowed_value import PaneInfo
from apache_beam.utils.windowed_value import PaneInfoTiming
assert isinstance(pane_info, PaneInfo)
result = 'Pane {}'.format(pane_info.index)
timing_info = '{}{}'.format(
'Final ' if pane_info.is_last else '',
PaneInfoTiming.to_string(pane_info.timing).lower().capitalize() if
pane_info.timing in (PaneInfoTiming.EARLY, PaneInfoTiming.LATE) else '')
if timing_info:
result += ': ' + timing_info
return result
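# Illustrative note (not part of the original module): pane_info_formatter() labels a
# pane by its index plus optional timing information, so typical outputs look like
#   'Pane 0'               (on-time, non-final pane)
#   'Pane 1: Early'        (early firing)
#   'Pane 3: Final Late'   (last pane of the window, fired late)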
#!/usr/bin/env python
'''A library and a command line tool to interact with the LOCKSS daemon status
service via its Web Services API.'''
__copyright__ = '''\
Copyright (c) 2000-2016 Board of Trustees of Leland Stanford Jr. University,
all rights reserved.
'''
__license__ = '''\
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Stanford University shall not
be used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from Stanford University.
'''
__version__ = '0.6.0'
import getpass
import itertools
from multiprocessing.dummy import Pool as ThreadPool
import optparse
import os.path
import sys
from threading import Thread
try: import ZSI
except ImportError: sys.exit('The Python ZSI module must be installed (or on the PYTHONPATH)')
import DaemonStatusServiceImplService_client
from wsutil import datems, datetimems, durationms, zsiauth
#
# Library
#
def get_au_status(host, auth, auid):
'''Performs a getAuStatus operation on the given host for the given AUID, and
returns a record with these fields (or None if a ZSI.FaultException whose message
starts with 'No Archival Unit with provided identifier' is raised):
- AccessType (string)
- AvailableFromPublisher (boolean)
- ContentSize (numeric)
- CrawlPool (string)
- CrawlProxy (string)
- CrawlWindow (string)
- CreationTime (numeric)
- CurrentlyCrawling (boolean)
- CurrentlyPolling (boolean)
- DiskUsage (numeric)
- JournalTitle (string)
- LastCompletedCrawl (numeric)
- LastCompletedPoll (numeric)
- LastCrawl (numeric)
- LastCrawlResult (string)
- LastPoll (numeric)
- LastPollResult (string)
- PluginName (string)
- Provider (string)
- Publisher (string)
- PublishingPlatform (string)
- RecentPollAgreement (floating point)
- Repository (string)
- Status (string)
- SubscriptionStatus (string)
- SubstanceState (string)
- Volume (string) (the AU name)
- Year (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
'''
req = DaemonStatusServiceImplService_client.getAuStatus()
req.AuId = auid
try:
ret = _ws_port(host, auth).getAuStatus(req)
return ret.Return
except ZSI.FaultException as e:
if str(e).startswith('No Archival Unit with provided identifier'):
return None
raise
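# Hedged usage sketch (not part of the original library); the host, credentials and
# AUID below are placeholders:
#   auth = zsiauth('ui-username', 'ui-password')
#   st = get_au_status('daemon.example.org:8081', auth, 'org|lockss|plugin|SomePlugin&...')
#   if st is not None: print st.Status, st.ContentSize, datetimems(st.CreationTime)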
def get_au_urls(host, auth, auid, prefix=None):
'''Performs a getAuUrls operation on the given host for the given AUID and
returns a list of URLs (strings) in the AU. If the optional prefix argument is
given, limits the results to URLs with that prefix (including the URL itself).
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
- prefix (string): a URL prefix (default: None)
'''
req = DaemonStatusServiceImplService_client.getAuUrls()
req.AuId = auid
if prefix is not None: req.url = prefix
return _ws_port(host, auth).getAuUrls(req).Return
def get_au_type_urls(host, auth, auid, type):
'''Performs a queryAus operation on the given host for the given AUID and
selects only the URL list of the given type (articleUrls or substanceUrls) for the AU.
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
- type (string): one of articleUrls or substanceUrls
'''
res = query_aus(host, auth, type, 'auId = "%s"' % (auid,))
if len(res) == 0: return None
else:
if type == 'articleUrls':
return res[0].ArticleUrls
else: return res[0].SubstanceUrls
def get_auids(host, auth):
'''Performs a getAuIds operation on the given host, which produces a
sequence of all AUIDs together with their AU names, and returns a list of records with
these fields:
- Id (string)
- Name (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
'''
req = DaemonStatusServiceImplService_client.getAuIds()
return _ws_port(host, auth).getAuIds(req).Return
def get_peer_agreements(host, auth, auid):
'''Convenience call to query_aus() that returns the PeerAgreements list for
the given AUID (or None if there is no such AUID). The PeerAgreements list is
a list of records with these fields:
- Agreements, a record with these fields:
- Entry, a list of records with these fields:
- Key, a string among:
- "POR"
- "POP"
- "SYMMETRIC_POR"
- "SYMMETRIC_POP"
- "POR_HINT"
- "POP_HINT"
- "SYMMETRIC_POR_HINT"
- "SYMMETRIC_POP_HINT"
- "W_POR"
- "W_POP"
- "W_SYMMETRIC_POR"
- "W_SYMMETRIC_POP"
- "W_POR_HINT"
- "W_POP_HINT"
- "W_SYMMETRIC_POR_HINT"
- "W_SYMMETRIC_POP_HINT"
- Value, a record with these fields:
- HighestPercentAgreement (floating point)
- HighestPercentAgreementTimestamp (numeric)
- PercentAgreement (floating point)
- PercentAgreementTimestamp (numeric)
- PeerId (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
'''
res = query_aus(host, auth, 'peerAgreements', 'auId = "%s"' % (auid,))
if len(res) == 0: return None
else: return res[0].PeerAgreements
def get_platform_configuration(host, auth):
'''Performs a getPlatformConfiguration operation on the given host and returns
a record with these fields:
- AdminEmail (string)
- BuildHost (string)
- BuildTimestamp (numeric)
- CurrentTime (numeric)
- CurrentWorkingDirectory (string)
- DaemonVersion, a record with these fields:
- BuildVersion (numeric)
- FullVersion (string)
- MajorVersion (numeric)
- MinorVersion (numeric)
- Disks (list of strings)
- Groups (list of strings)
- HostName (string)
- IpAddress (string)
- JavaVersion, a record with these fields:
- RuntimeName (string)
- RuntimeVersion (string)
- SpecificationVersion (string)
- Version (string)
- MailRelay (string)
- Platform, a record with these fields:
- Name (string)
- Suffix (string)
- Version (string)
- Project (string)
- Properties (list of strings)
- Uptime (numeric)
- V3Identity (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
'''
req = DaemonStatusServiceImplService_client.getPlatformConfiguration()
return _ws_port(host, auth).getPlatformConfiguration(req).Return
def is_daemon_ready(host, auth):
'''Performs an isDaemonReady operation on the given host and returns True or
False.
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
'''
req = DaemonStatusServiceImplService_client.isDaemonReady()
return _ws_port(host, auth).isDaemonReady(req).Return
def query_aus(host, auth, select, where=None):
'''Performs a queryAus operation on the given host, using the given field
names to build a SELECT clause, optionally using the given string to build a
WHERE clause, and returns a list of records with these fields (populated or
not depending on the SELECT clause):
- AccessType (string)
- ArticleUrls (list of strings)
- AuConfiguration, a record with these fields:
- DefParams, a list of records with these fields:
- Key (string)
- Value (string)
- NonDefParams, a list of records with these fields:
- Key (string)
- Value (string)
- AuId (string)
- AvailableFromPublisher (boolean)
- ContentSize (numeric)
- CrawlPool (string)
- CrawlProxy (string)
- CrawlWindow (string)
- CreationTime (numeric)
- CurrentlyCrawling (boolean)
- CurrentlyPolling (boolean)
- DiskUsage (numeric)
- HighestPollAgreement (numeric)
- IsBulkContent (boolean)
- JournalTitle (string)
- LastCompletedCrawl (numeric)
- LastCompletedPoll (numeric)
- LastCrawl (numeric)
- LastCrawlResult (string)
- LastPoll (numeric)
- LastPollResult (string)
- Name (string)
- NewContentCrawlUrls (list of strings)
- PeerAgreements, a list of records with these fields:
- Agreements, a record with these fields:
- Entry, a list of records with these fields:
- Key, a string among:
- "POR"
- "POP"
- "SYMMETRIC_POR"
- "SYMMETRIC_POP"
- "POR_HINT"
- "POP_HINT"
- "SYMMETRIC_POR_HINT"
- "SYMMETRIC_POP_HINT"
- "W_POR"
- "W_POP"
- "W_SYMMETRIC_POR"
- "W_SYMMETRIC_POP"
- "W_POR_HINT"
- "W_POP_HINT"
- "W_SYMMETRIC_POR_HINT"
- "W_SYMMETRIC_POP_HINT"
- Value, a record with these fields:
- HighestPercentAgreement (floating point)
- HighestPercentAgreementTimestamp (numeric)
- PercentAgreement (floating point)
- PercentAgreementTimestamp (numeric)
- PeerId (string)
- PluginName (string)
- PublishingPlatform (string)
- RecentPollAgreement (numeric)
- RepositoryPath (string)
- SubscriptionStatus (string)
- SubstanceState (string)
- TdbProvider (string)
- TdbPublisher (string)
- TdbYear (string)
- UrlStems (list of strings)
- Urls, a list of records with these fields:
- CurrentVersionSize (numeric)
- Url (string)
- VersionCount (numeric)
- Volume (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- select (string or list of strings): if a list of strings, the field names to
be used in the SELECT clause; if a string, the single field name to be used in
the SELECT clause
- where (string): optional statement for the WHERE clause (default: None)
Raises:
- ValueError if select is not of the right type
'''
if type(select) is list: query = 'SELECT %s' % (', '.join(select))
elif type(select) is str: query = 'SELECT %s' % (select,)
else: raise ValueError, 'invalid type for select parameter: %s' % (type(select),)
if where is not None: query = '%s WHERE %s' % (query, where)
req = DaemonStatusServiceImplService_client.queryAus()
req.AuQuery = query
return _ws_port(host, auth).queryAus(req).Return
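# Hedged usage sketch (not part of the original library): query_aus() assembles the
# SELECT/WHERE query string itself, so callers only pass field names and an optional
# WHERE expression. The host and AUID below are placeholders:
#   rows = query_aus('daemon.example.org:8081', auth,
#                    ['auId', 'name', 'contentSize'],
#                    where='auId = "some|auid"')
#   for r in rows: print r.AuId, r.Name, r.ContentSize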
def query_crawls(host, auth, select, where=None):
'''Performs a queryCrawls operation on the given host, using the given field
names to build a SELECT clause, optionally using the given string to build a
WHERE clause, and returns a list of records with these fields (populated or
not depending on the SELECT clause):
- AuId (string)
- AuName (string)
- BytesFetchedCount (long)
- CrawlKey (string)
- CrawlStatus (string)
- CrawlType (string)
- Duration (long)
- LinkDepth (int)
- MimeTypeCount (int)
- MimeTypes (list of strings)
- OffSiteUrlsExcludedCount (int)
- PagesExcluded (list of strings)
- PagesExcludedCount (int)
- PagesFetched (list of strings)
- PagesFetchedCount (int)
- PagesNotModified (list of strings)
- PagesNotModifiedCount (int)
- PagesParsed (list of strings)
- PagesParsedCount (int)
- PagesPending (list of strings)
- PagesPendingCount (int)
- PagesWithErrors, a list of records with these fields:
- Message (string)
- Severity (string)
- Url (string)
- PagesWithErrorsCount (int)
- RefetchDepth (int)
- Sources (list of strings)
- StartTime (long)
- StartingUrls (list of strings)
'''
if type(select) is list: query = 'SELECT %s' % (', '.join(select))
elif type(select) is str: query = 'SELECT %s' % (select,)
else: raise ValueError, 'invalid type for select parameter: %s' % (type(select),)
if where is not None: query = '%s WHERE %s' % (query, where)
req = DaemonStatusServiceImplService_client.queryCrawls()
req.CrawlQuery = query
return _ws_port(host, auth).queryCrawls(req).Return
def _ws_port(host, auth, tracefile=None):
url = 'http://%s/ws/DaemonStatusService' % (host,)
locator = DaemonStatusServiceImplService_client.DaemonStatusServiceImplServiceLocator()
if tracefile is None: return locator.getDaemonStatusServiceImplPort(url=url, auth=auth)
else: return locator.getDaemonStatusServiceImplPort(url=url, auth=auth, tracefile=tracefile)
#
# Command line tool
#
class _DaemonStatusServiceOptions(object):
@staticmethod
def make_parser():
usage = '%prog {--host=HOST|--hosts=HFILE}... [OPTIONS]'
parser = optparse.OptionParser(version=__version__, description=__doc__, usage=usage)
# Hosts
group = optparse.OptionGroup(parser, 'Target hosts')
group.add_option('--host', action='append', default=list(), help='add host:port pair to list of target hosts')
group.add_option('--hosts', action='append', default=list(), metavar='HFILE', help='add host:port pairs in HFILE to list of target hosts')
group.add_option('--password', metavar='PASS', help='UI password (default: interactive prompt)')
group.add_option('--username', metavar='USER', help='UI username (default: interactive prompt)')
parser.add_option_group(group)
# AUIDs
group = optparse.OptionGroup(parser, 'Target AUIDs')
group.add_option('--auid', action='append', default=list(), help='add AUID to list of target AUIDs')
group.add_option('--auids', action='append', default=list(), metavar='AFILE', help='add AUIDs in AFILE to list of target AUIDs')
parser.add_option_group(group)
# Daemon operations
group = optparse.OptionGroup(parser, 'Daemon operations')
group.add_option('--get-platform-configuration', action='store_true', help='output platform configuration information for target hosts; narrow down with optional --select list chosen among %s' % (', '.join(sorted(_PLATFORM_CONFIGURATION)),))
group.add_option('--is-daemon-ready', action='store_true', help='output True/False table of ready status of target hosts; always exit with 0')
group.add_option('--is-daemon-ready-quiet', action='store_true', help='output nothing; exit with 0 if all target hosts are ready, 1 otherwise')
parser.add_option_group(group)
# AUID operations
group = optparse.OptionGroup(parser, 'AU operations')
group.add_option('--get-au-status', action='store_true', help='output status information about target AUIDs; narrow down output with optional --select list chosen among %s' % (', '.join(sorted(_AU_STATUS)),))
group.add_option('--get-au-urls', action='store_true', help='output URLs in one AU on one host')
group.add_option('--get-au-article-urls', action='store_true', help='output article URLs in one AU on one host')
group.add_option('--get-au-subst-urls', action='store_true', help='output substance URLs in one AU on one host')
group.add_option('--get-auids', action='store_true', help='output True/False table of all AUIDs (or target AUIDs if specified) present on target hosts')
group.add_option('--get-auids-names', action='store_true', help='output True/False table of all AUIDs (or target AUIDs if specified) and their names present on target hosts')
group.add_option('--get-peer-agreements', action='store_true', help='output peer agreements for one AU on one host')
group.add_option('--query-aus', action='store_true', help='perform AU query (with optional --where clause) with --select list chosen among %s' % (', '.join(sorted(_QUERY_AUS)),))
parser.add_option_group(group)
# Crawl operations
group = optparse.OptionGroup(parser, 'Crawl operations')
group.add_option('--query-crawls', action='store_true', help='perform crawl query (with optional --where clause) with --select list chosen among %s' % (', '.join(sorted(_QUERY_CRAWLS)),))
parser.add_option_group(group)
# Other options
group = optparse.OptionGroup(parser, 'Other options')
group.add_option('--group-by-field', action='store_true', help='group results by field instead of host')
group.add_option('--no-special-output', action='store_true', help='no special output format for a single target host')
group.add_option('--select', metavar='FIELDS', help='comma-separated list of fields for narrower output')
group.add_option('--threads', type='int', help='max parallel jobs allowed (default: no limit)')
group.add_option('--where', help='optional WHERE clause for query operations')
parser.add_option_group(group)
return parser
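# Hedged usage sketch (not part of the original tool); the script name, host and AUID
# below are placeholders for an actual invocation:
#   $ python lockss_daemon_status.py --host=daemon.example.org:8081 \
#       --auid='org|lockss|plugin|SomePlugin&...' \
#       --get-au-status --select=status,contentSize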
def __init__(self, parser, opts, args):
super(_DaemonStatusServiceOptions, self).__init__()
if len(args) > 0: parser.error('extraneous arguments: %s' % (' '.join(args)))
if len(filter(None, [opts.get_au_status, opts.get_au_urls, opts.get_au_article_urls, opts.get_au_subst_urls, opts.get_auids, opts.get_auids_names, opts.get_peer_agreements, opts.get_platform_configuration, opts.is_daemon_ready, opts.is_daemon_ready_quiet, opts.query_aus, opts.query_crawls])) != 1:
parser.error('exactly one of --get-au-status, --get-au-urls, --get-au-article-urls, --get-au-subst-urls, --get-auids, --get-auids-names, --get-peer-agreements, --get-platform-configuration, --is-daemon-ready, --is-daemon-ready-quiet, --query-aus, --query-crawls is required')
if len(opts.auid) + len(opts.auids) > 0 and not any([opts.get_au_status, opts.get_au_urls, opts.get_au_article_urls, opts.get_au_subst_urls, opts.get_auids, opts.get_auids_names, opts.get_peer_agreements]):
parser.error('--auid, --auids can only be applied to --get-au-status, --get-au-urls, --get-au-article-urls, --get-au-subst-urls, --get-auids, --get-auids-names, --get-peer-agreements')
if opts.select and not any([opts.get_au_status, opts.get_platform_configuration, opts.query_aus, opts.query_crawls]):
parser.error('--select can only be applied to --get-au-status, --get-platform-configuration, --query-aus, --query-crawls')
if opts.where and not any([opts.query_aus, opts.query_crawls]):
parser.error('--where can only be applied to --query-aus, --query-crawls')
if opts.group_by_field and not any([opts.get_au_status, opts.query_aus]):
parser.error('--group-by-field can only be applied to --get-au-status, --query-aus')
# hosts
self.hosts = opts.host[:]
for f in opts.hosts: self.hosts.extend(_file_lines(f))
if len(self.hosts) == 0: parser.error('at least one target host is required')
# auids
self.auids = opts.auid[:]
for f in opts.auids: self.auids.extend(_file_lines(f))
# get_auids/get_auids_names/is_daemon_ready/is_daemon_ready_quiet
self.get_auids = opts.get_auids
self.get_auids_names = opts.get_auids_names
self.is_daemon_ready = opts.is_daemon_ready
self.is_daemon_ready_quiet = opts.is_daemon_ready_quiet
# get_platform_configuration/select
self.get_platform_configuration = opts.get_platform_configuration
if self.get_platform_configuration:
self.select = self.__init_select(parser, opts, _PLATFORM_CONFIGURATION)
# get_au_status/select
self.get_au_status = opts.get_au_status
if self.get_au_status:
if len(self.auids) == 0: parser.error('at least one target AUID is required with --get-au-status')
self.select = self.__init_select(parser, opts, _AU_STATUS)
# get_au_urls
self.get_au_urls = opts.get_au_urls
if self.get_au_urls:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --get-au-urls')
if len(self.auids) != 1: parser.error('only one target AUID is allowed with --get-au-urls')
# get article url list
self.get_au_article_urls = opts.get_au_article_urls
if self.get_au_article_urls:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --get-au-article-urls')
if len(self.auids) != 1: parser.error('only one target AUID is allowed with --get-au-article-urls')
# get substance url list
self.get_au_subst_urls = opts.get_au_subst_urls
if self.get_au_subst_urls:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --get-au-subst-urls')
if len(self.auids) != 1: parser.error('only one target AUID is allowed with --get-au-subst-urls')
# get_peer_agreements
self.get_peer_agreements = opts.get_peer_agreements
if self.get_peer_agreements:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --get-peer-agreements')
if len(self.auids) != 1: parser.error('only one target AUID is allowed with --get-peer-agreements')
# query_aus/select/where
self.query_aus = opts.query_aus
if self.query_aus:
self.select = self.__init_select(parser, opts, _QUERY_AUS)
self.where = opts.where
# query_crawls/select/where
self.query_crawls = opts.query_crawls
if self.query_crawls:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --query-crawls')
self.select = self.__init_select(parser, opts, _QUERY_CRAWLS)
self.where = opts.where
# group_by_field/no_special_output
self.group_by_field = opts.group_by_field
self.no_special_output = opts.no_special_output
# threads
self.threads = opts.threads or len(self.hosts)
# auth
u = opts.username or getpass.getpass('UI username: ')
p = opts.password or getpass.getpass('UI password: ')
self.auth = zsiauth(u, p)
def __init_select(self, parser, opts, field_dict):
if opts.select is None: return sorted(field_dict)
fields = [s.strip() for s in opts.select.split(',')]
errfields = filter(lambda f: f not in field_dict, fields)
if len(errfields) == 1: parser.error('unknown field: %s' % (errfields[0],))
if len(errfields) > 1: parser.error('unknown fields: %s' % (', '.join(errfields),))
return fields
# Last modified 2016-08-02
def _output_record(options, lst):
print '\t'.join([x if type(x) is unicode else str(x or '') for x in lst])
# Last modified 2015-08-05
def _output_table(options, data, rowheaders, lstcolkeys):
colkeys = [x for x in itertools.product(*lstcolkeys)]
for j in xrange(len(lstcolkeys)):
if j < len(lstcolkeys) - 1: rowpart = [''] * len(rowheaders)
else: rowpart = rowheaders
_output_record(options, rowpart + [x[j] for x in colkeys])
for rowkey in sorted(set([k[0] for k in data])):
_output_record(options, list(rowkey) + [data.get((rowkey, colkey)) for colkey in colkeys])
# Last modified 2015-08-31
def _file_lines(fstr):
with open(os.path.expanduser(fstr)) as f: ret = filter(lambda y: len(y) > 0, [x.partition('#')[0].strip() for x in f])
if len(ret) == 0: sys.exit('Error: %s contains no meaningful lines' % (fstr,))
return ret
_AU_STATUS = {
'accessType': ('Access type', lambda r: r.AccessType),
'availableFromPublisher': ('Available from publisher', lambda r: r.AvailableFromPublisher),
'contentSize': ('Content size', lambda r: r.ContentSize),
'crawlPool': ('Crawl pool', lambda r: r.CrawlPool),
'crawlProxy': ('Crawl proxy', lambda r: r.CrawlProxy),
'crawlWindow': ('Crawl window', lambda r: r.CrawlWindow),
'creationTime': ('Creation time', lambda r: datetimems(r.CreationTime)),
'currentlyCrawling': ('Currently crawling', lambda r: r.CurrentlyCrawling),
'currentlyPolling': ('Currently polling', lambda r: r.CurrentlyPolling),
'diskUsage': ('Disk usage', lambda r: r.DiskUsage),
'journalTitle': ('Journal title', lambda r: r.JournalTitle),
'lastCompletedCrawl': ('Last completed crawl', lambda r: datetimems(r.LastCompletedCrawl)),
'lastCompletedPoll': ('Last completed poll', lambda r: datetimems(r.LastCompletedPoll)),
'lastCrawl': ('Last crawl', lambda r: datetimems(r.LastCrawl)),
'lastCrawlResult': ('Last crawl result', lambda r: r.LastCrawlResult),
'lastPoll': ('Last poll', lambda r: datetimems(r.LastPoll)),
'lastPollResult': ('Last poll result', lambda r: r.LastPollResult),
'pluginName': ('Plugin name', lambda r: r.PluginName),
'provider': ('Provider', lambda r: r.Provider),
'publisher': ('Publisher', lambda r: r.Publisher),
'publishingPlatform': ('Publishing platform', lambda r: r.PublishingPlatform),
'recentPollAgreement': ('Recent poll agreement', lambda r: r.RecentPollAgreement),
'repository': ('Repository', lambda r: r.Repository),
'status': ('Status', lambda r: r.Status),
'subscriptionStatus': ('Subscription status', lambda r: r.SubscriptionStatus),
'substanceState': ('Substance state', lambda r: r.SubstanceState),
'volume': ('Volume name', lambda r: r.Volume),
'year': ('Year', lambda r: r.Year)
}
def _do_get_au_status(options):
headlamb = [_AU_STATUS[x] for x in options.select]
data = dict()
for host, auid, result in ThreadPool(options.threads).imap_unordered( \
lambda _tup: (_tup[1], _tup[0], get_au_status(_tup[1], options.auth, _tup[0])), \
itertools.product(options.auids, options.hosts)):
if result is not None:
for head, lamb in headlamb:
if options.group_by_field: colkey = (head, host)
else: colkey = (host, head)
data[((auid,), colkey)] = lamb(result)
_output_table(options, data, ['AUID'], [[x[0] for x in headlamb], sorted(options.hosts)] if options.group_by_field else [sorted(options.hosts), [x[0] for x in headlamb]])
def _do_get_au_urls(options):
# Single request to a single host: unthreaded
r = get_au_urls(options.hosts[0], options.auth, options.auids[0])
for url in sorted(r): _output_record(options, [url])
def _do_get_au_article_urls(options):
# Single request to a single host: unthreaded
r = get_au_type_urls(options.hosts[0], options.auth, options.auids[0], "articleUrls")
for url in sorted(r): _output_record(options, [url])
def _do_get_au_subst_urls(options):
# Single request to a single host: unthreaded
r = get_au_type_urls(options.hosts[0], options.auth, options.auids[0], "substanceUrls")
for url in sorted(r): _output_record(options, [url])
def _do_get_auids(options):
if len(options.auids) > 0:
targetauids = set(options.auids)
shouldskip = lambda a: a not in targetauids
else: shouldskip = lambda a: False
data = dict()
auids = set()
for host, result in ThreadPool(options.threads).imap_unordered( \
lambda _host: (_host, get_auids(_host, options.auth)), \
options.hosts):
for r in result:
if shouldskip(r.Id): continue
if options.get_auids_names: rowkey = (r.Id, r.Name)
else: rowkey = (r.Id,)
data[(rowkey, (host,))] = True
if r.Id not in auids:
auids.add(r.Id)
for h in options.hosts: data.setdefault((rowkey, (h,)), False)
_output_table(options, data, ['AUID', 'Name'] if options.get_auids_names else ['AUID'], [sorted(options.hosts)])
_PLATFORM_CONFIGURATION = {
'adminEmail': ('Admin e-mail', lambda r: r.AdminEmail),
'buildHost': ('Build host', lambda r: r.BuildHost),
'buildTimestamp': ('Build timestamp', lambda r: datetimems(r.BuildTimestamp)),
'currentTime': ('Current time', lambda r: datetimems(r.CurrentTime)),
'currentWorkingDirectory': ('Current working directory', lambda r: r.CurrentWorkingDirectory),
'daemonBuildVersion': ('Daemon build version', lambda r: r.DaemonVersion.BuildVersion),
'daemonFullVersion': ('Daemon full version', lambda r: r.DaemonVersion.FullVersion),
'daemonMajorVersion': ('Daemon major version', lambda r: r.DaemonVersion.MajorVersion),
'daemonMinorVersion': ('Daemon minor version', lambda r: r.DaemonVersion.MinorVersion),
'disks': ('Disks', lambda r: ', '.join(r.Disks)),
'groups': ('Groups', lambda r: ', '.join(r.Groups)),
'hostName': ('Host name', lambda r: r.HostName),
'ipAddress': ('IP address', lambda r: r.IpAddress),
'javaRuntimeName': ('Java runtime name', lambda r: r.JavaVersion.RuntimeName),
'javaRuntimeVersion': ('Java runtime version', lambda r: r.JavaVersion.RuntimeVersion),
'javaSpecificationVersion': ('Java specification version', lambda r: r.JavaVersion.SpecificationVersion),
'javaVersion': ('Java version', lambda r: r.JavaVersion.Version),
'mailRelay': ('Mail relay', lambda r: r.MailRelay),
'platformName': ('Platform name', lambda r: r.Platform.Name),
'platformSuffix': ('Platform suffix', lambda r: r.Platform.Suffix),
'platformVersion': ('Platform version', lambda r: r.Platform.Version),
'project': ('Project', lambda r: r.Project),
'properties': ('Properties', lambda r: ', '.join(r.Properties)),
'uptime': ('Uptime', lambda r: durationms(r.Uptime)),
'v3Identity': ('V3 identity', lambda r: r.V3Identity)
}
def _do_get_peer_agreements(options):
# Single request to a single host: unthreaded
pa = get_peer_agreements(options.hosts[0], options.auth, options.auids[0])
if pa is None:
print 'No such AUID'
return
for pae in pa:
for ae in pae.Agreements.Entry:
_output_record(options, [pae.PeerId, ae.Key, ae.Value.PercentAgreement, datetimems(ae.Value.PercentAgreementTimestamp), ae.Value.HighestPercentAgreement, datetimems(ae.Value.HighestPercentAgreementTimestamp)])
def _do_get_platform_configuration(options):
headlamb = [_PLATFORM_CONFIGURATION[x] for x in options.select]
data = dict()
for host, result in ThreadPool(options.threads).imap_unordered( \
lambda _host: (_host, get_platform_configuration(_host, options.auth)), \
options.hosts):
for head, lamb in headlamb:
data[((head,), (host,))] = lamb(result)
_output_table(options, data, [''], [sorted(options.hosts)])
def _do_is_daemon_ready(options):
data = dict()
for host, result in ThreadPool(options.threads).imap_unordered( \
lambda _host: (_host, is_daemon_ready(_host, options.auth)), \
options.hosts):
if options.is_daemon_ready_quiet and result is False: sys.exit(1)
else: data[((host,), ('Daemon is ready',))] = result
if options.is_daemon_ready_quiet: pass
else: _output_table(options, data, ['Host'], [['Daemon is ready']])
_QUERY_AUS = {
'accessType': ('Access type', lambda r: r.AccessType),
'articleUrls': ('Article URLs', lambda r: '<ArticleUrls>'),
'auConfiguration': ('AU configuration', lambda r: '<AuConfiguration>'),
'auId': ('AUID', lambda r: r.AuId),
'availableFromPublisher': ('Available from publisher', lambda r: r.AvailableFromPublisher),
'contentSize': ('Content size', lambda r: r.ContentSize),
'crawlPool': ('Crawl pool', lambda r: r.CrawlPool),
'crawlProxy': ('Crawl proxy', lambda r: r.CrawlProxy),
'crawlWindow': ('Crawl window', lambda r: r.CrawlWindow),
'creationTime': ('Creation time', lambda r: datetimems(r.CreationTime)),
'currentlyCrawling': ('Currently crawling', lambda r: r.CurrentlyCrawling),
'currentlyPolling': ('Currently polling', lambda r: r.CurrentlyPolling),
'diskUsage': ('Disk usage', lambda r: r.DiskUsage),
'highestPollAgreement': ('Highest poll agreement', lambda r: r.HighestPollAgreement),
'isBulkContent': ('Is bulk content', lambda r: r.IsBulkContent),
'journalTitle': ('Title', lambda r: r.JournalTitle),
'lastCompletedCrawl': ('Last completed crawl', lambda r: datetimems(r.LastCompletedCrawl)),
'lastCompletedPoll': ('Last completed poll', lambda r: datetimems(r.LastCompletedPoll)),
'lastCrawl': ('Last crawl', lambda r: datetimems(r.LastCrawl)),
'lastCrawlResult': ('Last crawl result', lambda r: r.LastCrawlResult),
'lastPoll': ('Last poll', lambda r: datetimems(r.LastPoll)),
'lastPollResult': ('Last poll result', lambda r: r.LastPollResult),
'name': ('Name', lambda r: r.Name),
'newContentCrawlUrls': ('New content crawl URLs', lambda r: '<NewContentCrawlUrls>'),
'peerAgreements': ('Peer agreements', lambda r: '<PeerAgreements>'),
'pluginName': ('Plugin name', lambda r: r.PluginName),
'publishingPlatform': ('Publishing platform', lambda r: r.PublishingPlatform),
'recentPollAgreement': ('Recent poll agreement', lambda r: r.RecentPollAgreement),
'repositoryPath': ('Repository path', lambda r: r.RepositoryPath),
'subscriptionStatus': ('Subscription status', lambda r: r.SubscriptionStatus),
'substanceState': ('Substance state', lambda r: r.SubstanceState),
'tdbProvider': ('TDB provider', lambda r: r.TdbProvider),
'tdbPublisher': ('TDB publisher', lambda r: r.TdbPublisher),
'tdbYear': ('TDB year', lambda r: r.TdbYear),
'urlStems': ('URL stems', lambda r: '<UrlStems>'),
'urls': ('URLs', lambda r: '<Urls>'),
'volume': ('Volume', lambda r: r.Volume)
}
def _do_query_aus(options):
select = filter(lambda x: x != 'auId', options.select)
auid_select = ['auId'] + select
headlamb = [_QUERY_AUS[x] for x in options.select]
data = dict()
for host, result in ThreadPool(options.threads).imap_unordered( \
lambda _host: (_host, query_aus(_host, options.auth, auid_select, options.where)), \
options.hosts):
for r in result:
for head, lamb in headlamb:
if options.group_by_field: colkey = (head, host)
else: colkey = (host, head)
data[((r.AuId,), colkey)] = lamb(r)
_output_table(options, data, ['AUID'], [[x[0] for x in headlamb], sorted(options.hosts)] if options.group_by_field else [sorted(options.hosts), [x[0] for x in headlamb]])
_QUERY_CRAWLS = {
'auId': ('AUID', lambda r: r.AuId),
'auName': ('AU name', lambda r: r.AuName),
'bytesFetchedCount': ('Bytes Fetched', lambda r: r.BytesFetchedCount),
'crawlKey': ('Crawl key', lambda r: r.CrawlKey),
'crawlStatus': ('Crawl status', lambda r: r.CrawlStatus),
'crawlType': ('Crawl type', lambda r: r.CrawlType),
'duration': ('Duration', lambda r: durationms(r.Duration)),
'linkDepth': ('Link depth', lambda r: r.LinkDepth),
'mimeTypeCount': ('MIME type count', lambda r: r.MimeTypeCount),
'mimeTypes': ('MIME types', lambda r: '<MIME types>'),
'offSiteUrlsExcludedCount': ('Off-site URLs excluded count', lambda r: r.OffSiteUrlsExcludedCount),
'pagesExcluded': ('Pages excluded', lambda r: '<Pages excluded>'),
'pagesExcludedCount': ('Pages excluded count', lambda r: r.PagesExcludedCount),
'pagesFetched': ('Pages fetched', lambda r: '<Pages fetched>'),
'pagesFetchedCount': ('Pages fetched count', lambda r: r.PagesFetchedCount),
'pagesNotModified': ('Pages not modified', lambda r: '<Pages not modified>'),
'pagesNotModifiedCount': ('Pages not modified count', lambda r: r.PagesNotModifiedCount),
'pagesParsed': ('Pages parsed', lambda r: '<Pages parsed>'),
'pagesParsedCount': ('Pages parsed count', lambda r: r.PagesParsedCount),
'pagesPending': ('Pages pending', lambda r: '<Pages pending>'),
'pagesPendingCount': ('Pages pending count', lambda r: r.PagesPendingCount),
'pagesWithErrors': ('Pages with errors', lambda r: '<Pages with errors>'),
'pagesWithErrorsCount': ('Pages with errors count', lambda r: r.PagesWithErrorsCount),
'refetchDepth': ('Refetch depth', lambda r: r.RefetchDepth),
'sources': ('Sources', lambda r: '<Sources>'),
'startTime': ('Start time', lambda r: datetimems(r.StartTime)),
'startingUrls': ('Starting URLs', lambda r: '<Starting URLs>')
}
def _do_query_crawls(options):
# Single request to a single host: unthreaded
select = filter(lambda x: x != 'auId', options.select)
auid_select = ['auId'] + select
headlamb = [_QUERY_CRAWLS[x] for x in options.select]
data = dict()
for r in query_crawls(options.hosts[0], options.auth, auid_select, options.where):
for head, lamb in headlamb:
data[((r.AuId,), (head,))] = lamb(r)
_output_table(options, data, ['AUID'], [[x[0] for x in headlamb]])
def _dispatch(options):
if options.get_au_status: _do_get_au_status(options)
elif options.get_au_urls: _do_get_au_urls(options)
elif options.get_au_article_urls: _do_get_au_article_urls(options)
elif options.get_au_subst_urls: _do_get_au_subst_urls(options)
elif options.get_auids or options.get_auids_names: _do_get_auids(options)
elif options.get_peer_agreements: _do_get_peer_agreements(options)
elif options.get_platform_configuration: _do_get_platform_configuration(options)
elif options.is_daemon_ready or options.is_daemon_ready_quiet: _do_is_daemon_ready(options)
elif options.query_aus: _do_query_aus(options)
elif options.query_crawls: _do_query_crawls(options)
else: raise RuntimeError, 'Unreachable'
def _main():
'''Main method.'''
# Parse command line
parser = _DaemonStatusServiceOptions.make_parser()
(opts, args) = parser.parse_args()
options = _DaemonStatusServiceOptions(parser, opts, args)
# Dispatch
t = Thread(target=_dispatch, args=(options,))
t.daemon = True
t.start()
while True:
t.join(1.5)
if not t.is_alive(): break
if __name__ == '__main__': _main()
check for None
#!/usr/bin/env python
'''A library and a command line tool to interact with the LOCKSS daemon status
service via its Web Services API.'''
__copyright__ = '''\
Copyright (c) 2000-2016 Board of Trustees of Leland Stanford Jr. University,
all rights reserved.
'''
__license__ = '''\
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Stanford University shall not
be used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from Stanford University.
'''
__version__ = '0.6.1'
import getpass
import itertools
from multiprocessing.dummy import Pool as ThreadPool
import optparse
import os.path
import sys
from threading import Thread
try: import ZSI
except ImportError: sys.exit('The Python ZSI module must be installed (or on the PYTHONPATH)')
import DaemonStatusServiceImplService_client
from wsutil import datems, datetimems, durationms, zsiauth
#
# Library
#
def get_au_status(host, auth, auid):
'''Performs a getAuStatus operation on the given host for the given AUID, and
returns a record with these fields (or None if ZSI.FaultException starting
with 'No Archival Unit with provided identifier' is raised):
- AccessType (string)
- AvailableFromPublisher (boolean)
- ContentSize (numeric)
- CrawlPool (string)
- CrawlProxy (string)
- CrawlWindow (string)
- CreationTime (numeric)
- CurrentlyCrawling (boolean)
- CurrentlyPolling (boolean)
- DiskUsage (numeric)
- JournalTitle (string)
- LastCompletedCrawl (numeric)
- LastCompletedPoll (numeric)
- LastCrawl (numeric)
- LastCrawlResult (string)
- LastPoll (numeric)
- LastPollResult (string)
- PluginName (string)
- Provider (string)
- Publisher (string)
- PublishingPlatform (string)
- RecentPollAgreement (floating point)
- Repository (string)
- Status (string)
- SubscriptionStatus (string)
- SubstanceState (string)
- Volume (string) (the AU name)
- Year (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
'''
req = DaemonStatusServiceImplService_client.getAuStatus()
req.AuId = auid
try:
ret = _ws_port(host, auth).getAuStatus(req)
return ret.Return
except ZSI.FaultException as e:
if str(e).startswith('No Archival Unit with provided identifier'):
return None
raise
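# A minimal usage sketch (hypothetical host, credentials and AUID; nothing in
# this module calls it): build the ZSI auth object with zsiauth() from wsutil
# and print a few status fields for one AU.
def _example_get_au_status():
    auth = zsiauth('ui-username', 'ui-password')  # hypothetical credentials
    auid = 'org|example|plugin|ExamplePlugin&base_url~http%3A%2F%2Fexample%2Ecom%2F'  # hypothetical AUID
    status = get_au_status('daemon.example.org:8081', auth, auid)
    if status is None: print 'No such AUID'
    else: print status.Status, status.ContentSize, status.LastCrawlResult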
def get_au_urls(host, auth, auid, prefix=None):
'''Performs a getAuUrls operation on the given host for the given AUID and
returns a list of URLs (strings) in the AU. If the optional prefix argument is
given, limits the results to URLs with that prefix (including the URL itself).
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
- prefix (string): a URL prefix (default: None)
'''
req = DaemonStatusServiceImplService_client.getAuUrls()
req.AuId = auid
if prefix is not None: req.url = prefix
return _ws_port(host, auth).getAuUrls(req).Return
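# A short sketch (hypothetical host, credentials and AUID): list the URLs of
# one AU under a given prefix, mirroring what --get-au-urls does without the
# prefix argument.
def _example_get_au_urls():
    auth = zsiauth('ui-username', 'ui-password')  # hypothetical credentials
    auid = 'org|example|plugin|ExamplePlugin&base_url~http%3A%2F%2Fexample%2Ecom%2F'  # hypothetical AUID
    for url in sorted(get_au_urls('daemon.example.org:8081', auth, auid, prefix='http://example.com/2016/')):
        print url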
def get_au_type_urls(host, auth, auid, type):
'''Performs a queryAus operation on the given host for the given AUID and
selects only the URL list of the given type (articleUrls or substanceUrls) for the AU.
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
- type (string): one of articleUrls or substanceUrls
'''
res = query_aus(host, auth, type, 'auId = "%s"' % (auid,))
if len(res) == 0: return None
else:
if type == 'articleUrls':
return res[0].ArticleUrls
else: return res[0].SubstanceUrls
def get_auids(host, auth):
'''Performs a getAuIds operation on the given host, which produces the full
sequence of AUIDs together with their AU names, and returns a list of records with
these fields:
- Id (string)
- Name (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
'''
req = DaemonStatusServiceImplService_client.getAuIds()
return _ws_port(host, auth).getAuIds(req).Return
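# A short sketch (hypothetical host and credentials): count the AUs known to a
# daemon and show the first few Id/Name records returned by get_auids().
def _example_get_auids():
    auth = zsiauth('ui-username', 'ui-password')  # hypothetical credentials
    records = get_auids('daemon.example.org:8081', auth)
    print '%d AUs' % (len(records),)
    for r in records[:5]: print r.Id, r.Name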
def get_peer_agreements(host, auth, auid):
'''Convenience call to query_aus() that returns the PeerAgreements list for
the given AUID (or None if there is no such AUID). The PeerAgreements list is
a list of records with these fields:
- Agreements, a record with these fields:
- Entry, a list of records with these fields:
- Key, a string among:
- "POR"
- "POP"
- "SYMMETRIC_POR"
- "SYMMETRIC_POP"
- "POR_HINT"
- "POP_HINT"
- "SYMMETRIC_POR_HINT"
- "SYMMETRIC_POP_HINT"
- "W_POR"
- "W_POP"
- "W_SYMMETRIC_POR"
- "W_SYMMETRIC_POP"
- "W_POR_HINT"
- "W_POP_HINT"
- "W_SYMMETRIC_POR_HINT"
- "W_SYMMETRIC_POP_HINT"
- Value, a record with these fields:
- HighestPercentAgreement (floating point)
- HighestPercentAgreementTimestamp (numeric)
- PercentAgreement (floating point)
- PercentAgreementTimestamp (numeric)
- PeerId (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- auid (string): an AUID
'''
res = query_aus(host, auth, 'peerAgreements', 'auId = "%s"' % (auid,))
if len(res) == 0: return None
else: return res[0].PeerAgreements
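# A short sketch (hypothetical host, credentials and AUID): print the POR
# agreement of each peer for one AU, following the record layout described in
# the docstring above.
def _example_get_peer_agreements():
    auth = zsiauth('ui-username', 'ui-password')  # hypothetical credentials
    auid = 'org|example|plugin|ExamplePlugin&base_url~http%3A%2F%2Fexample%2Ecom%2F'  # hypothetical AUID
    pa = get_peer_agreements('daemon.example.org:8081', auth, auid)
    if pa is None: return
    for pae in pa:
        for ae in pae.Agreements.Entry:
            if ae.Key == 'POR': print pae.PeerId, ae.Value.PercentAgreement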
def get_platform_configuration(host, auth):
'''Performs a getPlatformConfiguration operation on the given host and returns
a record with these fields:
- AdminEmail (string)
- BuildHost (string)
- BuildTimestamp (numeric)
- CurrentTime (numeric)
- CurrentWorkingDirectory (string)
- DaemonVersion, a record with these fields:
- BuildVersion (numeric)
- FullVersion (string)
- MajorVersion (numeric)
- MinorVersion (numeric)
- Disks (list of strings)
- Groups (list of strings)
- HostName (string)
- IpAddress (string)
- JavaVersion, a record with these fields:
- RuntimeName (string)
- RuntimeVersion (string)
- SpecificationVersion (string)
- Version (string)
- MailRelay (string)
- Platform, a record with these fields:
- Name (string)
- Suffix (string)
- Version (string)
- Project (string)
- Properties (list of strings)
- Uptime (numeric)
- V3Identity (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
'''
req = DaemonStatusServiceImplService_client.getPlatformConfiguration()
return _ws_port(host, auth).getPlatformConfiguration(req).Return
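# A short sketch (hypothetical host and credentials): report the daemon version
# and uptime of one host, formatting the uptime with durationms() from wsutil.
def _example_get_platform_configuration():
    auth = zsiauth('ui-username', 'ui-password')  # hypothetical credentials
    conf = get_platform_configuration('daemon.example.org:8081', auth)
    print conf.DaemonVersion.FullVersion, durationms(conf.Uptime)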
def is_daemon_ready(host, auth):
'''Performs an isDaemonReady operation on the given host and returns True or
False.
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
'''
req = DaemonStatusServiceImplService_client.isDaemonReady()
return _ws_port(host, auth).isDaemonReady(req).Return
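# A short sketch (hypothetical hosts and credentials): poll several daemons and
# report the ones that are not ready yet.
def _example_is_daemon_ready():
    auth = zsiauth('ui-username', 'ui-password')  # hypothetical credentials
    for host in ['daemon1.example.org:8081', 'daemon2.example.org:8081']:
        if not is_daemon_ready(host, auth): print '%s is not ready' % (host,)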
def query_aus(host, auth, select, where=None):
'''Performs a queryAus operation on the given host, using the given field
names to build a SELECT clause, optionally using the given string to build a
WHERE clause, and returns a list of records with these fields (populated or
not depending on the SELECT clause):
- AccessType (string)
- ArticleUrls (list of strings)
- AuConfiguration, a record with these fields:
- DefParams, a list of records with these fields:
- Key (string)
- Value (string)
- NonDefParams, a list of records with these fields:
- Key (string)
- Value (string)
- AuId (string)
- AvailableFromPublisher (boolean)
- ContentSize (numeric)
- CrawlPool (string)
- CrawlProxy (string)
- CrawlWindow (string)
- CreationTime (numeric)
- CurrentlyCrawling (boolean)
- CurrentlyPolling (boolean)
- DiskUsage (numeric)
- HighestPollAgreement (numeric)
- IsBulkContent (boolean)
- JournalTitle (string)
- LastCompletedCrawl (numeric)
- LastCompletedPoll (numeric)
- LastCrawl (numeric)
- LastCrawlResult (string)
- LastPoll (numeric)
- LastPollResult (string)
- Name (string)
- NewContentCrawlUrls (list of strings)
- PeerAgreements, a list of records with these fields:
- Agreements, a record with these fields:
- Entry, a list of records with these fields:
- Key, a string among:
- "POR"
- "POP"
- "SYMMETRIC_POR"
- "SYMMETRIC_POP"
- "POR_HINT"
- "POP_HINT"
- "SYMMETRIC_POR_HINT"
- "SYMMETRIC_POP_HINT"
- "W_POR"
- "W_POP"
- "W_SYMMETRIC_POR"
- "W_SYMMETRIC_POP"
- "W_POR_HINT"
- "W_POP_HINT"
- "W_SYMMETRIC_POR_HINT"
- "W_SYMMETRIC_POP_HINT"
- Value, a record with these fields:
- HighestPercentAgreement (floating point)
- HighestPercentAgreementTimestamp (numeric)
- PercentAgreement (floating point)
- PercentAgreementTimestamp (numeric)
- PeerId (string)
- PluginName (string)
- PublishingPlatform (string)
- RecentPollAgreement (numeric)
- RepositoryPath (string)
- SubscriptionStatus (string)
- SubstanceState (string)
- TdbProvider (string)
- TdbPublisher (string)
- TdbYear (string)
- UrlStems (list of strings)
- Urls, a list of records with these fields:
- CurrentVersionSize (numeric)
- Url (string)
- VersionCount (numeric)
- Volume (string)
Parameters:
- host (string): a host:port pair
- auth (ZSI authentication object): an authentication object
- select (string or list of strings): if a list of strings, the field names to
be used in the SELECT clause; if a string, the single field name to be used in
the SELECT clause
- where (string): optional statement for the WHERE clause (default: None)
Raises:
- ValueError if select is not of the right type
'''
if type(select) is list: query = 'SELECT %s' % (', '.join(select))
elif type(select) is str: query = 'SELECT %s' % (select,)
else: raise ValueError, 'invalid type for select parameter: %s' % (type(select),)
if where is not None: query = '%s WHERE %s' % (query, where)
req = DaemonStatusServiceImplService_client.queryAus()
req.AuQuery = query
return _ws_port(host, auth).queryAus(req).Return
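# A short sketch (hypothetical host and credentials): SELECT a few fields for
# every AU on one host; an optional where= argument would narrow the result the
# same way --where does.
def _example_query_aus():
    auth = zsiauth('ui-username', 'ui-password')  # hypothetical credentials
    for r in query_aus('daemon.example.org:8081', auth, ['auId', 'name', 'contentSize']):
        print r.AuId, r.Name, r.ContentSize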
def query_crawls(host, auth, select, where=None):
'''Performs a queryCrawls operation on the given host, using the given field
names to build a SELECT clause, optionally using the given string to build a
WHERE clause, and returns a list of records with these fields (populated or
not depending on the SELECT clause):
- AuId (string)
- AuName (string)
- BytesFetchedCount (long)
- CrawlKey (string)
- CrawlStatus (string)
- CrawlType (string)
- Duration (long)
- LinkDepth (int)
- MimeTypeCount (int)
- MimeTypes (list of strings)
- OffSiteUrlsExcludedCount (int)
- PagesExcluded (list of strings)
- PagesExcludedCount (int)
- PagesFetched (list of strings)
- PagesFetchedCount (int)
- PagesNotModified (list of strings)
- PagesNotModifiedCount (int)
- PagesParsed (list of strings)
- PagesParsedCount (int)
- PagesPending (list of strings)
- PagesPendingCount (int)
- PagesWithErrors, a list of records with these fields:
- Message (string)
- Severity (string)
- Url (string)
- PagesWithErrorsCount (int)
- RefetchDepth (int)
- Sources (list of strings)
- StartTime (long)
- StartingUrls (list of strings)
'''
if type(select) is list: query = 'SELECT %s' % (', '.join(select))
elif type(select) is str: query = 'SELECT %s' % (select,)
else: raise ValueError, 'invalid type for select parameter: %s' % (type(select),)
if where is not None: query = '%s WHERE %s' % (query, where)
req = DaemonStatusServiceImplService_client.queryCrawls()
req.CrawlQuery = query
return _ws_port(host, auth).queryCrawls(req).Return
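# A short sketch (hypothetical host and credentials): list the status and start
# time of the crawls known to one daemon, like a minimal --query-crawls run.
def _example_query_crawls():
    auth = zsiauth('ui-username', 'ui-password')  # hypothetical credentials
    for r in query_crawls('daemon.example.org:8081', auth, ['auId', 'crawlStatus', 'startTime']):
        print r.AuId, r.CrawlStatus, datetimems(r.StartTime)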
def _ws_port(host, auth, tracefile=None):
url = 'http://%s/ws/DaemonStatusService' % (host,)
locator = DaemonStatusServiceImplService_client.DaemonStatusServiceImplServiceLocator()
if tracefile is None: return locator.getDaemonStatusServiceImplPort(url=url, auth=auth)
else: return locator.getDaemonStatusServiceImplPort(url=url, auth=auth, tracefile=tracefile)
#
# Command line tool
#
class _DaemonStatusServiceOptions(object):
@staticmethod
def make_parser():
usage = '%prog {--host=HOST|--hosts=HFILE}... [OPTIONS]'
parser = optparse.OptionParser(version=__version__, description=__doc__, usage=usage)
# Hosts
group = optparse.OptionGroup(parser, 'Target hosts')
group.add_option('--host', action='append', default=list(), help='add host:port pair to list of target hosts')
group.add_option('--hosts', action='append', default=list(), metavar='HFILE', help='add host:port pairs in HFILE to list of target hosts')
group.add_option('--password', metavar='PASS', help='UI password (default: interactive prompt)')
group.add_option('--username', metavar='USER', help='UI username (default: interactive prompt)')
parser.add_option_group(group)
# AUIDs
group = optparse.OptionGroup(parser, 'Target AUIDs')
group.add_option('--auid', action='append', default=list(), help='add AUID to list of target AUIDs')
group.add_option('--auids', action='append', default=list(), metavar='AFILE', help='add AUIDs in AFILE to list of target AUIDs')
parser.add_option_group(group)
# Daemon operations
group = optparse.OptionGroup(parser, 'Daemon operations')
group.add_option('--get-platform-configuration', action='store_true', help='output platform configuration information for target hosts; narrow down with optional --select list chosen among %s' % (', '.join(sorted(_PLATFORM_CONFIGURATION)),))
group.add_option('--is-daemon-ready', action='store_true', help='output True/False table of ready status of target hosts; always exit with 0')
group.add_option('--is-daemon-ready-quiet', action='store_true', help='output nothing; exit with 0 if all target hosts are ready, 1 otherwise')
parser.add_option_group(group)
# AUID operations
group = optparse.OptionGroup(parser, 'AU operations')
group.add_option('--get-au-status', action='store_true', help='output status information about target AUIDs; narrow down output with optional --select list chosen among %s' % (', '.join(sorted(_AU_STATUS)),))
group.add_option('--get-au-urls', action='store_true', help='output URLs in one AU on one host')
group.add_option('--get-au-article-urls', action='store_true', help='output article URLs in one AU on one host')
group.add_option('--get-au-subst-urls', action='store_true', help='output substance URLs in one AU on one host')
group.add_option('--get-auids', action='store_true', help='output True/False table of all AUIDs (or target AUIDs if specified) present on target hosts')
group.add_option('--get-auids-names', action='store_true', help='output True/False table of all AUIDs (or target AUIDs if specified) and their names present on target hosts')
group.add_option('--get-peer-agreements', action='store_true', help='output peer agreements for one AU on one host')
group.add_option('--query-aus', action='store_true', help='perform AU query (with optional --where clause) with --select list chosen among %s' % (', '.join(sorted(_QUERY_AUS)),))
parser.add_option_group(group)
# Crawl operations
group = optparse.OptionGroup(parser, 'Crawl operations')
group.add_option('--query-crawls', action='store_true', help='perform crawl query (with optional --where clause) with --select list chosen among %s' % (', '.join(sorted(_QUERY_CRAWLS)),))
parser.add_option_group(group)
# Other options
group = optparse.OptionGroup(parser, 'Other options')
group.add_option('--group-by-field', action='store_true', help='group results by field instead of host')
group.add_option('--no-special-output', action='store_true', help='no special output format for a single target host')
group.add_option('--select', metavar='FIELDS', help='comma-separated list of fields for narrower output')
group.add_option('--threads', type='int', help='max parallel jobs allowed (default: no limit)')
group.add_option('--where', help='optional WHERE clause for query operations')
parser.add_option_group(group)
return parser
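    # Hypothetical command lines (host names, credentials and the script file
    # name are illustrative only): check that two daemons are ready, then
    # tabulate a few AU fields across them.
    #
    #   python daemonstatusservice.py --host=daemon1.example.org:8081 \
    #       --host=daemon2.example.org:8081 --username=admin --is-daemon-ready
    #
    #   python daemonstatusservice.py --host=daemon1.example.org:8081 \
    #       --host=daemon2.example.org:8081 --username=admin \
    #       --query-aus --select=auId,name,contentSize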
def __init__(self, parser, opts, args):
super(_DaemonStatusServiceOptions, self).__init__()
if len(args) > 0: parser.error('extraneous arguments: %s' % (' '.join(args)))
if len(filter(None, [opts.get_au_status, opts.get_au_urls, opts.get_au_article_urls, opts.get_au_subst_urls, opts.get_auids, opts.get_auids_names, opts.get_peer_agreements, opts.get_platform_configuration, opts.is_daemon_ready, opts.is_daemon_ready_quiet, opts.query_aus, opts.query_crawls])) != 1:
parser.error('exactly one of --get-au-status, --get-au-urls, --get-au-article-urls, --get-au-subst-urls, --get-auids, --get-auids-names, --get-peer-agreements, --get-platform-configuration, --is-daemon-ready, --is-daemon-ready-quiet, --query-aus, --query-crawls is required')
if len(opts.auid) + len(opts.auids) > 0 and not any([opts.get_au_status, opts.get_au_urls, opts.get_au_article_urls, opts.get_au_subst_urls, opts.get_auids, opts.get_auids_names, opts.get_peer_agreements]):
parser.error('--auid, --auids can only be applied to --get-au-status, --get-au-urls, --get-au-article-urls, --get-au-subst-urls, --get-auids, --get-auids-names, --get-peer-agreements')
if opts.select and not any([opts.get_au_status, opts.get_platform_configuration, opts.query_aus, opts.query_crawls]):
parser.error('--select can only be applied to --get-au-status, --get-platform-configuration, --query-aus, --query-crawls')
if opts.where and not any([opts.query_aus, opts.query_crawls]):
parser.error('--where can only be applied to --query-aus, --query-crawls')
if opts.group_by_field and not any([opts.get_au_status, opts.query_aus]):
parser.error('--group-by-field can only be applied to --get-au-status, --query-aus')
# hosts
self.hosts = opts.host[:]
for f in opts.hosts: self.hosts.extend(_file_lines(f))
if len(self.hosts) == 0: parser.error('at least one target host is required')
# auids
self.auids = opts.auid[:]
for f in opts.auids: self.auids.extend(_file_lines(f))
# get_auids/get_auids_names/is_daemon_ready/is_daemon_ready_quiet
self.get_auids = opts.get_auids
self.get_auids_names = opts.get_auids_names
self.is_daemon_ready = opts.is_daemon_ready
self.is_daemon_ready_quiet = opts.is_daemon_ready_quiet
# get_platform_configuration/select
self.get_platform_configuration = opts.get_platform_configuration
if self.get_platform_configuration:
self.select = self.__init_select(parser, opts, _PLATFORM_CONFIGURATION)
# get_au_status/select
self.get_au_status = opts.get_au_status
if self.get_au_status:
if len(self.auids) == 0: parser.error('at least one target AUID is required with --get-au-status')
self.select = self.__init_select(parser, opts, _AU_STATUS)
# get_au_urls
self.get_au_urls = opts.get_au_urls
if self.get_au_urls:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --get-au-urls')
if len(self.auids) != 1: parser.error('only one target AUID is allowed with --get-au-urls')
# get article url list
self.get_au_article_urls = opts.get_au_article_urls
if self.get_au_article_urls:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --get-au-article-urls')
if len(self.auids) != 1: parser.error('only one target AUID is allowed with --get-au-article-urls')
# get substance url list
self.get_au_subst_urls = opts.get_au_subst_urls
if self.get_au_subst_urls:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --get-au-subst-urls')
if len(self.auids) != 1: parser.error('only one target AUID is allowed with --get-au-subst-urls')
# get_peer_agreements
self.get_peer_agreements = opts.get_peer_agreements
if self.get_peer_agreements:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --get-peer-agreements')
if len(self.auids) != 1: parser.error('only one target AUID is allowed with --get-peer-agreements')
# query_aus/select/where
self.query_aus = opts.query_aus
if self.query_aus:
self.select = self.__init_select(parser, opts, _QUERY_AUS)
self.where = opts.where
# query_crawls/select/where
self.query_crawls = opts.query_crawls
if self.query_crawls:
if len(self.hosts) != 1: parser.error('only one target host is allowed with --query-crawls')
self.select = self.__init_select(parser, opts, _QUERY_CRAWLS)
self.where = opts.where
# group_by_field/no_special_output
self.group_by_field = opts.group_by_field
self.no_special_output = opts.no_special_output
# threads
self.threads = opts.threads or len(self.hosts)
# auth
u = opts.username or getpass.getpass('UI username: ')
p = opts.password or getpass.getpass('UI password: ')
self.auth = zsiauth(u, p)
def __init_select(self, parser, opts, field_dict):
if opts.select is None: return sorted(field_dict)
fields = [s.strip() for s in opts.select.split(',')]
errfields = filter(lambda f: f not in field_dict, fields)
if len(errfields) == 1: parser.error('unknown field: %s' % (errfields[0],))
if len(errfields) > 1: parser.error('unknown fields: %s' % (', '.join(errfields),))
return fields
# Last modified 2016-08-02
def _output_record(options, lst):
print '\t'.join([x if type(x) is unicode else str(x or '') for x in lst])
# Last modified 2015-08-05
def _output_table(options, data, rowheaders, lstcolkeys):
colkeys = [x for x in itertools.product(*lstcolkeys)]
for j in xrange(len(lstcolkeys)):
if j < len(lstcolkeys) - 1: rowpart = [''] * len(rowheaders)
else: rowpart = rowheaders
_output_record(options, rowpart + [x[j] for x in colkeys])
for rowkey in sorted(set([k[0] for k in data])):
_output_record(options, list(rowkey) + [data.get((rowkey, colkey)) for colkey in colkeys])
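# A tiny sketch of the data shape _output_table() consumes (hypothetical AUIDs
# and hosts): keys are (row key tuple, column key tuple) pairs and lstcolkeys
# is a list of key lists whose cartesian product forms the column keys.
def _example_output_table(options):
    data = {(('auid1',), ('host1:8081',)): True,
            (('auid1',), ('host2:8081',)): False,
            (('auid2',), ('host1:8081',)): True,
            (('auid2',), ('host2:8081',)): False}
    _output_table(options, data, ['AUID'], [['host1:8081', 'host2:8081']])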
# Last modified 2015-08-31
def _file_lines(fstr):
with open(os.path.expanduser(fstr)) as f: ret = filter(lambda y: len(y) > 0, [x.partition('#')[0].strip() for x in f])
if len(ret) == 0: sys.exit('Error: %s contains no meaningful lines' % (fstr,))
return ret
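# A sketch of the file format _file_lines() accepts (hypothetical HFILE/AFILE
# contents): one entry per line; blank lines and anything after '#' are
# dropped, so the following yields ['daemon1.example.org:8081',
# 'daemon2.example.org:8081']:
#
#   # production daemons
#   daemon1.example.org:8081
#   daemon2.example.org:8081  # recently added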
_AU_STATUS = {
'accessType': ('Access type', lambda r: r.AccessType),
'availableFromPublisher': ('Available from publisher', lambda r: r.AvailableFromPublisher),
'contentSize': ('Content size', lambda r: r.ContentSize),
'crawlPool': ('Crawl pool', lambda r: r.CrawlPool),
'crawlProxy': ('Crawl proxy', lambda r: r.CrawlProxy),
'crawlWindow': ('Crawl window', lambda r: r.CrawlWindow),
'creationTime': ('Creation time', lambda r: datetimems(r.CreationTime)),
'currentlyCrawling': ('Currently crawling', lambda r: r.CurrentlyCrawling),
'currentlyPolling': ('Currently polling', lambda r: r.CurrentlyPolling),
'diskUsage': ('Disk usage', lambda r: r.DiskUsage),
'journalTitle': ('Journal title', lambda r: r.JournalTitle),
'lastCompletedCrawl': ('Last completed crawl', lambda r: datetimems(r.LastCompletedCrawl)),
'lastCompletedPoll': ('Last completed poll', lambda r: datetimems(r.LastCompletedPoll)),
'lastCrawl': ('Last crawl', lambda r: datetimems(r.LastCrawl)),
'lastCrawlResult': ('Last crawl result', lambda r: r.LastCrawlResult),
'lastPoll': ('Last poll', lambda r: datetimems(r.LastPoll)),
'lastPollResult': ('Last poll result', lambda r: r.LastPollResult),
'pluginName': ('Plugin name', lambda r: r.PluginName),
'provider': ('Provider', lambda r: r.Provider),
'publisher': ('Publisher', lambda r: r.Publisher),
'publishingPlatform': ('Publishing platform', lambda r: r.PublishingPlatform),
'recentPollAgreement': ('Recent poll agreement', lambda r: r.RecentPollAgreement),
'repository': ('Repository', lambda r: r.Repository),
'status': ('Status', lambda r: r.Status),
'subscriptionStatus': ('Subscription status', lambda r: r.SubscriptionStatus),
'substanceState': ('Substance state', lambda r: r.SubstanceState),
'volume': ('Volume name', lambda r: r.Volume),
'year': ('Year', lambda r: r.Year)
}
def _do_get_au_status(options):
headlamb = [_AU_STATUS[x] for x in options.select]
data = dict()
for host, auid, result in ThreadPool(options.threads).imap_unordered( \
lambda _tup: (_tup[1], _tup[0], get_au_status(_tup[1], options.auth, _tup[0])), \
itertools.product(options.auids, options.hosts)):
if result is not None:
for head, lamb in headlamb:
if options.group_by_field: colkey = (head, host)
else: colkey = (host, head)
data[((auid,), colkey)] = lamb(result)
_output_table(options, data, ['AUID'], [[x[0] for x in headlamb], sorted(options.hosts)] if options.group_by_field else [sorted(options.hosts), [x[0] for x in headlamb]])
def _do_get_au_urls(options):
# Single request to a single host: unthreaded
r = get_au_urls(options.hosts[0], options.auth, options.auids[0])
for url in sorted(r): _output_record(options, [url])
def _do_get_au_article_urls(options):
# Single request to a single host: unthreaded
r = get_au_type_urls(options.hosts[0], options.auth, options.auids[0], "articleUrls")
if r is not None:
for url in sorted(r): _output_record(options, [url])
def _do_get_au_subst_urls(options):
# Single request to a single host: unthreaded
r = get_au_type_urls(options.hosts[0], options.auth, options.auids[0], "substanceUrls")
if r is not None:
for url in sorted(r): _output_record(options, [url])
def _do_get_auids(options):
if len(options.auids) > 0:
targetauids = set(options.auids)
shouldskip = lambda a: a not in targetauids
else: shouldskip = lambda a: False
data = dict()
auids = set()
for host, result in ThreadPool(options.threads).imap_unordered( \
lambda _host: (_host, get_auids(_host, options.auth)), \
options.hosts):
for r in result:
if shouldskip(r.Id): continue
if options.get_auids_names: rowkey = (r.Id, r.Name)
else: rowkey = (r.Id,)
data[(rowkey, (host,))] = True
if r.Id not in auids:
auids.add(r.Id)
for h in options.hosts: data.setdefault((rowkey, (h,)), False)
_output_table(options, data, ['AUID', 'Name'] if options.get_auids_names else ['AUID'], [sorted(options.hosts)])
_PLATFORM_CONFIGURATION = {
'adminEmail': ('Admin e-mail', lambda r: r.AdminEmail),
'buildHost': ('Build host', lambda r: r.BuildHost),
'buildTimestamp': ('Build timestamp', lambda r: datetimems(r.BuildTimestamp)),
'currentTime': ('Current time', lambda r: datetimems(r.CurrentTime)),
'currentWorkingDirectory': ('Current working directory', lambda r: r.CurrentWorkingDirectory),
'daemonBuildVersion': ('Daemon build version', lambda r: r.DaemonVersion.BuildVersion),
'daemonFullVersion': ('Daemon full version', lambda r: r.DaemonVersion.FullVersion),
'daemonMajorVersion': ('Daemon major version', lambda r: r.DaemonVersion.MajorVersion),
'daemonMinorVersion': ('Daemon minor version', lambda r: r.DaemonVersion.MinorVersion),
'disks': ('Disks', lambda r: ', '.join(r.Disks)),
'groups': ('Groups', lambda r: ', '.join(r.Groups)),
'hostName': ('Host name', lambda r: r.HostName),
'ipAddress': ('IP address', lambda r: r.IpAddress),
'javaRuntimeName': ('Java runtime name', lambda r: r.JavaVersion.RuntimeName),
'javaRuntimeVersion': ('Java runtime version', lambda r: r.JavaVersion.RuntimeVersion),
'javaSpecificationVersion': ('Java specification version', lambda r: r.JavaVersion.SpecificationVersion),
'javaVersion': ('Java version', lambda r: r.JavaVersion.Version),
'mailRelay': ('Mail relay', lambda r: r.MailRelay),
'platformName': ('Platform name', lambda r: r.Platform.Name),
'platformSuffix': ('Platform suffix', lambda r: r.Platform.Suffix),
'platformVersion': ('Platform version', lambda r: r.Platform.Version),
'project': ('Project', lambda r: r.Project),
'properties': ('Properties', lambda r: ', '.join(r.Properties)),
'uptime': ('Uptime', lambda r: durationms(r.Uptime)),
'v3Identity': ('V3 identity', lambda r: r.V3Identity)
}
def _do_get_peer_agreements(options):
# Single request to a single host: unthreaded
pa = get_peer_agreements(options.hosts[0], options.auth, options.auids[0])
if pa is None:
print 'No such AUID'
return
for pae in pa:
for ae in pae.Agreements.Entry:
_output_record(options, [pae.PeerId, ae.Key, ae.Value.PercentAgreement, datetimems(ae.Value.PercentAgreementTimestamp), ae.Value.HighestPercentAgreement, datetimems(ae.Value.HighestPercentAgreementTimestamp)])
def _do_get_platform_configuration(options):
headlamb = [_PLATFORM_CONFIGURATION[x] for x in options.select]
data = dict()
for host, result in ThreadPool(options.threads).imap_unordered( \
lambda _host: (_host, get_platform_configuration(_host, options.auth)), \
options.hosts):
for head, lamb in headlamb:
data[((head,), (host,))] = lamb(result)
_output_table(options, data, [''], [sorted(options.hosts)])
def _do_is_daemon_ready(options):
data = dict()
for host, result in ThreadPool(options.threads).imap_unordered( \
lambda _host: (_host, is_daemon_ready(_host, options.auth)), \
options.hosts):
if options.is_daemon_ready_quiet and result is False: sys.exit(1)
else: data[((host,), ('Daemon is ready',))] = result
if options.is_daemon_ready_quiet: pass
else: _output_table(options, data, ['Host'], [['Daemon is ready']])
_QUERY_AUS = {
'accessType': ('Access type', lambda r: r.AccessType),
'articleUrls': ('Article URLs', lambda r: '<ArticleUrls>'),
'auConfiguration': ('AU configuration', lambda r: '<AuConfiguration>'),
'auId': ('AUID', lambda r: r.AuId),
'availableFromPublisher': ('Available from publisher', lambda r: r.AvailableFromPublisher),
'contentSize': ('Content size', lambda r: r.ContentSize),
'crawlPool': ('Crawl pool', lambda r: r.CrawlPool),
'crawlProxy': ('Crawl proxy', lambda r: r.CrawlProxy),
'crawlWindow': ('Crawl window', lambda r: r.CrawlWindow),
'creationTime': ('Creation time', lambda r: datetimems(r.CreationTime)),
'currentlyCrawling': ('Currently crawling', lambda r: r.CurrentlyCrawling),
'currentlyPolling': ('Currently polling', lambda r: r.CurrentlyPolling),
'diskUsage': ('Disk usage', lambda r: r.DiskUsage),
'highestPollAgreement': ('Highest poll agreement', lambda r: r.HighestPollAgreement),
'isBulkContent': ('Is bulk content', lambda r: r.IsBulkContent),
'journalTitle': ('Title', lambda r: r.JournalTitle),
'lastCompletedCrawl': ('Last completed crawl', lambda r: datetimems(r.LastCompletedCrawl)),
'lastCompletedPoll': ('Last completed poll', lambda r: datetimems(r.LastCompletedPoll)),
'lastCrawl': ('Last crawl', lambda r: datetimems(r.LastCrawl)),
'lastCrawlResult': ('Last crawl result', lambda r: r.LastCrawlResult),
'lastPoll': ('Last poll', lambda r: datetimems(r.LastPoll)),
'lastPollResult': ('Last poll result', lambda r: r.LastPollResult),
'name': ('Name', lambda r: r.Name),
'newContentCrawlUrls': ('New content crawl URLs', lambda r: '<NewContentCrawlUrls>'),
'peerAgreements': ('Peer agreements', lambda r: '<PeerAgreements>'),
'pluginName': ('Plugin name', lambda r: r.PluginName),
'publishingPlatform': ('Publishing platform', lambda r: r.PublishingPlatform),
'recentPollAgreement': ('Recent poll agreement', lambda r: r.RecentPollAgreement),
'repositoryPath': ('Repository path', lambda r: r.RepositoryPath),
'subscriptionStatus': ('Subscription status', lambda r: r.SubscriptionStatus),
'substanceState': ('Substance state', lambda r: r.SubstanceState),
'tdbProvider': ('TDB provider', lambda r: r.TdbProvider),
'tdbPublisher': ('TDB publisher', lambda r: r.TdbPublisher),
'tdbYear': ('TDB year', lambda r: r.TdbYear),
'urlStems': ('URL stems', lambda r: '<UrlStems>'),
'urls': ('URLs', lambda r: '<Urls>'),
'volume': ('Volume', lambda r: r.Volume)
}
def _do_query_aus(options):
select = filter(lambda x: x != 'auId', options.select)
auid_select = ['auId'] + select
headlamb = [_QUERY_AUS[x] for x in options.select]
data = dict()
for host, result in ThreadPool(options.threads).imap_unordered( \
lambda _host: (_host, query_aus(_host, options.auth, auid_select, options.where)), \
options.hosts):
for r in result:
for head, lamb in headlamb:
if options.group_by_field: colkey = (head, host)
else: colkey = (host, head)
data[((r.AuId,), colkey)] = lamb(r)
_output_table(options, data, ['AUID'], [[x[0] for x in headlamb], sorted(options.hosts)] if options.group_by_field else [sorted(options.hosts), [x[0] for x in headlamb]])
_QUERY_CRAWLS = {
'auId': ('AUID', lambda r: r.AuId),
'auName': ('AU name', lambda r: r.AuName),
'bytesFetchedCount': ('Bytes Fetched', lambda r: r.BytesFetchedCount),
'crawlKey': ('Crawl key', lambda r: r.CrawlKey),
'crawlStatus': ('Crawl status', lambda r: r.CrawlStatus),
'crawlType': ('Crawl type', lambda r: r.CrawlType),
'duration': ('Duration', lambda r: durationms(r.Duration)),
'linkDepth': ('Link depth', lambda r: r.LinkDepth),
'mimeTypeCount': ('MIME type count', lambda r: r.MimeTypeCount),
'mimeTypes': ('MIME types', lambda r: '<MIME types>'),
'offSiteUrlsExcludedCount': ('Off-site URLs excluded count', lambda r: r.OffSiteUrlsExcludedCount),
'pagesExcluded': ('Pages excluded', lambda r: '<Pages excluded>'),
'pagesExcludedCount': ('Pages excluded count', lambda r: r.PagesExcludedCount),
'pagesFetched': ('Pages fetched', lambda r: '<Pages fetched>'),
'pagesFetchedCount': ('Pages fetched count', lambda r: r.PagesFetchedCount),
'pagesNotModified': ('Pages not modified', lambda r: '<Pages not modified>'),
'pagesNotModifiedCount': ('Pages not modified count', lambda r: r.PagesNotModifiedCount),
'pagesParsed': ('Pages parsed', lambda r: '<Pages parsed>'),
'pagesParsedCount': ('Pages parsed count', lambda r: r.PagesParsedCount),
'pagesPending': ('Pages pending', lambda r: '<Pages pending>'),
'pagesPendingCount': ('Pages pending count', lambda r: r.PagesPendingCount),
'pagesWithErrors': ('Pages with errors', lambda r: '<Pages with errors>'),
'pagesWithErrorsCount': ('Pages with errors count', lambda r: r.PagesWithErrorsCount),
'refetchDepth': ('Refetch depth', lambda r: r.RefetchDepth),
'sources': ('Sources', lambda r: '<Sources>'),
'startTime': ('Start time', lambda r: datetimems(r.StartTime)),
'startingUrls': ('Starting URLs', lambda r: '<Starting URLs>')
}
def _do_query_crawls(options):
# Single request to a single host: unthreaded
select = filter(lambda x: x != 'auId', options.select)
auid_select = ['auId'] + select
headlamb = [_QUERY_CRAWLS[x] for x in options.select]
data = dict()
for r in query_crawls(options.hosts[0], options.auth, auid_select, options.where):
for head, lamb in headlamb:
data[((r.AuId,), (head,))] = lamb(r)
_output_table(options, data, ['AUID'], [[x[0] for x in headlamb]])
def _dispatch(options):
if options.get_au_status: _do_get_au_status(options)
elif options.get_au_urls: _do_get_au_urls(options)
elif options.get_au_article_urls: _do_get_au_article_urls(options)
elif options.get_au_subst_urls: _do_get_au_subst_urls(options)
elif options.get_auids or options.get_auids_names: _do_get_auids(options)
elif options.get_peer_agreements: _do_get_peer_agreements(options)
elif options.get_platform_configuration: _do_get_platform_configuration(options)
elif options.is_daemon_ready or options.is_daemon_ready_quiet: _do_is_daemon_ready(options)
elif options.query_aus: _do_query_aus(options)
elif options.query_crawls: _do_query_crawls(options)
else: raise RuntimeError, 'Unreachable'
def _main():
'''Main method.'''
# Parse command line
parser = _DaemonStatusServiceOptions.make_parser()
(opts, args) = parser.parse_args()
options = _DaemonStatusServiceOptions(parser, opts, args)
# Dispatch
t = Thread(target=_dispatch, args=(options,))
t.daemon = True
t.start()
while True:
t.join(1.5)
if not t.is_alive(): break
if __name__ == '__main__': _main()
fix sidebar styling
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import pytz
from openerp import SUPERUSER_ID, workflow
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record_list, browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
from openerp.exceptions import UserError
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj = self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in line.taxes_id.compute_all(line.price_unit, cur, line.product_qty, product=line.product_id, partner=order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
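    # Rough numeric sketch (hypothetical figures): with two order lines of
    # 100.0 and 50.0 untaxed and a single 10% tax on each, this yields
    # amount_untaxed=150.0, amount_tax=15.0 and amount_total=165.0; the untaxed
    # and tax totals are each rounded in the order currency before being added.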
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
pol_obj = self.pool.get('purchase.order.line')
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
pol_ids = pol_obj.search(cr, uid, [
('order_id', '=', po.id), '|', ('date_planned', '=', po.minimum_planned_date), ('date_planned', '<', value)
], context=context)
pol_obj.write(cr, uid, pol_ids, {'date_planned': value}, context=context)
self.invalidate_cache(cr, uid, context=context)
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.state == 'cancel':
continue
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.order_id, sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
purchase_order_line p on (p.id=m.purchase_line_id)
WHERE
p.order_id IN %s GROUP BY m.state, p.order_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
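    # Hypothetical example: if moves totalling 30 units are 'done' out of 40
    # non-cancelled units linked to the order's lines, the received ratio is
    # 100.0 * 30 / 40 = 75.0; orders with no linked moves report 0.0.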
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_purchase_order(self, cr, uid, ids, context=None):
result = {}
for order in self.browse(cr, uid, ids, context=context):
result[order.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line if line.state != 'cancel')
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
type_obj = self.pool.get('stock.picking.type')
user_obj = self.pool.get('res.users')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)], context=context)
if not types:
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id', '=', False)], context=context)
if not types:
raise UserError(_("Make sure you have at least an incoming picking type defined"))
return types[0]
def _get_picking_ids(self, cr, uid, ids, field_names, args, context=None):
res = {}
for po_id in ids:
res[po_id] = []
query = """
SELECT picking_id, po.id FROM stock_picking p, stock_move m, purchase_order_line pol, purchase_order po
WHERE po.id in %s and po.id = pol.order_id and pol.id = m.purchase_line_id and m.picking_id = p.id
GROUP BY picking_id, po.id
"""
cr.execute(query, (tuple(ids), ))
picks = cr.fetchall()
for pick_id, po_id in picks:
res[po_id].append(pick_id)
return res
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
return {
purchase.id: {
'shipment_count': len(purchase.picking_ids),
'invoice_count': len(purchase.invoice_ids),
}
for purchase in self.browse(cr, uid, ids, context=context)
}
STATE_SELECTION = [
('draft', 'Draft RFQ'),
('sent', 'RFQ'),
('bid', 'Bid Received'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Confirmed'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
READONLY_STATES = {
'confirmed': [('readonly', True)],
'approved': [('readonly', True)],
'done': [('readonly', True)]
}
_columns = {
'name': fields.char('Order Reference', required=True, select=True, copy=False,
help="Unique number of the purchase order, "
"computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', copy=False,
help="Reference of the document that generated this purchase order "
"request; a sales order or an internal procurement request."),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=False,
help="Reference of the sales order or bid sent by your supplier. "
"It's mainly used to do the matching when you receive the "
"products as this reference is usually written on the "
"delivery order sent by your supplier."),
'date_order':fields.datetime('Order Date', required=True, states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)]},
select=True, help="Depicts the date where the Quotation should be validated and converted into a Purchase Order, by default it's the creation date.",
copy=False),
'date_approve':fields.date('Date Approved', readonly=1, select=True, copy=False,
help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states=READONLY_STATES,
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states=READONLY_STATES,
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states=READONLY_STATES),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states=READONLY_STATES, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.many2one('res.currency','Currency', required=True, states=READONLY_STATES),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True,
help="The status of the purchase order or the quotation request. "
"A request for quotation is a purchase order in a 'Draft' status. "
"Then the order has to be confirmed by the user, the status switch "
"to 'Confirmed'. Then the supplier must confirm the order to change "
"the status to 'Approved'. When the purchase order is paid and "
"received, the status becomes 'Done'. If a cancel action occurs in "
"the invoice or in the receipt of goods, the status becomes "
"in exception.",
select=True, copy=False),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines',
states={'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=True),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True, copy=False),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id',
'invoice_id', 'Invoices', copy=False,
help="Invoices generated for a purchase order"),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking List', help="This is the list of receipts that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, copy=False,
help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', copy=False,
help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)],'bid':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control / On Purchase Order lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Based on incoming shipments: let you create an invoice when receipts are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='datetime', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
'purchase.order': (_get_purchase_order, ['order_line'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits=0, string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits=0, string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits=0, string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The total amount"),
'fiscal_position_id': fields.many2one('account.fiscal.position', oldname='fiscal_position', string='Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'incoterm_id': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
'bid_date': fields.date('Bid Received On', readonly=True, help="Date on which the bid was received"),
'bid_validity': fields.date('Bid Valid Until', help="Date on which the bid expired"),
'picking_type_id': fields.many2one('stock.picking.type', 'Deliver To', help="This will determine picking type of incoming shipment", required=True,
states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)], 'done': [('readonly', True)]}),
'related_location_id': fields.related('picking_type_id', 'default_location_dest_id', type='many2one', relation='stock.location', string="Related location", store=True),
'related_usage': fields.related('location_id', 'usage', type='char'),
'shipment_count': fields.function(_count_all, type='integer', string='Incoming Shipments', multi=True),
'invoice_count': fields.function(_count_all, type='integer', string='Invoices', multi=True),
'group_id': fields.many2one('procurement.group', string="Procurement Group"),
}
_defaults = {
'date_order': fields.datetime.now,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
'currency_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id,
'picking_type_id': _get_picking_in,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name', '/') == '/':
vals['name'] = self.pool.get('ir.sequence').next_by_code(cr, uid, 'purchase.order') or '/'
context = dict(context or {}, mail_create_nolog=True)
order = super(purchase_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [order], body=_("RFQ created"), context=context)
return order
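# A minimal usage sketch (hedged; the ids below are assumptions and not part of this module):
# creating an order without an explicit 'name' lets the 'purchase.order' ir.sequence assign
# the reference, and the "RFQ created" message is posted on the new record.
#
#     po_obj = self.pool.get('purchase.order')
#     po_id = po_obj.create(cr, uid, {
#         'partner_id': supplier_id,        # hypothetical supplier id
#         'location_id': location_id,       # hypothetical destination location id
#         'pricelist_id': pricelist_id,     # hypothetical purchase pricelist id
#     }, context=context)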
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise UserError(_('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
self.signal_workflow(cr, uid, unlink_ids, 'purchase_cancel')
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'state' in init_values and record.state == 'approved':
return 'purchase.mt_rfq_approved'
elif 'state' in init_values and record.state == 'confirmed':
return 'purchase.mt_rfq_confirmed'
elif 'state' in init_values and record.state == 'done':
return 'purchase.mt_rfq_done'
return super(purchase_order, self)._track_subtype(cr, uid, ids, init_values, context=context)
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
if status in ('draft', 'cancel'):
order_line_ids += [po_line.id for po_line in order.order_line]
else: # Do not change the status of already cancelled lines
order_line_ids += [po_line.id for po_line in order.order_line if po_line.state != 'cancel']
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
if order_line_ids and status == 'cancel':
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', order_line_ids)], context=context)
if procs:
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
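# A hedged usage sketch: cancelling every non-cancelled line of an order and pushing the
# related procurements into exception (this is what wkf_action_cancel relies on further below):
#
#     self.set_order_line_status(cr, uid, [order_id], 'cancel', context=context)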
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
#Destination address is used when dropshipping
def onchange_dest_address_id(self, cr, uid, ids, address_id, context=None):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {}
supplier = address.browse(cr, uid, address_id, context=context)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
value = {}
if picking_type_id:
picktype = self.pool.get("stock.picking.type").browse(cr, uid, picking_type_id, context=context)
if picktype.default_location_dest_id:
value.update({'location_id': picktype.default_location_dest_id.id, 'related_usage': picktype.default_location_dest_id.usage})
value.update({'related_location_id': picktype.default_location_dest_id.id})
return {'value': value}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position_id': False,
'payment_term_id': False,
}}
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
fp = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, context=context)
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'], context=context)
supplier = partner.browse(cr, uid, partner_id, context=context)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position_id': fp or supplier.property_account_position_id and supplier.property_account_position_id.id or False,
'payment_term_id': supplier.property_supplier_payment_term_id.id or False,
}}
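# Shape of the value returned by onchange_partner_id (ids are illustrative only):
#
#     {'value': {'pricelist_id': 3,            # the supplier's purchase pricelist
#                'fiscal_position_id': 1,      # mapped fiscal position, or the partner's own
#                'payment_term_id': 2}}        # the supplier payment term, or False
#
# When no partner is selected, the handler resets fiscal_position_id and payment_term_id to False.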
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
action_id = mod_obj.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree2')
result = act_obj.read(cr, uid, action_id, context=context)
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids += [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise UserError(_('Please create Invoices.'))
if len(inv_ids) > 1:
result['domain'] = [('id', 'in', inv_ids)]
else:
res = mod_obj.xmlid_to_res_id(cr, uid, 'account.invoice_supplier_form')
result['views'] = [(res, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing invoices of the given purchase order ids. They can be shown either in a list view or, if there is only one invoice to show, in a form view.
'''
context = dict(context or {})
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line if line.state != 'cancel']})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Bills'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing pickings of the given purchase order ids.
'''
if context is None:
context = {}
mod_obj = self.pool.get('ir.model.data')
dummy, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree'))
action = self.pool.get('ir.actions.act_window').read(cr, uid, action_id, context=context)
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
#override the context to get rid of the default filtering on picking type
action['context'] = {}
#choose the view_mode accordingly
if len(pick_ids) > 1:
action['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
action['views'] = [(res and res[1] or False, 'form')]
action['res_id'] = pick_ids and pick_ids[0] or False
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def wkf_bid_received(self, cr, uid, ids, context=None):
bid_date = fields.date.context_today(self, cr, uid, context=context)
self.message_post(cr, uid, ids, body=_("Bid received on %s") % (bid_date), context=context)
return self.write(cr, uid, ids, {'state':'bid', 'bid_date': bid_date})
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
if not context:
context= {}
ir_model_data = self.pool.get('ir.model.data')
try:
if context.get('send_rfq', False):
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
else:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase_done')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
This function prints the request for quotation and marks it as sent, so that the next step of the workflow can be followed more easily
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_workflow(cr, uid, ids, 'send_rfq')
return self.pool['report'].get_action(cr, uid, ids, 'purchase.report_purchasequotation', context=context)
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not any(line.state != 'cancel' for line in po.order_line):
raise UserError(_('You cannot confirm a purchase order without any purchase order line.'))
if po.invoice_method == 'picking' and not any([l.product_id and l.product_id.type in ('product', 'consu') and l.state != 'cancel' for l in po.order_line]):
raise osv.except_osv(
_('Error!'),
_("You cannot confirm a purchase order with Invoice Control Method 'Based on incoming shipments' that doesn't contain any stockable item."))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense_id.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ_id.id
if not acc_id:
raise UserError(_('Define an expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ_id', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position_id or False
#For anglo-saxon accounting
account_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)
if po_line.company_id.anglo_saxon_accounting and po_line.product_id and not po_line.product_id.type == 'service':
acc_id = po_line.product_id.property_stock_account_input and po_line.product_id.property_stock_account_input.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_stock_account_input_categ_id and po_line.product_id.categ_id.property_stock_account_input_categ_id.id
if acc_id:
fpos = po_line.order_id.fiscal_position_id or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, acc_id)
return account_id
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_ids': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
'purchase_line_id': order_line.id,
}
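# A hedged sketch of how the dictionary above is typically consumed (this mirrors
# action_invoice_create below; all browse records and ids are assumed to exist):
#
#     acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
#     vals = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
#     inv_line_id = self.pool.get('account.invoice.line').create(cr, uid, vals, context=context)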
def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
"""Prepare the dict of values to create the new invoice for a
purchase order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: purchase.order record to invoice
:param list(int) line_ids: list of invoice line IDs that must be
attached to the invoice
:return: dict of value to create() the invoice
"""
journal_ids = self.pool['account.journal'].search(
cr, uid, [('type', '=', 'purchase'),
('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise UserError(_('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
return {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': order.partner_id.property_account_payable_id.id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line_ids': [(6, 0, line_ids)],
'origin': order.name,
'fiscal_position_id': order.fiscal_position_id.id or False,
'payment_term_id': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
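# A hedged override sketch of the extension pattern described in the docstring above
# (assumption: a custom _inherit module; the extra 'comment' value is purely illustrative):
#
#     class purchase_order(osv.osv):
#         _inherit = 'purchase.order'
#
#         def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
#             vals = super(purchase_order, self)._prepare_invoice(
#                 cr, uid, order, line_ids, context=context)
#             vals['comment'] = order.notes or ''   # hypothetical extra invoice value
#             return vals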
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
self.set_order_line_status(cr, uid, ids, 'draft', context=context)
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id]) # TODO is it necessary to interleave the calls?
self.create_workflow(cr, uid, [p_id])
return True
def wkf_po_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'done'}, context=context)
self.set_order_line_status(cr, uid, ids, 'done', context=context)
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
context = dict(context or {})
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
# generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
inv_lines = []
for po_line in order.order_line:
if po_line.state == 'cancel':
continue
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
# get invoice data and create invoice
inv_data = self._prepare_invoice(cr, uid, order, inv_lines, context=context)
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]})
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.state == 'cancel':
continue
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
for move in pick.move_lines:
if pick.state == 'done':
raise UserError(_('Unable to cancel the purchase order %s.') % (purchase.name,) + ' ' + _('You have already received some goods for it.'))
self.pool.get('stock.picking').action_cancel(cr, uid, [x.id for x in purchase.picking_ids if x.state != 'cancel'], context=context)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel', 'draft'):
raise UserError(_("Unable to cancel this purchase order.") + " " + _("You must first cancel all invoices related to this purchase order."))
self.pool.get('account.invoice') \
.signal_workflow(cr, uid, map(attrgetter('id'), purchase.invoice_ids), 'invoice_cancel')
self.signal_workflow(cr, uid, ids, 'purchase_cancel')
return True
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
''' prepare the stock move data from the PO line. This function returns a list of dictionary ready to be used in stock.move's create()'''
product_uom = self.pool.get('product.uom')
price_unit = order_line.price_unit
if order_line.product_uom.id != order_line.product_id.uom_id.id:
price_unit *= order_line.product_uom.factor / order_line.product_id.uom_id.factor
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
res = []
move_template = {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': order.date_order,
'date_expected': order_line.date_planned,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id,
'move_dest_id': False,
'state': 'draft',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit,
'picking_type_id': order.picking_type_id.id,
'group_id': group_id,
'procurement_id': False,
'origin': order.name,
'route_ids': order.picking_type_id.warehouse_id and [(6, 0, [x.id for x in order.picking_type_id.warehouse_id.route_ids])] or [],
'warehouse_id':order.picking_type_id.warehouse_id.id,
'invoice_state': order.invoice_method == 'picking' and '2binvoiced' or 'none',
}
diff_quantity = order_line.product_qty
for procurement in order_line.procurement_ids:
procurement_qty = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, to_uom_id=order_line.product_uom.id)
tmp = move_template.copy()
tmp.update({
'product_uom_qty': min(procurement_qty, diff_quantity),
'product_uos_qty': min(procurement_qty, diff_quantity),
'move_dest_id': procurement.move_dest_id.id, #move destination is same as procurement destination
'procurement_id': procurement.id,
'invoice_state': procurement.rule_id.invoice_state or (procurement.location_id and procurement.location_id.usage == 'customer' and procurement.invoice_state=='2binvoiced' and '2binvoiced') or (order.invoice_method == 'picking' and '2binvoiced') or 'none', #dropship case takes from sale
'propagate': procurement.rule_id.propagate,
})
diff_quantity -= min(procurement_qty, diff_quantity)
res.append(tmp)
#if the order line has a bigger quantity than the procurement it was for (manually changed or minimal quantity), then
#split the future stock move in two because the route followed may be different.
if float_compare(diff_quantity, 0.0, precision_rounding=order_line.product_uom.rounding) > 0:
move_template['product_uom_qty'] = diff_quantity
move_template['product_uos_qty'] = diff_quantity
res.append(move_template)
return res
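# Worked example for the unit price conversion above (figures are illustrative, assuming the
# standard 'Unit(s)'/'Dozen(s)' UoM factors of 1.0 and 1/12): a line priced 24.0 per Dozen gives
# a stock move priced 24.0 * ((1/12) / 1.0) = 2.0 per Unit, i.e. the price follows the stock UoM.
# If the order currency differs from the company currency, that amount is then converted without
# rounding so the standard price keeps its full precision.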
def _create_stock_moves(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates appropriate stock moves for given order lines, whose can optionally create a
picking if none is given or no suitable is found, then confirms the moves, makes them
available, and confirms the pickings.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise a standard
incoming picking will be created to wrap the stock moves (default behavior of the stock.move)
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: None
"""
stock_move = self.pool.get('stock.move')
todo_moves = []
if order.group_id:
new_group = order.group_id.id
else:
new_group = self.pool.get("procurement.group").create(cr, uid, {'name': order.name, 'partner_id': order.partner_id.id}, context=context)
for order_line in order_lines:
if order_line.state == 'cancel':
continue
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
for vals in self._prepare_order_line_move(cr, uid, order, order_line, picking_id, new_group, context=context):
move = stock_move.create(cr, uid, vals, context=context)
todo_moves.append(move)
todo_moves = stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
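# A hedged override sketch of the partitioning pattern mentioned in the docstring above
# (assumption: a custom _inherit = 'purchase.order' extension that wants the moves of each
# order line wrapped separately; leaving picking_id False lets the default behaviour apply):
#
#     def _create_stock_moves(self, cr, uid, order, order_lines, picking_id=False, context=None):
#         if picking_id:
#             return super(purchase_order, self)._create_stock_moves(
#                 cr, uid, order, order_lines, picking_id=picking_id, context=context)
#         for line in order_lines:
#             super(purchase_order, self)._create_stock_moves(
#                 cr, uid, order, [line], picking_id=False, context=context)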
def test_moves_done(self, cr, uid, ids, context=None):
'''PO is done at the delivery side if all the incoming shipments are done'''
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state != 'done':
return False
return True
def test_moves_except(self, cr, uid, ids, context=None):
''' PO is in exception at the delivery side if one of the picking is canceled
and the other pickings are completed (done or canceled)
'''
at_least_one_canceled = False
alldoneorcancel = True
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state == 'cancel':
at_least_one_canceled = True
if picking.state not in ['done', 'cancel']:
alldoneorcancel = False
return at_least_one_canceled and alldoneorcancel
def move_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.move_ids]
return res
def action_picking_create(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids):
picking_vals = {
'picking_type_id': order.picking_type_id.id,
'partner_id': order.partner_id.id,
'date': order.date_order,
'origin': order.name,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
}
picking_id = self.pool.get('stock.picking').create(cr, uid, picking_vals, context=context)
self._create_stock_moves(cr, uid, order, order.order_line, picking_id, context=context)
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
# Do check on related procurements:
proc_obj = self.pool.get("procurement.order")
po_lines = []
for po in self.browse(cr, uid, ids, context=context):
po_lines += [x.id for x in po.order_line if x.state != 'cancel']
if po_lines:
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', po_lines)], context=context)
if procs:
proc_obj.check(cr, uid, procs, context=context)
for id in ids:
self.message_post(cr, uid, id, body=_("Products received"), context=context)
return True
def do_merge(self, cr, uid, ids, context=None):
"""
To merge similar type of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
* Purchase Orders have the same stock location, same pricelist, same currency
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: dictionary mapping each newly created purchase order id to the list of merged (old) order ids
"""
#TOFIX: merged order lines should be unlinked
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, browse_record_list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
context = dict(context or {})
# Compute what the new orders should contain
new_orders = {}
order_lines_to_move = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id', 'currency_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
order_lines_to_move.setdefault(order_key, [])
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'picking_type_id': porder.picking_type_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'currency_id': porder.currency_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position_id': porder.fiscal_position_id and porder.fiscal_position_id.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
order_lines_to_move[order_key] += [order_line.id for order_line in porder.order_line
if order_line.state != 'cancel']
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(6, 0, order_lines_to_move[order_key])]
# create the new order
context.update({'mail_create_nolog': True})
neworder_id = self.create(cr, uid, order_data)
self.message_post(cr, uid, [neworder_id], body=_("RFQ created"), context=context)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
self.redirect_workflow(cr, uid, [(old_id, neworder_id)])
self.signal_workflow(cr, uid, [old_id], 'purchase_cancel')
return orders_info
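# A hedged usage sketch (ids are illustrative): merging two draft RFQs of the same supplier,
# location, pricelist and currency returns a dict mapping the new order to the merged ones.
#
#     result = self.pool.get('purchase.order').do_merge(cr, uid, [12, 13], context=context)
#     # e.g. result == {27: [12, 13]}; orders 12 and 13 are then cancelled through the workflow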
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
cur = line.order_id.pricelist_id.currency_id
res[line.id] = line.taxes_id.compute_all(line.price_unit, cur, line.product_qty, product=line.product_id, partner=line.order_id.partner_id)['total_excluded']
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.datetime('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits=0),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')],
'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically when the purchase order is in the draft state. \
\n* The \'Confirmed\' status is set automatically when the purchase order is confirmed. \
\n* The \'Done\' status is set automatically when the purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when the user cancels the purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel',
'order_line_id', 'invoice_id', 'Invoice Lines',
readonly=True, copy=False),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'partner_id': fields.related('order_id', 'partner_id', string='Partner', readonly=True, type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id', 'date_order', string='Order Date', readonly=True, type="datetime"),
'procurement_ids': fields.one2many('procurement.order', 'purchase_line_id', string='Associated procurements'),
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def unlink(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.order_id.state in ['approved', 'done'] and line.state not in ['draft', 'cancel']:
raise UserError(_('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
procurement_obj = self.pool.get('procurement.order')
procurement_ids_to_except = procurement_obj.search(cr, uid, [('purchase_line_id', 'in', ids)], context=context)
if procurement_ids_to_except:
for po_id in procurement_ids_to_except:
procurement_obj.message_post(cr, uid, po_id, body=_('Purchase order line deleted.'), context=context)
procurement_obj.write(cr, uid, procurement_ids_to_except, {'state': 'exception'}, context=context)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, state=state, replace=False, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order field, as a string in
DEFAULT_SERVER_DATETIME_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta(days=supplier_delay)
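# Worked example (illustrative values): with date_order_str = '2016-03-01 10:00:00' and a
# supplier delay of 5 days, the scheduled date becomes datetime(2016, 3, 6, 10, 0, 0);
# without supplier info the delay defaults to 0 and the order date is returned unchanged.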
def action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
# We will group by PO first, so we do the check only once for each PO
purchase_orders = list(set([x.order_id for x in self.browse(cr, uid, ids, context=context)]))
for purchase in purchase_orders:
if all([l.state == 'cancel' for l in purchase.order_line]):
self.pool.get('purchase.order').action_cancel(cr, uid, [purchase.id], context=context)
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', replace=True, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise UserError(_('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise UserError(_('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
if replace:
#call name_get() with partner in the context to eventually match name and description in the seller_ids field
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.datetime.now()
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
if float_compare(min_qty, qty, precision_digits=precision) == 1:  # If the supplier's minimal quantity is greater than the quantity entered by the user, fall back to the minimal quantity.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
price = price_unit
if price_unit is False or price_unit is None:
# - determine price_unit and taxes_id
if pricelist_id:
date_order_str = datetime.strptime(date_order, DEFAULT_SERVER_DATETIME_FORMAT).strftime(DEFAULT_SERVER_DATE_FORMAT)
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order_str})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
return [('buy', _('Buy'))] + super(procurement_rule, self)._get_action(cr, uid, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line', 'Purchase Order Line'),
'purchase_id': fields.related('purchase_line_id', 'order_id', type='many2one', relation='purchase.order', string='Purchase Order'),
}
def propagate_cancels(self, cr, uid, ids, context=None):
purchase_line_obj = self.pool.get('purchase.order.line')
lines_to_cancel = []
uom_obj = self.pool.get("product.uom")
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.rule_id.action == 'buy' and procurement.purchase_line_id:
if procurement.purchase_line_id.state not in ('draft', 'cancel'):
raise UserError(
_('Cannot cancel this procurement because the related purchase order has already been confirmed. Please cancel the purchase order first.'))
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, cancel=True, context=context)
if new_qty != procurement.purchase_line_id.product_qty:
purchase_line_obj.write(cr, uid, [procurement.purchase_line_id.id], {'product_qty': new_qty, 'price_unit': new_price}, context=context)
if float_compare(new_qty, 0.0, precision_rounding=procurement.product_uom.rounding) != 1:
if procurement.purchase_line_id.id not in lines_to_cancel:
lines_to_cancel += [procurement.purchase_line_id.id]
if lines_to_cancel:
purchase_line_obj.action_cancel(cr, uid, lines_to_cancel, context=context)
purchase_line_obj.unlink(cr, uid, lines_to_cancel, context=context)
return super(procurement_order, self).propagate_cancels(cr, uid, ids, context=context)
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'buy':
#make a purchase order for the procurement
return self.make_po(cr, uid, [procurement.id], context=context)[procurement.id]
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
#TODO: Autocommit needed?
def run(self, cr, uid, ids, autocommit=False, context=None):
procs = self.browse(cr, uid, ids, context=context)
to_assign = [x for x in procs if x.state not in ('running', 'done')]
self._assign_multi(cr, uid, to_assign, context=context)
buy_ids = [x.id for x in to_assign if x.rule_id and x.rule_id.action == 'buy']
if buy_ids:
result_dict = self.make_po(cr, uid, buy_ids, context=context)
runnings = []
exceptions = []
for proc in result_dict.keys():
if result_dict[proc]:
runnings += [proc]
else:
exceptions += [proc]
if runnings:
self.write(cr, uid, runnings, {'state': 'running'}, context=context)
if exceptions:
self.write(cr, uid, exceptions, {'state': 'exception'}, context=context)
set_others = set(ids) - set(buy_ids)
return super(procurement_order, self).run(cr, uid, list(set_others), context=context)
def _check(self, cr, uid, procurement, context=None):
if procurement.purchase_line_id:
if procurement.purchase_line_id.order_id.shipped:
return True
elif procurement.move_ids:
moves = self.pool.get('stock.move').browse(cr, uid, [x.id for x in procurement.move_ids], context=context)
return all(move.state == 'done' for move in moves)
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
def _check_supplier_info(self, cr, uid, ids, context=None):
''' Check the supplier info field of a product and write an error message on the procurement if needed.
Returns True if all needed information is there, False if some configuration mistake is detected.
'''
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
partner = procurement.product_id.seller_id  # Take the main supplier of the procurement's product.
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
elif not partner:
message = _('No default supplier defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the supplier')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
raise UserError(_('The product "%s" has been defined with your company as reseller which seems to be a configuration error!' % procurement.product_id.name))
return True
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:params procurement: the procurement object generating the purchase order
:params dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
:params dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
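# Worked example combining the two helpers above (illustrative values): a procurement planned
# for 2016-03-20 with company.po_lead = 2 gives a schedule_date of 2016-03-18, and a product
# seller_delay of 7 days then gives a purchase order date of 2016-03-11.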
def _get_product_supplier(self, cr, uid, procurement, context=None):
''' returns the main supplier of the procurement's product given as argument'''
supplierinfo = self.pool['product.supplierinfo']
company_supplier = supplierinfo.search(cr, uid,
[('product_tmpl_id', '=', procurement.product_id.product_tmpl_id.id), ('company_id', '=', procurement.company_id.id)], limit=1, context=context)
if company_supplier:
return supplierinfo.browse(cr, uid, company_supplier[0], context=context).name
return procurement.product_id.seller_id
def _get_po_line_values_from_procs(self, cr, uid, procurements, partner, schedule_date, context=None):
res = {}
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
pricelist_id = partner.property_product_pricelist_purchase.id
prices_qty = []
quantities = {}  # per-procurement quantity converted to the purchase UoM, reused below
for procurement in procurements:
seller_qty = procurement.product_id.seller_qty if procurement.location_id.usage != 'customer' else 0.0
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty, seller_qty)
quantities[procurement.id] = qty
prices_qty += [(procurement.product_id, qty, partner)]
prices = pricelist_obj.price_get_multi(cr, uid, [pricelist_id], prices_qty)
# Pass partner_id and the partner's language in the context so the purchase order line name is built consistently
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner.id})
names = prod_obj.name_get(cr, uid, [x.product_id.id for x in procurements], context=context)
names_dict = {}
for id, name in names:
names_dict[id] = name
for procurement in procurements:
taxes_ids = procurement.product_id.supplier_taxes_id
taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position_id, taxes_ids)
name = names_dict[procurement.product_id.id]
if procurement.product_id.description_purchase:
name += '\n' + procurement.product_id.description_purchase
price = prices[procurement.product_id.id][pricelist_id]
values = {
'name': name,
'product_qty': quantities[procurement.id],
'product_id': procurement.product_id.id,
'product_uom': procurement.product_id.uom_po_id.id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'taxes_id': [(6, 0, taxes)],
'procurement_ids': [(4, procurement.id)]
}
res[procurement.id] = values
return res
def _calc_new_qty_price(self, cr, uid, procurement, po_line=None, cancel=False, context=None):
if not po_line:
po_line = procurement.purchase_line_id
uom_obj = self.pool.get('product.uom')
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty,
procurement.product_id.uom_po_id.id)
if cancel:
qty = -qty
# Make sure we use the minimum quantity of the partner corresponding to the PO
# This does not apply in case of dropshipping
supplierinfo_min_qty = 0.0
if po_line.order_id.location_id.usage != 'customer':
if po_line.product_id.seller_id.id == po_line.order_id.partner_id.id:
supplierinfo_min_qty = po_line.product_id.seller_qty
else:
supplierinfo_obj = self.pool.get('product.supplierinfo')
supplierinfo_ids = supplierinfo_obj.search(cr, uid, [('name', '=', po_line.order_id.partner_id.id), ('product_tmpl_id', '=', po_line.product_id.product_tmpl_id.id)])
supplierinfo_min_qty = supplierinfo_obj.browse(cr, uid, supplierinfo_ids).min_qty
if supplierinfo_min_qty == 0.0:
qty += po_line.product_qty
else:
# Recompute quantity by adding existing running procurements.
for proc in po_line.procurement_ids:
qty += uom_obj._compute_qty(cr, uid, proc.product_uom.id, proc.product_qty,
proc.product_id.uom_po_id.id) if proc.state == 'running' else 0.0
qty = max(qty, supplierinfo_min_qty) if qty > 0.0 else 0.0
price = po_line.price_unit
if qty != po_line.product_qty:
pricelist_obj = self.pool.get('product.pricelist')
pricelist_id = po_line.order_id.partner_id.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, po_line.order_id.partner_id.id, {'uom': procurement.product_uom.id})[pricelist_id]
return qty, price
def _get_grouping_dicts(self, cr, uid, ids, context=None):
"""
Group the procurements according to the purchase order they should go into, so that lines
going to the same purchase order can be processed at once.
Returns two dictionaries:
add_purchase_procs: key: id of an existing draft purchase order, value: procurements to add to that order
create_purchase_procs: key: grouping tuple describing the purchase order that has to be created,
value: procurements to add to it
"""
po_obj = self.pool.get('purchase.order')
# Regroup POs
cr.execute("""
SELECT psi.name, p.id, pr.id, pr.picking_type_id, p.location_id, p.partner_dest_id, p.company_id, p.group_id,
pr.group_propagation_option, pr.group_id, psi.qty
FROM procurement_order AS p
LEFT JOIN procurement_rule AS pr ON pr.id = p.rule_id
LEFT JOIN procurement_group AS pg ON p.group_id = pg.id,
product_supplierinfo AS psi, product_product AS pp
WHERE
p.product_id = pp.id AND p.id in %s AND psi.product_tmpl_id = pp.product_tmpl_id
AND (psi.company_id = p.company_id or psi.company_id IS NULL)
ORDER BY psi.sequence,
psi.name, p.rule_id, p.location_id, p.company_id, p.partner_dest_id, p.group_id
""", (tuple(ids), ))
res = cr.fetchall()
old = False
# A giant dict for grouping lines, ... to do at once
create_purchase_procs = {} # Lines to add to a newly to create po
add_purchase_procs = {} # Lines to add/adjust in an existing po
proc_seller = {} # To check we only process one po
for partner, proc, rule, pick_type, location, partner_dest, company, group, group_propagation, fixed_group, qty in res:
if not proc_seller.get(proc):
proc_seller[proc] = partner
new = partner, rule, pick_type, location, company, group, group_propagation, fixed_group
if new != old:
old = new
dom = [
('partner_id', '=', partner), ('state', '=', 'draft'), ('picking_type_id', '=', pick_type),
('location_id', '=', location), ('company_id', '=', company), ('dest_address_id', '=', partner_dest)]
if group_propagation == 'propagate':
dom += [('group_id', '=', group)]
elif group_propagation == 'fixed':
dom += [('group_id', '=', fixed_group)]
available_draft_po_ids = po_obj.search(cr, uid, dom, context=context)
available_draft_po = available_draft_po_ids and available_draft_po_ids[0] or False
# Add to dictionary
if available_draft_po:
if add_purchase_procs.get(available_draft_po):
add_purchase_procs[available_draft_po] += [proc]
else:
add_purchase_procs[available_draft_po] = [proc]
else:
if create_purchase_procs.get(new):
create_purchase_procs[new] += [proc]
else:
create_purchase_procs[new] = [proc]
return add_purchase_procs, create_purchase_procs
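# Illustrative shapes of the two dictionaries returned above (all ids are hypothetical):
#
#     add_purchase_procs    = {42: [101, 102]}   # procurements 101/102 go into existing draft PO 42
#     create_purchase_procs = {(7, 3, 1, 12, 1, False, 'none', False): [103]}
#     # key = (partner, rule, picking type, location, company, group, group propagation, fixed group)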
def make_po(self, cr, uid, ids, context=None):
res = {}
po_obj = self.pool.get('purchase.order')
po_line_obj = self.pool.get('purchase.order.line')
seq_obj = self.pool.get('ir.sequence')
uom_obj = self.pool.get('product.uom')
add_purchase_procs, create_purchase_procs = self._get_grouping_dicts(cr, uid, ids, context=context)
procs_done = []
# Let us check existing purchase orders and add/adjust lines on them
for add_purchase in add_purchase_procs.keys():
procs_done += add_purchase_procs[add_purchase]
po = po_obj.browse(cr, uid, add_purchase, context=context)
lines_to_update = {}
line_values = []
procurements = self.browse(cr, uid, add_purchase_procs[add_purchase], context=context)
po_line_ids = po_line_obj.search(cr, uid, [('order_id', '=', add_purchase), ('product_id', 'in', [x.product_id.id for x in procurements])], context=context)
po_lines = po_line_obj.browse(cr, uid, po_line_ids, context=context)
po_prod_dict = {}
for pol in po_lines:
po_prod_dict[pol.product_id.id] = pol
procs_to_create = []
#Check which procurements need a new line and which need to be added to an existing one
for proc in procurements:
if po_prod_dict.get(proc.product_id.id):
po_line = po_prod_dict[proc.product_id.id]
# FIXME: compute quantity using `_calc_new_qty_price` method.
# new_qty, new_price = self._calc_new_qty_price(cr, uid, proc, po_line=po_line, context=context)
uom_id = po_line.product_uom # Convert to UoM of existing line
qty = uom_obj._compute_qty_obj(cr, uid, proc.product_uom, proc.product_qty, uom_id)
if lines_to_update.get(po_line):
lines_to_update[po_line] += [(proc, qty)]
else:
lines_to_update[po_line] = [(proc, qty)]
else:
procs_to_create.append(proc)
procs = []
# FIXME: these are not real tracking values, it should be fixed if tracking values for one2many
# are managed
def format_message(message_description, tracked_values):
message = ''
if message_description:
message = '<span>%s</span>' % message_description
for name, values in tracked_values.iteritems():
message += '<div> • <b>%s</b>: ' % name
message += '%s</div>' % values
return message
# Update the quantities of the lines that need to be adjusted
for line in lines_to_update.keys():
tot_qty = 0
for proc, qty in lines_to_update[line]:
tot_qty += qty
self.message_post(cr, uid, proc.id, body=_("Quantity added in existing Purchase Order Line"), context=context)
msg = format_message(_('Quantity added in existing Purchase Order Line'), {'Product': proc.product_id.name, 'Quantity': proc.product_qty, 'Procurement': proc.origin})
po_obj.message_post(cr, uid, [add_purchase], body=msg, context=context)
line_values += [(1, line.id, {'product_qty': line.product_qty + tot_qty, 'procurement_ids': [(4, x[0].id) for x in lines_to_update[line]]})]
# Create lines for which no line exists yet
if procs_to_create:
partner = po.partner_id
schedule_date = datetime.strptime(po.minimum_planned_date, DEFAULT_SERVER_DATETIME_FORMAT)
value_lines = self._get_po_line_values_from_procs(cr, uid, procs_to_create, partner, schedule_date, context=context)
line_values += [(0, 0, value_lines[x]) for x in value_lines.keys()]
for proc in procs_to_create:
self.message_post(cr, uid, [proc.id], body=_("Purchase line created and linked to an existing Purchase Order"), context=context)
msg = format_message(_('Purchase order line added'), {'Product': proc.product_id.name, 'Quantity': proc.product_qty, 'Procurement': proc.origin})
po_obj.message_post(cr, uid, [add_purchase], body=msg, context=context)
po_obj.write(cr, uid, [add_purchase], {'order_line': line_values}, context=context)
# Create new purchase orders
partner_obj = self.pool.get("res.partner")
new_pos = []
for create_purchase in create_purchase_procs.keys():
procs_done += create_purchase_procs[create_purchase]
line_values = []
procurements = self.browse(cr, uid, create_purchase_procs[create_purchase], context=context)
partner = partner_obj.browse(cr, uid, create_purchase[0], context=context)
#Create purchase order itself:
procurement = procurements[0]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, procurement.company_id, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, procurement.company_id, schedule_date, context=context)
value_lines = self._get_po_line_values_from_procs(cr, uid, procurements, partner, schedule_date, context=context)
line_values += [(0, 0, value_lines[x]) for x in value_lines.keys()]
name = seq_obj.next_by_code(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
gpo = procurement.rule_id.group_propagation_option
group = (gpo == 'fixed' and procurement.rule_id.group_id.id) or (gpo == 'propagate' and procurement.group_id.id) or False
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': create_purchase[0],
'location_id': procurement.location_id.id,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'pricelist_id': partner.property_product_pricelist_purchase.id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position_id': partner.property_account_position_id.id,
'payment_term_id': partner.property_supplier_payment_term_id.id,
'dest_address_id': procurement.partner_dest_id.id,
'group_id': group,
'order_line': line_values,
}
new_po = po_obj.create(cr, uid, po_vals, context=context)
new_pos.append(new_po)
for proc in create_purchase_procs[create_purchase]:
self.message_post(cr, uid, proc, body=_("Draft Purchase Order created"), context=context)
other_proc_ids = list(set(ids) - set(procs_done))
res = dict.fromkeys(ids, True)
if other_proc_ids:
other_procs = self.browse(cr, uid, other_proc_ids, context=context)
for procurement in other_procs:
res[procurement.id] = False
self.message_post(cr, uid, [procurement.id], _('There is no supplier associated to product %s') % (procurement.product_id.name))
return res
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
if mail_sent and mail.model == 'purchase.order':
obj = self.pool.get('purchase.order').browse(cr, uid, mail.res_id, context=context)
if obj.state == 'draft':
self.pool.get('purchase.order').signal_workflow(cr, uid, [mail.res_id], 'send_rfq')
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context, mail_sent=mail_sent)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
def _get_buy_route(self, cr, uid, context=None):
buy_route = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'purchase.route_warehouse0_buy')
if buy_route:
return [buy_route]
return []
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
for template in self.browse(cr, uid, ids, context=context):
res[template.id] = sum([p.purchase_count for p in template.product_variant_ids])
return res
_columns = {
'property_account_creditor_price_difference': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
_defaults = {
'purchase_ok': 1,
'route_ids': _get_buy_route,
}
def action_view_purchases(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
return result
class product_product(osv.Model):
_name = 'product.product'
_inherit = 'product.product'
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
r = dict.fromkeys(ids, 0)
domain = [
('state', 'in', ['confirmed', 'approved', 'except_picking', 'except_invoice', 'done']),
('product_id', 'in', ids),
]
for group in self.pool['purchase.report'].read_group(cr, uid, domain, ['product_id', 'quantity'], ['product_id'], context=context):
r[group['product_id'][0]] = group['quantity']
return r
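# _purchase_count relies on read_group to aggregate purchase.report rows: the
# results are grouped by product_id and the 'quantity' field is summed per
# group, so a single query covers the whole id list.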
def action_view_purchases(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
result = self.pool['product.template']._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, ids)) + "])]"
return result
_columns = {
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
class product_category(osv.Model):
_inherit = "product.category"
_columns = {
'property_account_creditor_price_difference_categ': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, auto_commit=False, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('purchase.order').signal_workflow(cr, uid, [context['default_res_id']], 'send_rfq')
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
""" Override account_invoice to add Chatter messages on the related purchase
orders, logging the invoice receipt or payment. """
_inherit = 'account.invoice'
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for order in purchase_order_obj.browse(cr, user_id, po_ids, context=context):
purchase_order_obj.message_post(cr, user_id, order.id, body=_("Invoice received"), context=context)
invoiced = []
shipped = True
# for invoice method manual or order, don't care about shipping state
# for invoices based on incoming shipments, beware of partial deliveries
if (order.invoice_method == 'picking' and
not all(picking.invoice_state in ['invoiced'] for picking in order.picking_ids)):
shipped = False
for po_line in order.order_line:
if all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines):
invoiced.append(po_line.id)
if invoiced and shipped:
self.pool['purchase.order.line'].write(cr, user_id, invoiced, {'invoiced': True})
workflow.trg_write(user_id, 'purchase.order', order.id, cr)
return res
def confirm_paid(self, cr, uid, ids, context=None):
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice paid"), context=context)
return res
class account_invoice_line(osv.Model):
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_inherit = 'account.invoice.line'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
def move_line_get(self, cr, uid, invoice_id, context=None):
res = super(account_invoice_line, self).move_line_get(cr, uid, invoice_id, context=context)
# fetch the invoice record to check the company's accounting mode and the invoice type
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
if inv.company_id.anglo_saxon_accounting:
if inv.type in ('in_invoice', 'in_refund'):
for i_line in inv.invoice_line_ids:
res.extend(self._anglo_saxon_purchase_move_lines(cr, uid, i_line, res, context=context))
return res
def _anglo_saxon_purchase_move_lines(self, cr, uid, i_line, res, context=None):
"""Return the additional move lines for purchase invoices and refunds.
i_line: An account.invoice.line object.
res: The move line entries produced so far by the parent move_line_get.
"""
inv = i_line.invoice_id
company_currency = inv.company_id.currency_id.id
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if i_line.product_id.type != 'service':
# get the price difference account at the product
acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
if not acc:
# if not found on the product get the price difference account at the category
acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
a = None
# oa will be the stock input account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_input_categ_id and i_line.product_id.categ_id.property_stock_account_input_categ_id.id
if oa:
# get the fiscal position
fpos = i_line.invoice_id.fiscal_position_id or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
diff_res = []
account_prec = inv.company_id.currency_id.decimal_places
# calculate and write down the possible price difference between invoice price and product price
for line in res:
if line.get('invl_id', 0) == i_line.id and a == line['account_id']:
uom = i_line.product_id.uos_id or i_line.product_id.uom_id
valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
# for average/fifo/lifo costing methods, fetch the real cost price from incoming moves
stock_move_obj = self.pool.get('stock.move')
valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
if valuation_stock_move:
valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
if inv.currency_id.id != company_currency:
valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
# price with discount and without tax included
price_unit = self.pool['account.tax'].compute_all(cr, uid, line['taxes'], i_line.price_unit * (1-(i_line.discount or 0.0)/100.0),
inv.currency_id.id, line['quantity'])['total_excluded']
price_line = round(valuation_price_unit * line['quantity'], account_prec)
price_diff = round(price_unit - price_line, account_prec)
line.update({'price': price_line})
diff_res.append({
'type': 'src',
'name': i_line.name[:64],
'price_unit': round(price_diff / line['quantity'], account_prec),
'quantity': line['quantity'],
'price': price_diff,
'account_id': acc,
'product_id': line['product_id'],
'uos_id': line['uos_id'],
'account_analytic_id': line['account_analytic_id'],
'taxes': line.get('taxes', []),
})
return diff_res
return []
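# Illustrative example (assumed figures, no tax and no discount): an invoice
# line for 5 units invoiced at 12.0 while the valuation price is 10.0 gives
# price_line = round(10.0 * 5) = 50.0 and price_diff = 60.0 - 50.0 = 10.0;
# the 10.0 is posted to the price difference account with a per-unit amount of 2.0.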
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import pytz
from openerp import SUPERUSER_ID, workflow
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record_list, browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
from openerp.exceptions import UserError
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj = self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in line.taxes_id.compute_all(line.price_unit, cur, line.product_qty, product=line.product_id, partner=order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if not isinstance(ids, list):
ids = [ids]
pol_obj = self.pool.get('purchase.order.line')
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
pol_ids = pol_obj.search(cr, uid, [
('order_id', '=', po.id), '|', ('date_planned', '=', po.minimum_planned_date), ('date_planned', '<', value)
], context=context)
pol_obj.write(cr, uid, pol_ids, {'date_planned': value}, context=context)
self.invalidate_cache(cr, uid, context=context)
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.state == 'cancel':
continue
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.order_id, sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
purchase_order_line p on (p.id=m.purchase_line_id)
WHERE
p.order_id IN %s GROUP BY m.state, p.order_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
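# _shipped_rate aggregates, per purchase order, the stock move quantities by
# state: 'done' quantities count both as received and as total, any other
# non-cancelled state only adds to the total, and the result is returned as a
# percentage (0.0 when nothing is expected).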
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_purchase_order(self, cr, uid, ids, context=None):
result = {}
for order in self.browse(cr, uid, ids, context=context):
result[order.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line if line.state != 'cancel')
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
type_obj = self.pool.get('stock.picking.type')
user_obj = self.pool.get('res.users')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)], context=context)
if not types:
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id', '=', False)], context=context)
if not types:
raise UserError(_("Make sure you have at least an incoming picking type defined"))
return types[0]
def _get_picking_ids(self, cr, uid, ids, field_names, args, context=None):
res = {}
for po_id in ids:
res[po_id] = []
query = """
SELECT picking_id, po.id FROM stock_picking p, stock_move m, purchase_order_line pol, purchase_order po
WHERE po.id in %s and po.id = pol.order_id and pol.id = m.purchase_line_id and m.picking_id = p.id
GROUP BY picking_id, po.id
"""
cr.execute(query, (tuple(ids), ))
picks = cr.fetchall()
for pick_id, po_id in picks:
res[po_id].append(pick_id)
return res
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
return {
purchase.id: {
'shipment_count': len(purchase.picking_ids),
'invoice_count': len(purchase.invoice_ids),
}
for purchase in self.browse(cr, uid, ids, context=context)
}
STATE_SELECTION = [
('draft', 'Draft RFQ'),
('sent', 'RFQ Sent'),
('bid', 'Bid Received'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Confirmed'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
READONLY_STATES = {
'confirmed': [('readonly', True)],
'approved': [('readonly', True)],
'done': [('readonly', True)]
}
_columns = {
'name': fields.char('Order Reference', required=True, select=True, copy=False,
help="Unique number of the purchase order, "
"computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', copy=False,
help="Reference of the document that generated this purchase order "
"request; a sales order or an internal procurement request."),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=False,
help="Reference of the sales order or bid sent by your supplier. "
"It's mainly used to do the matching when you receive the "
"products as this reference is usually written on the "
"delivery order sent by your supplier."),
'date_order':fields.datetime('Order Date', required=True, states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)]},
select=True, help="Depicts the date where the Quotation should be validated and converted into a Purchase Order, by default it's the creation date.",
copy=False),
'date_approve':fields.date('Date Approved', readonly=1, select=True, copy=False,
help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states=READONLY_STATES,
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states=READONLY_STATES,
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states=READONLY_STATES),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states=READONLY_STATES, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.many2one('res.currency','Currency', required=True, states=READONLY_STATES),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True,
help="The status of the purchase order or the quotation request. "
"A request for quotation is a purchase order in a 'Draft' status. "
"Then the order has to be confirmed by the user, the status switch "
"to 'Confirmed'. Then the supplier must confirm the order to change "
"the status to 'Approved'. When the purchase order is paid and "
"received, the status becomes 'Done'. If a cancel action occurs in "
"the invoice or in the receipt of goods, the status becomes "
"in exception.",
select=True, copy=False),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines',
states={'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=True),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True, copy=False),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id',
'invoice_id', 'Invoices', copy=False,
help="Invoices generated for a purchase order"),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking List', help="This is the list of receipts that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, copy=False,
help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', copy=False,
help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)],'bid':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control / On Purchase Order lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Based on incoming shipments: let you create an invoice when receipts are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='datetime', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
'purchase.order': (_get_purchase_order, ['order_line'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits=0, string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits=0, string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits=0, string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The total amount"),
'fiscal_position_id': fields.many2one('account.fiscal.position', oldname='fiscal_position', string='Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'incoterm_id': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
'bid_date': fields.date('Bid Received On', readonly=True, help="Date on which the bid was received"),
'bid_validity': fields.date('Bid Valid Until', help="Date on which the bid expired"),
'picking_type_id': fields.many2one('stock.picking.type', 'Deliver To', help="This will determine picking type of incoming shipment", required=True,
states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)], 'done': [('readonly', True)]}),
'related_location_id': fields.related('picking_type_id', 'default_location_dest_id', type='many2one', relation='stock.location', string="Related location", store=True),
'related_usage': fields.related('location_id', 'usage', type='char'),
'shipment_count': fields.function(_count_all, type='integer', string='Incoming Shipments', multi=True),
'invoice_count': fields.function(_count_all, type='integer', string='Invoices', multi=True),
'group_id': fields.many2one('procurement.group', string="Procurement Group"),
}
_defaults = {
'date_order': fields.datetime.now,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
'currency_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id,
'picking_type_id': _get_picking_in,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name', '/') == '/':
vals['name'] = self.pool.get('ir.sequence').next_by_code(cr, uid, 'purchase.order') or '/'
context = dict(context or {}, mail_create_nolog=True)
order = super(purchase_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [order], body=_("RFQ created"), context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise UserError(_('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
self.signal_workflow(cr, uid, unlink_ids, 'purchase_cancel')
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'state' in init_values and record.state == 'approved':
return 'purchase.mt_rfq_approved'
elif 'state' in init_values and record.state == 'confirmed':
return 'purchase.mt_rfq_confirmed'
elif 'state' in init_values and record.state == 'done':
return 'purchase.mt_rfq_done'
return super(purchase_order, self)._track_subtype(cr, uid, ids, init_values, context=context)
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
if status in ('draft', 'cancel'):
order_line_ids += [po_line.id for po_line in order.order_line]
else: # Do not change the status of already cancelled lines
order_line_ids += [po_line.id for po_line in order.order_line if po_line.state != 'cancel']
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
if order_line_ids and status == 'cancel':
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', order_line_ids)], context=context)
if procs:
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
#Destination address is used when dropshipping
def onchange_dest_address_id(self, cr, uid, ids, address_id, context=None):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {}
supplier = address.browse(cr, uid, address_id, context=context)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
value = {}
if picking_type_id:
picktype = self.pool.get("stock.picking.type").browse(cr, uid, picking_type_id, context=context)
if picktype.default_location_dest_id:
value.update({'location_id': picktype.default_location_dest_id.id, 'related_usage': picktype.default_location_dest_id.usage})
value.update({'related_location_id': picktype.default_location_dest_id.id})
return {'value': value}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position_id': False,
'payment_term_id': False,
}}
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
fp = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, context=context)
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'], context=context)
supplier = partner.browse(cr, uid, partner_id, context=context)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position_id': fp or supplier.property_account_position_id and supplier.property_account_position_id.id or False,
'payment_term_id': supplier.property_supplier_payment_term_id.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
action_id = mod_obj.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree2')
result = act_obj.read(cr, uid, action_id, context=context)
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids += [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise UserError(_('Please create Invoices.'))
if len(inv_ids) > 1:
result['domain'] = [('id', 'in', inv_ids)]
else:
res = mod_obj.xmlid_to_res_id(cr, uid, 'account.invoice_supplier_form')
result['views'] = [(res, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing invoices of the given purchase order ids. It can either be in a list view or in a form view, if there is only one invoice to show.
'''
context = dict(context or {})
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line if line.state != 'cancel']})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Bills'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing pickings of the given purchase order ids.
'''
if context is None:
context = {}
mod_obj = self.pool.get('ir.model.data')
dummy, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree'))
action = self.pool.get('ir.actions.act_window').read(cr, uid, action_id, context=context)
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
#override the context to get rid of the default filtering on picking type
action['context'] = {}
#choose the view_mode accordingly
if len(pick_ids) > 1:
action['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
action['views'] = [(res and res[1] or False, 'form')]
action['res_id'] = pick_ids and pick_ids[0] or False
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def wkf_bid_received(self, cr, uid, ids, context=None):
bid_date = fields.date.context_today(self, cr, uid, context=context)
self.message_post(cr, uid, ids, body=_("Bid received on %s") % (bid_date), context=context)
return self.write(cr, uid, ids, {'state':'bid', 'bid_date': bid_date})
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
if not context:
context= {}
ir_model_data = self.pool.get('ir.model.data')
try:
if context.get('send_rfq', False):
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
else:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase_done')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
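# wkf_send_rfq returns an act_window on mail.compose.message pre-loaded with
# the RFQ (or confirmed order) email template; when the user actually sends
# the email, the mail_compose_message.send_mail override defined earlier in
# this module fires the 'send_rfq' workflow signal so the order is marked as sent.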
def print_quotation(self, cr, uid, ids, context=None):
'''
This function prints the request for quotation and marks it as sent, so that we can more easily see the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_workflow(cr, uid, ids, 'send_rfq')
return self.pool['report'].get_action(cr, uid, ids, 'purchase.report_purchasequotation', context=context)
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not any(line.state != 'cancel' for line in po.order_line):
raise UserError(_('You cannot confirm a purchase order without any purchase order line.'))
if po.invoice_method == 'picking' and not any([l.product_id and l.product_id.type in ('product', 'consu') and l.state != 'cancel' for l in po.order_line]):
raise osv.except_osv(
_('Error!'),
_("You cannot confirm a purchase order with Invoice Control Method 'Based on incoming shipments' that doesn't contain any stockable item."))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense_id.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ_id.id
if not acc_id:
raise UserError(_('Define an expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ_id', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position_id or False
#For anglo-saxon accounting
account_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)
if po_line.company_id.anglo_saxon_accounting and po_line.product_id and not po_line.product_id.type == 'service':
acc_id = po_line.product_id.property_stock_account_input and po_line.product_id.property_stock_account_input.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_stock_account_input_categ_id and po_line.product_id.categ_id.property_stock_account_input_categ_id.id
if acc_id:
fpos = po_line.order_id.fiscal_position_id or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, acc_id)
return account_id
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_ids': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
'purchase_line_id': order_line.id,
}
def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
"""Prepare the dict of values to create the new invoice for a
purchase order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: purchase.order record to invoice
:param list(int) line_ids: list of invoice line IDs that must be
attached to the invoice
:return: dict of value to create() the invoice
"""
journal_ids = self.pool['account.journal'].search(
cr, uid, [('type', '=', 'purchase'),
('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise UserError(_('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
return {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': order.partner_id.property_account_payable_id.id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line_ids': [(6, 0, line_ids)],
'origin': order.name,
'fiscal_position_id': order.fiscal_position_id.id or False,
'payment_term_id': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
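# In the values above, (6, 0, line_ids) is the x2many command that replaces
# the whole set of linked records with exactly the given ids, so the new
# invoice ends up linked to all the invoice lines prepared for this order.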
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
self.set_order_line_status(cr, uid, ids, 'draft', context=context)
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id]) # TODO is it necessary to interleave the calls?
self.create_workflow(cr, uid, [p_id])
return True
def wkf_po_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'done'}, context=context)
self.set_order_line_status(cr, uid, ids, 'done', context=context)
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
context = dict(context or {})
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
# generate an invoice line for each PO line and link it to the created invoice (inv_id) and to the PO line
inv_lines = []
for po_line in order.order_line:
if po_line.state == 'cancel':
continue
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
# get invoice data and create invoice
inv_data = self._prepare_invoice(cr, uid, order, inv_lines, context=context)
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]})
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.state == 'cancel':
continue
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
for move in pick.move_lines:
if pick.state == 'done':
raise UserError(_('Unable to cancel the purchase order %s.') % (purchase.name) + _('You have already received some goods for it. '))
self.pool.get('stock.picking').action_cancel(cr, uid, [x.id for x in purchase.picking_ids if x.state != 'cancel'], context=context)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel', 'draft'):
raise UserError(_("Unable to cancel this purchase order.") + " " + _("You must first cancel all invoices related to this purchase order."))
self.pool.get('account.invoice') \
.signal_workflow(cr, uid, map(attrgetter('id'), purchase.invoice_ids), 'invoice_cancel')
self.signal_workflow(cr, uid, ids, 'purchase_cancel')
return True
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
''' prepare the stock move data from the PO line. This function returns a list of dictionaries ready to be used in stock.move's create()'''
product_uom = self.pool.get('product.uom')
price_unit = order_line.price_unit
if order_line.product_uom.id != order_line.product_id.uom_id.id:
price_unit *= order_line.product_uom.factor / order_line.product_id.uom_id.factor
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
res = []
move_template = {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': order.date_order,
'date_expected': order_line.date_planned,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id,
'move_dest_id': False,
'state': 'draft',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit,
'picking_type_id': order.picking_type_id.id,
'group_id': group_id,
'procurement_id': False,
'origin': order.name,
'route_ids': order.picking_type_id.warehouse_id and [(6, 0, [x.id for x in order.picking_type_id.warehouse_id.route_ids])] or [],
'warehouse_id':order.picking_type_id.warehouse_id.id,
'invoice_state': order.invoice_method == 'picking' and '2binvoiced' or 'none',
}
diff_quantity = order_line.product_qty
for procurement in order_line.procurement_ids:
procurement_qty = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, to_uom_id=order_line.product_uom.id)
tmp = move_template.copy()
tmp.update({
'product_uom_qty': min(procurement_qty, diff_quantity),
'product_uos_qty': min(procurement_qty, diff_quantity),
'move_dest_id': procurement.move_dest_id.id, #move destination is same as procurement destination
'procurement_id': procurement.id,
'invoice_state': procurement.rule_id.invoice_state or (procurement.location_id and procurement.location_id.usage == 'customer' and procurement.invoice_state=='2binvoiced' and '2binvoiced') or (order.invoice_method == 'picking' and '2binvoiced') or 'none', #dropship case takes from sale
'propagate': procurement.rule_id.propagate,
})
diff_quantity -= min(procurement_qty, diff_quantity)
res.append(tmp)
#if the order line has a bigger quantity than the procurement it was for (manually changed or minimal quantity), then
#split the future stock move in two because the route followed may be different.
if float_compare(diff_quantity, 0.0, precision_rounding=order_line.product_uom.rounding) > 0:
move_template['product_uom_qty'] = diff_quantity
move_template['product_uos_qty'] = diff_quantity
res.append(move_template)
return res
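# One stock move is prepared per procurement linked to the line (capped at the
# remaining ordered quantity); if the ordered quantity exceeds the sum of the
# procurement quantities, a final move is added for the remainder so it can
# follow its own route.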
def _create_stock_moves(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates appropriate stock moves for given order lines, whose can optionally create a
picking if none is given or no suitable is found, then confirms the moves, makes them
available, and confirms the pickings.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise a standard
incoming picking will be created to wrap the stock moves (default behavior of the stock.move)
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: None
"""
stock_move = self.pool.get('stock.move')
todo_moves = []
if order.group_id:
new_group = order.group_id.id
else:
new_group = self.pool.get("procurement.group").create(cr, uid, {'name': order.name, 'partner_id': order.partner_id.id}, context=context)
for order_line in order_lines:
if order_line.state == 'cancel':
continue
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
for vals in self._prepare_order_line_move(cr, uid, order, order_line, picking_id, new_group, context=context):
move = stock_move.create(cr, uid, vals, context=context)
todo_moves.append(move)
todo_moves = stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
def test_moves_done(self, cr, uid, ids, context=None):
'''PO is done at the delivery side if all the incoming shipments are done'''
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state != 'done':
return False
return True
def test_moves_except(self, cr, uid, ids, context=None):
''' PO is in exception at the delivery side if one of the pickings is canceled
and the other pickings are completed (done or canceled)
'''
at_least_one_canceled = False
alldoneorcancel = True
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state == 'cancel':
at_least_one_canceled = True
if picking.state not in ['done', 'cancel']:
alldoneorcancel = False
return at_least_one_canceled and alldoneorcancel
def move_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.move_ids]
return res
def action_picking_create(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids):
picking_vals = {
'picking_type_id': order.picking_type_id.id,
'partner_id': order.partner_id.id,
'date': order.date_order,
'origin': order.name,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
}
picking_id = self.pool.get('stock.picking').create(cr, uid, picking_vals, context=context)
self._create_stock_moves(cr, uid, order, order.order_line, picking_id, context=context)
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
# Do check on related procurements:
proc_obj = self.pool.get("procurement.order")
po_lines = []
for po in self.browse(cr, uid, ids, context=context):
po_lines += [x.id for x in po.order_line if x.state != 'cancel']
if po_lines:
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', po_lines)], context=context)
if procs:
proc_obj.check(cr, uid, procs, context=context)
for id in ids:
self.message_post(cr, uid, id, body=_("Products received"), context=context)
return True
def do_merge(self, cr, uid, ids, context=None):
"""
To merge similar type of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
* Purchase Orders have the same stock location, same pricelist and same currency
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: new purchase order id
"""
# TOFIX: merged order lines should be unlinked
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, browse_record_list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
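# make_key builds a hashable grouping key so that only draft orders sharing
# the same partner, location, pricelist and currency are merged into a single
# new purchase order below.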
context = dict(context or {})
# Compute what the new orders should contain
new_orders = {}
order_lines_to_move = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id', 'currency_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
order_lines_to_move.setdefault(order_key, [])
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'picking_type_id': porder.picking_type_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'currency_id': porder.currency_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position_id': porder.fiscal_position_id and porder.fiscal_position_id.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
order_lines_to_move[order_key] += [order_line.id for order_line in porder.order_line
if order_line.state != 'cancel']
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(6, 0, order_lines_to_move[order_key])]
# create the new order
context.update({'mail_create_nolog': True})
neworder_id = self.create(cr, uid, order_data)
self.message_post(cr, uid, [neworder_id], body=_("RFQ created"), context=context)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
self.redirect_workflow(cr, uid, [(old_id, neworder_id)])
self.signal_workflow(cr, uid, [old_id], 'purchase_cancel')
return orders_info
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
cur = line.order_id.pricelist_id.currency_id
res[line.id] = line.taxes_id.compute_all(line.price_unit, cur, line.product_qty, product=line.product_id, partner=line.order_id.partner_id)['total_excluded']
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.datetime('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits=0),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')],
'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
\n* The \'Done\' status is set automatically when purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel',
'order_line_id', 'invoice_id', 'Invoice Lines',
readonly=True, copy=False),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'partner_id': fields.related('order_id', 'partner_id', string='Partner', readonly=True, type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id', 'date_order', string='Order Date', readonly=True, type="datetime"),
'procurement_ids': fields.one2many('procurement.order', 'purchase_line_id', string='Associated procurements'),
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def unlink(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.order_id.state in ['approved', 'done'] and line.state not in ['draft', 'cancel']:
raise UserError(_('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
procurement_obj = self.pool.get('procurement.order')
procurement_ids_to_except = procurement_obj.search(cr, uid, [('purchase_line_id', 'in', ids)], context=context)
if procurement_ids_to_except:
for po_id in procurement_ids_to_except:
procurement_obj.message_post(cr, uid, po_id, body=_('Purchase order line deleted.'), context=context)
procurement_obj.write(cr, uid, procurement_ids_to_except, {'state': 'exception'}, context=context)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, state=state, replace=False, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order field, as a string in
DEFAULT_SERVER_DATETIME_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta(days=supplier_delay)
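    # Illustrative sketch (not part of the original module; the dates and delay
    # below are hypothetical): the schedule date is simply the order date shifted
    # forward by the supplier delay, e.g. for a supplierinfo with delay=5:
    #
    #     from datetime import datetime
    #     from dateutil.relativedelta import relativedelta
    #     order_dt = datetime.strptime('2016-03-01 10:00:00', '%Y-%m-%d %H:%M:%S')
    #     planned = order_dt + relativedelta(days=5)  # datetime(2016, 3, 6, 10, 0)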
def action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
# We will group by PO first, so we do the check only once for each PO
purchase_orders = list(set([x.order_id for x in self.browse(cr, uid, ids, context=context)]))
for purchase in purchase_orders:
if all([l.state == 'cancel' for l in purchase.order_line]):
self.pool.get('purchase.order').action_cancel(cr, uid, [purchase.id], context=context)
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', replace=True, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise UserError(_('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise UserError(_('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
if replace:
#call name_get() with partner in the context to eventually match name and description in the seller_ids field
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.datetime.now()
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
if float_compare(min_qty , qty, precision_digits=precision) == 1: # If the supplier quantity is greater than entered from user, set minimal.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
price = price_unit
if price_unit is False or price_unit is None:
# - determine price_unit and taxes_id
if pricelist_id:
date_order_str = datetime.strptime(date_order, DEFAULT_SERVER_DATETIME_FORMAT).strftime(DEFAULT_SERVER_DATE_FORMAT)
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order_str})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
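# Illustrative sketch (not part of the original module; the ids below are
# hypothetical): the onchange handlers above can also be driven from server-side
# code (e.g. inside another model method) with the raw form values:
#
#     pol_obj = self.pool.get('purchase.order.line')
#     res = pol_obj.onchange_product_id(cr, uid, [], pricelist_id=1, product_id=42,
#                                       qty=0, uom_id=False, partner_id=7)
#     # res['value'] carries the suggested name, product_uom, product_qty,
#     # date_planned, price_unit and taxes_id for the line.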
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
return [('buy', _('Buy'))] + super(procurement_rule, self)._get_action(cr, uid, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line', 'Purchase Order Line'),
'purchase_id': fields.related('purchase_line_id', 'order_id', type='many2one', relation='purchase.order', string='Purchase Order'),
}
def propagate_cancels(self, cr, uid, ids, context=None):
purchase_line_obj = self.pool.get('purchase.order.line')
lines_to_cancel = []
uom_obj = self.pool.get("product.uom")
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.rule_id.action == 'buy' and procurement.purchase_line_id:
if procurement.purchase_line_id.state not in ('draft', 'cancel'):
raise UserError(
                        _('Cannot cancel this procurement as the related purchase order has already been confirmed. Please cancel the purchase order first.'))
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, cancel=True, context=context)
if new_qty != procurement.purchase_line_id.product_qty:
purchase_line_obj.write(cr, uid, [procurement.purchase_line_id.id], {'product_qty': new_qty, 'price_unit': new_price}, context=context)
if float_compare(new_qty, 0.0, precision_rounding=procurement.product_uom.rounding) != 1:
if procurement.purchase_line_id.id not in lines_to_cancel:
lines_to_cancel += [procurement.purchase_line_id.id]
if lines_to_cancel:
purchase_line_obj.action_cancel(cr, uid, lines_to_cancel, context=context)
purchase_line_obj.unlink(cr, uid, lines_to_cancel, context=context)
return super(procurement_order, self).propagate_cancels(cr, uid, ids, context=context)
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'buy':
#make a purchase order for the procurement
return self.make_po(cr, uid, [procurement.id], context=context)[procurement.id]
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
#TODO: Autocommit needed?
def run(self, cr, uid, ids, autocommit=False, context=None):
procs = self.browse(cr, uid, ids, context=context)
to_assign = [x for x in procs if x.state not in ('running', 'done')]
self._assign_multi(cr, uid, to_assign, context=context)
buy_ids = [x.id for x in to_assign if x.rule_id and x.rule_id.action == 'buy']
if buy_ids:
result_dict = self.make_po(cr, uid, buy_ids, context=context)
runnings = []
exceptions = []
for proc in result_dict.keys():
if result_dict[proc]:
runnings += [proc]
else:
exceptions += [proc]
if runnings:
self.write(cr, uid, runnings, {'state': 'running'}, context=context)
if exceptions:
self.write(cr, uid, exceptions, {'state': 'exception'}, context=context)
set_others = set(ids) - set(buy_ids)
return super(procurement_order, self).run(cr, uid, list(set_others), context=context)
def _check(self, cr, uid, procurement, context=None):
if procurement.purchase_line_id:
if procurement.purchase_line_id.order_id.shipped:
return True
elif procurement.move_ids:
moves = self.pool.get('stock.move').browse(cr, uid, [x.id for x in procurement.move_ids], context=context)
return all(move.state == 'done' for move in moves)
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
def _check_supplier_info(self, cr, uid, ids, context=None):
''' Check the supplier info field of a product and write an error message on the procurement if needed.
Returns True if all needed information is there, False if some configuration mistake is detected.
'''
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
partner = procurement.product_id.seller_id #Taken Main Supplier of Product of Procurement.
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
elif not partner:
message = _('No default supplier defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the supplier')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
raise UserError(_('The product "%s" has been defined with your company as reseller which seems to be a configuration error!' % procurement.product_id.name))
return True
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:params procurement: the procurement object generating the purchase order
:params dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
:params dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
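    # Illustrative sketch (not part of the original module; the field values are
    # hypothetical): (0, 0, line_vals) is the ORM one2many "create" command, so the
    # purchase order and its single line are created in one create() call:
    #
    #     po_vals = {'partner_id': 7, 'location_id': 12, 'pricelist_id': 1}
    #     line_vals = {'product_id': 42, 'product_qty': 5.0, 'price_unit': 10.0,
    #                  'date_planned': '2016-03-06 10:00:00', 'name': 'Widget'}
    #     po_id = self.create_procurement_purchase_order(cr, uid, procurement,
    #                                                    po_vals, line_vals)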
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_report company: the company to which the new PO will belong to.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_report company: the company to which the new PO will belong to.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
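    # Illustrative sketch (not part of the original module; the dates and delays
    # are hypothetical): combining the two helpers above, the PO dates are derived
    # backwards from the procurement, e.g. with date_planned = 2016-03-20,
    # company po_lead = 2 and product seller_delay = 5:
    #
    #     schedule_date = 2016-03-20 - 2 days = 2016-03-18   (date_planned of the line)
    #     order_date    = 2016-03-18 - 5 days = 2016-03-13   (date_order of the PO)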
def _get_product_supplier(self, cr, uid, procurement, context=None):
''' returns the main supplier of the procurement's product given as argument'''
supplierinfo = self.pool['product.supplierinfo']
company_supplier = supplierinfo.search(cr, uid,
[('product_tmpl_id', '=', procurement.product_id.product_tmpl_id.id), ('company_id', '=', procurement.company_id.id)], limit=1, context=context)
if company_supplier:
return supplierinfo.browse(cr, uid, company_supplier[0], context=context).name
return procurement.product_id.seller_id
def _get_po_line_values_from_procs(self, cr, uid, procurements, partner, schedule_date, context=None):
res = {}
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
pricelist_id = partner.property_product_pricelist_purchase.id
prices_qty = []
for procurement in procurements:
seller_qty = procurement.product_id.seller_qty if procurement.location_id.usage != 'customer' else 0.0
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty, seller_qty)
prices_qty += [(procurement.product_id, qty, partner)]
prices = pricelist_obj.price_get_multi(cr, uid, [pricelist_id], prices_qty)
#Passing partner_id to context for purchase order line integrity of Line name
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner.id})
        names = prod_obj.name_get(cr, uid, [x.product_id.id for x in procurements], context=new_context)
names_dict = {}
for id, name in names:
names_dict[id] = name
for procurement in procurements:
taxes_ids = procurement.product_id.supplier_taxes_id
taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position_id, taxes_ids)
name = names_dict[procurement.product_id.id]
if procurement.product_id.description_purchase:
name += '\n' + procurement.product_id.description_purchase
price = prices[procurement.product_id.id][pricelist_id]
values = {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': procurement.product_id.uom_po_id.id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'taxes_id': [(6, 0, taxes)],
'procurement_ids': [(4, procurement.id)]
}
res[procurement.id] = values
return res
def _calc_new_qty_price(self, cr, uid, procurement, po_line=None, cancel=False, context=None):
if not po_line:
po_line = procurement.purchase_line_id
uom_obj = self.pool.get('product.uom')
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty,
procurement.product_id.uom_po_id.id)
if cancel:
qty = -qty
# Make sure we use the minimum quantity of the partner corresponding to the PO
# This does not apply in case of dropshipping
supplierinfo_min_qty = 0.0
if po_line.order_id.location_id.usage != 'customer':
if po_line.product_id.seller_id.id == po_line.order_id.partner_id.id:
supplierinfo_min_qty = po_line.product_id.seller_qty
else:
supplierinfo_obj = self.pool.get('product.supplierinfo')
supplierinfo_ids = supplierinfo_obj.search(cr, uid, [('name', '=', po_line.order_id.partner_id.id), ('product_tmpl_id', '=', po_line.product_id.product_tmpl_id.id)])
supplierinfo_min_qty = supplierinfo_obj.browse(cr, uid, supplierinfo_ids).min_qty
if supplierinfo_min_qty == 0.0:
qty += po_line.product_qty
else:
# Recompute quantity by adding existing running procurements.
for proc in po_line.procurement_ids:
qty += uom_obj._compute_qty(cr, uid, proc.product_uom.id, proc.product_qty,
proc.product_id.uom_po_id.id) if proc.state == 'running' else 0.0
qty = max(qty, supplierinfo_min_qty) if qty > 0.0 else 0.0
price = po_line.price_unit
if qty != po_line.product_qty:
pricelist_obj = self.pool.get('product.pricelist')
pricelist_id = po_line.order_id.partner_id.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, po_line.order_id.partner_id.id, {'uom': procurement.product_uom.id})[pricelist_id]
return qty, price
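    # Illustrative sketch (not part of the original module; the quantities are
    # hypothetical): with no supplier minimal quantity, the new line quantity is
    # the existing line quantity plus (or minus, when cancelling) the converted
    # procurement quantity:
    #
    #     po_line.product_qty = 10.0, procurement.product_qty = 4.0
    #     merge:  new_qty = 10.0 + 4.0 = 14.0
    #     cancel: new_qty = 10.0 - 4.0 = 6.0
    #     with a supplier minimum of 8.0, the running procurements are re-summed
    #     instead and new_qty = max(sum, 8.0).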
def _get_grouping_dicts(self, cr, uid, ids, context=None):
"""
It will group the procurements according to the pos they should go into. That way, lines going to the same
po, can be processed at once.
Returns two dictionaries:
add_purchase_dicts: key: po value: procs to add to the po
create_purchase_dicts: key: values for proc to create (not that necessary as they are in procurement => TODO),
values: procs to add
"""
po_obj = self.pool.get('purchase.order')
# Regroup POs
cr.execute("""
SELECT psi.name, p.id, pr.id, pr.picking_type_id, p.location_id, p.partner_dest_id, p.company_id, p.group_id,
pr.group_propagation_option, pr.group_id, psi.qty
FROM procurement_order AS p
LEFT JOIN procurement_rule AS pr ON pr.id = p.rule_id
LEFT JOIN procurement_group AS pg ON p.group_id = pg.id,
product_supplierinfo AS psi, product_product AS pp
WHERE
p.product_id = pp.id AND p.id in %s AND psi.product_tmpl_id = pp.product_tmpl_id
AND (psi.company_id = p.company_id or psi.company_id IS NULL)
ORDER BY psi.sequence,
psi.name, p.rule_id, p.location_id, p.company_id, p.partner_dest_id, p.group_id
""", (tuple(ids), ))
res = cr.fetchall()
old = False
# A giant dict for grouping lines, ... to do at once
create_purchase_procs = {} # Lines to add to a newly to create po
add_purchase_procs = {} # Lines to add/adjust in an existing po
proc_seller = {} # To check we only process one po
for partner, proc, rule, pick_type, location, partner_dest, company, group, group_propagation, fixed_group, qty in res:
if not proc_seller.get(proc):
proc_seller[proc] = partner
new = partner, rule, pick_type, location, company, group, group_propagation, fixed_group
if new != old:
old = new
dom = [
('partner_id', '=', partner), ('state', '=', 'draft'), ('picking_type_id', '=', pick_type),
('location_id', '=', location), ('company_id', '=', company), ('dest_address_id', '=', partner_dest)]
if group_propagation == 'propagate':
dom += [('group_id', '=', group)]
elif group_propagation == 'fixed':
dom += [('group_id', '=', fixed_group)]
available_draft_po_ids = po_obj.search(cr, uid, dom, context=context)
available_draft_po = available_draft_po_ids and available_draft_po_ids[0] or False
# Add to dictionary
if available_draft_po:
if add_purchase_procs.get(available_draft_po):
add_purchase_procs[available_draft_po] += [proc]
else:
add_purchase_procs[available_draft_po] = [proc]
else:
if create_purchase_procs.get(new):
create_purchase_procs[new] += [proc]
else:
create_purchase_procs[new] = [proc]
return add_purchase_procs, create_purchase_procs
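    # Illustrative sketch (not part of the original module; the ids are
    # hypothetical): the two dictionaries returned above might look like
    #
    #     add_purchase_procs    = {31: [101, 102]}   # draft PO 31 absorbs procurements 101 and 102
    #     create_purchase_procs = {(7, 5, 3, 12, 1, False, 'none', False): [103]}
    #     # key = (partner, rule, picking type, location, company,
    #     #        group, group_propagation, fixed_group) of the PO to create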
def make_po(self, cr, uid, ids, context=None):
res = {}
po_obj = self.pool.get('purchase.order')
po_line_obj = self.pool.get('purchase.order.line')
seq_obj = self.pool.get('ir.sequence')
uom_obj = self.pool.get('product.uom')
add_purchase_procs, create_purchase_procs = self._get_grouping_dicts(cr, uid, ids, context=context)
procs_done = []
# Let us check existing purchase orders and add/adjust lines on them
for add_purchase in add_purchase_procs.keys():
procs_done += add_purchase_procs[add_purchase]
po = po_obj.browse(cr, uid, add_purchase, context=context)
lines_to_update = {}
line_values = []
procurements = self.browse(cr, uid, add_purchase_procs[add_purchase], context=context)
po_line_ids = po_line_obj.search(cr, uid, [('order_id', '=', add_purchase), ('product_id', 'in', [x.product_id.id for x in procurements])], context=context)
po_lines = po_line_obj.browse(cr, uid, po_line_ids, context=context)
po_prod_dict = {}
for pol in po_lines:
po_prod_dict[pol.product_id.id] = pol
procs_to_create = []
#Check which procurements need a new line and which need to be added to an existing one
for proc in procurements:
if po_prod_dict.get(proc.product_id.id):
po_line = po_prod_dict[proc.product_id.id]
# FIXME: compute quantity using `_calc_new_qty_price` method.
# new_qty, new_price = self._calc_new_qty_price(cr, uid, proc, po_line=po_line, context=context)
uom_id = po_line.product_uom # Convert to UoM of existing line
qty = uom_obj._compute_qty_obj(cr, uid, proc.product_uom, proc.product_qty, uom_id)
if lines_to_update.get(po_line):
lines_to_update[po_line] += [(proc, qty)]
else:
lines_to_update[po_line] = [(proc, qty)]
else:
procs_to_create.append(proc)
procs = []
# FIXME: these are not real tracking values, it should be fixed if tracking values for one2many
# are managed
def format_message(message_description, tracked_values):
message = ''
if message_description:
message = '<span>%s</span>' % message_description
for name, values in tracked_values.iteritems():
message += '<div> • <b>%s</b>: ' % name
message += '%s</div>' % values
return message
# Update the quantities of the lines that need to
for line in lines_to_update.keys():
tot_qty = 0
for proc, qty in lines_to_update[line]:
tot_qty += qty
self.message_post(cr, uid, proc.id, body=_("Quantity added in existing Purchase Order Line"), context=context)
msg = format_message(_('Quantity added in existing Purchase Order Line'), {'Product': proc.product_id.name, 'Quantity': proc.product_qty, 'Procurement': proc.origin})
po_obj.message_post(cr, uid, [add_purchase], body=msg, context=context)
line_values += [(1, line.id, {'product_qty': line.product_qty + tot_qty, 'procurement_ids': [(4, x[0].id) for x in lines_to_update[line]]})]
# Create lines for which no line exists yet
if procs_to_create:
partner = po.partner_id
schedule_date = datetime.strptime(po.minimum_planned_date, DEFAULT_SERVER_DATETIME_FORMAT)
value_lines = self._get_po_line_values_from_procs(cr, uid, procs_to_create, partner, schedule_date, context=context)
line_values += [(0, 0, value_lines[x]) for x in value_lines.keys()]
for proc in procs_to_create:
self.message_post(cr, uid, [proc.id], body=_("Purchase line created and linked to an existing Purchase Order"), context=context)
msg = format_message(_('Purchase order line added'), {'Product': proc.product_id.name, 'Quantity': proc.product_qty, 'Procurement': proc.origin})
po_obj.message_post(cr, uid, [add_purchase], body=msg, context=context)
po_obj.write(cr, uid, [add_purchase], {'order_line': line_values},context=context)
# Create new purchase orders
partner_obj = self.pool.get("res.partner")
new_pos = []
for create_purchase in create_purchase_procs.keys():
procs_done += create_purchase_procs[create_purchase]
line_values = []
procurements = self.browse(cr, uid, create_purchase_procs[create_purchase], context=context)
partner = partner_obj.browse(cr, uid, create_purchase[0], context=context)
#Create purchase order itself:
procurement = procurements[0]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, procurement.company_id, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, procurement.company_id, schedule_date, context=context)
value_lines = self._get_po_line_values_from_procs(cr, uid, procurements, partner, schedule_date, context=context)
line_values += [(0, 0, value_lines[x]) for x in value_lines.keys()]
name = seq_obj.next_by_code(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
gpo = procurement.rule_id.group_propagation_option
group = (gpo == 'fixed' and procurement.rule_id.group_id.id) or (gpo == 'propagate' and procurement.group_id.id) or False
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': create_purchase[0],
'location_id': procurement.location_id.id,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'pricelist_id': partner.property_product_pricelist_purchase.id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position_id': partner.property_account_position_id.id,
'payment_term_id': partner.property_supplier_payment_term_id.id,
'dest_address_id': procurement.partner_dest_id.id,
'group_id': group,
'order_line': line_values,
}
new_po = po_obj.create(cr, uid, po_vals, context=context)
new_pos.append(new_po)
for proc in create_purchase_procs[create_purchase]:
self.message_post(cr, uid, proc, body=_("Draft Purchase Order created"), context=context)
other_proc_ids = list(set(ids) - set(procs_done))
res = dict.fromkeys(ids, True)
if other_proc_ids:
other_procs = self.browse(cr, uid, other_proc_ids, context=context)
for procurement in other_procs:
res[procurement.id] = False
self.message_post(cr, uid, [procurement.id], _('There is no supplier associated to product %s') % (procurement.product_id.name))
return res
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
if mail_sent and mail.model == 'purchase.order':
obj = self.pool.get('purchase.order').browse(cr, uid, mail.res_id, context=context)
if obj.state == 'draft':
self.pool.get('purchase.order').signal_workflow(cr, uid, [mail.res_id], 'send_rfq')
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context, mail_sent=mail_sent)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
def _get_buy_route(self, cr, uid, context=None):
buy_route = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'purchase.route_warehouse0_buy')
if buy_route:
return [buy_route]
return []
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
for template in self.browse(cr, uid, ids, context=context):
res[template.id] = sum([p.purchase_count for p in template.product_variant_ids])
return res
_columns = {
'property_account_creditor_price_difference': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
_defaults = {
'purchase_ok': 1,
'route_ids': _get_buy_route,
}
def action_view_purchases(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
return result
class product_product(osv.Model):
_name = 'product.product'
_inherit = 'product.product'
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
r = dict.fromkeys(ids, 0)
domain = [
('state', 'in', ['confirmed', 'approved', 'except_picking', 'except_invoice', 'done']),
('product_id', 'in', ids),
]
for group in self.pool['purchase.report'].read_group(cr, uid, domain, ['product_id', 'quantity'], ['product_id'], context=context):
r[group['product_id'][0]] = group['quantity']
return r
def action_view_purchases(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
result = self.pool['product.template']._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, ids)) + "])]"
return result
_columns = {
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
class product_category(osv.Model):
_inherit = "product.category"
_columns = {
'property_account_creditor_price_difference_categ': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, auto_commit=False, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('purchase.order').signal_workflow(cr, uid, [context['default_res_id']], 'send_rfq')
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
""" Override account_invoice to add Chatter messages on the related purchase
orders, logging the invoice receipt or payment. """
_inherit = 'account.invoice'
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for order in purchase_order_obj.browse(cr, user_id, po_ids, context=context):
purchase_order_obj.message_post(cr, user_id, order.id, body=_("Invoice received"), context=context)
invoiced = []
shipped = True
# for invoice method manual or order, don't care about shipping state
            # for invoices based on incoming shipments, beware of partial deliveries
if (order.invoice_method == 'picking' and
not all(picking.invoice_state in ['invoiced'] for picking in order.picking_ids)):
shipped = False
for po_line in order.order_line:
if all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines):
invoiced.append(po_line.id)
if invoiced and shipped:
self.pool['purchase.order.line'].write(cr, user_id, invoiced, {'invoiced': True})
workflow.trg_write(user_id, 'purchase.order', order.id, cr)
return res
def confirm_paid(self, cr, uid, ids, context=None):
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice paid"), context=context)
return res
class account_invoice_line(osv.Model):
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_inherit = 'account.invoice.line'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
    def move_line_get(self, cr, uid, invoice_id, context=None):
        res = super(account_invoice_line, self).move_line_get(cr, uid, invoice_id, context=context)
        # browse the invoice to reach its company, type and lines
        inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
        if inv.company_id.anglo_saxon_accounting:
            if inv.type in ('in_invoice', 'in_refund'):
                for i_line in inv.invoice_line_ids:
                    res.extend(self._anglo_saxon_purchase_move_lines(cr, uid, i_line, res, context=context))
        return res
def _anglo_saxon_purchase_move_lines(self, cr, uid, i_line, res, context=None):
"""Return the additional move lines for purchase invoices and refunds.
i_line: An account.invoice.line object.
res: The move line entries produced so far by the parent move_line_get.
"""
inv = i_line.invoice_id
company_currency = inv.company_id.currency_id.id
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if i_line.product_id.type != 'service':
# get the price difference account at the product
acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
if not acc:
# if not found on the product get the price difference account at the category
acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
a = None
# oa will be the stock input account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_input_categ_id and i_line.product_id.categ_id.property_stock_account_input_categ_id.id
if oa:
# get the fiscal position
fpos = i_line.invoice_id.fiscal_position_id or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
diff_res = []
account_prec = inv.company_id.currency_id.decimal_places
# calculate and write down the possible price difference between invoice price and product price
for line in res:
if line.get('invl_id', 0) == i_line.id and a == line['account_id']:
uom = i_line.product_id.uos_id or i_line.product_id.uom_id
valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
                    # for average/fifo/lifo costing methods, fetch the real cost price from incoming moves
stock_move_obj = self.pool.get('stock.move')
valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
if valuation_stock_move:
valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
if inv.currency_id.id != company_currency:
valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
# price with discount and without tax included
price_unit = self.pool['account.tax'].compute_all(cr, uid, line['taxes'], i_line.price_unit * (1-(i_line.discount or 0.0)/100.0),
inv.currency_id.id, line['quantity'])['total_excluded']
price_line = round(valuation_price_unit * line['quantity'], account_prec)
price_diff = round(price_unit - price_line, account_prec)
line.update({'price': price_line})
diff_res.append({
'type': 'src',
'name': i_line.name[:64],
'price_unit': round(price_diff / line['quantity'], account_prec),
'quantity': line['quantity'],
'price': price_diff,
'account_id': acc,
'product_id': line['product_id'],
'uos_id': line['uos_id'],
'account_analytic_id': line['account_analytic_id'],
'taxes': line.get('taxes', []),
})
return diff_res
return []
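    # Illustrative sketch (not part of the original module; prices and quantities
    # are hypothetical): the price difference line compensates the gap between the
    # invoiced price and the stock valuation price, e.g. 5 units invoiced at 12.0
    # while valued at 10.0 (no taxes, no discount):
    #
    #     price_line = 10.0 * 5 = 50.0      posted on the stock input account
    #     price_unit = 12.0 * 5 = 60.0      tax-excluded invoiced amount
    #     price_diff = 60.0 - 50.0 = 10.0   posted on the price difference account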
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Fixed a few type definition issues
|
# SALOME LifeCycleCORBA : implementation of containers and engines life cycle both in Python and C++
#
# Copyright (C) 2003 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.opencascade.org/SALOME/ or email : webmaster.salome@opencascade.org
#
#
#
# File : LifeCycleCORBA.py
# Author : Paul RASCLE, EDF
# Module : SALOME
# $Header$
import os
import sys
import time
import string
from omniORB import CORBA
import CosNaming
import Engines
import SALOME_ModuleCatalog
from SALOME_utilities import *
from Utils_Identity import getShortHostName
import Utils_Identity
import Launchers
class LifeCycleCORBA:
_orb = None
    _rootContext = None
_containerRootContext = None
_catalog = None
#-------------------------------------------------------------------------
def __init__(self, orb):
MESSAGE( "LifeCycleCORBA::__init__" )
self._orb = orb
obj = self._orb.resolve_initial_references("NameService")
self._rootContext = obj._narrow(CosNaming.NamingContext)
if self._rootContext is None:
MESSAGE( "Name Service Reference is invalid" )
name = [CosNaming.NameComponent("Containers","dir")]
try:
self._containerRootContext = self._rootContext.bind_new_context(name)
except CosNaming.NamingContext.AlreadyBound, ex:
MESSAGE( "/Containers.dir Context already exists" )
obj = self._rootContext.resolve(name)
self._containerRootContext = obj._narrow(CosNaming.NamingContext)
if self._containerRootContext is None:
MESSAGE( "Containers.dir exists but it is not a NamingContext" )
name = [CosNaming.NameComponent("Kernel","dir"),
CosNaming.NameComponent("ModulCatalog","object")]
try:
obj = self._rootContext.resolve(name)
except CosNaming.NamingContext.NotFound, ex:
MESSAGE( "/Kernel.dir/ModulCatalog.object not found in Naming Service" )
self._catalog = obj._narrow(SALOME_ModuleCatalog.ModuleCatalog)
if self._catalog is None:
MESSAGE( "/Kernel.dir/ModulCatalog.object exists but is not a ModulCatalog" )
name = [CosNaming.NameComponent("ContainerManager","object")]
try:
obj = self._rootContext.resolve(name)
except CosNaming.NamingContext.NotFound, ex:
MESSAGE( "ContainerManager.object not found in Naming Service" )
self._contManager = obj._narrow(Engines.ContainerManager)
if self._contManager is None:
MESSAGE( "ContainerManager.object exists but is not a ContainerManager")
#-------------------------------------------------------------------------
def ContainerName(self, containerName):
theComputer = ""
try:
theComputer , theContainer = containerName.split('/')
except:
theComputer = ""
theContainer = containerName
if theComputer in ("","localhost") :
theComputer = getShortHostName()
MESSAGE( theComputer + theContainer )
return theComputer,theContainer
#-------------------------------------------------------------------------
def ComputerPath(self, ComputerName ):
try:
#path = self._catalog.GetPathPrefix( ComputerName )
path = os.getenv("KERNEL_ROOT_DIR") + "/bin/salome/"
except SALOME_ModuleCatalog.NotFound, ex:
path = ""
return path
#-------------------------------------------------------------------------
def FindContainer(self, containerName):
theComputer,theContainer = self.ContainerName( containerName )
name = [CosNaming.NameComponent(theComputer,"dir"),
CosNaming.NameComponent(theContainer,"object")]
obj = None
try:
obj = self._containerRootContext.resolve(name)
MESSAGE( containerName + ".object found in Naming Service" )
except CosNaming.NamingContext.NotFound, ex:
MESSAGE( containerName + ".object not found in Naming Service" )
if obj is None:
container = None
else:
container = obj._narrow(Engines.Container)
if container is None:
MESSAGE( containerName + ".object exists but is not a Container" )
return container
#-------------------------------------------------------------------------
def FindComponent(self,containerName,componentName,listOfMachines):
if containerName!="":
machinesOK=[]
for i in range(len(listOfMachines)):
currentMachine=listOfMachines[i]
componentNameForNS= [CosNaming.NameComponent(currentMachine,"dir"),
CosNaming.NameComponent(containerName,"dir"),
CosNaming.NameComponent(componentName,"object")]
obj=None
try:
obj = self._containerRootContext.resolve(componentNameForNS)
except CosNaming.NamingContext.NotFound, ex:
MESSAGE( "component " + componentName + " not found on machine " + currentMachine + " , trying to load" )
pass
if obj is not None:
machinesOK.append(currentMachine)
pass
pass
if len(machinesOK)!=0:
bestMachine=self._contManager.FindBest(machinesOK)
componentNameForNS= [CosNaming.NameComponent(bestMachine,"dir"),
CosNaming.NameComponent(containerName,"dir"),
CosNaming.NameComponent(componentName,"object")]
obj=None
try:
obj = self._containerRootContext.resolve(componentNameForNS)
except:
pass
if obj is not None:
return obj._narrow(Engines.Component)
else:
MESSAGE( "Big problem !!!")
return None
else:
return None
else:
bestMachine=self._contManager.FindBest(listOfMachines)
MESSAGE("Not implemented yet ...")
return None
pass
#-------------------------------------------------------------------------
def setLauncher(self,name):
"""Change default launcher to the launcher identified by name
See module Launchers.py
"""
Launchers.setLauncher(name)
#-------------------------------------------------------------------------
def StartContainer(self, theComputer , theContainer ):
"""Start a container on theComputer machine with theContainer name
"""
# Get the Naming Service address
#
addr=self._orb.object_to_string(self._rootContext)
#
# If container name contains "Py" launch a Python Container
#
if theContainer.find('Py') == -1 :
CMD=['SALOME_Container',theContainer,'-ORBInitRef','NameService='+addr]
else:
CMD=['SALOME_ContainerPy.py',theContainer,'-ORBInitRef','NameService='+addr]
if theComputer in ("","localhost"):
theComputer=getShortHostName()
#
# Get the appropriate launcher and ask to launch
#
Launchers.getLauncher(theComputer).launch(theComputer,CMD)
#
# Wait until the container is registered in Naming Service
#
count =5
aContainer=None
while aContainer is None and count > 0:
time.sleep(1)
count = count - 1
MESSAGE( str(count) + ". Waiting for " + theComputer + "/" + theContainer )
aContainer = self.FindContainer( theComputer + "/" + theContainer )
return aContainer
#-------------------------------------------------------------------------
def FindOrStartContainer(self, theComputer , theContainer ):
"""Find or Start a container on theComputer machine with theContainer name
"""
if theComputer in ("","localhost"):
theComputer=getShortHostName()
MESSAGE( "FindOrStartContainer: " + theComputer + theContainer )
aContainer = self.FindContainer( theComputer + "/" + theContainer )
if aContainer is None :
aContainer= self.StartContainer(theComputer , theContainer )
return aContainer
#-------------------------------------------------------------------------
def LoadComponent(self,containerName,componentName,listOfMachine):
container=self._contManager.FindOrStartContainer(containerName,listOfMachine)
implementation="lib"+componentName+"Engine.so"
try:
component = container.load_impl(componentName, implementation)
MESSAGE( "component " + component._get_instanceName() + " launched !" )
return component
except:
MESSAGE( "component " + componentName + " NOT launched !" )
return None
#-------------------------------------------------------------------------
def FindOrLoadComponent(self, containerName, componentName):
sp=containerName.split("/")
if len(sp)==1:
listOfMachine=[]
listOfMachine.append(getShortHostName())
comp=self.FindComponent(containerName,componentName,listOfMachine)
if comp is None:
return self.LoadComponent(containerName,componentName,listOfMachine)
else:
return comp
pass
else:
params= Engines.MachineParameters(sp[1],sp[0],"LINUX",0,0,0,0)
listOfMachine=self._contManager.GetFittingResources(params,componentName)
ret=self.FindComponent(sp[1],componentName,listOfMachine);
if ret is None:
return self.LoadComponent(sp[1],componentName,listOfMachine)
else:
return ret
pass
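#-------------------------------------------------------------------------
# Illustrative sketch (not part of the original module; the ORB bootstrap and the
# container/component names are assumptions for the example):
#
#     orb = CORBA.ORB_init(sys.argv, CORBA.ORB_ID)
#     lcc = LifeCycleCORBA(orb)
#     component = lcc.FindOrLoadComponent("FactoryServer", "GEOM")
#     # starts the container if needed and returns the narrowed Engines.Component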
PR: from E.Adam, Engines import sometimes incomplete...
# SALOME LifeCycleCORBA : implementation of containers and engines life cycle both in Python and C++
#
# Copyright (C) 2003 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.opencascade.org/SALOME/ or email : webmaster.salome@opencascade.org
#
#
#
# File : LifeCycleCORBA.py
# Author : Paul RASCLE, EDF
# Module : SALOME
# $Header$
import os
import sys
import time
import string
from omniORB import CORBA
import CosNaming
import Engines
reload(Engines)
import SALOME_ModuleCatalog
from SALOME_utilities import *
from Utils_Identity import getShortHostName
import Utils_Identity
import Launchers
class LifeCycleCORBA:
_orb = None
    _rootContext = None
_containerRootContext = None
_catalog = None
#-------------------------------------------------------------------------
def __init__(self, orb):
MESSAGE( "LifeCycleCORBA::__init__" )
self._orb = orb
obj = self._orb.resolve_initial_references("NameService")
self._rootContext = obj._narrow(CosNaming.NamingContext)
if self._rootContext is None:
MESSAGE( "Name Service Reference is invalid" )
name = [CosNaming.NameComponent("Containers","dir")]
try:
self._containerRootContext = self._rootContext.bind_new_context(name)
except CosNaming.NamingContext.AlreadyBound, ex:
MESSAGE( "/Containers.dir Context already exists" )
obj = self._rootContext.resolve(name)
self._containerRootContext = obj._narrow(CosNaming.NamingContext)
if self._containerRootContext is None:
MESSAGE( "Containers.dir exists but it is not a NamingContext" )
name = [CosNaming.NameComponent("Kernel","dir"),
CosNaming.NameComponent("ModulCatalog","object")]
try:
obj = self._rootContext.resolve(name)
except CosNaming.NamingContext.NotFound, ex:
MESSAGE( "/Kernel.dir/ModulCatalog.object not found in Naming Service" )
self._catalog = obj._narrow(SALOME_ModuleCatalog.ModuleCatalog)
if self._catalog is None:
MESSAGE( "/Kernel.dir/ModulCatalog.object exists but is not a ModulCatalog" )
name = [CosNaming.NameComponent("ContainerManager","object")]
try:
obj = self._rootContext.resolve(name)
except CosNaming.NamingContext.NotFound, ex:
MESSAGE( "ContainerManager.object not found in Naming Service" )
self._contManager = obj._narrow(Engines.ContainerManager)
if self._contManager is None:
MESSAGE( "ContainerManager.object exists but is not a ContainerManager")
#-------------------------------------------------------------------------
def ContainerName(self, containerName):
theComputer = ""
try:
theComputer , theContainer = containerName.split('/')
except:
theComputer = ""
theContainer = containerName
if theComputer in ("","localhost") :
theComputer = getShortHostName()
MESSAGE( theComputer + theContainer )
return theComputer,theContainer
#-------------------------------------------------------------------------
def ComputerPath(self, ComputerName ):
try:
#path = self._catalog.GetPathPrefix( ComputerName )
path = os.getenv("KERNEL_ROOT_DIR") + "/bin/salome/"
except SALOME_ModuleCatalog.NotFound, ex:
path = ""
return path
#-------------------------------------------------------------------------
def FindContainer(self, containerName):
theComputer,theContainer = self.ContainerName( containerName )
name = [CosNaming.NameComponent(theComputer,"dir"),
CosNaming.NameComponent(theContainer,"object")]
obj = None
try:
obj = self._containerRootContext.resolve(name)
MESSAGE( containerName + ".object found in Naming Service" )
except CosNaming.NamingContext.NotFound, ex:
MESSAGE( containerName + ".object not found in Naming Service" )
if obj is None:
container = None
else:
container = obj._narrow(Engines.Container)
if container is None:
MESSAGE( containerName + ".object exists but is not a Container" )
return container
#-------------------------------------------------------------------------
def FindComponent(self,containerName,componentName,listOfMachines):
if containerName!="":
machinesOK=[]
for i in range(len(listOfMachines)):
currentMachine=listOfMachines[i]
componentNameForNS= [CosNaming.NameComponent(currentMachine,"dir"),
CosNaming.NameComponent(containerName,"dir"),
CosNaming.NameComponent(componentName,"object")]
obj=None
try:
obj = self._containerRootContext.resolve(componentNameForNS)
except CosNaming.NamingContext.NotFound, ex:
MESSAGE( "component " + componentName + " not found on machine " + currentMachine + " , trying to load" )
pass
if obj is not None:
machinesOK.append(currentMachine)
pass
pass
if len(machinesOK)!=0:
bestMachine=self._contManager.FindBest(machinesOK)
componentNameForNS= [CosNaming.NameComponent(bestMachine,"dir"),
CosNaming.NameComponent(containerName,"dir"),
CosNaming.NameComponent(componentName,"object")]
obj=None
try:
obj = self._containerRootContext.resolve(componentNameForNS)
except:
pass
if obj is not None:
return obj._narrow(Engines.Component)
else:
MESSAGE( "Big problem !!!")
return None
else:
return None
else:
bestMachine=self._contManager.FindBest(listOfMachines)
MESSAGE("Not implemented yet ...")
return None
pass
#-------------------------------------------------------------------------
def setLauncher(self,name):
"""Change default launcher to the launcher identified by name
See module Launchers.py
"""
Launchers.setLauncher(name)
#-------------------------------------------------------------------------
def StartContainer(self, theComputer , theContainer ):
"""Start a container on theComputer machine with theContainer name
"""
# Get the Naming Service address
#
addr=self._orb.object_to_string(self._rootContext)
#
# If container name contains "Py" launch a Python Container
#
if theContainer.find('Py') == -1 :
CMD=['SALOME_Container',theContainer,'-ORBInitRef','NameService='+addr]
else:
CMD=['SALOME_ContainerPy.py',theContainer,'-ORBInitRef','NameService='+addr]
if theComputer in ("","localhost"):
theComputer=getShortHostName()
#
# Get the appropriate launcher and ask to launch
#
Launchers.getLauncher(theComputer).launch(theComputer,CMD)
#
# Wait until the container is registered in Naming Service
#
count =5
aContainer=None
while aContainer is None and count > 0:
time.sleep(1)
count = count - 1
MESSAGE( str(count) + ". Waiting for " + theComputer + "/" + theContainer )
aContainer = self.FindContainer( theComputer + "/" + theContainer )
return aContainer
#-------------------------------------------------------------------------
def FindOrStartContainer(self, theComputer , theContainer ):
"""Find or Start a container on theComputer machine with theContainer name
"""
if theComputer in ("","localhost"):
theComputer=getShortHostName()
MESSAGE( "FindOrStartContainer: " + theComputer + theContainer )
aContainer = self.FindContainer( theComputer + "/" + theContainer )
if aContainer is None :
aContainer= self.StartContainer(theComputer , theContainer )
return aContainer
#-------------------------------------------------------------------------
def LoadComponent(self,containerName,componentName,listOfMachine):
container=self._contManager.FindOrStartContainer(containerName,listOfMachine)
implementation="lib"+componentName+"Engine.so"
try:
component = container.load_impl(componentName, implementation)
MESSAGE( "component " + component._get_instanceName() + " launched !" )
return component
except:
MESSAGE( "component " + componentName + " NOT launched !" )
return None
#-------------------------------------------------------------------------
def FindOrLoadComponent(self, containerName, componentName):
sp=containerName.split("/")
if len(sp)==1:
listOfMachine=[]
listOfMachine.append(getShortHostName())
comp=self.FindComponent(containerName,componentName,listOfMachine)
if comp is None:
return self.LoadComponent(containerName,componentName,listOfMachine)
else:
return comp
pass
else:
params= Engines.MachineParameters(sp[1],sp[0],"LINUX",0,0,0,0)
listOfMachine=self._contManager.GetFittingResources(params,componentName)
ret=self.FindComponent(sp[1],componentName,listOfMachine);
if ret is None:
return self.LoadComponent(sp[1],componentName,listOfMachine)
else:
return ret
pass
|
import numpy as np
import matplotlib.pyplot as plt
import sectionproperties.pre.pre as pre
import sectionproperties.post.post as post
# TODO: ensure dimensions are floats
class Geometry:
"""Parent class for a cross-section geometry input.
Provides an interface for the user to specify the geometry defining a
cross-section. A method is provided for generating a triangular mesh, for
translating the cross-section by *(x, y)* and for plotting the geometry.
:cvar points: List of points *(x, y)* defining the vertices of the
cross-section
:vartype points: list[list[float, float]]
:cvar facets: List of point index pairs *(p1, p2)* defining the edges of
the cross-section
:vartype facets: list[list[int, int]]
:cvar holes: List of points *(x, y)* defining the locations of holes within
the cross-section. If there are no holes, provide an empty list [].
:vartype holes: list[list[float, float]]
:cvar control_points: A list of points *(x, y)* that define different
regions of the cross-section. A control point is an arbitrary point
within a region enclosed by facets.
:vartype control_points: list[list[float, float]]
:cvar shift: Vector that shifts the cross-section by *(x, y)*
:vartype shift: list[float, float]
"""
def __init__(self, control_points, shift):
"""Inits the Geometry class."""
self.control_points = control_points
self.shift = shift
self.points = []
self.facets = []
self.holes = []
def create_mesh(self, mesh_sizes):
"""Creates a quadratic triangular mesh from the Geometry object.
:param mesh_sizes: A list of maximum element areas corresponding to
each region within the cross-section geometry.
        :type mesh_sizes: list[float]
:return: Object containing generated mesh data
:rtype: :class:`meshpy.triangle.MeshInfo`
:raises AssertionError: If the number of mesh sizes does not match the
number of regions
The following example creates a circular cross-section with a diameter
of 50 with 64 points, and generates a mesh with a maximum triangular
area of 2.5::
import sectionproperties.pre.sections as sections
geometry = sections.CircularSection(d=50, n=64)
mesh = geometry.create_mesh(mesh_sizes=[2.5])
.. figure:: ../images/sections/circle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
str = "Number of mesh_sizes ({0}), ".format(len(mesh_sizes))
str += "should match the number of regions "
str += "({0}).".format(len(self.control_points))
assert(len(mesh_sizes) == len(self.control_points)), str
return pre.create_mesh(self.points, self.facets, self.holes,
self.control_points, mesh_sizes)
def shift_section(self):
"""Shifts the cross-section parameters by the class variable vector
*shift*."""
for point in self.points:
point[0] += self.shift[0]
point[1] += self.shift[1]
for hole in self.holes:
hole[0] += self.shift[0]
hole[1] += self.shift[1]
for cp in self.control_points:
cp[0] += self.shift[0]
cp[1] += self.shift[1]
def rotate_section(self, angle, rot_point=None):
"""Rotates the geometry and specified angle about a point. If the
rotation point is not provided, rotates the section about the first
control point in the list of control points of the
:class:`~sectionproperties.pre.sections.Geometry` object.
:param float angle: Angle (degrees) by which to rotate the section. A
positive angle leads to a counter-clockwise rotation.
:param rot_point: Point *(x, y)* about which to rotate the section
:type rot_point: list[float, float]
The following example rotates a 200UB25 section clockwise by 30
degrees::
import sectionproperties.pre.sections as sections
geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)
geometry.rotate_section(angle=-30)
"""
# convert angle to radians
rot_phi = angle * np.pi / 180
def get_r(pt1, pt2):
"""Returns the distance between two points."""
return ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** 0.5
def get_phi(pt1, pt2):
"""Returns the angle between two points."""
return np.arctan2(pt1[1] - pt2[1], pt1[0] - pt2[0])
def rotate_point(pt, rot_point, rot_phi):
"""Rotates a point given a rotation point and rotation angle."""
r = get_r(pt, rot_point)
phi = get_phi(pt, rot_point)
pt[0] = r * np.cos(phi + rot_phi) + rot_point[0]
pt[1] = r * np.sin(phi + rot_phi) + rot_point[1]
# use the first control point if no rotation point is specified
if rot_point is None:
rot_point = self.control_points[0]
# rotate all the points
for point in self.points:
rotate_point(point, rot_point, rot_phi)
# rotate all the holes
for hole in self.holes:
rotate_point(hole, rot_point, rot_phi)
# rotate all the control points
for cp in self.control_points:
rotate_point(cp, rot_point, rot_phi)
def mirror_section(self, axis='x', mirror_point=None):
"""Mirrors the geometry about a point on either the x or y-axis. If no
point is provided, mirrors the geometry about the first control point
in the list of control points of the
:class:`~sectionproperties.pre.sections.Geometry` object.
:param string axis: Axis about which to mirror the geometry, *'x'* or
*'y'*
:param mirror_point: Point about which to mirror the geometry *(x, y)*
:type mirror_point: list[float, float]
The following example mirrors a 200PFC section about the y-axis and the
point (0, 0)::
import sectionproperties.pre.sections as sections
geometry = sections.PfcSection(d=200, b=75, t_f=12, t_w=6, r=12, n_r=8)
geometry.mirror_section(axis='y', mirror_point=[0, 0])
"""
# use the first control point if no mirror point is specified
if mirror_point is None:
mirror_point = self.control_points[0]
# select the axis to mirror
if axis == 'x':
i = 1
elif axis == 'y':
i = 0
else:
raise RuntimeError("Enter a valid axis: 'x' or 'y'")
# mirror all points
for point in self.points:
point[i] = 2 * mirror_point[i] - point[i]
# mirror all holes
for hole in self.holes:
hole[i] = 2 * mirror_point[i] - hole[i]
# mirror all control points
for cp in self.control_points:
cp[i] = 2 * mirror_point[i] - cp[i]
def add_point(self, point):
"""Adds a point to the geometry and returns the added point id.
:param point: Location of the point
:type point: list[float, float]
:return: Point id
:rtype: int
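The following example (a minimal sketch) adds a point above an existing
rectangular section; the returned id is the index of the new point in the
points list::
import sectionproperties.pre.sections as sections
geometry = sections.RectangularSection(d=100, b=50)
point_id = geometry.add_point([25, 110])  # the rectangle has points 0 to 3, so this returns 4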
"""
self.points.append(point)
return len(self.points) - 1
def add_facet(self, facet):
"""Adds a facet to the geometry and returns the added facet id.
:param facet: Point indices of the facet
:type facet: list[int, int]
:return: Facet id
:rtype: int
"""
self.facets.append(facet)
return len(self.facets) - 1
def add_hole(self, hole):
"""Adds a hole location to the geometry and returns the added hole id.
:param hole: Location of the hole
:type hole: list[float, float]
:return: Hole id
:rtype: int
"""
self.holes.append(hole)
return len(self.holes) - 1
def add_control_point(self, control_point):
"""Adds a control point to the geometry and returns the added control
point id.
:param control_point: Location of the control point
:type control_point: list[float, float]
:return: Control point id
:rtype: int
"""
self.control_points.append(control_point)
return len(self.control_points) - 1
def clean_geometry(self, verbose=False):
"""Peforms a full clean on the geometry.
:param bool verbose: If set to true, information related to the
geometry cleaning process is printed to the terminal.
.. note:: Cleaning the geometry is always recommended when creating a
merged section which may result in overlapping or intersecting
facets, or duplicate nodes.
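The following example (adapted from the
:class:`~sectionproperties.pre.sections.MergedSection` example below) merges
two sections and cleans the resulting geometry before meshing::
import sectionproperties.pre.sections as sections
isection = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)
box = sections.Rhs(d=100, b=150, t=6, r_out=15, n_r=8, shift=[-8.5, 203])
geometry = sections.MergedSection([isection, box])
geometry.clean_geometry(verbose=True)
mesh = geometry.create_mesh(mesh_sizes=[5.0, 2.5])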
"""
self = pre.GeometryCleaner(self, verbose).clean_geometry()
def plot_geometry(self, ax=None, pause=True, labels=False):
"""Plots the geometry defined by the input section. If no axes object
is supplied a new figure and axis is created.
:param ax: Axes object on which the mesh is plotted
:type ax: :class:`matplotlib.axes.Axes`
:param bool pause: If set to true, the figure pauses the script until
the window is closed. If set to false, the script continues
immediately after the window is rendered.
:param bool labels: If set to true, node and facet labels are displayed
The following example creates a CHS discretised with 64 points, with a
diameter of 48 and thickness of 3.2, and plots the geometry::
import sectionproperties.pre.sections as sections
geometry = sections.Chs(d=48, t=3.2, n=64)
geometry.plot_geometry()
.. figure:: ../images/sections/chs_geometry.png
:align: center
:scale: 75 %
Geometry generated by the above example.
"""
# if no axes object is supplied, create and setup the plot
if ax is None:
ax_supplied = False
(fig, ax) = plt.subplots()
post.setup_plot(ax, pause)
else:
ax_supplied = True
for (i, f) in enumerate(self.facets):
# plot the points and facets
if i == 0:
ax.plot([self.points[f[0]][0], self.points[f[1]][0]],
[self.points[f[0]][1], self.points[f[1]][1]],
'ko-', markersize=2, label='Points & Facets')
else:
ax.plot([self.points[f[0]][0], self.points[f[1]][0]],
[self.points[f[0]][1], self.points[f[1]][1]],
'ko-', markersize=2)
for (i, h) in enumerate(self.holes):
# plot the holes
if i == 0:
ax.plot(h[0], h[1], 'rx', markersize=5, label='Holes')
else:
ax.plot(h[0], h[1], 'rx', markersize=5)
for (i, cp) in enumerate(self.control_points):
# plot the control points
if i == 0:
ax.plot(cp[0], cp[1], 'bo', markersize=5,
label='Control Points')
else:
ax.plot(cp[0], cp[1], 'bo', markersize=5)
# display the legend
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# display the labels
if labels:
# plot node labels
for (i, pt) in enumerate(self.points):
ax.annotate(str(i), xy=pt, color='r')
# plot facet labels
for (i, fct) in enumerate(self.facets):
pt1 = self.points[fct[0]]
pt2 = self.points[fct[1]]
xy = [(pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2]
ax.annotate(str(i), xy=xy, color='b')
# if no axes object is supplied, finish the plot
if not ax_supplied:
post.finish_plot(ax, pause, title='Cross-Section Geometry')
def calculate_extents(self):
"""Calculates the minimum and maximum x and y-values amongst the list
of points.
:return: Minimum and maximum x and y-values
*(x_min, x_max, y_min, y_max)*
:rtype: tuple(float, float, float, float)
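For example, an unshifted rectangular section has extents *(0, b, 0, d)*
(a minimal sketch)::
import sectionproperties.pre.sections as sections
geometry = sections.RectangularSection(d=100, b=50)
(x_min, x_max, y_min, y_max) = geometry.calculate_extents()  # (0, 50, 0, 100)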
"""
# loop through all points
for (i, pt) in enumerate(self.points):
x = pt[0]
y = pt[1]
# initialise min, max variables
if i == 0:
x_min = x
x_max = x
y_min = y
y_max = y
# update the mins and maxs where necessary
x_min = min(x_min, x)
x_max = max(x_max, x)
y_min = min(y_min, y)
y_max = max(y_max, y)
return (x_min, x_max, y_min, y_max)
class CustomSection(Geometry):
"""Constructs a cross-section from a list of points, facets, holes and a
user specified control point.
:param points: List of points *(x, y)* defining the vertices of the
cross-section
:type points: list[list[float, float]]
:param facets: List of point index pairs *(p1, p2)* defining the edges of
the cross-section
:type facets: list[list[int, int]]
:param holes: List of points *(x, y)* defining the locations of holes
within the cross-section. If there are no holes, provide an empty list
[].
:type holes: list[list[float, float]]
:param control_points: A list of points *(x, y)* that define different
regions of the cross-section. A control point is an arbitrary point
within a region enclosed by facets.
:type control_points: list[list[float, float]]
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a hollow trapezium with a base width of 100,
top width of 50, height of 50 and a wall thickness of 10. A mesh is
generated with a maximum triangular area of 2.0::
import sectionproperties.pre.sections as sections
points = [[0, 0], [100, 0], [75, 50], [25, 50], [15, 10], [85, 10], [70, 40], [30, 40]]
facets = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4]]
holes = [[50, 25]]
control_points = [[5, 5]]
geometry = sections.CustomSection(points, facets, holes, control_points)
mesh = geometry.create_mesh(mesh_sizes=[2.0])
.. figure:: ../images/sections/custom_geometry.png
:align: center
:scale: 75 %
Custom section geometry.
.. figure:: ../images/sections/custom_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, points, facets, holes, control_points, shift=[0, 0]):
"""Inits the CustomSection class."""
super().__init__(control_points, shift)
self.points = points
self.facets = facets
self.holes = holes
self.shift_section()
class RectangularSection(Geometry):
"""Constructs a rectangular section with the bottom left corner at the
origin *(0, 0)*, with depth *d* and width *b*.
:param float d: Depth (y) of the rectangle
:param float b: Width (x) of the rectangle
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a rectangular cross-section with a depth of
100 and width of 50, and generates a mesh with a maximum triangular area of
5::
import sectionproperties.pre.sections as sections
geometry = sections.RectangularSection(d=100, b=50)
mesh = geometry.create_mesh(mesh_sizes=[5])
.. figure:: ../images/sections/rectangle_geometry.png
:align: center
:scale: 75 %
Rectangular section geometry.
.. figure:: ../images/sections/rectangle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, shift=[0, 0]):
"""Inits the RectangularSection class."""
# assign control point
control_points = [[0.5 * b, 0.5 * d]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [[0, 0], [b, 0], [b, d], [0, d]]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 0]]
self.shift_section()
class CircularSection(Geometry):
"""Constructs a solid circle centered at the origin *(0, 0)* with diameter
*d* and using *n* points to construct the circle.
:param float d: Diameter of the circle
:param int n: Number of points discretising the circle
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a circular cross-section with a diameter of
50 with 64 points, and generates a mesh with a maximum triangular area of
2.5::
import sectionproperties.pre.sections as sections
geometry = sections.CircularSection(d=50, n=64)
mesh = geometry.create_mesh(mesh_sizes=[2.5])
.. figure:: ../images/sections/circle_geometry.png
:align: center
:scale: 75 %
Circular section geometry.
.. figure:: ../images/sections/circle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, n, shift=[0, 0]):
"""Inits the CircularSection class."""
# assign control point
control_points = [[0, 0]]
super().__init__(control_points, shift)
# loop through each point on the circle
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of the point
x = 0.5 * d * np.cos(theta)
y = 0.5 * d * np.sin(theta)
# append the current point to the points list
self.points.append([x, y])
# if we are not at the last point
if i != n - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the circle
else:
self.facets.append([i, 0])
self.shift_section()
class EllipticalSection(Geometry):
"""Constructs a solid ellipse centered at the origin *(0, 0)* with vertical diameter
*d_y* and horizontal diameter *d_x*, using *n* points to construct the ellipse.
:param float d_y: Diameter of the ellipse in the y-dimension
:param float d_x: Diameter of the ellipse in the x-dimension
:param int n: Number of points discretising the ellipse
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an elliptical cross-section with a vertical
diameter of 50 and a horizontal diameter of 25, using 40 points, and
generates a mesh with a maximum triangular area of 2.5::
import sectionproperties.pre.sections as sections
geometry = sections.EllipticalSection(d_y=50, d_x=25, n=40)
mesh = geometry.create_mesh(mesh_sizes=[2.5])
.. figure:: ../images/sections/ellipse_geometry.png
:align: center
:scale: 75 %
Elliptical section geometry.
.. figure:: ../images/sections/ellipse_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d_y, d_x, n, shift=[0, 0]):
"""Inits the EllipticalSection class."""
# assign control point
control_points = [[0, 0]]
super().__init__(control_points, shift)
# loop through each point on the ellipse
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of the point
x = 0.5 * d_x * np.cos(theta)
y = 0.5 * d_y * np.sin(theta)
# append the current point to the points list
self.points.append([x, y])
# if we are not at the last point
if i != n - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the ellipse
else:
self.facets.append([i, 0])
self.shift_section()
class Ehs(Geometry):
"""Constructs an elliptical hollow section centered at the origin *(0, 0)*,
with outer vertical diameter *d_yo*, outer horizontal diameter *d_xo*, and
thickness *t*, using *n* points to construct the inner and outer ellipses.
Note that the thickness of a hollow ellipse does not stay constant all
throughout the section.
:param float d_y: Diameter of the ellipse in the y-dimension
:param float d_x: Diameter of the ellipse in the x-dimension
:param float t: Thickness of the EHS
:param int n: Number of points discretising the inner and outer ellipses
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an EHS discretised with 30 points, with an
outer vertical diameter of 50, outer horizontal diameter of 25 and thickness of 5.0,
and generates a mesh with a maximum triangular area of 1.0::
import sectionproperties.pre.sections as sections
geometry = sections.Ehs(d_y=50, d_x=25, t=5.0, n=30)
mesh = geometry.create_mesh(mesh_sizes=[1.0])
.. figure:: ../images/sections/ehs_geometry.png
:align: center
:scale: 75 %
EHS geometry.
.. figure:: ../images/sections/ehs_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d_y, d_x, t, n, shift=[0, 0]):
"""Inits the Ehs class."""
# assign control point
control_points = [[(d_x * 0.5) - (t * 0.5), 0]]
super().__init__(control_points, shift)
# specify a hole in the centre of the EHS
self.holes = [[0, 0]]
# loop through each point of the EHS
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of outer and inner points
x_outer = 0.5 * d_x * np.cos(theta)
y_outer = 0.5 * d_y * np.sin(theta)
x_inner = (0.5 * d_x - t) * np.cos(theta)
y_inner = (0.5 * d_y - t) * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# if we are not at the last point
if i != n - 1:
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the circle
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.shift_section()
class Chs(Geometry):
"""Constructs a circular hollow section centered at the origin *(0, 0)*,
with diameter *d* and thickness *t*, using *n* points to construct the
inner and outer circles.
:param float d: Outer diameter of the CHS
:param float t: Thickness of the CHS
:param int n: Number of points discretising the inner and outer circles
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a CHS discretised with 64 points, with a
diameter of 48 and thickness of 3.2, and generates a mesh with a maximum
triangular area of 1.0::
import sectionproperties.pre.sections as sections
geometry = sections.Chs(d=48, t=3.2, n=64)
mesh = geometry.create_mesh(mesh_sizes=[1.0])
.. figure:: ../images/sections/chs_geometry.png
:align: center
:scale: 75 %
CHS geometry.
.. figure:: ../images/sections/chs_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, t, n, shift=[0, 0]):
"""Inits the Chs class."""
# assign control point
control_points = [[d * 0.5 - t * 0.5, 0]]
super().__init__(control_points, shift)
# specify a hole in the centre of the CHS
self.holes = [[0, 0]]
# loop through each point of the CHS
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of outer and inner points
x_outer = 0.5 * d * np.cos(theta)
y_outer = 0.5 * d * np.sin(theta)
x_inner = (0.5 * d - t) * np.cos(theta)
y_inner = (0.5 * d - t) * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# if we are not at the last point
if i != n - 1:
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the circle
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.shift_section()
class Rhs(Geometry):
"""Constructs a rectangular hollow section centered at *(b/2, d/2)*, with
depth *d*, width *b*, thickness *t* and outer radius *r_out*, using *n_r*
points to construct the inner and outer radii.
:param float d: Depth of the RHS
:param float b: Width of the RHS
:param float t: Thickness of the RHS
:param float r_out: Outer radius of the RHS
:param int n_r: Number of points discretising the inner and outer radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an RHS with a depth of 100, a width of 50, a
thickness of 6 and an outer radius of 9, using 8 points to discretise the
inner and outer radii. A mesh is generated with a maximum triangular area
of 2.0::
import sectionproperties.pre.sections as sections
geometry = sections.Rhs(d=100, b=50, t=6, r_out=9, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[2.0])
.. figure:: ../images/sections/rhs_geometry.png
:align: center
:scale: 75 %
RHS geometry.
.. figure:: ../images/sections/rhs_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t, r_out, n_r, shift=[0, 0]):
"""Inits the Rhs class."""
# assign control point
control_points = [[b - t * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# specify a hole in the centre of the RHS
self.holes = [[b * 0.5, d * 0.5]]
r_in = r_out - t # calculate internal radius
# construct the bottom left radius
for i in range(n_r):
# determine polar angle
theta = np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
x_inner = r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b - r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
x_inner = b - r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b - r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
x_inner = b - r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
x_inner = r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# build the facet list
for i in range(int(len(self.points) / 2)):
# if we are not at the last point
if i != int(len(self.points) / 2 - 1):
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the loop
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.shift_section()
class ISection(Geometry):
"""Constructs an I-section centered at *(b/2, d/2)*, with depth *d*, width
*b*, flange thickness *t_f*, web thickness *t_w*, and root radius *r*,
using *n_r* points to construct the root radius.
:param float d: Depth of the I-section
:param float b: Width of the I-section
:param float t_f: Flange thickness of the I-section
:param float t_w: Web thickness of the I-section
:param float r: Root radius of the I-section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an I-section with a depth of 203, a width of
133, a flange thickness of 7.8, a web thickness of 5.8 and a root radius of
8.9, using 16 points to discretise the root radius. A mesh is generated
with a maximum triangular area of 3.0::
import sectionproperties.pre.sections as sections
geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[3.0])
.. figure:: ../images/sections/isection_geometry.png
:align: center
:scale: 75 %
I-section geometry.
.. figure:: ../images/sections/isection_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[b * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# add first three points
self.points.append([0, 0])
self.points.append([b, 0])
self.points.append([b, t_f])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 1.0 / 3)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r + r * np.cos(theta)
y = t_f + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 0.5)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next four points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
self.points.append([0, d - t_f])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 * (1 - i * 1.0 / max(1, n_r - 1))
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left radius
for i in range(n_r):
# determine polar angle
theta = -np.pi * i * 1.0 / max(1, n_r - 1) * 0.5
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r + r * np.cos(theta)
y = t_f + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the last point
self.points.append([0, t_f])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class MonoISection(Geometry):
"""Constructs a monosymmetric I-section centered at
*(max(b_t, b_b)/2, d/2)*, with depth *d*, top flange width *b_t*, bottom
flange width *b_b*, top flange thickness *t_ft*, bottom flange thickness
*t_fb*, web thickness *t_w*, and root radius *r*, using *n_r* points to
construct the root radius.
:param float d: Depth of the I-section
:param float b_t: Top flange width
:param float b_b: Bottom flange width
:param float t_ft: Top flange thickness of the I-section
:param float t_fb: Bottom flange thickness of the I-section
:param float t_w: Web thickness of the I-section
:param float r: Root radius of the I-section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a monosymmetric I-section with a depth of
200, a top flange width of 50, a top flange thickness of 12, a bottom
flange width of 130, a bottom flange thickness of 8, a web thickness of
6 and a root radius of 8, using 16 points to discretise the root
radius. A mesh is generated with a maximum triangular area of 3.0::
import sectionproperties.pre.sections as sections
geometry = sections.MonoISection(d=200, b_t=50, b_b=130, t_ft=12, t_fb=8, t_w=6, r=8, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[3.0])
.. figure:: ../images/sections/monoisection_geometry.png
:align: center
:scale: 75 %
Monosymmetric I-section geometry.
.. figure:: ../images/sections/monoisection_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b_t, b_b, t_fb, t_ft, t_w, r, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[max(b_t, b_b) * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate central axis
x_central = max(b_t, b_b) * 0.5
# add first three points
self.points.append([x_central - b_b * 0.5, 0])
self.points.append([x_central + b_b * 0.5, 0])
self.points.append([x_central + b_b * 0.5, t_fb])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 1.0 / 3)
# calculate the locations of the radius points
x = x_central + t_w * 0.5 + r + r * np.cos(theta)
y = t_fb + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 0.5)
# calculate the locations of the radius points
x = x_central + t_w * 0.5 + r + r * np.cos(theta)
y = d - t_ft - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next four points
self.points.append([x_central + b_t * 0.5, d - t_ft])
self.points.append([x_central + b_t * 0.5, d])
self.points.append([x_central - b_t * 0.5, d])
self.points.append([x_central - b_t * 0.5, d - t_ft])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 * (1 - i * 1.0 / max(1, n_r - 1))
# calculate the locations of the radius points
x = x_central - t_w * 0.5 - r + r * np.cos(theta)
y = d - t_ft - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left radius
for i in range(n_r):
# determine polar angle
theta = -np.pi * i * 1.0 / max(1, n_r - 1) * 0.5
# calculate the locations of the radius points
x = x_central - t_w * 0.5 - r + r * np.cos(theta)
y = t_fb + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the last point
self.points.append([x_central - b_b * 0.5, t_fb])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class TaperedFlangeISection(Geometry):
"""Constructs a Tapered Flange I-section centered at *(b/2, d/2)*, with
depth *d*, width *b*, mid-flange thickness *t_f*, web thickness *t_w*, root
radius *r_r*, flange radius *r_f* and flange angle *alpha*, using *n_r*
points to construct the radii.
:param float d: Depth of the Tapered Flange I-section
:param float b: Width of the Tapered Flange I-section
:param float t_f: Mid-flange thickness of the Tapered Flange I-section
(measured at the point equidistant from the face of the web to the edge
of the flange)
:param float t_w: Web thickness of the Tapered Flange I-section
:param float r_r: Root radius of the Tapered Flange I-section
:param float r_f: Flange radius of the Tapered Flange I-section
:param float alpha: Flange angle of the Tapered Flange I-section (degrees)
:param int n_r: Number of points discretising the radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Tapered Flange I-section with a depth of
588, a width of 191, a mid-flange thickness of 27.2, a web thickness of
15.2, a root radius of 17.8, a flange radius of 8.9 and a flange angle of
8°, using 16 points to discretise the radii. A mesh is generated with a
maximum triangular area of 20.0::
import sectionproperties.pre.sections as sections
geometry = sections.TaperedFlangeISection(d=588, b=191, t_f=27.2,
t_w=15.2, r_r=17.8, r_f=8.9, alpha=8, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[20.0])
.. figure:: ../images/sections/taperedisection_geometry.png
:align: center
:scale: 75 %
Tapered Flange I-section geometry.
.. figure:: ../images/sections/taperedisection_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r_r, r_f, alpha, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[b * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate alpha in radians
alpha_rad = np.pi * alpha / 180
# calculate the height of the flange toe and dimensions of the straight
x1 = b * 0.25 - t_w * 0.25 - r_f * (1 - np.sin(alpha_rad))
y1 = x1 * np.tan(alpha_rad)
x2 = b * 0.25 - t_w * 0.25 - r_r * (1 - np.sin(alpha_rad))
y2 = x2 * np.tan(alpha_rad)
y_t = t_f - y1 - r_f * np.cos(alpha_rad)
# add first two points
self.points.append([0, 0])
self.points.append([b, 0])
# construct the bottom right flange toe radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom right root radius
for i in range(n_r):
# determine polar angle
theta = (3.0 / 2 * np.pi - alpha_rad) - (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r_r + r_r * np.cos(theta)
y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right root radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r_r + r_r * np.cos(theta)
y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right flange toe radius
for i in range(n_r):
# determine polar angle
theta = (3.0 * np.pi / 2 + alpha_rad) + i * 1.0 / max(
1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = d - y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next two points
self.points.append([b, d])
self.points.append([0, d])
# construct the top left flange toe radius
for i in range(n_r):
# determine polar angle
theta = np.pi + (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = r_f + r_f * np.cos(theta)
y = d - y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top left root radius
for i in range(n_r):
# determine polar angle
theta = (np.pi * 0.5 - alpha_rad) - (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r_r + r_r * np.cos(theta)
y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left root radius
for i in range(n_r):
# determine polar angle
theta = -i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r_r + r_r * np.cos(theta)
y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left flange toe radius
for i in range(n_r):
# determine polar angle
theta = (np.pi * 0.5 + alpha_rad) + (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = r_f + r_f * np.cos(theta)
y = y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class PfcSection(Geometry):
"""Constructs a PFC section with the bottom left corner at the origin
*(0, 0)*, with depth *d*, width *b*, flange thickness *t_f*, web thickness
*t_w* and root radius *r*, using *n_r* points to construct the root radius.
:param float d: Depth of the PFC section
:param float b: Width of the PFC section
:param float t_f: Flange thickness of the PFC section
:param float t_w: Web thickness of the PFC section
:param float r: Root radius of the PFC section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a PFC section with a depth of 250, a width of
90, a flange thickness of 15, a web thickness of 8 and a root radius of
12, using 8 points to discretise the root radius. A mesh is generated
with a maximum triangular area of 5.0::
import sectionproperties.pre.sections as sections
geometry = sections.PfcSection(d=250, b=90, t_f=15, t_w=8, r=12, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[5.0])
.. figure:: ../images/sections/pfc_geometry.png
:align: center
:scale: 75 %
PFC geometry.
.. figure:: ../images/sections/pfc_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):
"""Inits the PfcSection class."""
# assign control point
control_points = [[t_w * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# add first three points
self.points.append([0, 0])
self.points.append([b, 0])
self.points.append([b, t_f])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 1.0 / 3)
# calculate the locations of the radius points
x = t_w + r + r * np.cos(theta)
y = t_f + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 0.5)
# calculate the locations of the radius points
x = t_w + r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add last three points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class TaperedFlangeChannel(Geometry):
"""Constructs a Tapered Flange Channel section with the bottom left corner
at the origin *(0, 0)*, with depth *d*, width *b*, mid-flange thickness
*t_f*, web thickness *t_w*, root radius *r_r*, flange radius *r_f* and
flange angle *alpha*, using *n_r* points to construct the radii.
:param float d: Depth of the Tapered Flange Channel section
:param float b: Width of the Tapered Flange Channel section
:param float t_f: Mid-flange thickness of the Tapered Flange Channel
section (measured at the point equidistant from the face of the web to
the edge of the flange)
:param float t_w: Web thickness of the Tapered Flange Channel section
:param float r_r: Root radius of the Tapered Flange Channel section
:param float r_f: Flange radius of the Tapered Flange Channel section
:param float alpha: Flange angle of the Tapered Flange Channel section
(degrees)
:param int n_r: Number of points discretising the radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Tapered Flange Channel section with a depth
of 10, a width of 3.5, a mid-flange thickness of 0.575, a web thickness of
0.475, a root radius of 0.575, a flange radius of 0.4 and a flange angle of
8°, using 16 points to discretise the radii. A mesh is generated with a
maximum triangular area of 0.02::
import sectionproperties.pre.sections as sections
geometry = sections.TaperedFlangeChannel(d=10, b=3.5, t_f=0.575,
t_w=0.475, r_r=0.575, r_f=0.4, alpha=8, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[0.02])
.. figure:: ../images/sections/taperedchannel_geometry.png
:align: center
:scale: 75 %
Tapered Flange Channel section geometry.
.. figure:: ../images/sections/taperedchannel_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r_r, r_f, alpha, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[t_w * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate alpha in radians
alpha_rad = np.pi * alpha / 180
# calculate the height of the flange toe and dimensions of the straight
x1 = b * 0.5 - t_w * 0.5 - r_f * (1 - np.sin(alpha_rad))
y1 = x1 * np.tan(alpha_rad)
x2 = b * 0.5 - t_w * 0.5 - r_r * (1 - np.sin(alpha_rad))
y2 = x2 * np.tan(alpha_rad)
y_t = t_f - y1 - r_f * np.cos(alpha_rad)
# add first two points
self.points.append([0, 0])
self.points.append([b, 0])
# construct the bottom right flange toe radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom right root radius
for i in range(n_r):
# determine polar angle
theta = (3.0 / 2 * np.pi - alpha_rad) - (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = t_w + r_r + r_r * np.cos(theta)
y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right root radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = t_w + r_r + r_r * np.cos(theta)
y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right flange toe radius
for i in range(n_r):
# determine polar angle
theta = (3.0 * np.pi / 2 + alpha_rad) + i * 1.0 / max(
1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = d - y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the final two points
self.points.append([b, d])
self.points.append([0, d])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class TeeSection(Geometry):
"""Constructs a Tee section with the top left corner at *(0, d)*, with
depth *d*, width *b*, flange thickness *t_f*, web thickness *t_w* and root
radius *r*, using *n_r* points to construct the root radius.
:param float d: Depth of the Tee section
:param float b: Width of the Tee section
:param float t_f: Flange thickness of the Tee section
:param float t_w: Web thickness of the Tee section
:param float r: Root radius of the Tee section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Tee section with a depth of 200, a width of
100, a flange thickness of 12, a web thickness of 6 and a root radius of
8, using 8 points to discretise the root radius. A mesh is generated
with a maximum triangular area of 3.0::
import sectionproperties.pre.sections as sections
geometry = sections.TeeSection(d=200, b=100, t_f=12, t_w=6, r=8, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[3.0])
.. figure:: ../images/sections/tee_geometry.png
:align: center
:scale: 75 %
Tee section geometry.
.. figure:: ../images/sections/tee_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):
"""Inits the TeeSection class."""
# assign control point
control_points = [[b * 0.5, d - t_f * 0.5]]
super().__init__(control_points, shift)
# add first two points
self.points.append([b * 0.5 - t_w * 0.5, 0])
self.points.append([b * 0.5 + t_w * 0.5, 0])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 0.5)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add next four points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
self.points.append([0, d - t_f])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 * (1 - i * 1.0 / max(1, n_r - 1))
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class AngleSection(Geometry):
"""Constructs an angle section with the bottom left corner at the origin
*(0, 0)*, with depth *d*, width *b*, thickness *t*, root radius *r_r* and
toe radius *r_t*, using *n_r* points to construct the radii.
:param float d: Depth of the angle section
:param float b: Width of the angle section
:param float t: Thickness of the angle section
:param float r_r: Root radius of the angle section
:param float r_t: Toe radius of the angle section
:param int n_r: Number of points discretising the radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an angle section with a depth of 150, a width
of 100, a thickness of 8, a root radius of 12 and a toe radius of 5, using
16 points to discretise the radii. A mesh is generated with a maximum
triangular area of 2.0::
import sectionproperties.pre.sections as sections
geometry = sections.AngleSection(d=150, b=100, t=8, r_r=12, r_t=5, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[2.0])
.. figure:: ../images/sections/angle_geometry.png
:align: center
:scale: 75 %
Angle section geometry.
.. figure:: ../images/sections/angle_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, d, b, t, r_r, r_t, n_r, shift=[0, 0]):
"""Inits the AngleSection class."""
# assign control point
control_points = [[t * 0.5, t * 0.5]]
super().__init__(control_points, shift)
# add first two points
self.points.append([0, 0])
self.points.append([b, 0])
# construct the bottom toe radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = b - r_t + r_t * np.cos(theta)
y = t - r_t + r_t * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the root radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 1.0 / 3)
# calculate the locations of the radius points
x = t + r_r + r_r * np.cos(theta)
y = t + r_r + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top toe radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = t - r_t + r_t * np.cos(theta)
y = d - r_t + r_t * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next point
self.points.append([0, d])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class CeeSection(Geometry):
"""Constructs a Cee section with the bottom left corner at the origin
*(0, 0)*, with depth *d*, width *b*, lip *l*, thickness *t* and outer
radius *r_out*, using *n_r* points to construct the radius.
:param float d: Depth of the Cee section
:param float b: Width of the Cee section
:param float l: Lip of the Cee section
:param float t: Thickness of the Cee section
:param float r_out: Outer radius of the Cee section
:param int n_r: Number of points discretising the outer radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Cee section with a depth of 125, a width
of 50, a lip of 30, a thickness of 1.5 and an outer radius of 6, using 8
points to discretise the radius. A mesh is generated with a maximum
triangular area of 0.25::
import sectionproperties.pre.sections as sections
geometry = sections.CeeSection(d=125, b=50, l=30, t=1.5, r_out=6, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[0.25])
.. figure:: ../images/sections/cee_geometry.png
:align: center
:scale: 75 %
Cee section geometry.
.. figure:: ../images/sections/cee_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, d, b, l, t, r_out, n_r, shift=[0, 0]):
"""Inits the CeeSection class."""
# assign control point
control_points = [[t * 0.5, d * 0.5]]
super().__init__(control_points, shift)
r_in = r_out - t # calculate internal radius
# construct the outer bottom left radius
for i in range(n_r):
# determine polar angle
theta = np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# construct the outer bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b - r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# add next two points
self.points.append([b, l])
self.points.append([b - t, l])
# construct the inner bottom right radius
for i in range(n_r):
# determine polar angle
theta = 2 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = b - r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner bottom left radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = b - r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# add next two points
self.points.append([b - t, d - l])
self.points.append([b, d - l])
# construct the outer top right radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b - r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# construct the outer top left radius
for i in range(n_r):
# determine polar angle
theta = 0.5 * np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class ZedSection(Geometry):
"""Constructs a Zed section with the bottom left corner at the origin
*(0, 0)*, with depth *d*, left flange width *b_l*, right flange width
*b_r*, lip *l*, thickness *t* and outer radius *r_out*, using *n_r* points
to construct the radius.
:param float d: Depth of the Zed section
:param float b_l: Left flange width of the Zed section
:param float b_r: Right flange width of the Zed section
:param float l: Lip of the Zed section
:param float t: Thickness of the Zed section
:param float r_out: Outer radius of the Zed section
:param int n_r: Number of points discretising the outer radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Zed section with a depth of 100, a left
flange width of 40, a right flange width of 50, a lip of 20, a thickness of
1.2 and an outer radius of 5, using 8 points to discretise the radius.
A mesh is generated with a maximum triangular area of 0.15::
import sectionproperties.pre.sections as sections
geometry = sections.ZedSection(d=100, b_l=40, b_r=50, l=20, t=1.2, r_out=5, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[0.15])
.. figure:: ../images/sections/zed_geometry.png
:align: center
:scale: 75 %
Zed section geometry.
.. figure:: ../images/sections/zed_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, d, b_l, b_r, l, t, r_out, n_r, shift=[0, 0]):
"""Inits the ZedSection class."""
# assign control point
control_points = [[t * 0.5, d * 0.5]]
super().__init__(control_points, shift)
r_in = r_out - t # calculate internal radius
# construct the outer bottom left radius
for i in range(n_r):
# determine polar angle
theta = np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# construct the outer bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b_r - r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# add next two points
self.points.append([b_r, l])
self.points.append([b_r - t, l])
# construct the inner bottom right radius
for i in range(n_r):
# determine polar angle
theta = 2 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = b_r - r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner bottom left radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the outer top right radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = t - r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# construct the outer top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = t - b_l + r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# add the next two points
self.points.append([t - b_l, d - l])
self.points.append([t - b_l + t, d - l])
# construct the inner top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = t - b_l + r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner top right radius
for i in range(n_r):
# determine polar angle
theta = 0.5 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = t - r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class CruciformSection(Geometry):
"""Constructs a cruciform section centered at the origin *(0, 0)*, with
depth *d*, width *b*, thickness *t* and root radius *r*, using *n_r* points
to construct the root radius.
:param float d: Depth of the cruciform section
:param float b: Width of the cruciform section
:param float t: Thickness of the cruciform section
:param float r: Root radius of the cruciform section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a cruciform section with a depth of 250, a
width of 175, a thickness of 12 and a root radius of 16, using 16 points to
discretise the radius. A mesh is generated with a maximum triangular area
of 5.0::
import sectionproperties.pre.sections as sections
geometry = sections.CruciformSection(d=250, b=175, t=12, r=16, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[5.0])
.. figure:: ../images/sections/cruciform_geometry.png
:align: center
:scale: 75 %
Cruciform section geometry.
.. figure:: ../images/sections/cruciform_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, d, b, t, r, n_r, shift=[0, 0]):
"""Inits the CruciformSection class."""
# assign control point
control_points = [[0, 0]]
super().__init__(control_points, shift)
# add first two points
self.points.append([-t * 0.5, -d * 0.5])
self.points.append([t * 0.5, -d * 0.5])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = 0.5 * t + r + r * np.cos(theta)
y = -0.5 * t - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next two points
self.points.append([0.5 * b, -t * 0.5])
self.points.append([0.5 * b, t * 0.5])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = 1.5 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = 0.5 * t + r + r * np.cos(theta)
y = 0.5 * t + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next two points
self.points.append([t * 0.5, 0.5 * d])
self.points.append([-t * 0.5, 0.5 * d])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = -i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = -0.5 * t - r + r * np.cos(theta)
y = 0.5 * t + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next two points
self.points.append([-0.5 * b, t * 0.5])
self.points.append([-0.5 * b, -t * 0.5])
# construct the bottom left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = -0.5 * t - r + r * np.cos(theta)
y = -0.5 * t - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class MergedSection(Geometry):
"""Merges a number of section geometries into one geometry. Note that for
the meshing algorithm to work, there needs to be connectivity between all
regions of the provided geometries. Overlapping of geometries is permitted.
:param sections: A list of geometry objects to merge into one
:class:`~sectionproperties.pre.sections.Geometry` object
:type sections: list[:class:`~sectionproperties.pre.sections.Geometry`]
The following example creates a combined cross-section with a 150x100x6 RHS
placed on its side on top of a 200UB25.4. A mesh is generated with a
maximum triangle size of 5.0 for the I-section and 2.5 for the RHS::
import sectionproperties.pre.sections as sections
isection = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)
box = sections.Rhs(d=100, b=150, t=6, r_out=15, n_r=8, shift=[-8.5, 203])
geometry = sections.MergedSection([isection, box])
geometry.clean_geometry()
mesh = geometry.create_mesh(mesh_sizes=[5.0, 2.5])
.. figure:: ../images/sections/merged_geometry.png
:align: center
:scale: 75 %
Merged section geometry.
.. figure:: ../images/sections/merged_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, sections):
"""Inits the MergedSection class."""
super().__init__([], [0, 0])
point_count = 0
# loop through all sections
for section in sections:
# add facets
for facet in section.facets:
self.facets.append([facet[0] + point_count,
facet[1] + point_count])
# add points and count points
for point in section.points:
self.points.append([point[0], point[1]])
point_count += 1
# add holes
for hole in section.holes:
self.holes.append([hole[0], hole[1]])
# add control points
for control_point in section.control_points:
self.control_points.append([control_point[0],
control_point[1]])
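# A comment-only usage sketch (kept as comments so the module stays
# importable), assuming the two geometries `isection` and `box` from the
# docstring example above. Facet indices of each merged geometry are offset
# by the number of points already collected, so if `isection` contributes
# 40 points, the facet [0, 1] of `box` becomes [40, 41] in the merged list:
#
#   merged = MergedSection([isection, box])
#   merged.clean_geometry()  # recommended: removes duplicate nodes and
#                            # overlapping facets introduced by the merge
#   mesh = merged.create_mesh(mesh_sizes=[5.0, 2.5])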
# def sectionParse(sectionTypes, sectionData, settings):
# """
# Generates the geometry for the structural cross-section to be analysed,
# defined by a number of different sectionTypes, containing various
# sectionData. Note that there must be connectivity between all sections
# (i.e. there cannot be isolated sections) or the meshing and/or
# cross-section analysis will not work.
# """
#
# # initialise output variables
# points = []
# facets = []
# holes = []
# controlPoints = []
#
# # initialise pointCount variable
# pointCount = 0
#
# # loop through each section
# for (i, section) in enumerate(sectionTypes):
# # generate new section depending on section type
# if (section == "custom"):
# # load data from current sectionData
# try:
# pointData = sectionData[i]["points"]
# facetData = sectionData[i]["facets"]
# holeData = sectionData[i]["holes"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# controlPointData = sectionData[i]["control-point"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # generate a new section
# newSection = generateCustom(pointData, facetData, holeData, x, y,
# controlPointData)
#
# elif (section == "rectangle"):
# try:
# # load data from current sectionData
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateRectangle(d, b, x, y, controlPointData)
#
# elif (section == "circle"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# n = sectionData[i]["n"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateCircle(d, n, x, y, controlPointData)
#
# elif (section == "chs"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# t = sectionData[i]["t"]
# n = sectionData[i]["n"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateCHS(d, t, n, x, y, controlPointData)
#
# elif (section == "rhs"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# t = sectionData[i]["t"]
# r_out = sectionData[i]["r_out"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateRHS(
# d, b, t, r_out, n_r, x, y, controlPointData)
#
# elif (section == "i-section"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# tf = sectionData[i]["tf"]
# tw = sectionData[i]["tw"]
# r = sectionData[i]["r"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateISection(
# d, b, tf, tw, r, n_r, x, y, controlPointData)
#
# elif (section == "pfc"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# tf = sectionData[i]["tf"]
# tw = sectionData[i]["tw"]
# r = sectionData[i]["r"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generatePFCSection(
# d, b, tf, tw, r, n_r, x, y, controlPointData)
#
# elif (section == "tee"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# tf = sectionData[i]["tf"]
# tw = sectionData[i]["tw"]
# r = sectionData[i]["r"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateTeeSection(
# d, b, tf, tw, r, n_r, x, y, controlPointData)
#
# elif (section == "angle"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# t = sectionData[i]["t"]
# r_root = sectionData[i]["r_root"]
# r_toe = sectionData[i]["r_toe"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateAngleSection(
# d, b, t, r_root, r_toe, n_r, x, y, controlPointData)
#
# elif (section == "cee"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# lip = sectionData[i]["l"]
# t = sectionData[i]["t"]
# r_out = sectionData[i]["r_out"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateCeeSection(
# d, b, lip, t, r_out, n_r, x, y, controlPointData)
#
# elif (section == "zed"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b1 = sectionData[i]["b1"]
# b2 = sectionData[i]["b2"]
# lip = sectionData[i]["l"]
# t = sectionData[i]["t"]
# r_out = sectionData[i]["r_out"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateZedSection(
# d, b1, b2, lip, t, r_out, n_r, x, y, controlPointData)
#
# elif (section == "cruciform"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# t = sectionData[i]["t"]
# r = sectionData[i]["r"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateCruciform(
# d, b, t, r, n_r, x, y, controlPointData)
#
# else:
# print("Error: section type '{}' is not defined.".format(section))
# quit()
#
# # get points, facets, holes and controlpoint from newSection
# (newPoints, newFacets, newHoles,
# newControlPoint) = newSection.returnSection()
#
# # loop through the facets in the newSection and append to the list
# for f in newFacets:
# facets.append([f[0] + pointCount, f[1] + pointCount])
#
# # loop through the points in the newSection and append to the list
# for p in newPoints:
# pointCount += 1
# points.append([p[0], p[1]])
#
# # loop through the holes in the newSection and append to the list
# for h in newHoles:
# holes.append([h[0], h[1]])
#
# # append the controlPoint from the newSection
# controlPoints.append([newControlPoint[0], newControlPoint[1]])
#
# if (settings.outputLog):
# print("-- Loaded {0} points, {1} facets and {2} holes ".format(
# len(points), len(facets), len(holes)) +
# "from {0} sections.".format(len(sectionTypes)))
#
# return (points, facets, holes, controlPoints)
#
#
# def handleKeyError(err, section):
# """
# Displays an error message if the correct keys are not provided for the
# current section and quits the program.
# """
#
# print(
# "Error: Required key {0} not found for section type '{1}'.".format(
# err, section) +
# " Refer to the documentation for the required keys.")
# quit()
import numpy as np
import matplotlib.pyplot as plt
import sectionproperties.pre.pre as pre
import sectionproperties.post.post as post
# TODO: ensure dimensions are floats
class Geometry:
"""Parent class for a cross-section geometry input.
Provides an interface for the user to specify the geometry defining a
cross-section. Methods are provided for generating a triangular mesh, for
translating the cross-section by *(x, y)*, and for plotting the geometry.
:cvar points: List of points *(x, y)* defining the vertices of the
cross-section
:vartype points: list[list[float, float]]
:cvar facets: List of point index pairs *(p1, p2)* defining the edges of
the cross-section
:vartype facets: list[list[int, int]]
:cvar holes: List of points *(x, y)* defining the locations of holes within
the cross-section. If there are no holes, provide an empty list [].
:vartype holes: list[list[float, float]]
:cvar control_points: A list of points *(x, y)* that define different
regions of the cross-section. A control point is an arbitrary point
within a region enclosed by facets.
:vartype control_points: list[list[float, float]]
:cvar shift: Vector that shifts the cross-section by *(x, y)*
:vartype shift: list[float, float]
"""
def __init__(self, control_points, shift):
"""Inits the Geometry class."""
self.control_points = control_points
self.shift = shift
self.points = []
self.facets = []
self.holes = []
def create_mesh(self, mesh_sizes):
"""Creates a quadratic triangular mesh from the Geometry object.
:param mesh_sizes: A list of maximum element areas corresponding to
each region within the cross-section geometry.
:type mesh_sizes: list[float]
:return: Object containing generated mesh data
:rtype: :class:`meshpy.triangle.MeshInfo`
:raises AssertionError: If the number of mesh sizes does not match the
number of regions
The following example creates a circular cross-section with a diameter
of 50 with 64 points, and generates a mesh with a maximum triangular
area of 2.5::
import sectionproperties.pre.sections as sections
geometry = sections.CircularSection(d=50, n=64)
mesh = geometry.create_mesh(mesh_sizes=[2.5])
.. figure:: ../images/sections/circle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
str = "Number of mesh_sizes ({0}), ".format(len(mesh_sizes))
str += "should match the number of regions "
str += "({0}).".format(len(self.control_points))
assert(len(mesh_sizes) == len(self.control_points)), str
return pre.create_mesh(self.points, self.facets, self.holes,
self.control_points, mesh_sizes)
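# Comment-only sketch of the one-size-per-region rule enforced above: a
# hypothetical geometry with two control points (two regions) needs two
# entries in mesh_sizes, one maximum element area per region:
#
#   mesh = geometry.create_mesh(mesh_sizes=[1.5, 3.0])
#
# Passing a single size for a two-region geometry trips the assertion.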
def shift_section(self):
"""Shifts the cross-section parameters by the class variable vector
*shift*."""
for point in self.points:
point[0] += self.shift[0]
point[1] += self.shift[1]
for hole in self.holes:
hole[0] += self.shift[0]
hole[1] += self.shift[1]
for cp in self.control_points:
cp[0] += self.shift[0]
cp[1] += self.shift[1]
def rotate_section(self, angle, rot_point=None):
"""Rotates the geometry and specified angle about a point. If the
rotation point is not provided, rotates the section about the first
control point in the list of control points of the
:class:`~sectionproperties.pre.sections.Geometry` object.
:param float angle: Angle (degrees) by which to rotate the section. A
positive angle leads to a counter-clockwise rotation.
:param rot_point: Point *(x, y)* about which to rotate the section
:type rot_point: list[float, float]
The following example rotates a 200UB25 section clockwise by 30
degrees::
import sectionproperties.pre.sections as sections
geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)
geometry.rotate_section(angle=-30)
"""
# convert angle to radians
rot_phi = angle * np.pi / 180
def get_r(pt1, pt2):
"""Returns the distance between two points."""
return ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** 0.5
def get_phi(pt1, pt2):
"""Returns the angle between two points."""
return np.arctan2(pt1[1] - pt2[1], pt1[0] - pt2[0])
def rotate_point(pt, rot_point, rot_phi):
"""Rotates a point given a rotation point and rotation angle."""
r = get_r(pt, rot_point)
phi = get_phi(pt, rot_point)
pt[0] = r * np.cos(phi + rot_phi) + rot_point[0]
pt[1] = r * np.sin(phi + rot_phi) + rot_point[1]
# use the first control point if no rotation point is specified
if rot_point is None:
rot_point = self.control_points[0]
# rotate all the points
for point in self.points:
rotate_point(point, rot_point, rot_phi)
# rotate all the holes
for hole in self.holes:
rotate_point(hole, rot_point, rot_phi)
# rotate all the control points
for cp in self.control_points:
rotate_point(cp, rot_point, rot_phi)
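# Comment-only sketch of the polar rotation used above: for a point at
# (1, 0) rotated by +90 degrees about the origin, r = 1 and phi = 0, so the
# new location is (cos(pi / 2), sin(pi / 2)) = (0, 1), i.e. a
# counter-clockwise rotation as stated in the docstring. For example, with a
# hypothetical geometry:
#
#   geometry = RectangularSection(d=100, b=50)
#   geometry.rotate_section(angle=90, rot_point=[0, 0])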
def mirror_section(self, axis='x', mirror_point=None):
"""Mirrors the geometry about a point on either the x or y-axis. If no
point is provided, mirrors the geometry about the first control point
in the list of control points of the
:class:`~sectionproperties.pre.sections.Geometry` object.
:param string axis: Axis about which to mirror the geometry, *'x'* or
*'y'*
:param mirror_point: Point about which to mirror the geometry *(x, y)*
:type mirror_point: list[float, float]
The following example mirrors a 200PFC section about the y-axis and the
point (0, 0)::
import sectionproperties.pre.sections as sections
geometry = sections.PfcSection(d=200, b=75, t_f=12, t_w=6, r=12, n_r=8)
geometry.mirror_section(axis='y', mirror_point=[0, 0])
"""
# use the first control point if no mirror point is specified
if mirror_point is None:
mirror_point = self.control_points[0]
# select the axis to mirror
if axis == 'x':
i = 1
elif axis == 'y':
i = 0
else:
raise RuntimeError("Enter a valid axis: 'x' or 'y'")
# mirror all points
for point in self.points:
point[i] = 2 * mirror_point[i] - point[i]
# mirror all holes
for hole in self.holes:
hole[i] = 2 * mirror_point[i] - hole[i]
# mirror all control points
for cp in self.control_points:
cp[i] = 2 * mirror_point[i] - cp[i]
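# Comment-only sketch of the reflection used above: mirroring about the
# vertical line x = mirror_point[0] maps x to 2 * mirror_point[0] - x, so a
# point at x = 30 mirrored about x = 10 moves to x = -10. For example, with
# a hypothetical geometry:
#
#   geometry = RectangularSection(d=100, b=50)
#   geometry.mirror_section(axis='y', mirror_point=[0, 0])
#   # the section now occupies x in [-50, 0] instead of [0, 50]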
def add_point(self, point):
"""Adds a point to the geometry and returns the added point id.
:param point: Location of the point
:type point: list[float, float]
:return: Point id
:rtype: int
"""
self.points.append(point)
return len(self.points) - 1
def add_facet(self, facet):
"""Adds a facet to the geometry and returns the added facet id.
:param facet: Point indices of the facet
:type facet: list[int, int]
:return: Facet id
:rtype: int
"""
self.facets.append(facet)
return len(self.facets) - 1
def add_hole(self, hole):
"""Adds a hole location to the geometry and returns the added hole id.
:param hole: Location of the hole
:type hole: list[float, float]
:return: Hole id
:rtype: int
"""
self.holes.append(hole)
return len(self.holes) - 1
def add_control_point(self, control_point):
"""Adds a control point to the geometry and returns the added control
point id.
:param control_point: Location of the control point
:type control_point: list[float, float]
:return: Control point id
:rtype: int
"""
self.control_points.append(control_point)
return len(self.control_points) - 1
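# Comment-only sketch of assembling a small geometry by hand with the helper
# methods above (a solid right triangle with legs of length 10; the control
# point [2, 2] lies inside the single region):
#
#   geometry = Geometry(control_points=[], shift=[0, 0])
#   p0 = geometry.add_point([0, 0])
#   p1 = geometry.add_point([10, 0])
#   p2 = geometry.add_point([0, 10])
#   geometry.add_facet([p0, p1])
#   geometry.add_facet([p1, p2])
#   geometry.add_facet([p2, p0])
#   geometry.add_control_point([2, 2])
#   mesh = geometry.create_mesh(mesh_sizes=[1.0])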
def clean_geometry(self, verbose=False):
"""Peforms a full clean on the geometry.
:param bool verbose: If set to true, information related to the
geometry cleaning process is printed to the terminal.
.. note:: Cleaning the geometry is always recommended when creating a
merged section which may result in overlapping or intersecting
facets, or duplicate nodes.
"""
self = pre.GeometryCleaner(self, verbose).clean_geometry()
def plot_geometry(self, ax=None, pause=True, labels=False):
"""Plots the geometry defined by the input section. If no axes object
is supplied, a new figure and axis are created.
:param ax: Axes object on which the mesh is plotted
:type ax: :class:`matplotlib.axes.Axes`
:param bool pause: If set to true, the figure pauses the script until
the window is closed. If set to false, the script continues
immediately after the window is rendered.
:param bool labels: If set to true, node and facet labels are displayed
The following example creates a CHS discretised with 64 points, with a
diameter of 48 and thickness of 3.2, and plots the geometry::
import sectionproperties.pre.sections as sections
geometry = sections.Chs(d=48, t=3.2, n=64)
geometry.plot_geometry()
.. figure:: ../images/sections/chs_geometry.png
:align: center
:scale: 75 %
Geometry generated by the above example.
"""
# if no axes object is supplied, create and setup the plot
if ax is None:
ax_supplied = False
(fig, ax) = plt.subplots()
post.setup_plot(ax, pause)
else:
ax_supplied = True
for (i, f) in enumerate(self.facets):
# plot the points and facets
if i == 0:
ax.plot([self.points[f[0]][0], self.points[f[1]][0]],
[self.points[f[0]][1], self.points[f[1]][1]],
'ko-', markersize=2, label='Points & Facets')
else:
ax.plot([self.points[f[0]][0], self.points[f[1]][0]],
[self.points[f[0]][1], self.points[f[1]][1]],
'ko-', markersize=2)
for (i, h) in enumerate(self.holes):
# plot the holes
if i == 0:
ax.plot(h[0], h[1], 'rx', markersize=5, label='Holes')
else:
ax.plot(h[0], h[1], 'rx', markersize=5)
for (i, cp) in enumerate(self.control_points):
# plot the control points
if i == 0:
ax.plot(cp[0], cp[1], 'bo', markersize=5,
label='Control Points')
else:
ax.plot(cp[0], cp[1], 'bo', markersize=5)
# display the legend
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# display the labels
if labels:
# plot node labels
for (i, pt) in enumerate(self.points):
ax.annotate(str(i), xy=pt, color='r')
# plot facet labels
for (i, fct) in enumerate(self.facets):
pt1 = self.points[fct[0]]
pt2 = self.points[fct[1]]
xy = [(pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2]
ax.annotate(str(i), xy=xy, color='b')
# if no axes object is supplied, finish the plot
if not ax_supplied:
post.finish_plot(ax, pause, title='Cross-Section Geometry')
def calculate_extents(self):
"""Calculates the minimum and maximum x and y-values amongst the list
of points.
:return: Minimum and maximum x and y-values
*(x_min, x_max, y_min, y_max)*
:rtype: tuple(float, float, float, float)
"""
# loop through all points
for (i, pt) in enumerate(self.points):
x = pt[0]
y = pt[1]
# initialise min, max variables
if i == 0:
x_min = x
x_max = x
y_min = y
y_max = y
# update the mins and maxs where necessary
x_min = min(x_min, x)
x_max = max(x_max, x)
y_min = min(y_min, y)
y_max = max(y_max, y)
return (x_min, x_max, y_min, y_max)
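# Comment-only sketch: for an unshifted RectangularSection(d=100, b=50) the
# extents are returned as (x_min, x_max, y_min, y_max) = (0, 50, 0, 100):
#
#   geometry = RectangularSection(d=100, b=50)
#   (x_min, x_max, y_min, y_max) = geometry.calculate_extents()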
class CustomSection(Geometry):
"""Constructs a cross-section from a list of points, facets, holes and a
user specified control point.
:param points: List of points *(x, y)* defining the vertices of the
cross-section
:type points: list[list[float, float]]
:param facets: List of point index pairs *(p1, p2)* defining the edges of
the cross-section
:type facets: list[list[int, int]]
:param holes: List of points *(x, y)* defining the locations of holes
within the cross-section. If there are no holes, provide an empty list
[].
:type holes: list[list[float, float]]
:param control_points: A list of points *(x, y)* that define different
regions of the cross-section. A control point is an arbitrary point
within a region enclosed by facets.
:type control_points: list[list[float, float]]
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a hollow trapezium with a base width of 100,
top width of 50, height of 50 and a wall thickness of 10. A mesh is
generated with a maximum triangular area of 2.0::
import sectionproperties.pre.sections as sections
points = [[0, 0], [100, 0], [75, 50], [25, 50], [15, 10], [85, 10], [70, 40], [30, 40]]
facets = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4]]
holes = [[50, 25]]
control_points = [[5, 5]]
geometry = sections.CustomSection(points, facets, holes, control_points)
mesh = geometry.create_mesh(mesh_sizes=[2.0])
.. figure:: ../images/sections/custom_geometry.png
:align: center
:scale: 75 %
Custom section geometry.
.. figure:: ../images/sections/custom_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, points, facets, holes, control_points, shift=[0, 0]):
"""Inits the CustomSection class."""
super().__init__(control_points, shift)
self.points = points
self.facets = facets
self.holes = holes
self.shift_section()
class RectangularSection(Geometry):
"""Constructs a rectangular section with the bottom left corner at the
origin *(0, 0)*, with depth *d* and width *b*.
:param float d: Depth (y) of the rectangle
:param float b: Width (x) of the rectangle
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a rectangular cross-section with a depth of
100 and width of 50, and generates a mesh with a maximum triangular area of
5::
import sectionproperties.pre.sections as sections
geometry = sections.RectangularSection(d=100, b=50)
mesh = geometry.create_mesh(mesh_sizes=[5])
.. figure:: ../images/sections/rectangle_geometry.png
:align: center
:scale: 75 %
Rectangular section geometry.
.. figure:: ../images/sections/rectangle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, shift=[0, 0]):
"""Inits the RectangularSection class."""
# assign control point
control_points = [[0.5 * b, 0.5 * d]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [[0, 0], [b, 0], [b, d], [0, d]]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 0]]
self.shift_section()
class CircularSection(Geometry):
"""Constructs a solid circle centered at the origin *(0, 0)* with diameter
*d* and using *n* points to construct the circle.
:param float d: Diameter of the circle
:param int n: Number of points discretising the circle
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a circular cross-section with a diameter of
50 with 64 points, and generates a mesh with a maximum triangular area of
2.5::
import sectionproperties.pre.sections as sections
geometry = sections.CircularSection(d=50, n=64)
mesh = geometry.create_mesh(mesh_sizes=[2.5])
.. figure:: ../images/sections/circle_geometry.png
:align: center
:scale: 75 %
Circular section geometry.
.. figure:: ../images/sections/circle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, n, shift=[0, 0]):
"""Inits the CircularSection class."""
# assign control point
control_points = [[0, 0]]
super().__init__(control_points, shift)
# loop through each point on the circle
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of the point
x = 0.5 * d * np.cos(theta)
y = 0.5 * d * np.sin(theta)
# append the current point to the points list
self.points.append([x, y])
# if we are not at the last point
if i != n - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the circle
else:
self.facets.append([i, 0])
self.shift_section()
class EllipticalSection(Geometry):
"""Constructs a solid ellipse centered at the origin *(0, 0)* with vertical diameter
*d_y* and horizontal diameter *d_x*, using *n* points to construct the ellipse.
:param float d_y: Diameter of the ellipse in the y-dimension
:param float d_x: Diameter of the ellipse in the x-dimension
:param int n: Number of points discretising the ellipse
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an elliptical cross-section with a vertical
diameter of 50 and horizontal diameter of 25, with 40 points, and generates
a mesh with a maximum triangular area of 1.0::
import sectionproperties.pre.sections as sections
geometry = sections.EllipticalSection(d_y=50, d_x=25, n=40)
mesh = geometry.create_mesh(mesh_sizes=[1.0])
.. figure:: ../images/sections/ellipse_geometry.png
:align: center
:scale: 75 %
Elliptical section geometry.
.. figure:: ../images/sections/ellipse_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d_y, d_x, n, shift=[0, 0]):
"""Inits the EllipticalSection class."""
# assign control point centred at zero
control_points = [[0, 0]]
super().__init__(control_points, shift)
# loop through each point on the ellipse
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of the point
x = 0.5 * d_x * np.cos(theta)
y = 0.5 * d_y * np.sin(theta)
# append the current point to the points list
self.points.append([x, y])
# if we are not at the last point
if i != n - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the ellipse
else:
self.facets.append([i, 0])
self.shift_section()
class Ehs(Geometry):
"""Constructs an elliptical hollow section centered at the origin *(0, 0)*,
with outer vertical diameter *d_y*, outer horizontal diameter *d_x*, and
thickness *t*, using *n* points to construct the inner and outer ellipses.
Note that the wall thickness of a hollow ellipse does not remain constant
throughout the section.
:param float d_y: Diameter of the ellipse in the y-dimension
:param float d_x: Diameter of the ellipse in the x-dimension
:param float t: Thickness of the EHS
:param int n: Number of points discretising the inner and outer ellipses
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an EHS discretised with 30 points, with an
outer vertical diameter of 50, an outer horizontal diameter of 25 and a thickness of 5.0,
and generates a mesh with a maximum triangular area of 1.0::
import sectionproperties.pre.sections as sections
geometry = sections.Ehs(d_y=50, d_x=25, t=5.0, n=30)
mesh = geometry.create_mesh(mesh_sizes=[1.0])
.. figure:: ../images/sections/ehs_geometry.png
:align: center
:scale: 75 %
EHS geometry.
.. figure:: ../images/sections/ehs_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d_y, d_x, t, n, shift=[0, 0]):
"""Inits the Ehs class."""
# assign control point
control_points = [[(d_x * 0.5) - (t * 0.5), 0]]
super().__init__(control_points, shift)
# specify a hole in the centre of the EHS
self.holes = [[0, 0]]
# loop through each point of the EHS
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of outer and inner points
x_outer = 0.5 * d_x * np.cos(theta)
y_outer = 0.5 * d_y * np.sin(theta)
x_inner = ((0.5 * d_x) - t) * np.cos(theta)
y_inner = ((0.5 * d_y) - t) * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# if we are not at the last point
if i != n - 1:
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the circle
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.shift_section()
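# Note on the facet numbering used by the hollow elliptical and circular
# sections: points are appended alternately (outer, inner), so even indices
# trace the outer ring and odd indices trace the inner ring. For example,
# with n = 4 the outer facets are [0, 2], [2, 4], [4, 6], [6, 0] and the
# inner facets are [1, 3], [3, 5], [5, 7], [7, 1].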
class Chs(Geometry):
"""Constructs a circular hollow section centered at the origin *(0, 0)*,
with diameter *d* and thickness *t*, using *n* points to construct the
inner and outer circles.
:param float d: Outer diameter of the CHS
:param float t: Thickness of the CHS
:param int n: Number of points discretising the inner and outer circles
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a CHS discretised with 64 points, with a
diameter of 48 and thickness of 3.2, and generates a mesh with a maximum
triangular area of 1.0::
import sectionproperties.pre.sections as sections
geometry = sections.Chs(d=48, t=3.2, n=64)
mesh = geometry.create_mesh(mesh_sizes=[1.0])
.. figure:: ../images/sections/chs_geometry.png
:align: center
:scale: 75 %
CHS geometry.
.. figure:: ../images/sections/chs_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, t, n, shift=[0, 0]):
"""Inits the Chs class."""
# assign control point
control_points = [[d * 0.5 - t * 0.5, 0]]
super().__init__(control_points, shift)
# specify a hole in the centre of the CHS
self.holes = [[0, 0]]
# loop through each point of the CHS
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of outer and inner points
x_outer = 0.5 * d * np.cos(theta)
y_outer = 0.5 * d * np.sin(theta)
x_inner = (0.5 * d - t) * np.cos(theta)
y_inner = (0.5 * d - t) * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# if we are not at the last point
if i != n - 1:
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the circle
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.shift_section()
class Rhs(Geometry):
"""Constructs a rectangular hollow section centered at *(b/2, d/2)*, with
depth *d*, width *b*, thickness *t* and outer radius *r_out*, using *n_r*
points to construct the inner and outer radii.
:param float d: Depth of the RHS
:param float b: Width of the RHS
:param float t: Thickness of the RHS
:param float r_out: Outer radius of the RHS
:param int n_r: Number of points discretising the inner and outer radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an RHS with a depth of 100, a width of 50, a
thickness of 6 and an outer radius of 9, using 8 points to discretise the
inner and outer radii. A mesh is generated with a maximum triangular area
of 2.0::
import sectionproperties.pre.sections as sections
geometry = sections.Rhs(d=100, b=50, t=6, r_out=9, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[2.0])
.. figure:: ../images/sections/rhs_geometry.png
:align: center
:scale: 75 %
RHS geometry.
.. figure:: ../images/sections/rhs_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t, r_out, n_r, shift=[0, 0]):
"""Inits the Rhs class."""
# assign control point
control_points = [[b - t * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# specify a hole in the centre of the RHS
self.holes = [[b * 0.5, d * 0.5]]
r_in = r_out - t # calculate internal radius
# construct the bottom left radius
for i in range(n_r):
# determine polar angle
theta = np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
x_inner = r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b - r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
x_inner = b - r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b - r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
x_inner = b - r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
x_inner = r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# build the facet list
for i in range(int(len(self.points) / 2)):
# if we are not at the last point
if i != int(len(self.points) / 2 - 1):
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the loop
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.shift_section()
class ISection(Geometry):
"""Constructs an I-section centered at *(b/2, d/2)*, with depth *d*, width
*b*, flange thickness *t_f*, web thickness *t_w*, and root radius *r*,
using *n_r* points to construct the root radius.
:param float d: Depth of the I-section
:param float b: Width of the I-section
:param float t_f: Flange thickness of the I-section
:param float t_w: Web thickness of the I-section
:param float r: Root radius of the I-section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an I-section with a depth of 203, a width of
133, a flange thickness of 7.8, a web thickness of 5.8 and a root radius of
8.9, using 16 points to discretise the root radius. A mesh is generated
with a maximum triangular area of 3.0::
import sectionproperties.pre.sections as sections
geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[3.0])
.. figure:: ../images/sections/isection_geometry.png
:align: center
:scale: 75 %
I-section geometry.
.. figure:: ../images/sections/isection_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[b * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# add first three points
self.points.append([0, 0])
self.points.append([b, 0])
self.points.append([b, t_f])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 1.0 / 3)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r + r * np.cos(theta)
y = t_f + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 0.5)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next four points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
self.points.append([0, d - t_f])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 * (1 - i * 1.0 / max(1, n_r - 1))
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left radius
for i in range(n_r):
# determine polar angle
theta = -np.pi * i * 1.0 / max(1, n_r - 1) * 0.5
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r + r * np.cos(theta)
y = t_f + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the last point
self.points.append([0, t_f])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class MonoISection(Geometry):
"""Constructs a monosymmetric I-section centered at
*(max(b_t, b_b)/2, d/2)*, with depth *d*, top flange width *b_t*, bottom
flange width *b_b*, top flange thickness *t_ft*, bottom flange thickness
*t_fb*, web thickness *t_w*, and root radius *r*, using *n_r* points to
construct the root radius.
:param float d: Depth of the I-section
:param float b_t: Top flange width
:param float b_b: Bottom flange width
:param float t_ft: Top flange thickness of the I-section
:param float t_fb: Bottom flange thickness of the I-section
:param float t_w: Web thickness of the I-section
:param float r: Root radius of the I-section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a monosymmetric I-section with a depth of
200, a top flange width of 50, a top flange thickness of 12, a bottom
flange width of 130, a bottom flange thickness of 8, a web thickness of
6 and a root radius of 8, using 16 points to discretise the root
radius. A mesh is generated with a maximum triangular area of 3.0::
import sectionproperties.pre.sections as sections
geometry = sections.MonoISection(d=200, b_t=50, b_b=130, t_ft=12, t_fb=8, t_w=6, r=8, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[3.0])
.. figure:: ../images/sections/monoisection_geometry.png
:align: center
:scale: 75 %
I-section geometry.
.. figure:: ../images/sections/monoisection_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b_t, b_b, t_fb, t_ft, t_w, r, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[max(b_t, b_b) * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate central axis
x_central = max(b_t, b_b) * 0.5
# add first three points
self.points.append([x_central - b_b * 0.5, 0])
self.points.append([x_central + b_b * 0.5, 0])
self.points.append([x_central + b_b * 0.5, t_fb])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 1.0 / 3)
# calculate the locations of the radius points
x = x_central + t_w * 0.5 + r + r * np.cos(theta)
y = t_fb + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 0.5)
# calculate the locations of the radius points
x = x_central + t_w * 0.5 + r + r * np.cos(theta)
y = d - t_ft - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next four points
self.points.append([x_central + b_t * 0.5, d - t_ft])
self.points.append([x_central + b_t * 0.5, d])
self.points.append([x_central - b_t * 0.5, d])
self.points.append([x_central - b_t * 0.5, d - t_ft])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 * (1 - i * 1.0 / max(1, n_r - 1))
# calculate the locations of the radius points
x = x_central - t_w * 0.5 - r + r * np.cos(theta)
y = d - t_ft - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left radius
for i in range(n_r):
# determine polar angle
theta = -np.pi * i * 1.0 / max(1, n_r - 1) * 0.5
# calculate the locations of the radius points
x = x_central - t_w * 0.5 - r + r * np.cos(theta)
y = t_fb + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the last point
self.points.append([x_central - b_b * 0.5, t_fb])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class TaperedFlangeISection(Geometry):
"""Constructs a Tapered Flange I-section centered at *(b/2, d/2)*, with
depth *d*, width *b*, mid-flange thickness *t_f*, web thickness *t_w*, root
radius *r_r*, flange radius *r_f* and flange angle *alpha*, using *n_r*
points to construct the radii.
:param float d: Depth of the Tapered Flange I-section
:param float b: Width of the Tapered Flange I-section
:param float t_f: Mid-flange thickness of the Tapered Flange I-section
(measured at the point equidistant from the face of the web to the edge
of the flange)
:param float t_w: Web thickness of the Tapered Flange I-section
:param float r_r: Root radius of the Tapered Flange I-section
:param float r_f: Flange radius of the Tapered Flange I-section
:param float alpha: Flange angle of the Tapered Flange I-section (degrees)
:param int n_r: Number of points discretising the radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Tapered Flange I-section with a depth of
588, a width of 191, a mid-flange thickness of 27.2, a web thickness of
15.2, a root radius of 17.8, a flange radius of 8.9 and a flange angle of
8°, using 16 points to discretise the radii. A mesh is generated with a
maximum triangular area of 20.0::
import sectionproperties.pre.sections as sections
geometry = sections.TaperedFlangeISection(d=588, b=191, t_f=27.2,
t_w=15.2, r_r=17.8, r_f=8.9, alpha=8, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[20.0])
.. figure:: ../images/sections/taperedisection_geometry.png
:align: center
:scale: 75 %
I-section geometry.
.. figure:: ../images/sections/taperedisection_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r_r, r_f, alpha, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[b * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate alpha in radians
alpha_rad = np.pi * alpha / 180
# calculate the height of the flange toe and dimensions of the straight
x1 = b * 0.25 - t_w * 0.25 - r_f * (1 - np.sin(alpha_rad))
y1 = x1 * np.tan(alpha_rad)
x2 = b * 0.25 - t_w * 0.25 - r_r * (1 - np.sin(alpha_rad))
y2 = x2 * np.tan(alpha_rad)
y_t = t_f - y1 - r_f * np.cos(alpha_rad)
# add first two points
self.points.append([0, 0])
self.points.append([b, 0])
# construct the bottom right flange toe radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom right root radius
for i in range(n_r):
# determine polar angle
theta = (3.0 / 2 * np.pi - alpha_rad) - (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r_r + r_r * np.cos(theta)
y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right root radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r_r + r_r * np.cos(theta)
y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right flange toe radius
for i in range(n_r):
# determine polar angle
theta = (3.0 * np.pi / 2 + alpha_rad) + i * 1.0 / max(
1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = d - y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next two points
self.points.append([b, d])
self.points.append([0, d])
# construct the top left flange toe radius
for i in range(n_r):
# determine polar angle
theta = np.pi + (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = r_f + r_f * np.cos(theta)
y = d - y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top left root radius
for i in range(n_r):
# determine polar angle
theta = (np.pi * 0.5 - alpha_rad) - (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r_r + r_r * np.cos(theta)
y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left root radius
for i in range(n_r):
# determine polar angle
theta = -i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r_r + r_r * np.cos(theta)
y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left flange toe radius
for i in range(n_r):
# determine polar angle
theta = (np.pi * 0.5 + alpha_rad) + (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = r_f + r_f * np.cos(theta)
y = y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class PfcSection(Geometry):
"""Constructs a PFC section with the bottom left corner at the origin
*(0, 0)*, with depth *d*, width *b*, flange thickness *t_f*, web thickness
*t_w* and root radius *r*, using *n_r* points to construct the root radius.
:param float d: Depth of the PFC section
:param float b: Width of the PFC section
:param float t_f: Flange thickness of the PFC section
:param float t_w: Web thickness of the PFC section
:param float r: Root radius of the PFC section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a PFC section with a depth of 250, a width of
90, a flange thickness of 15, a web thickness of 8 and a root radius of
12, using 8 points to discretise the root radius. A mesh is generated
with a maximum triangular area of 5.0::
import sectionproperties.pre.sections as sections
geometry = sections.PfcSection(d=250, b=90, t_f=15, t_w=8, r=12, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[5.0])
.. figure:: ../images/sections/pfc_geometry.png
:align: center
:scale: 75 %
PFC geometry.
.. figure:: ../images/sections/pfc_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):
"""Inits the PfcSection class."""
# assign control point
control_points = [[t_w * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# add first three points
self.points.append([0, 0])
self.points.append([b, 0])
self.points.append([b, t_f])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 1.0 / 3)
# calculate the locations of the radius points
x = t_w + r + r * np.cos(theta)
y = t_f + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 0.5)
# calculate the locations of the radius points
x = t_w + r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add last three points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class TaperedFlangeChannel(Geometry):
"""Constructs a Tapered Flange Channel section with the bottom left corner
at the origin *(0, 0)*, with depth *d*, width *b*, mid-flange thickness
*t_f*, web thickness *t_w*, root radius *r_r*, flange radius *r_f* and
flange angle *alpha*, using *n_r* points to construct the radii.
:param float d: Depth of the Tapered Flange Channel section
:param float b: Width of the Tapered Flange Channel section
:param float t_f: Mid-flange thickness of the Tapered Flange Channel
section (measured at the point equidistant from the face of the web to
the edge of the flange)
:param float t_w: Web thickness of the Tapered Flange Channel section
:param float r_r: Root radius of the Tapered Flange Channel section
:param float r_f: Flange radius of the Tapered Flange Channel section
:param float alpha: Flange angle of the Tapered Flange Channel section
(degrees)
:param int n_r: Number of points discretising the radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Tapered Flange Channel section with a depth
of 10, a width of 3.5, a mid-flange thickness of 0.575, a web thickness of
0.475, a root radius of 0.575, a flange radius of 0.4 and a flange angle of
8°, using 16 points to discretise the radii. A mesh is generated with a
maximum triangular area of 0.02::
import sectionproperties.pre.sections as sections
geometry = sections.TaperedFlangeChannel(d=10, b=3.5, t_f=0.575,
t_w=0.475, r_r=0.575, r_f=0.4, alpha=8, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[0.02])
.. figure:: ../images/sections/taperedchannel_geometry.png
:align: center
:scale: 75 %
Tapered flange channel geometry.
.. figure:: ../images/sections/taperedchannel_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r_r, r_f, alpha, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[t_w * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate alpha in radians
alpha_rad = np.pi * alpha / 180
# calculate the height of the flange toe and dimensions of the straight
x1 = b * 0.5 - t_w * 0.5 - r_f * (1 - np.sin(alpha_rad))
y1 = x1 * np.tan(alpha_rad)
x2 = b * 0.5 - t_w * 0.5 - r_r * (1 - np.sin(alpha_rad))
y2 = x2 * np.tan(alpha_rad)
y_t = t_f - y1 - r_f * np.cos(alpha_rad)
# add first two points
self.points.append([0, 0])
self.points.append([b, 0])
# construct the bottom right flange toe radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom right root radius
for i in range(n_r):
# determine polar angle
theta = (3.0 / 2 * np.pi - alpha_rad) - (
i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = t_w + r_r + r_r * np.cos(theta)
y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right root radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = t_w + r_r + r_r * np.cos(theta)
y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right flange toe radius
for i in range(n_r):
# determine polar angle
theta = (3.0 * np.pi / 2 + alpha_rad) + i * 1.0 / max(
1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = d - y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the final two points
self.points.append([b, d])
self.points.append([0, d])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class TeeSection(Geometry):
"""Constructs a Tee section with the top left corner at *(0, d)*, with
depth *d*, width *b*, flange thickness *t_f*, web thickness *t_w* and root
radius *r*, using *n_r* points to construct the root radius.
:param float d: Depth of the Tee section
:param float b: Width of the Tee section
:param float t_f: Flange thickness of the Tee section
:param float t_w: Web thickness of the Tee section
:param float r: Root radius of the Tee section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Tee section with a depth of 200, a width of
100, a flange thickness of 12, a web thickness of 6 and a root radius of
8, using 8 points to discretise the root radius. A mesh is generated
with a maximum triangular area of 3.0::
import sectionproperties.pre.sections as sections
geometry = sections.TeeSection(d=200, b=100, t_f=12, t_w=6, r=8, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[3.0])
.. figure:: ../images/sections/tee_geometry.png
:align: center
:scale: 75 %
Tee section geometry.
.. figure:: ../images/sections/tee_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):
"""Inits the TeeSection class."""
# assign control point
control_points = [[b * 0.5, d - t_f * 0.5]]
super().__init__(control_points, shift)
# add first two points
self.points.append([b * 0.5 - t_w * 0.5, 0])
self.points.append([b * 0.5 + t_w * 0.5, 0])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 0.5)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add next four points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
self.points.append([0, d - t_f])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 * (1 - i * 1.0 / max(1, n_r - 1))
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r + r * np.cos(theta)
y = d - t_f - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class AngleSection(Geometry):
"""Constructs an angle section with the bottom left corner at the origin
*(0, 0)*, with depth *d*, width *b*, thickness *t*, root radius *r_r* and
toe radius *r_t*, using *n_r* points to construct the radii.
:param float d: Depth of the angle section
:param float b: Width of the angle section
:param float t: Thickness of the angle section
:param float r_r: Root radius of the angle section
:param float r_t: Toe radius of the angle section
:param int n_r: Number of points discretising the radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an angle section with a depth of 150, a width
of 100, a thickness of 8, a root radius of 12 and a toe radius of 5, using
16 points to discretise the radii. A mesh is generated with a maximum
triangular area of 2.0::
import sectionproperties.pre.sections as sections
geometry = sections.AngleSection(d=150, b=100, t=8, r_r=12, r_t=5, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[2.0])
.. figure:: ../images/sections/angle_geometry.png
:align: center
:scale: 75 %
Angle section geometry.
.. figure:: ../images/sections/angle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t, r_r, r_t, n_r, shift=[0, 0]):
"""Inits the AngleSection class."""
# assign control point
control_points = [[t * 0.5, t * 0.5]]
super().__init__(control_points, shift)
# add first two points
self.points.append([0, 0])
self.points.append([b, 0])
# construct the bottom toe radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = b - r_t + r_t * np.cos(theta)
y = t - r_t + r_t * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the root radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi * (1 - i * 1.0 / max(1, n_r - 1) * 1.0 / 3)
# calculate the locations of the radius points
x = t + r_r + r_r * np.cos(theta)
y = t + r_r + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top toe radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = t - r_t + r_t * np.cos(theta)
y = d - r_t + r_t * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next point
self.points.append([0, d])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class CeeSection(Geometry):
"""Constructs a Cee section with the bottom left corner at the origin
*(0, 0)*, with depth *d*, width *b*, lip *l*, thickness *t* and outer
radius *r_out*, using *n_r* points to construct the radius.
:param float d: Depth of the Cee section
:param float b: Width of the Cee section
:param float l: Lip of the Cee section
:param float t: Thickness of the Cee section
:param float r_out: Outer radius of the Cee section
:param int n_r: Number of points discretising the outer radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Cee section with a depth of 125, a width
of 50, a lip of 30, a thickness of 1.5 and an outer radius of 6, using 8
points to discretise the radius. A mesh is generated with a maximum
triangular area of 0.25::
import sectionproperties.pre.sections as sections
geometry = sections.CeeSection(d=125, b=50, l=30, t=1.5, r_out=6, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[0.25])
.. figure:: ../images/sections/cee_geometry.png
:align: center
:scale: 75 %
Cee section geometry.
.. figure:: ../images/sections/cee_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, l, t, r_out, n_r, shift=[0, 0]):
"""Inits the CeeSection class."""
# assign control point
control_points = [[t * 0.5, d * 0.5]]
super().__init__(control_points, shift)
r_in = r_out - t # calculate internal radius
# construct the outer bottom left radius
for i in range(n_r):
# determine polar angle
theta = np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# construct the outer bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b - r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# add next two points
self.points.append([b, l])
self.points.append([b - t, l])
# construct the inner bottom right radius
for i in range(n_r):
# determine polar angle
theta = 2 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = b - r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner bottom left radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner top right radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = b - r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# add next two points
self.points.append([b - t, d - l])
self.points.append([b, d - l])
# construct the outer top right radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b - r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# construct the outer top left radius
for i in range(n_r):
# determine polar angle
theta = 0.5 * np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class ZedSection(Geometry):
"""Constructs a Zed section with the bottom left corner at the origin
*(0, 0)*, with depth *d*, left flange width *b_l*, right flange width
*b_r*, lip *l*, thickness *t* and outer radius *r_out*, using *n_r* points
to construct the radius.
:param float d: Depth of the Zed section
:param float b_l: Left flange width of the Zed section
:param float b_r: Right flange width of the Zed section
:param float l: Lip of the Zed section
:param float t: Thickness of the Zed section
:param float r_out: Outer radius of the Zed section
:param int n_r: Number of points discretising the outer radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Zed section with a depth of 100, a left
flange width of 40, a right flange width of 50, a lip of 20, a thickness of
1.2 and an outer radius of 5, using 8 points to discretise the radius.
A mesh is generated with a maximum triangular area of 0.15::
import sectionproperties.pre.sections as sections
geometry = sections.ZedSection(d=100, b_l=40, b_r=50, l=20, t=1.2, r_out=5, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[0.15])
.. figure:: ../images/sections/zed_geometry.png
:align: center
:scale: 75 %
Zed section geometry.
.. figure:: ../images/sections/zed_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b_l, b_r, l, t, r_out, n_r, shift=[0, 0]):
"""Inits the ZedSection class."""
# assign control point
control_points = [[t * 0.5, d * 0.5]]
super().__init__(control_points, shift)
r_in = r_out - t # calculate internal radius
# construct the outer bottom left radius
for i in range(n_r):
# determine polar angle
theta = np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# construct the outer bottom right radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = b_r - r_out + r_out * np.cos(theta)
y_outer = r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# add next two points
self.points.append([b_r, l])
self.points.append([b_r - t, l])
# construct the inner bottom right radius
for i in range(n_r):
# determine polar angle
theta = 2 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = b_r - r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner bottom left radius
for i in range(n_r):
# determine polar angle
theta = 3.0 / 2 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = r_out + r_in * np.cos(theta)
y_inner = r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the outer top right radius
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = t - r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# construct the outer top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 + i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_outer = t - b_l + r_out + r_out * np.cos(theta)
y_outer = d - r_out + r_out * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
# add the next two points
self.points.append([t - b_l, d - l])
self.points.append([t - b_l + t, d - l])
# construct the inner top left radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = t - b_l + r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# construct the inner top right radius
for i in range(n_r):
# determine polar angle
theta = 0.5 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate location of inner and outer points
x_inner = t - r_out + r_in * np.cos(theta)
y_inner = d - r_out + r_in * np.sin(theta)
# append the current points to the points list
self.points.append([x_inner, y_inner])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class CruciformSection(Geometry):
"""Constructs a cruciform section centered at the origin *(0, 0)*, with
depth *d*, width *b*, thickness *t* and root radius *r*, using *n_r* points
to construct the root radius.
:param float d: Depth of the cruciform section
:param float b: Width of the cruciform section
:param float t: Thickness of the cruciform section
:param float r: Root radius of the cruciform section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a cruciform section with a depth of 250, a
width of 175, a thickness of 12 and a root radius of 16, using 16 points to
discretise the radius. A mesh is generated with a maximum triangular area
of 5.0::
import sectionproperties.pre.sections as sections
geometry = sections.CruciformSection(d=250, b=175, t=12, r=16, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[5.0])
.. figure:: ../images/sections/cruciform_geometry.png
:align: center
:scale: 75 %
Cruciform section geometry.
.. figure:: ../images/sections/cruciform_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t, r, n_r, shift=[0, 0]):
"""Inits the CruciformSection class."""
# assign control point
control_points = [[0, 0]]
super().__init__(control_points, shift)
# add first two points
self.points.append([-t * 0.5, -d * 0.5])
self.points.append([t * 0.5, -d * 0.5])
# construct the bottom right radius
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = 0.5 * t + r + r * np.cos(theta)
y = -0.5 * t - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next two points
self.points.append([0.5 * b, -t * 0.5])
self.points.append([0.5 * b, t * 0.5])
# construct the top right radius
for i in range(n_r):
# determine polar angle
theta = 1.5 * np.pi - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = 0.5 * t + r + r * np.cos(theta)
y = 0.5 * t + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next two points
self.points.append([t * 0.5, 0.5 * d])
self.points.append([-t * 0.5, 0.5 * d])
# construct the top left radius
for i in range(n_r):
# determine polar angle
theta = -i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = -0.5 * t - r + r * np.cos(theta)
y = 0.5 * t + r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next two points
self.points.append([-0.5 * b, t * 0.5])
self.points.append([-0.5 * b, -t * 0.5])
# construct the bottom left radius
for i in range(n_r):
# determine polar angle
theta = np.pi * 0.5 - i * 1.0 / max(1, n_r - 1) * np.pi * 0.5
# calculate the locations of the radius points
x = -0.5 * t - r + r * np.cos(theta)
y = -0.5 * t - r + r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
class MergedSection(Geometry):
"""Merges a number of section geometries into one geometry. Note that for
the meshing algorithm to work, there needs to be connectivity between all
regions of the provided geometries. Overlapping of geometries is permitted.
:param sections: A list of geometry objects to merge into one
:class:`~sectionproperties.pre.sections.Geometry` object
:type sections: list[:class:`~sectionproperties.pre.sections.Geometry`]
The following example creates a combined cross-section with a 150x100x6 RHS
placed on its side on top of a 200UB25.4. A mesh is generated with a
maximum triangle size of 5.0 for the I-section and 2.5 for the RHS::
import sectionproperties.pre.sections as sections
isection = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)
box = sections.Rhs(d=100, b=150, t=6, r_out=15, n_r=8, shift=[-8.5, 203])
geometry = sections.MergedSection([isection, box])
geometry.clean_geometry()
mesh = geometry.create_mesh(mesh_sizes=[5.0, 2.5])
.. figure:: ../images/sections/merged_geometry.png
:align: center
:scale: 75 %
Merged section geometry.
.. figure:: ../images/sections/merged_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, sections):
"""Inits the MergedSection class."""
super().__init__([], [0, 0])
point_count = 0
# loop through all sections
for section in sections:
# add facets
for facet in section.facets:
self.facets.append([facet[0] + point_count,
facet[1] + point_count])
# add points and count points
for point in section.points:
self.points.append([point[0], point[1]])
point_count += 1
# add holes
for hole in section.holes:
self.holes.append([hole[0], hole[1]])
# add control points
for control_point in section.control_points:
self.control_points.append([control_point[0],
control_point[1]])
# def sectionParse(sectionTypes, sectionData, settings):
# """
# Generates the geometry for the structural cross-section to be analysed,
# defined by a number of different sectionTypes, containing various
# sectionData. Note that there must be connectivity between all sections
# (i.e. there cannot be isolated sections) or the meshing and/or
# cross-section analysis will not work.
# """
#
# # initialise output variables
# points = []
# facets = []
# holes = []
# controlPoints = []
#
# # initialise pointCount variable
# pointCount = 0
#
# # loop through each section
# for (i, section) in enumerate(sectionTypes):
# # generate new section depending on section type
# if (section == "custom"):
# # load data from current sectionData
# try:
# pointData = sectionData[i]["points"]
# facetData = sectionData[i]["facets"]
# holeData = sectionData[i]["holes"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# controlPointData = sectionData[i]["control-point"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # generate a new section
# newSection = generateCustom(pointData, facetData, holeData, x, y,
# controlPointData)
#
# elif (section == "rectangle"):
# try:
# # load data from current sectionData
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateRectangle(d, b, x, y, controlPointData)
#
# elif (section == "circle"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# n = sectionData[i]["n"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateCircle(d, n, x, y, controlPointData)
#
# elif (section == "chs"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# t = sectionData[i]["t"]
# n = sectionData[i]["n"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateCHS(d, t, n, x, y, controlPointData)
#
# elif (section == "rhs"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# t = sectionData[i]["t"]
# r_out = sectionData[i]["r_out"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateRHS(
# d, b, t, r_out, n_r, x, y, controlPointData)
#
# elif (section == "i-section"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# tf = sectionData[i]["tf"]
# tw = sectionData[i]["tw"]
# r = sectionData[i]["r"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateISection(
# d, b, tf, tw, r, n_r, x, y, controlPointData)
#
# elif (section == "pfc"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# tf = sectionData[i]["tf"]
# tw = sectionData[i]["tw"]
# r = sectionData[i]["r"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generatePFCSection(
# d, b, tf, tw, r, n_r, x, y, controlPointData)
#
# elif (section == "tee"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# tf = sectionData[i]["tf"]
# tw = sectionData[i]["tw"]
# r = sectionData[i]["r"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateTeeSection(
# d, b, tf, tw, r, n_r, x, y, controlPointData)
#
# elif (section == "angle"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# t = sectionData[i]["t"]
# r_root = sectionData[i]["r_root"]
# r_toe = sectionData[i]["r_toe"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateAngleSection(
# d, b, t, r_root, r_toe, n_r, x, y, controlPointData)
#
# elif (section == "cee"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# lip = sectionData[i]["l"]
# t = sectionData[i]["t"]
# r_out = sectionData[i]["r_out"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateCeeSection(
# d, b, lip, t, r_out, n_r, x, y, controlPointData)
#
# elif (section == "zed"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b1 = sectionData[i]["b1"]
# b2 = sectionData[i]["b2"]
# lip = sectionData[i]["l"]
# t = sectionData[i]["t"]
# r_out = sectionData[i]["r_out"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateZedSection(
# d, b1, b2, lip, t, r_out, n_r, x, y, controlPointData)
#
# elif (section == "cruciform"):
# # load data from current sectionData
# try:
# d = sectionData[i]["d"]
# b = sectionData[i]["b"]
# t = sectionData[i]["t"]
# r = sectionData[i]["r"]
# n_r = sectionData[i]["n_r"]
# x = sectionData[i]["x"]
# y = sectionData[i]["y"]
# except KeyError as err:
# handleKeyError(err, section)
#
# # if there is a control-point, load it
# try:
# controlPointData = sectionData[i]["control-point"]
# # if there is no control-point, set it to None
# except KeyError:
# controlPointData = None
#
# # generate a new section
# newSection = generateCruciform(
# d, b, t, r, n_r, x, y, controlPointData)
#
# else:
# print("Error: section type '{}' is not defined.".format(section))
# quit()
#
# # get points, facets, holes and controlpoint from newSection
# (newPoints, newFacets, newHoles,
# newControlPoint) = newSection.returnSection()
#
# # loop through the facets in the newSection and append to the list
# for f in newFacets:
# facets.append([f[0] + pointCount, f[1] + pointCount])
#
# # loop through the points in the newSection and append to the list
# for p in newPoints:
# pointCount += 1
# points.append([p[0], p[1]])
#
# # loop through the holes in the newSection and append to the list
# for h in newHoles:
# holes.append([h[0], h[1]])
#
# # append the controlPoint from the newSection
# controlPoints.append([newControlPoint[0], newControlPoint[1]])
#
# if (settings.outputLog):
# print("-- Loaded {0} points, {1} facets and {2} holes ".format(
# len(points), len(facets), len(holes)) +
# "from {0} sections.".format(len(sectionTypes)))
#
# return (points, facets, holes, controlPoints)
#
#
# def handleKeyError(err, section):
# """
# Displays an error message if the correct keys are not provided for the
# current section and quits the program.
# """
#
# print(
# "Error: Required key {0} not found for section type '{1}'.".format(
# err, section) +
# " Refer to the documentation for the required keys.")
# quit()
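# A hedged sketch, inferred only from the key lookups above (not part of any
# documented API), of the legacy sectionTypes/sectionData inputs that this
# commented-out parser expected; the optional "control-point" key may be
# omitted for the standard shapes:
#
# sectionTypes = ["i-section", "rhs"]
# sectionData = [
#     {"d": 203, "b": 133, "tf": 7.8, "tw": 5.8, "r": 8.9, "n_r": 8, "x": 0, "y": 0},
#     {"d": 100, "b": 150, "t": 6, "r_out": 15, "n_r": 8, "x": -8.5, "y": 203},
# ]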
|
from ipykernel.kernelbase import Kernel as KernelBase
from sorna.kernel import Kernel
from sorna.exceptions import SornaAPIError
class SornaKernelBase(KernelBase):
# ref: https://github.com/ipython/ipykernel/blob/master/ipykernel/kernelbase.py
implementation = 'Sorna'
implementation_version = '1.0'
language = 'python'
language_version = '3'
language_info = {
'name': 'Sorna (base)',
'mimetype': 'text/x-python3',
'file_extension': '.py',
}
banner = 'Sorna (base)'
sorna_lang = 'python3'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.log.debug(f'__init__: {self.ident}')
self.kernel = Kernel.get_or_create(self.sorna_lang, self.ident)
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
self.log.debug('do_execute')
self._allow_stdin = allow_stdin
while True:
result = self.kernel.execute(code, mode='query')
if not silent:
for item in result['console']:
if item[0] == 'stdout':
self.send_response(self.iopub_socket, 'stream', {
'name': 'stdout',
'text': item[1],
})
elif item[0] == 'stderr':
self.send_response(self.iopub_socket, 'stream', {
'name': 'stderr',
'text': item[1],
})
elif item[0] == 'media':
self.send_response(self.iopub_socket, 'display_data', {
'source': '<user-code>',
'data': { item[1][0]: item[1][1] },
})
elif item[0] == 'html':
self.send_response(self.iopub_socket, 'display_data', {
'source': '<user-code>',
'data': { 'text/html': item[1] },
})
if result['status'] == 'finished':
break
elif result['status'] == 'waiting-input':
if allow_stdin:
code = self.raw_input('')
else:
code = '(user input not allowed)'
elif result['status'] == 'continued':
code = ''
return {
'status': 'ok',
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {},
}
def do_shutdown(self, restart=False):
self.log.debug('do_shutdown')
if restart:
self.kernel.restart()
else:
try:
self.kernel.destroy()
except SornaAPIError as e:
if e.args[0] == 404:
pass
else:
self.log.exception()
except:
self.log.exception()
class SornaPythonKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'Python 3 on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (Python 3)'
sorna_lang = 'python3'
class SornaPythonTensorFlowKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'TensorFlow (Python 3, CPU) on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (TensorFlow with Python 3)'
sorna_lang = 'python3-tensorflow'
class SornaPythonTorchKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'PyTorch (Python 3, CPU) on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (PyTorch with Python 3)'
sorna_lang = 'python3-torch'
class SornaPythonTorchGPUKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'PyTorch (Python 3, GPU) on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (GPU-accelerated PyTorch with Python 3)'
sorna_lang = 'python3-torch-gpu'
class SornaPythonTensorFlowGPUKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'TensorFlow (Python 3, GPU) on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (GPU-accelerated TensorFlow with Python 3)'
sorna_lang = 'python3-tensorflow-gpu'
class SornaJavascriptKernel(SornaKernelBase):
language = 'javascript'
language_version = '6'
language_info = {
'name': 'Javascript (NodeJS 6) on Sorna',
'mimetype': 'text/javascript',
'file_extension': '.js',
'codemirror_mode': 'javascript',
}
banner = 'Sorna (NodeJS 6)'
sorna_lang = 'nodejs6'
class SornaPHPKernel(SornaKernelBase):
language = 'php'
language_version = '7'
language_info = {
'name': 'PHP 7 on Sorna',
'mimetype': 'text/x-php',
'file_extension': '.php',
'codemirror_mode': 'php',
}
banner = 'Sorna (PHP 7)'
sorna_lang = 'php7'
class SornaJuliaKernel(SornaKernelBase):
language = 'julia'
language_version = '0.5'
language_info = {
'name': 'Julia 0.5 on Sorna',
'mimetype': 'text/x-julia',
'file_extension': '.jl',
'codemirror_mode': 'julia',
}
banner = 'Sorna (Julia 0.5)'
sorna_lang = 'julia'
class SornaRKernel(SornaKernelBase):
language = 'r'
language_version = '3'
language_info = {
'name': 'R 3 on Sorna',
'mimetype': 'text/x-r-source',
'file_extension': '.R',
'codemirror_mode': 'Rscript',
}
banner = 'Sorna (R 3)'
sorna_lang = 'r3'
class SornaLuaKernel(SornaKernelBase):
language = 'lua'
language_version = '5.3'
language_info = {
'name': 'Lua 5.3 on Sorna',
'mimetype': 'text/x-lua',
'file_extension': '.lua',
'codemirror_mode': 'lua',
}
banner = 'Sorna (Lua 5.3)'
sorna_lang = 'lua5'
sorna_kernels = [
SornaPythonKernel,
SornaPythonTorchKernel,
SornaPythonTorchGPUKernel,
SornaPythonTensorFlowKernel,
SornaPythonTensorFlowGPUKernel,
SornaJavascriptKernel,
SornaPHPKernel,
SornaJuliaKernel,
SornaRKernel,
SornaLuaKernel,
]
Fix misuse of Python logging API.
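For context, a minimal sketch of the misuse this change addresses: Logger.exception() in the standard logging module takes a required message argument and appends the current traceback, so calling it with no arguments raises a TypeError instead of logging anything.
import logging

logger = logging.getLogger(__name__)

try:
    raise RuntimeError("boom")
except RuntimeError:
    # logger.exception() with no arguments fails with
    # "TypeError: exception() missing 1 required positional argument: 'msg'";
    # passing a message logs it at ERROR level together with the traceback
    logger.exception("Sorna API Error")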
from ipykernel.kernelbase import Kernel as KernelBase
from sorna.kernel import Kernel
from sorna.exceptions import SornaAPIError
class SornaKernelBase(KernelBase):
# ref: https://github.com/ipython/ipykernel/blob/master/ipykernel/kernelbase.py
implementation = 'Sorna'
implementation_version = '1.0'
language = 'python'
language_version = '3'
language_info = {
'name': 'Sorna (base)',
'mimetype': 'text/x-python3',
'file_extension': '.py',
}
banner = 'Sorna (base)'
sorna_lang = 'python3'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.log.debug(f'__init__: {self.ident}')
self.kernel = Kernel.get_or_create(self.sorna_lang, self.ident)
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
self.log.debug('do_execute')
self._allow_stdin = allow_stdin
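# The Sorna execution API is polled in a loop: each kernel.execute() call
# returns console items plus a status of 'finished' (stop polling),
# 'waiting-input' (the remote process is blocked on stdin, so the next call
# sends the user's input as `code`), or 'continued' (more output is pending,
# so the next call sends an empty string).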
while True:
result = self.kernel.execute(code, mode='query')
if not silent:
for item in result['console']:
if item[0] == 'stdout':
self.send_response(self.iopub_socket, 'stream', {
'name': 'stdout',
'text': item[1],
})
elif item[0] == 'stderr':
self.send_response(self.iopub_socket, 'stream', {
'name': 'stderr',
'text': item[1],
})
elif item[0] == 'media':
self.send_response(self.iopub_socket, 'display_data', {
'source': '<user-code>',
'data': { item[1][0]: item[1][1] },
})
elif item[0] == 'html':
self.send_response(self.iopub_socket, 'display_data', {
'source': '<user-code>',
'data': { 'text/html': item[1] },
})
if result['status'] == 'finished':
break
elif result['status'] == 'waiting-input':
if allow_stdin:
code = self.raw_input('')
else:
code = '(user input not allowed)'
elif result['status'] == 'continued':
code = ''
return {
'status': 'ok',
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {},
}
def do_shutdown(self, restart=False):
self.log.debug('do_shutdown')
if restart:
self.kernel.restart()
else:
try:
self.kernel.destroy()
except SornaAPIError as e:
if e.args[0] == 404:
pass
else:
self.log.exception('Sorna API Error')
except:
self.log.exception('Sorna API Error')
class SornaPythonKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'Python 3 on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (Python 3)'
sorna_lang = 'python3'
class SornaPythonTensorFlowKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'TensorFlow (Python 3, CPU) on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (TensorFlow with Python 3)'
sorna_lang = 'python3-tensorflow'
class SornaPythonTorchKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'PyTorch (Python 3, CPU) on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (PyTorch with Python 3)'
sorna_lang = 'python3-torch'
class SornaPythonTorchGPUKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'PyTorch (Python 3, GPU) on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (GPU-accelerated PyTorch with Python 3)'
sorna_lang = 'python3-torch-gpu'
class SornaPythonTensorFlowGPUKernel(SornaKernelBase):
language = 'python'
language_version = '3'
language_info = {
'name': 'TensorFlow (Python 3, GPU) on Sorna',
'mimetype': 'text/x-python3',
'file_extension': '.py',
'codemirror_mode': 'python',
}
banner = 'Sorna (GPU-accelerated TensorFlow with Python 3)'
sorna_lang = 'python3-tensorflow-gpu'
class SornaJavascriptKernel(SornaKernelBase):
language = 'javascript'
language_version = '6'
language_info = {
'name': 'Javascript (NodeJS 6) on Sorna',
'mimetype': 'text/javascript',
'file_extension': '.js',
'codemirror_mode': 'javascript',
}
banner = 'Sorna (NodeJS 6)'
sorna_lang = 'nodejs6'
class SornaPHPKernel(SornaKernelBase):
language = 'php'
language_version = '7'
language_info = {
'name': 'PHP 7 on Sorna',
'mimetype': 'text/x-php',
'file_extension': '.php',
'codemirror_mode': 'php',
}
banner = 'Sorna (PHP 7)'
sorna_lang = 'php7'
class SornaJuliaKernel(SornaKernelBase):
language = 'julia'
language_version = '0.5'
language_info = {
'name': 'Julia 0.5 on Sorna',
'mimetype': 'text/x-julia',
'file_extension': '.jl',
'codemirror_mode': 'julia',
}
banner = 'Sorna (Julia 0.5)'
sorna_lang = 'julia'
class SornaRKernel(SornaKernelBase):
language = 'r'
language_version = '3'
language_info = {
'name': 'R 3 on Sorna',
'mimetype': 'text/x-r-source',
'file_extension': '.R',
'codemirror_mode': 'Rscript',
}
banner = 'Sorna (R 3)'
sorna_lang = 'r3'
class SornaLuaKernel(SornaKernelBase):
language = 'lua'
language_version = '5.3'
language_info = {
'name': 'Lua 5.3 on Sorna',
'mimetype': 'text/x-lua',
'file_extension': '.lua',
'codemirror_mode': 'lua',
}
banner = 'Sorna (Lua 5.3)'
sorna_lang = 'lua5'
sorna_kernels = [
SornaPythonKernel,
SornaPythonTorchKernel,
SornaPythonTorchGPUKernel,
SornaPythonTensorFlowKernel,
SornaPythonTensorFlowGPUKernel,
SornaJavascriptKernel,
SornaPHPKernel,
SornaJuliaKernel,
SornaRKernel,
SornaLuaKernel,
]
|
import logging
from abc import abstractmethod
from aoi import Aoi
from image_spec import ImageSpec
from mosaic import Mosaic
class MosaicSpec(ImageSpec):
def __init__(self, spec):
super(MosaicSpec, self).__init__()
self.spec = spec
self.aoi = Aoi.create(spec['aoi'])
self.target_day = int(spec.get('targetDayOfYear', 1))
self.target_day_weight = float(spec.get('targetDayOfYearWeight', 0))
self.shadow_tolerance = float(spec.get('shadowTolerance', 1))
self.haze_tolerance = float(spec.get('hazeTolerance', 0.05))
self.greenness_weight = float(spec.get('greennessWeight', 0))
self.bands = spec.get('bands', [])
self.median_composite = spec.get('median_composite', False)
self.mask_clouds = spec.get('maskClouds', False)
self.mask_snow = spec.get('maskSnow', False)
self.brdf_correct = bool(spec.get('brdfCorrect', False))
self.from_date = spec.get('fromDate', None)
self.to_date = spec.get('toDate', None)
self.surface_reflectance = spec.get('surfaceReflectance', False)
self.masked_on_analysis = self.surface_reflectance
def _viz_params(self):
return _viz_by_bands[', '.join(self.bands)]({
'from_days_since_epoch': self.from_date / _milis_per_day,
'to_days_since_epoch': self.to_date / _milis_per_day
})
def _ee_image(self):
logging.info('Creating mosaic of ' + str(self))
return Mosaic(self).create(self._data_sets())
@abstractmethod
def _data_sets(self):
raise AssertionError('Method in subclass expected to have been invoked')
_viz_by_bands = {
'red, green, blue': lambda params: {'bands': 'red, green, blue', 'min': '200, 400, 600', 'max': '2400, 2200, 2400',
'gamma': 1.2},
'nir, red, green': lambda params: {'bands': 'nir, red, green', 'min': '500, 200, 400', 'max': '5000, 2400, 2200'},
'nir, swir1, red': lambda params: {'bands': 'nir, swir1, red', 'min': 0, 'max': 5000, 'gamma': 1.5},
'swir2, nir, red': lambda params: {'bands': 'swir2, nir, red', 'min': '0, 500, 200', 'max': '1800, 6000, 3500'},
'swir2, swir1, red': lambda params: {'bands': 'swir2, swir1, red', 'min': '0, 500, 200', 'max': '1800, 3000, 2400'},
'swir2, nir, green': lambda params: {'bands': 'swir2, nir, green', 'min': '0, 500, 400', 'max': '1800, 6000, 3500'},
'unixTimeDays': lambda params: {
'bands': 'unixTimeDays',
'min': params['from_days_since_epoch'],
'max': params['to_days_since_epoch'],
'palette': '00FFFF, 000099'
},
'dayOfYear': lambda params: {
'bands': 'dayOfYear',
'min': 0,
'max': 183,
'palette': '00FFFF, 000099'
},
'daysFromTarget': lambda params: {
'bands': 'daysFromTarget',
'min': 0,
'max': 183,
'palette': '00FF00, FF0000'
},
}
_milis_per_day = 1000 * 60 * 60 * 24
Reformatted code.
import logging
from abc import abstractmethod
from aoi import Aoi
from image_spec import ImageSpec
from mosaic import Mosaic
class MosaicSpec(ImageSpec):
def __init__(self, spec):
super(MosaicSpec, self).__init__()
self.spec = spec
self.aoi = Aoi.create(spec['aoi'])
self.target_day = int(spec.get('targetDayOfYear', 1))
self.target_day_weight = float(spec.get('targetDayOfYearWeight', 0))
self.shadow_tolerance = float(spec.get('shadowTolerance', 1))
self.haze_tolerance = float(spec.get('hazeTolerance', 0.05))
self.greenness_weight = float(spec.get('greennessWeight', 0))
self.bands = spec.get('bands', [])
self.median_composite = spec.get('median_composite', False)
self.mask_clouds = spec.get('maskClouds', False)
self.mask_snow = spec.get('maskSnow', False)
self.brdf_correct = bool(spec.get('brdfCorrect', False))
self.from_date = spec.get('fromDate', None)
self.to_date = spec.get('toDate', None)
self.surface_reflectance = spec.get('surfaceReflectance', False)
self.masked_on_analysis = self.surface_reflectance
def _viz_params(self):
return _viz_by_bands[', '.join(self.bands)]({
'from_days_since_epoch': self.from_date / _milis_per_day,
'to_days_since_epoch': self.to_date / _milis_per_day
})
def _ee_image(self):
logging.info('Creating mosaic of ' + str(self))
return Mosaic(self).create(self._data_sets())
@abstractmethod
def _data_sets(self):
raise AssertionError('Method in subclass expected to have been invoked')
_viz_by_bands = {
'red, green, blue': lambda params: {
'bands': 'red, green, blue',
'min': '200, 400, 600',
'max': '2400, 2200, 2400',
'gamma': 1.2},
'nir, red, green': lambda params: {
'bands': 'nir, red, green',
'min': '500, 200, 400',
'max': '5000, 2400, 2200'},
'nir, swir1, red': lambda params: {
'bands': 'nir, swir1, red',
'min': 0,
'max': 5000,
'gamma': 1.5},
'swir2, nir, red': lambda params: {
'bands': 'swir2, nir, red',
'min': '0, 500, 200',
'max': '1800, 6000, 3500'},
'swir2, swir1, red': lambda params: {
'bands': 'swir2, swir1, red',
'min': '0, 500, 200',
'max': '1800, 3000, 2400'},
'swir2, nir, green': lambda params: {
'bands': 'swir2, nir, green',
'min': '0, 500, 400',
'max': '1800, 6000, 3500'},
'unixTimeDays': lambda params: {
'bands': 'unixTimeDays',
'min': params['from_days_since_epoch'],
'max': params['to_days_since_epoch'],
'palette': '00FFFF, 000099'
},
'dayOfYear': lambda params: {
'bands': 'dayOfYear',
'min': 0,
'max': 183,
'palette': '00FFFF, 000099'
},
'daysFromTarget': lambda params: {
'bands': 'daysFromTarget',
'min': 0,
'max': 183,
'palette': '00FF00, FF0000'
},
}
_milis_per_day = 1000 * 60 * 60 * 24
|
import tweepy
import logging
from datetime import datetime, timezone
from bot.lib.statusdb import Addstatus
from community.helpers import (
get_community_member_id,
get_community_twitter_tweepy_api,
)
from conversation.models import TwitterUserTimeline
from django.db.utils import DatabaseError
logger = logging.getLogger(__name__)
def community_timeline(community):
userids = get_community_member_id(community)
if not userids:
return
api = get_community_twitter_tweepy_api(community, backend=True)
for userid in userids:
try:
get_user_timeline(userid, api)
except tweepy.TweepError as e:
logger.error(f"Tweepy Error: {e}")
def get_user_timeline(userid, api):
api_call = False
first: bool = True
high_statusid: int = 0
try:
since_id = TwitterUserTimeline.objects.get(userid=userid).statusid
except TwitterUserTimeline.DoesNotExist:
since_id = None
logger.debug(f"{since_id=}")
for status in tweepy.Cursor(api.user_timeline,
user_id=userid,
count=200,
since_id=since_id,
max_id=None,
trim_user=False,
exclude_replies=False,
include_rts=True,
).items():
api_call = True
if first:
try:
tut = TwitterUserTimeline.objects.get(userid=userid)
except TwitterUserTimeline.DoesNotExist:
try:
tut = TwitterUserTimeline.objects.create(userid=userid)
except DatabaseError:
return
first = False
if status.id > high_statusid:
high_statusid = status.id
logger.debug(f"{high_statusid=}")
logger.debug(f"{status._json=}")
db = Addstatus(status._json)
db.addtweetdj()
# set TwitterUserTimeline statusid to the highest retrieved statusid
logger.debug(f"{high_statusid=}")
try:
tut = TwitterUserTimeline.objects.get(userid=userid)
tut.last_api_call = datetime.now(tz=timezone.utc)
if not tut.statusid or tut.statusid < high_statusid:
tut.statusid = high_statusid
tut.statusid_updated_at = datetime.now(tz=timezone.utc)
tut.save()
except TwitterUserTimeline.DoesNotExist:
return
fix user_timeline to get full text
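A minimal sketch of what the added tweet_mode="extended" parameter changes (tweepy 3.x API): without it each status carries a truncated text attribute, with it the untruncated body is exposed as full_text; `api` and `userid` are assumed to be set up as in the module below.
import tweepy

# `api` and `userid` as prepared by the surrounding module
for status in tweepy.Cursor(api.user_timeline,
                            user_id=userid,
                            tweet_mode="extended").items(1):
    print(status.full_text)  # untruncated tweet body instead of the 140-char `text`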
import tweepy
import logging
from datetime import datetime, timezone
from bot.lib.statusdb import Addstatus
from community.helpers import (
get_community_member_id,
get_community_twitter_tweepy_api,
)
from conversation.models import TwitterUserTimeline
from django.db.utils import DatabaseError
logger = logging.getLogger(__name__)
def community_timeline(community):
userids = get_community_member_id(community)
if not userids:
return
api = get_community_twitter_tweepy_api(community, backend=True)
for userid in userids:
try:
get_user_timeline(userid, api)
except tweepy.TweepError as e:
logger.error(f"Tweepy Error: {e}")
def get_user_timeline(userid, api):
api_call = False
first: bool = True
high_statusid: int = 0
try:
since_id = TwitterUserTimeline.objects.get(userid=userid).statusid
except TwitterUserTimeline.DoesNotExist:
since_id = None
logger.debug(f"{since_id=}")
for status in tweepy.Cursor(api.user_timeline,
user_id=userid,
count=200,
since_id=since_id,
max_id=None,
trim_user=False,
exclude_replies=False,
include_rts=True,
tweet_mode="extended"
).items():
api_call = True
if first:
try:
tut = TwitterUserTimeline.objects.get(userid=userid)
except TwitterUserTimeline.DoesNotExist:
try:
tut = TwitterUserTimeline.objects.create(userid=userid)
except DatabaseError:
return
first = False
if status.id > high_statusid:
high_statusid = status.id
logger.debug(f"{high_statusid=}")
logger.debug(f"{status._json=}")
db = Addstatus(status._json)
db.addtweetdj()
# set TwitterUserTimeline statusid to the highest retrieved statusid
logger.debug(f"{high_statusid=}")
try:
tut = TwitterUserTimeline.objects.get(userid=userid)
tut.last_api_call = datetime.now(tz=timezone.utc)
if not tut.statusid or tut.statusid < high_statusid:
tut.statusid = high_statusid
tut.statusid_updated_at = datetime.now(tz=timezone.utc)
tut.save()
except TwitterUserTimeline.DoesNotExist:
return |
# coding: utf-8
# Copyright 2014 David BEAL @ Akretion <david.beal@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
from openerp.exceptions import Warning as UserError
from openerp.tools.config import config
def _get_authorized_password():
""" You can define your own authorized keys
"""
return [config.get("secure_uninstall"), config.get("admin_passwd")]
class BaseModuleUpgrade(models.TransientModel):
_inherit = 'base.module.upgrade'
uninstall_password = fields.Char(
string='Password',
help="'secure_uninstall' value from Odoo configuration file ")
@api.multi
def upgrade_module(self):
for elm in self:
if not config.get("secure_uninstall"):
raise UserError(
"Missing configuration key\n--------------------\n"
"'secure_uninstall' configuration key "
"is not set in \n"
"your Odoo server configuration file: "
"please set it a value")
if elm.uninstall_password not in _get_authorized_password():
raise UserError(
"Password Error\n--------------------\n"
"Provided password '%s' doesn't match with "
"'Master Password'\n('secure_uninstall' key) found in "
"the Odoo server configuration file ."
"\n\nResolution\n-------------\n"
"Please check your password and retry or cancel"
% elm.uninstall_password)
# keep this password in db is insecure, then we remove it
elm.uninstall_password = False
return super(BaseModuleUpgrade, self).upgrade_module()
[FIX] add _() on strings on secure_uninstall
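For context, a minimal sketch (assuming the standard Odoo 8/9 translation machinery) of what wrapping these messages in _() does: the marked string is looked up at runtime in the translation catalogue for the active user's language and falls back to the source text when no translation exists, so the raised UserError is shown translated.
from openerp import _
from openerp.exceptions import Warning as UserError

# _() resolves the string against the loaded .po catalogue for the current
# language; untranslated strings are returned unchanged
raise UserError(_("Password Error: the provided password does not match"))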
# coding: utf-8
# Copyright 2014 David BEAL @ Akretion <david.beal@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import _, models, fields, api
from openerp.exceptions import Warning as UserError
from openerp.tools.config import config
def _get_authorized_password():
""" You can define your own authorized keys
"""
return [config.get("secure_uninstall"), config.get("admin_passwd")]
class BaseModuleUpgrade(models.TransientModel):
_inherit = 'base.module.upgrade'
uninstall_password = fields.Char(
string='Password',
help="'secure_uninstall' value from Odoo configuration file ")
@api.multi
def upgrade_module(self):
for elm in self:
if not config.get("secure_uninstall"):
raise UserError(_(
"Missing configuration key\n--------------------\n"
"'secure_uninstall' configuration key "
"is not set in \n"
"your Odoo server configuration file: "
"please set it a value"))
if elm.uninstall_password not in _get_authorized_password():
raise UserError(_(
"Password Error\n--------------------\n"
"Provided password '%s' doesn't match with "
"'Master Password'\n('secure_uninstall' key) found in "
"the Odoo server configuration file ."
"\n\nResolution\n-------------\n"
"Please check your password and retry or cancel")
% elm.uninstall_password)
# keep this password in db is insecure, then we remove it
elm.uninstall_password = False
return super(BaseModuleUpgrade, self).upgrade_module()
|
from __future__ import print_function
import sys
import select
import tty
import termios
import time
import theano
import pprint
import theano.tensor as T
#import cv2
import pickle
import copy
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from theano.tensor.shared_randomstreams import RandomStreams
import sklearn
from sklearn.ensemble import ExtraTreesClassifier
from DataLoader import DataLoader
import DeepLearningStack
from DeepLearningStack import FeedForwardNet
"""
This code implements the Deep Q-Learning model for active object recognition,
described in
Malmir M, Sikka K, Forster D, Movellan J, Cottrell GW.
Deep Q-learning for Active Recognition of GERMS:
Baseline performance on a standardized dataset for active learning.
InProceedings of the British Machine Vision Conference (BMVC),
pages 2016 Apr 13 (pp. 161-1).
This code requires the following data files:
train-[ARM].pickle
test-[ARM].pickle
val-[ARM].pickle
These files contain the belief encoding of single images of GERMS,
using features obtained from VGG deep network trained on ImageNet.
Data files can be found here in VGG-Beliefs folder:
https://drive.google.com/folderview?id=0BxZOUQHBUnwmQUdWRGlPMGw4WHM&usp=sharing
The code for VGG model is obtained from:
http://www.robots.ox.ac.uk/~vgg/software/deep_eval/
"""
batch_size = 128
D = 136#number of classes
arm = "left"
#load the data
print("##################################################")
print("loading train data")
data_files = ["train-"+arm+".pickle"]
train_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
train_data.shuffle_data()
C = np.unique(train_data.y).shape[0]
#print(train_data.y == np.argmax(train_data.x,axis=1)).mean()
print("data size:",train_data.x.shape)
print("number of classes:",C)
print("number of tracks:",np.unique(train_data.t).shape[0])
print("##################################################")
print("loading validation data")
data_files = ["val-"+arm+".pickle"]
val_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
val_data.shuffle_data()
C = np.unique(val_data.y).shape[0]
val_data.adapt_labels(train_data.obj2label)#data are already unified in their labels
print("data size:",val_data.x.shape)
print("number of classes:",C)
print("number of tracks:",np.unique(val_data.t).shape[0])
print("##################################################")
print("loading test data")
data_files = ["test-"+arm+".pickle"]
test_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
test_data.adapt_labels(train_data.obj2label)#data are already unified in their labels
test_data.shuffle_data()
print("data size:",test_data.x.shape)
print("number of classes:",np.unique(test_data.y).shape[0])
print("number of tracks:",np.unique(test_data.t).shape[0])
experiment_data = dict()
#train 20 different models, report the mean average
for exp_num in range(20):
test_data.shuffle_data()
print( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print( "EXPERIMENT ", exp_num)
print( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
experiment_data[exp_num] = dict()
lr = 0.01#initial learning rate
lr_dec_step = 1000#learning rate decrease step
lr_dec_start= 0
num_actions = 10#number of actions
gamma = 0.9#RL discount factor
alpha = 0.01#stochastic approximation coefficient
R = 10.0#reward
n_moves = 5#length of object inspection sequence
n_test_moves= 5#length of inspection sequence for test objects
epsilon = 1.#for e-greedy annealing
iter_cnt = 1
epsilon_dec_step = 100
n_epochs = 50
#create deep net
print("##################################################")
print("Creating deep net...")
input = T.matrix("data",dtype=theano.config.floatX)#the input is concatenation of action history and beliefs
config = "DQLArch.xml"
rng = RandomStreams(seed=int(time.time()))
train_net = FeedForwardNet.FeedForwardNet(rng,{"input":input},config)
test_net = FeedForwardNet.FeedForwardNet(rng,{"input":input},config,clone_from=train_net)
pprint.pprint(train_net.output_dims)
print("##################################################")
print("creating cost layer...")
input_shared = theano.shared(np.zeros([D,batch_size],dtype=theano.config.floatX),borrow=True)
rot_target_shared = theano.shared(np.zeros([batch_size,],dtype=theano.config.floatX),borrow=True)
rot_index_shared = theano.shared(np.zeros([batch_size,],dtype=np.int32),borrow=True)
learning_rate = T.scalar("learning_rate",dtype=theano.config.floatX)
#target value consists of the target for rotation values and the target for sign-of-rotation values
layer_action_value = "fc3"
layer_action = "act1"
cost = T.sqrt( T.mean( (train_net.name2layer[layer_action_value].output[rot_index_shared,T.arange(batch_size)] - rot_target_shared)**2 ) )
grads = [theano.grad(cost,param) for param in train_net.params]
updates = [ (param,param-learning_rate * grad) for param,grad in zip(train_net.params,grads)]
fnx_action_selection = theano.function(inputs=[],outputs=[train_net.name2layer[layer_action].output,train_net.name2layer[layer_action_value].output,cost],
givens={
input:input_shared,
},
)
fnx_train = theano.function(inputs=[learning_rate],outputs=[train_net.name2layer[layer_action_value].output,cost],
givens={
input:input_shared,
},
updates=updates,
)
fnx_test = theano.function(inputs=[],outputs=[test_net.name2layer[layer_action].output,test_net.name2layer[layer_action_value].output],
givens={
input:input_shared,
},
)
print("##################################################")
print("classifying tracks tracks")
track_indices = dict()
for t in np.unique(test_data.t):
idx = np.where(test_data.t == t)[0]
poses = test_data.p[idx]
sorted_idx = np.argsort(poses)
track_indices[t] = idx[sorted_idx]
accuracy = np.zeros(n_test_moves)
for t in track_indices.keys():
# print"for track:",t
# print"number of frames:",len(track_indices[t])
belief = np.ones(C)
# print
for i in range(n_test_moves):
next_idx = track_indices[t][i]
belief = belief * test_data.x[next_idx,:]
belief = belief / belief.sum()
lbl = np.argmax(belief)
accuracy[i] += lbl == test_data.y[next_idx]
print("test-sequential",accuracy / np.unique(test_data.t).shape[0])
seq_acc = accuracy / np.unique(test_data.t).shape[0]
experiment_data[exp_num]["test_seq_acc"] = seq_acc
test_data.reset_minibatch_counter()
corrects = np.zeros([n_test_moves,batch_size])
for i in range(int(test_data.x.shape[0] / batch_size) + 1):
x,y,p,t,rng = test_data.get_next_minibatch()
beliefs = x.copy()
for mv in range(n_test_moves):
pred_rslt = np.argmax(beliefs,axis=1)
corrects[mv,:] += (pred_rslt==y)
rot = np.random.randint(0,num_actions,[batch_size])
# rot = num_actions/2 * np.ones([batch_size])
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
# printtgt+p
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
print("test-random:",corrects.sum(axis=1) / float(test_data.x.shape[0]))
rnd_acc = corrects.sum(axis=1) / float(test_data.x.shape[0])
experiment_data[exp_num]["test_rnd_acc"] = rnd_acc
print("##################################################")
print("training network...")
test_accuracies = []
costs = []
test_costs = []
for epoch in range(n_epochs):
print("Epoch:",epoch)
train_data.reset_minibatch_counter()
corrects = np.zeros([n_moves,batch_size])
move_hist = np.zeros([num_actions,],dtype=np.int32)
poses_hist = []
test_data.reset_minibatch_counter()
test_move_hist = np.zeros([num_actions,],dtype=np.int32)
for i in range(int(train_data.x.shape[0] / batch_size )+ 1):
if i == 0:
print("iteration:",iter_cnt)
alpha = max(0.00, 1. - iter_cnt / 20000.)#1. / iter_cnt
x,y,p,t,rng = train_data.get_next_minibatch()
poses_hist.append(p)
beliefs = x.copy()
for mv in range(n_moves):
iter_cnt += 1
epsilon = max(0.00, 0.5 - iter_cnt / 800.)
# lr = max(1.e-7, 0.01 * (1. - iter_cnt / 16000.) )
if iter_cnt>=lr_dec_start and iter_cnt%lr_dec_step==lr_dec_step-1:
lr = max(1.e-10,lr * 0.1)
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot,prot,_ = fnx_action_selection()
rot = rot.reshape(-1)
#epsilon-greedy exploration
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
idx_random = np.where(rand_mask == 1)[0]
idx_net_act = np.where(rand_mask != 1)[0]
temp111 = rot.copy()
temp111[idx_random] = rand_acts[idx_random]
temp111[idx_net_act]= rot[idx_net_act]
rot = temp111
rot_idx = rot.copy().astype(np.int32)
assert(rot_idx.shape[0]==batch_size)
hst = np.histogram(rot_idx,bins=range(0,num_actions))[0]
for kk in range(rot_idx.shape[0]):
move_hist[rot_idx[kk]] += 1
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
x1,y1,p1,t1,_ = train_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
assert((t1==t).sum()==batch_size)
assert((y1==y).sum()==batch_size)
x1 = x1 * beliefs
x1 = x1 / x1.sum(axis=1).reshape([-1,1])
input_shared.set_value(x1.T.astype(theano.config.floatX))
rot1,prot1,_ = fnx_action_selection()#calculate the Q(s,a) for all as in the next state
pred_rslt = np.argmax(x1,axis=1)#x1 should be 'beliefs' if we use Q = r(t) + gamma max_a Q(s,a').
prot_max = gamma * np.max(prot1,axis=0).reshape(-1).astype(theano.config.floatX)
#reward each move based on the amount of belief increase
srtd_beliefs = np.sort(x1,axis=1)#x1 should be 'beliefs' if we use Q = r(t) + gamma max_a Q(s,a').
if mv == n_moves-1:
prot_max += R * (pred_rslt==y1)* (srtd_beliefs[:,-1] - srtd_beliefs[:,-2]).reshape(-1)
prot_max -= R * (pred_rslt!=y1)
prot_max = alpha * prot_max + (1-alpha) * prot[rot_idx,range(batch_size)].reshape(-1)
corrects[mv,:] += (pred_rslt==y)
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot_target_shared.set_value(prot_max.astype(theano.config.floatX))
rot_index_shared.set_value(rot_idx.reshape(-1))
prot2,c = fnx_train(lr)
costs.append(c)
x,y,p,t,_ = train_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
poses_hist.append(p)
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
x,y,p,t,rng = test_data.get_next_minibatch()
beliefs = x.copy()
prev_actval = np.zeros([batch_size,]).reshape(-1)
for mv in range(n_moves+1):
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot,prot = fnx_test()
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
rot = (1-rand_mask) * rot + rand_mask * rand_acts
if mv>0:
c = np.sqrt(np.mean((prot.max(axis=0).reshape(-1) - prev_actval)**2))
test_costs.append(c)
prev_actval = prot[rot,range(batch_size)].reshape(-1)
rot_idx = rot.copy().astype(np.int32)
for kk in range(rot_idx.shape[1]):
test_move_hist[rot_idx[0,kk]] += 1
rot = rot.reshape(-1)
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
pred_rslt = np.argmax(beliefs,axis=1)
# test_corrects[mv,:]+= (pred_rslt==y)
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
print("epoch cost:",np.sum(costs))
print("train accuracy:",corrects.sum(axis=1) / float(train_data.x.shape[0]))
print("learning rate:",lr," RL epsilon:",epsilon)
test_poses_hist = []
corrects = np.zeros([n_test_moves,batch_size])
test_data.reset_minibatch_counter()
test_move_hist = np.zeros([num_actions,],dtype=np.int32)
for i in range(int(test_data.x.shape[0] / batch_size ) + 1):
x,y,p,t,rng = test_data.get_next_minibatch()
test_poses_hist.append(p)
beliefs = x.copy()
move_hist = np.zeros([n_test_moves,batch_size])
for mv in range(n_test_moves):
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot = fnx_test()[0]
rot = rot.reshape(-1)
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
idx_random = np.where(rand_mask == 1)[0]
idx_net_act = np.where(rand_mask != 1)[0]
temp111 = rot.copy()
temp111[idx_random] = rand_acts[idx_random]
temp111[idx_net_act]= rot[idx_net_act]
rot = temp111
rot_idx = rot.copy().astype(np.int32)
for kk in range(rot_idx.shape[0]):
test_move_hist[rot_idx[kk]] += 1
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
pred_rslt = np.argmax(beliefs,axis=1)
corrects[mv,:] += (pred_rslt==y)
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
test_poses_hist.append(p)
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
hst = np.histogram(move_hist.reshape(-1),bins=range(0,num_actions))[0]
print("test:",corrects.sum(axis=1) / float(test_data.x.shape[0]))
test_accuracies.append(corrects.sum(axis=1) / float(test_data.x.shape[0]))
experiment_data[exp_num]["test_dpq_acc"] = corrects.sum(axis=1) / float(test_data.x.shape[0])
experiment_data[exp_num]["test_RMSE"] = test_costs
experiment_data[exp_num]["train_RMSE"] = costs
experiment_data[exp_num]["train_net"] = copy.deepcopy(train_net)
experiment_data[exp_num]["test_net"] = test_net
experiment_data[exp_num]["train_poses_hist"] = poses_hist
experiment_data[exp_num]["test_poses_hist"] = test_poses_hist
colors = ["r","g","b","c","y","m","k"]
linestyle = ["-","--","-.",":"]
marker = ["o","v","^","<",">","*"]
i = 0
seq_acc = []
rnd_acc = []
dpq_acc = []
for i in experiment_data.keys():
seq_acc.append(experiment_data[i]["test_seq_acc"].reshape([1,-1]))
rnd_acc.append(experiment_data[i]["test_rnd_acc"].reshape([1,-1]))
dpq_acc.append(experiment_data[i]["test_dpq_acc"].reshape([1,-1]))
seq_acc = np.concatenate(seq_acc,axis=0)
rnd_acc = np.concatenate(rnd_acc,axis=0)
dpq_acc = np.concatenate(dpq_acc,axis=0)
plt.figure(1)
plt.hold(True)
i = 0
plt.errorbar(x=range(n_test_moves),y=dpq_acc.mean(axis=0),xerr=0,yerr=dpq_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="DeepQ"+str(i),marker=marker[i%len(marker)],linewidth=4.)
i += 1
plt.errorbar(x=range(n_test_moves),y=seq_acc.mean(axis=0),xerr=0,yerr=seq_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="sequential",marker=marker[i%len(marker)],linewidth=4.)
i += 1
plt.errorbar(x=range(n_test_moves),y=rnd_acc.mean(axis=0),xerr=0,yerr=rnd_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="random",marker=marker[i%len(marker)],linewidth=4.)
handles, labels = plt.gca().get_legend_handles_labels()
plt.gca().legend(handles[::-1], labels[::-1],loc=4)
plt.xlabel("Number of Actions")
plt.ylabel("Accuracy")
for axis in ['top','bottom','left','right']:
plt.gca().spines[axis].set_linewidth(2.0)
i += 1
plt.xticks(range(n_test_moves+1))
plt.figure(2)
i=2
plt.plot(np.log(experiment_data[i]["train_RMSE"]),c='b')
plt.hold(True)
plt.plot(np.log(experiment_data[i]["test_RMSE"]),c='r')
plt.show()
print("saving experiment results...")
f = open("expresults-"+arm+"-"+str(n_test_moves)+".pickle","wb")
pickle.dump(experiment_data,f,protocol=pickle.HIGHEST_PROTOCOL)
f.close()
Cast for-loop counters to int
from __future__ import print_function
import sys
import select
import tty
import termios
import time
import theano
import pprint
import theano.tensor as T
#import cv2
import pickle
import copy
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from theano.tensor.shared_randomstreams import RandomStreams
import sklearn
from sklearn.ensemble import ExtraTreesClassifier
from DataLoader import DataLoader
import DeepLearningStack
from DeepLearningStack import FeedForwardNet
"""
This code implements the Deep Q-Learning model for active object recognition,
described in
Malmir M, Sikka K, Forster D, Movellan J, Cottrell GW.
Deep Q-learning for Active Recognition of GERMS:
Baseline performance on a standardized dataset for active learning.
In Proceedings of the British Machine Vision Conference (BMVC), 2015.
This code requires the following data files:
train-[ARM].pickle
test-[ARM].pickle
val-[ARM].pickle
These files contain the belief encoding of single images of GERMS,
using features obtained from VGG deep network trained on ImageNet.
Data files can be found in the VGG-Beliefs folder here:
https://drive.google.com/folderview?id=0BxZOUQHBUnwmQUdWRGlPMGw4WHM&usp=sharing
The code for VGG model is obtained from:
http://www.robots.ox.ac.uk/~vgg/software/deep_eval/
"""
batch_size = 128
D = 136#number of classes
arm = "left"
#load the data
print("##################################################")
print("loading train data")
data_files = ["train-"+arm+".pickle"]
train_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
train_data.shuffle_data()
C = np.unique(train_data.y).shape[0]
#print(train_data.y == np.argmax(train_data.x,axis=1)).mean()
print("data size:",train_data.x.shape)
print("number of classes:",C)
print("number of tracks:",np.unique(train_data.t).shape[0])
print("##################################################")
print("loading validation data")
data_files = ["val-"+arm+".pickle"]
val_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
val_data.shuffle_data()
C = np.unique(val_data.y).shape[0]
val_data.adapt_labels(train_data.obj2label)#data are already unified in their labels
print("data size:",val_data.x.shape)
print("number of classes:",C)
print("number of tracks:",np.unique(val_data.t).shape[0])
print("##################################################")
print("loading test data")
data_files = ["test-"+arm+".pickle"]
test_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
test_data.adapt_labels(train_data.obj2label)#data are already unified in their labels
test_data.shuffle_data()
print("data size:",test_data.x.shape)
print("number of classes:",np.unique(test_data.y).shape[0])
print("number of tracks:",np.unique(test_data.t).shape[0])
experiment_data = dict()
#train 20 different models and report the mean accuracy across runs
for exp_num in range(20):
test_data.shuffle_data()
print( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print( "EXPERIMENT ", exp_num)
print( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
experiment_data[exp_num] = dict()
lr = 0.01#initial learning rate
lr_dec_step = 1000#learning rate decrease step
lr_dec_start= 0
num_actions = 10#number of actions
gamma = 0.9#RL discount factor
alpha = 0.01#stochastic approximation coefficient
R = 10.0#reward
n_moves = 5#length of object inspection sequence
n_test_moves= 5#length of inspection sequence for test objects
epsilon = 1.#for e-greedy annealing
iter_cnt = 1
epsilon_dec_step = 100
n_epochs = 50
#create deep net
print("##################################################")
print("Creating deep net...")
input = T.matrix("data",dtype=theano.config.floatX)#the input is concatenation of action history and beliefs
config = "DQLArch.xml"
rng = RandomStreams(seed=int(time.time()))
train_net = FeedForwardNet.FeedForwardNet(rng,{"input":input},config)
test_net = FeedForwardNet.FeedForwardNet(rng,{"input":input},config,clone_from=train_net)
pprint.pprint(train_net.output_dims)
print("##################################################")
print("creating cost layer...")
input_shared = theano.shared(np.zeros([D,batch_size],dtype=theano.config.floatX),borrow=True)
rot_target_shared = theano.shared(np.zeros([batch_size,],dtype=theano.config.floatX),borrow=True)
rot_index_shared = theano.shared(np.zeros([batch_size,],dtype=np.int32),borrow=True)
learning_rate = T.scalar("learning_rate",dtype=theano.config.floatX)
#target value consists of the target for rotation values and the target for sign-of-rotation values
layer_action_value = "fc3"
layer_action = "act1"
cost = T.sqrt( T.mean( (train_net.name2layer[layer_action_value].output[rot_index_shared,T.arange(batch_size)] - rot_target_shared)**2 ) )
grads = [theano.grad(cost,param) for param in train_net.params]
updates = [ (param,param-learning_rate * grad) for param,grad in zip(train_net.params,grads)]
fnx_action_selection = theano.function(inputs=[],outputs=[train_net.name2layer[layer_action].output,train_net.name2layer[layer_action_value].output,cost],
givens={
input:input_shared,
},
)
fnx_train = theano.function(inputs=[learning_rate],outputs=[train_net.name2layer[layer_action_value].output,cost],
givens={
input:input_shared,
},
updates=updates,
)
fnx_test = theano.function(inputs=[],outputs=[test_net.name2layer[layer_action].output,test_net.name2layer[layer_action_value].output],
givens={
input:input_shared,
},
)
print("##################################################")
print("classifying tracks tracks")
track_indices = dict()
for t in np.unique(test_data.t):
idx = np.where(test_data.t == t)[0]
poses = test_data.p[idx]
sorted_idx = np.argsort(poses)
track_indices[t] = idx[sorted_idx]
accuracy = np.zeros(n_test_moves)
for t in track_indices.keys():
# print"for track:",t
# print"number of frames:",len(track_indices[t])
belief = np.ones(C)
# print
for i in range(n_test_moves):
next_idx = track_indices[t][i]
belief = belief * test_data.x[next_idx,:]
belief = belief / belief.sum()
lbl = np.argmax(belief)
accuracy[i] += lbl == test_data.y[next_idx]
print("test-sequential",accuracy / np.unique(test_data.t).shape[0])
seq_acc = accuracy / np.unique(test_data.t).shape[0]
experiment_data[exp_num]["test_seq_acc"] = seq_acc
test_data.reset_minibatch_counter()
corrects = np.zeros([n_test_moves,batch_size])
for i in range(int(test_data.x.shape[0] / batch_size) + 1):
x,y,p,t,rng = test_data.get_next_minibatch()
beliefs = x.copy()
for mv in range(n_test_moves):
pred_rslt = np.argmax(beliefs,axis=1)
corrects[mv,:] += (pred_rslt==y)
rot = np.random.randint(0,num_actions,[batch_size])
# rot = num_actions/2 * np.ones([batch_size])
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
# printtgt+p
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
print("test-random:",corrects.sum(axis=1) / float(test_data.x.shape[0]))
rnd_acc = corrects.sum(axis=1) / float(test_data.x.shape[0])
experiment_data[exp_num]["test_rnd_acc"] = rnd_acc
print("##################################################")
print("training network...")
test_accuracies = []
costs = []
test_costs = []
for epoch in range(n_epochs):
print("Epoch:",epoch)
train_data.reset_minibatch_counter()
corrects = np.zeros([n_moves,batch_size])
move_hist = np.zeros([num_actions,],dtype=np.int32)
poses_hist = []
test_data.reset_minibatch_counter()
test_move_hist = np.zeros([num_actions,],dtype=np.int32)
for i in range(int(train_data.x.shape[0] / batch_size )+ 1):
if i == 0:
print("iteration:",iter_cnt)
alpha = max(0.00, 1. - iter_cnt / 20000.)#1. / iter_cnt
x,y,p,t,rng = train_data.get_next_minibatch()
poses_hist.append(p)
beliefs = x.copy()
for mv in range(n_moves):
iter_cnt += 1
epsilon = max(0.00, 1.0 - iter_cnt / 800.)
if iter_cnt>=lr_dec_start and iter_cnt%lr_dec_step==lr_dec_step-1:
lr = max(1.e-10,lr * 0.1)
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot,prot,_ = fnx_action_selection()
rot = rot.reshape(-1)
#epsilon-greedy exploration
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
idx_random = np.where(rand_mask == 1)[0]
idx_net_act = np.where(rand_mask != 1)[0]
temp111 = rot.copy()
temp111[idx_random] = rand_acts[idx_random]
temp111[idx_net_act]= rot[idx_net_act]
rot = temp111
rot_idx = rot.copy().astype(np.int32)
assert(rot_idx.shape[0]==batch_size)
hst = np.histogram(rot_idx,bins=range(0,num_actions))[0]
for kk in range(rot_idx.shape[0]):
move_hist[rot_idx[kk]] += 1
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
x1,y1,p1,t1,_ = train_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
assert((t1==t).sum()==batch_size)
assert((y1==y).sum()==batch_size)
x1 = x1 * beliefs
x1 = x1 / x1.sum(axis=1).reshape([-1,1])
input_shared.set_value(x1.T.astype(theano.config.floatX))
rot1,prot1,_ = fnx_action_selection()#calculate the Q(s,a) for all as in the next state
pred_rslt = np.argmax(x1,axis=1)#x1 should be 'beliefs' if we use Q = r(t) + gamma max_a Q(s,a').
prot_max = gamma * np.max(prot1,axis=0).reshape(-1).astype(theano.config.floatX)
#reward each move based on the amount of belief increase
srtd_beliefs = np.sort(x1,axis=1)#x1 should be 'beliefs' if we use Q = r(t) + gamma max_a Q(s,a').
if mv == n_moves-1:
prot_max += R * (pred_rslt==y1)* (srtd_beliefs[:,-1] - srtd_beliefs[:,-2]).reshape(-1)
prot_max -= R * (pred_rslt!=y1)
prot_max = alpha * prot_max + (1-alpha) * prot[rot_idx,range(batch_size)].reshape(-1)
corrects[mv,:] += (pred_rslt==y)
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot_target_shared.set_value(prot_max.astype(theano.config.floatX))
rot_index_shared.set_value(rot_idx.reshape(-1))
prot2,c = fnx_train(lr)
costs.append(c)
x,y,p,t,_ = train_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
poses_hist.append(p)
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
x,y,p,t,rng = test_data.get_next_minibatch()
beliefs = x.copy()
prev_actval = np.zeros([batch_size,]).reshape(-1)
for mv in range(n_moves+1):
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot,prot = fnx_test()
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
rot = (1-rand_mask) * rot + rand_mask * rand_acts
if mv>0:
c = np.sqrt(np.mean((prot.max(axis=0).reshape(-1) - prev_actval)**2))
test_costs.append(c)
prev_actval = prot[rot,range(batch_size)].reshape(-1)
rot_idx = rot.copy().astype(np.int32)
for kk in range(rot_idx.shape[1]):
test_move_hist[rot_idx[0,kk]] += 1
rot = rot.reshape(-1)
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
pred_rslt = np.argmax(beliefs,axis=1)
# test_corrects[mv,:]+= (pred_rslt==y)
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
print("epoch cost:",np.sum(costs))
print("train accuracy:",corrects.sum(axis=1) / float(train_data.x.shape[0]))
print("learning rate:",lr," RL epsilon:",epsilon)
test_poses_hist = []
corrects = np.zeros([n_test_moves,batch_size])
test_data.reset_minibatch_counter()
test_move_hist = np.zeros([num_actions,],dtype=np.int32)
for i in range(int(test_data.x.shape[0] / batch_size ) + 1):
x,y,p,t,rng = test_data.get_next_minibatch()
test_poses_hist.append(p)
beliefs = x.copy()
move_hist = np.zeros([n_test_moves,batch_size])
for mv in range(n_test_moves):
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot = fnx_test()[0]
rot = rot.reshape(-1)
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
idx_random = np.where(rand_mask == 1)[0]
idx_net_act = np.where(rand_mask != 1)[0]
temp111 = rot.copy()
temp111[idx_random] = rand_acts[idx_random]
temp111[idx_net_act]= rot[idx_net_act]
rot = temp111
rot_idx = rot.copy().astype(np.int32)
for kk in range(rot_idx.shape[0]):
test_move_hist[rot_idx[kk]] += 1
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
pred_rslt = np.argmax(beliefs,axis=1)
corrects[mv,:] += (pred_rslt==y)
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
test_poses_hist.append(p)
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
hst = np.histogram(move_hist.reshape(-1),bins=range(0,num_actions))[0]
print("test:",corrects.sum(axis=1) / float(test_data.x.shape[0]))
test_accuracies.append(corrects.sum(axis=1) / float(test_data.x.shape[0]))
experiment_data[exp_num]["test_dpq_acc"] = corrects.sum(axis=1) / float(test_data.x.shape[0])
experiment_data[exp_num]["test_RMSE"] = test_costs
experiment_data[exp_num]["train_RMSE"] = costs
experiment_data[exp_num]["train_net"] = copy.deepcopy(train_net)
experiment_data[exp_num]["test_net"] = test_net
experiment_data[exp_num]["train_poses_hist"] = poses_hist
experiment_data[exp_num]["test_poses_hist"] = test_poses_hist
colors = ["r","g","b","c","y","m","k"]
linestyle = ["-","--","-.",":"]
marker = ["o","v","^","<",">","*"]
i = 0
seq_acc = []
rnd_acc = []
dpq_acc = []
for i in experiment_data.keys():
seq_acc.append(experiment_data[i]["test_seq_acc"].reshape([1,-1]))
rnd_acc.append(experiment_data[i]["test_rnd_acc"].reshape([1,-1]))
dpq_acc.append(experiment_data[i]["test_dpq_acc"].reshape([1,-1]))
seq_acc = np.concatenate(seq_acc,axis=0)
rnd_acc = np.concatenate(rnd_acc,axis=0)
dpq_acc = np.concatenate(dpq_acc,axis=0)
plt.figure(1)
plt.hold(True)
i = 0
plt.errorbar(x=range(n_test_moves),y=dpq_acc.mean(axis=0),xerr=0,yerr=dpq_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="DeepQ"+str(i),marker=marker[i%len(marker)],linewidth=4.)
i += 1
plt.errorbar(x=range(n_test_moves),y=seq_acc.mean(axis=0),xerr=0,yerr=seq_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="sequential",marker=marker[i%len(marker)],linewidth=4.)
i += 1
plt.errorbar(x=range(n_test_moves),y=rnd_acc.mean(axis=0),xerr=0,yerr=rnd_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="random",marker=marker[i%len(marker)],linewidth=4.)
handles, labels = plt.gca().get_legend_handles_labels()
plt.gca().legend(handles[::-1], labels[::-1],loc=4)
plt.xlabel("Number of Actions")
plt.ylabel("Accuracy")
for axis in ['top','bottom','left','right']:
plt.gca().spines[axis].set_linewidth(2.0)
i += 1
plt.xticks(range(n_test_moves+1))
plt.figure(2)
i=2
plt.plot(np.log(experiment_data[i]["train_RMSE"]),c='b')
plt.hold(True)
plt.plot(np.log(experiment_data[i]["test_RMSE"]),c='r')
plt.show()
print("saving experiment results...")
f = open("expresults-"+arm+"-"+str(n_test_moves)+".pickle","wb")
pickle.dump(experiment_data,f,protocol=pickle.HIGHEST_PROTOCOL)
f.close()
|
"""
What's the big idea?
An endpoint that traverses all restful endpoints producing a swagger 2.0 schema
If a swagger yaml description is found in the docstrings for an endpoint
we add the endpoint to swagger specification output
"""
import inspect
import yaml
import re
import os
from collections import defaultdict
from flask import jsonify, Blueprint, url_for, current_app
from flask.views import MethodView
def _sanitize(comment):
return comment.replace('\n', '<br/>') if comment else comment
def get_path_from_doc(full_doc):
swag_path = full_doc.replace('file:', '').strip()
swag_type = swag_path.split('.')[-1]
return swag_path, swag_type
def json_to_yaml(content):
"""
TODO: convert json to yaml
"""
return content
def load_from_file(swag_path, swag_type):
try:
return open(swag_path).read()
except IOError:
swag_path = os.path.join(os.path.dirname(__file__), swag_path)
return open(swag_path).read()
# TODO:
# with open(swag_path) as swag_file:
# content = swag_file.read()
# if swag_type in ('yaml', 'yml'):
# return content
# elif swag_type == 'json':
# return json_to_yaml(content)
def _parse_docstring(obj, process_doc):
first_line, other_lines, swag = None, None, None
full_doc = inspect.getdoc(obj)
if hasattr(obj, 'swag_path'):
full_doc = load_from_file(obj.swag_path, obj.swag_type)
if full_doc and full_doc.startswith('file:'):
swag_path, swag_type = get_path_from_doc(full_doc)
full_doc = load_from_file(swag_path, swag_type)
if full_doc:
line_feed = full_doc.find('\n')
if line_feed != -1:
first_line = process_doc(full_doc[:line_feed])
yaml_sep = full_doc[line_feed + 1:].find('---')
if yaml_sep != -1:
other_lines = process_doc(
full_doc[line_feed + 1: line_feed + yaml_sep]
)
swag = yaml.load(full_doc[line_feed + yaml_sep:])
else:
other_lines = process_doc(full_doc[line_feed + 1:])
else:
first_line = full_doc
return first_line, other_lines, swag
def _extract_definitions(alist, level=None):
"""
Since we couldn't be bothered to register models elsewhere
our definitions need to be extracted from the parameters.
We require an 'id' field for the schema to be correctly
added to the definitions list.
"""
def _extract_array_defs(source):
# extract any definitions that are within arrays
# this occurs recursively
ret = []
items = source.get('items')
if items is not None and 'schema' in items:
ret += _extract_definitions([items], level + 1)
return ret
# for tracking level of recursion
if level is None:
level = 0
defs = list()
if alist is not None:
for item in alist:
schema = item.get("schema")
if schema is not None:
schema_id = schema.get("id")
if schema_id is not None:
defs.append(schema)
ref = {"$ref": "#/definitions/{}".format(schema_id)}
# only add the reference as a schema if we are in a
# response or
# a parameter i.e. at the top level
# directly ref if a definition is used within another
# definition
if level == 0:
item['schema'] = ref
else:
item.update(ref)
del item['schema']
# extract any definitions that are within properties
# this occurs recursively
properties = schema.get('properties')
if properties is not None:
defs += _extract_definitions(
properties.values(), level + 1
)
defs += _extract_array_defs(schema)
defs += _extract_array_defs(item)
return defs
class SpecsView(MethodView):
def __init__(self, *args, **kwargs):
view_args = kwargs.pop('view_args', {})
self.config = view_args.get('config')
super(SpecsView, self).__init__(*args, **kwargs)
def get(self):
base_endpoint = self.config.get('endpoint', 'swagger')
specs = [
{
"url": url_for(".".join((base_endpoint, spec['endpoint']))),
"title": spec.get('title'),
"version": spec.get("version"),
"endpoint": spec.get('endpoint')
}
for spec in self.config.get('specs', [])
]
return jsonify(
{"specs": specs,
"title": self.config.get('title', 'Flasgger')}
)
class OutputView(MethodView):
def __init__(self, *args, **kwargs):
view_args = kwargs.pop('view_args', {})
self.config = view_args.get('config')
self.spec = view_args.get('spec')
self.process_doc = _sanitize
super(OutputView, self).__init__(*args, **kwargs)
def get_url_mappings(self, rule_filter=None):
rule_filter = rule_filter or (lambda rule: True)
app_rules = [
rule for rule in current_app.url_map.iter_rules()
if rule_filter(rule)
]
return app_rules
def get(self):
data = {
"swagger": self.config.get('swagger_version', "2.0"),
"info": {
"version": self.spec.get('version', "0.0.0"),
"title": self.spec.get('title', "A swagger API"),
"description": self.spec.get('description', "API description"),
"termsOfService": self.spec.get('termsOfService', "Terms of service"),
},
"host": self.config.get('host', "hostname"),
"basePath": self.config.get('basePath', "/"),
"paths": defaultdict(dict),
"definitions": defaultdict(dict)
}
paths = data['paths']
definitions = data['definitions']
ignore_verbs = {"HEAD", "OPTIONS"}
# technically only responses is non-optional
optional_fields = [
'tags', 'consumes', 'produces', 'schemes', 'security',
'deprecated', 'operationId', 'externalDocs'
]
for rule in self.get_url_mappings(self.spec.get('rule_filter')):
endpoint = current_app.view_functions[rule.endpoint]
methods = dict()
for verb in rule.methods.difference(ignore_verbs):
if hasattr(endpoint, 'methods') and verb in endpoint.methods:
verb = verb.lower()
methods[verb] = endpoint.view_class.__dict__.get(verb)
else:
methods[verb.lower()] = endpoint
operations = dict()
for verb, method in methods.items():
summary, description, swag = _parse_docstring(
method, self.process_doc
)
# we only add endpoints with swagger data in the docstrings
if swag is not None:
params = swag.get('parameters', [])
defs = _extract_definitions(params)
responses = swag.get('responses', {})
if responses is not None:
defs = defs + _extract_definitions(responses.values())
for definition in defs:
def_id = definition.pop('id')
if def_id is not None:
definitions[def_id].update(definition)
operation = dict(
summary=summary,
description=description,
responses=responses
)
# parameters - swagger ui dislikes empty parameter lists
if len(params) > 0:
operation['parameters'] = params
# other optionals
for key in optional_fields:
if key in swag:
operation[key] = swag.get(key)
operations[verb] = operation
if len(operations):
rule = str(rule)
for arg in re.findall('(<(.*?\:)?(.*?)>)', rule):
rule = rule.replace(arg[0], '{%s}' % arg[2])
paths[rule].update(operations)
return jsonify(data)
class Swagger(object):
DEFAULT_CONFIG = {
"headers": [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', "Authorization, Content-Type"),
('Access-Control-Expose-Headers', "Authorization"),
('Access-Control-Allow-Methods',
"GET, POST, PUT, DELETE, OPTIONS"),
('Access-Control-Allow-Credentials', "true"),
('Access-Control-Max-Age', 60 * 60 * 24 * 20),
],
"specs": [
{
"version": "1.0.1",
"title": "A swagger API",
"endpoint": 'spec',
"route": '/spec',
"rule_filter": lambda rule: True # all in
}
],
"static_url_path": "/apidocs",
"static_folder": "swaggerui",
"specs_route": "/specs"
}
def __init__(self, app=None, config=None):
self.endpoints = []
self.config = config or self.DEFAULT_CONFIG.copy()
if app:
self.init_app(app)
def init_app(self, app):
self.load_config(app)
self.register_views(app)
self.add_headers(app)
def load_config(self, app):
self.config.update(app.config.get('SWAGGER', {}))
def register_views(self, app):
blueprint = Blueprint(
self.config.get('endpoint', 'swagger'),
__name__,
url_prefix=self.config.get('url_prefix', None),
subdomain=self.config.get('subdomain', None),
template_folder=self.config.get('template_folder', 'templates'),
static_folder=self.config.get('static_folder', 'static'),
static_url_path=self.config.get('static_url_path', None)
)
for spec in self.config['specs']:
self.endpoints.append(spec['endpoint'])
blueprint.add_url_rule(
spec['route'],
spec['endpoint'],
view_func=OutputView().as_view(
spec['endpoint'],
view_args=dict(app=app, config=self.config, spec=spec)
)
)
blueprint.add_url_rule(
self.config.get('specs_route', '/specs'),
'specs',
view_func=SpecsView().as_view(
'specs',
view_args=dict(config=self.config)
)
)
app.register_blueprint(blueprint)
def add_headers(self, app):
@app.after_request
def after_request(response): # noqa
for header, value in self.config.get('headers'):
response.headers.add(header, value)
return response
Update base.py
"""
What's the big idea?
An endpoint that traverses all restful endpoints producing a swagger 2.0 schema
If a swagger yaml description is found in the docstrings for an endpoint
we add the endpoint to swagger specification output
"""
import inspect
import yaml
import re
import os
from collections import defaultdict
from flask import jsonify, Blueprint, url_for, current_app
from flask.views import MethodView
def _sanitize(comment):
return comment.replace('\n', '<br/>') if comment else comment
def get_path_from_doc(full_doc):
swag_path = full_doc.replace('file:', '').strip()
swag_type = swag_path.split('.')[-1]
return swag_path, swag_type
def json_to_yaml(content):
"""
TODO: convert json to yaml
"""
return content
def load_from_file(swag_path, swag_type):
try:
return open(swag_path).read()
except IOError:
swag_path = os.path.join(os.path.dirname(__file__), swag_path)
return open(swag_path).read()
# TODO:
# with open(swag_path) as swag_file:
# content = swag_file.read()
# if swag_type in ('yaml', 'yml'):
# return content
# elif swag_type == 'json':
# return json_to_yaml(content)
def _parse_docstring(obj, process_doc):
first_line, other_lines, swag = None, None, None
full_doc = inspect.getdoc(obj)
if hasattr(obj, 'swag_path'):
full_doc = load_from_file(obj.swag_path, obj.swag_type)
if full_doc and full_doc.startswith('file:'):
swag_path, swag_type = get_path_from_doc(full_doc)
full_doc = load_from_file(swag_path, swag_type)
if full_doc:
line_feed = full_doc.find('\n')
if line_feed != -1:
first_line = process_doc(full_doc[:line_feed])
yaml_sep = full_doc[line_feed + 1:].find('---')
if yaml_sep != -1:
other_lines = process_doc(
full_doc[line_feed + 1: line_feed + yaml_sep]
)
swag = yaml.load(full_doc[line_feed + yaml_sep:])
else:
other_lines = process_doc(full_doc[line_feed + 1:])
else:
first_line = full_doc
return first_line, other_lines, swag
def _extract_definitions(alist, level=None):
"""
Since we couldn't be bothered to register models elsewhere
our definitions need to be extracted from the parameters.
We require an 'id' field for the schema to be correctly
added to the definitions list.
"""
def _extract_array_defs(source):
# extract any definitions that are within arrays
# this occurs recursively
ret = []
items = source.get('items')
if items is not None and 'schema' in items:
ret += _extract_definitions([items], level + 1)
return ret
# for tracking level of recursion
if level is None:
level = 0
defs = list()
if alist is not None:
for item in alist:
schema = item.get("schema")
if schema is not None:
schema_id = schema.get("id")
if schema_id is not None:
defs.append(schema)
ref = {"$ref": "#/definitions/{}".format(schema_id)}
# only add the reference as a schema if we are in a
# response or
# a parameter i.e. at the top level
# directly ref if a definition is used within another
# definition
if level == 0:
item['schema'] = ref
else:
item.update(ref)
del item['schema']
# extract any definitions that are within properties
# this occurs recursively
properties = schema.get('properties')
if properties is not None:
defs += _extract_definitions(
properties.values(), level + 1
)
defs += _extract_array_defs(schema)
defs += _extract_array_defs(item)
return defs
class SpecsView(MethodView):
def __init__(self, *args, **kwargs):
view_args = kwargs.pop('view_args', {})
self.config = view_args.get('config')
super(SpecsView, self).__init__(*args, **kwargs)
def get(self):
base_endpoint = self.config.get('endpoint', 'swagger')
specs = [
{
"url": url_for(".".join((base_endpoint, spec['endpoint']))),
"title": spec.get('title'),
"version": spec.get("version"),
"endpoint": spec.get('endpoint')
}
for spec in self.config.get('specs', [])
]
return jsonify(
{"specs": specs,
"title": self.config.get('title', 'Flasgger')}
)
class OutputView(MethodView):
def __init__(self, *args, **kwargs):
view_args = kwargs.pop('view_args', {})
self.config = view_args.get('config')
self.spec = view_args.get('spec')
self.process_doc = _sanitize
super(OutputView, self).__init__(*args, **kwargs)
def get_url_mappings(self, rule_filter=None):
rule_filter = rule_filter or (lambda rule: True)
app_rules = [
rule for rule in current_app.url_map.iter_rules()
if rule_filter(rule)
]
return app_rules
def get(self):
data = {
"swagger": self.config.get('swagger_version', "2.0"),
"info": {
"version": self.spec.get('version', "0.0.0"),
"title": self.spec.get('title', "A swagger API"),
"description": self.spec.get('description',
"API description"),
"termsOfService": self.spec.get('termsOfService',
"Terms of service"),
},
"host": self.config.get('host', "hostname"),
"basePath": self.config.get('basePath', "/"),
"paths": defaultdict(dict),
"definitions": defaultdict(dict)
}
paths = data['paths']
definitions = data['definitions']
ignore_verbs = {"HEAD", "OPTIONS"}
# technically only responses is non-optional
optional_fields = [
'tags', 'consumes', 'produces', 'schemes', 'security',
'deprecated', 'operationId', 'externalDocs'
]
for rule in self.get_url_mappings(self.spec.get('rule_filter')):
endpoint = current_app.view_functions[rule.endpoint]
methods = dict()
for verb in rule.methods.difference(ignore_verbs):
if hasattr(endpoint, 'methods') and verb in endpoint.methods:
verb = verb.lower()
methods[verb] = endpoint.view_class.__dict__.get(verb)
else:
methods[verb.lower()] = endpoint
operations = dict()
for verb, method in methods.items():
summary, description, swag = _parse_docstring(
method, self.process_doc
)
# we only add endpoints with swagger data in the docstrings
if swag is not None:
params = swag.get('parameters', [])
defs = _extract_definitions(params)
responses = swag.get('responses', {})
if responses is not None:
defs = defs + _extract_definitions(responses.values())
for definition in defs:
def_id = definition.pop('id')
if def_id is not None:
definitions[def_id].update(definition)
operation = dict(
summary=summary,
description=description,
responses=responses
)
# parameters - swagger ui dislikes empty parameter lists
if len(params) > 0:
operation['parameters'] = params
# other optionals
for key in optional_fields:
if key in swag:
operation[key] = swag.get(key)
operations[verb] = operation
if len(operations):
rule = str(rule)
for arg in re.findall('(<(.*?\:)?(.*?)>)', rule):
rule = rule.replace(arg[0], '{%s}' % arg[2])
paths[rule].update(operations)
return jsonify(data)
class Swagger(object):
DEFAULT_CONFIG = {
"headers": [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', "Authorization, Content-Type"),
('Access-Control-Expose-Headers', "Authorization"),
('Access-Control-Allow-Methods',
"GET, POST, PUT, DELETE, OPTIONS"),
('Access-Control-Allow-Credentials', "true"),
('Access-Control-Max-Age', 60 * 60 * 24 * 20),
],
"specs": [
{
"version": "1.0.1",
"title": "A swagger API",
"endpoint": 'spec',
"route": '/spec',
"rule_filter": lambda rule: True # all in
}
],
"static_url_path": "/apidocs",
"static_folder": "swaggerui",
"specs_route": "/specs"
}
def __init__(self, app=None, config=None):
self.endpoints = []
self.config = config or self.DEFAULT_CONFIG.copy()
if app:
self.init_app(app)
def init_app(self, app):
self.load_config(app)
self.register_views(app)
self.add_headers(app)
def load_config(self, app):
self.config.update(app.config.get('SWAGGER', {}))
def register_views(self, app):
blueprint = Blueprint(
self.config.get('endpoint', 'swagger'),
__name__,
url_prefix=self.config.get('url_prefix', None),
subdomain=self.config.get('subdomain', None),
template_folder=self.config.get('template_folder', 'templates'),
static_folder=self.config.get('static_folder', 'static'),
static_url_path=self.config.get('static_url_path', None)
)
for spec in self.config['specs']:
self.endpoints.append(spec['endpoint'])
blueprint.add_url_rule(
spec['route'],
spec['endpoint'],
view_func=OutputView().as_view(
spec['endpoint'],
view_args=dict(app=app, config=self.config, spec=spec)
)
)
blueprint.add_url_rule(
self.config.get('specs_route', '/specs'),
'specs',
view_func=SpecsView().as_view(
'specs',
view_args=dict(config=self.config)
)
)
app.register_blueprint(blueprint)
def add_headers(self, app):
@app.after_request
def after_request(response): # noqa
for header, value in self.config.get('headers'):
response.headers.add(header, value)
return response
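# --- Usage sketch (illustrative; the app and config values below are
# --- assumptions, not part of this module) ---
# from flask import Flask
# app = Flask(__name__)
# app.config['SWAGGER'] = {   # merged over DEFAULT_CONFIG by load_config()
#     'title': 'My API',
#     'specs': [{'version': '1.0', 'title': 'v1 spec', 'endpoint': 'v1_spec',
#                'route': '/v1/spec', 'rule_filter': lambda rule: True}],
# }
# Swagger(app)  # registers the per-spec routes, the /specs listing and the
#               # swaggerui static blueprint defined above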
|
from datetime import datetime, timedelta
from typing import Optional
import werkzeug
from flask import (Flask, render_template, flash, Markup, request, g, session,
url_for, redirect)
from flask_babel import gettext
from flask_assets import Environment
from flask_wtf.csrf import CSRFProtect, CSRFError
from jinja2 import evalcontextfilter
from os import path
from sqlalchemy.orm.exc import NoResultFound
from typing import Tuple
import i18n
import template_filters
import version
from crypto_util import CryptoUtil
from db import db
from models import InstanceConfig, Source
from request_that_secures_file_uploads import RequestThatSecuresFileUploads
from sdconfig import SDConfig
from source_app import main, info, api
from source_app.decorators import ignore_static
from source_app.utils import logged_in, was_in_generate_flow
from store import Storage
def create_app(config: SDConfig) -> Flask:
app = Flask(__name__,
template_folder=config.SOURCE_TEMPLATES_DIR,
static_folder=path.join(config.SECUREDROP_ROOT, 'static'))
app.request_class = RequestThatSecuresFileUploads
app.config.from_object(config.SOURCE_APP_FLASK_CONFIG_CLS)
# The default CSRF token expiration is 1 hour. Since large uploads can
# take longer than an hour over Tor, we increase the valid window to 24h.
app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24
CSRFProtect(app)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_URI
db.init_app(app)
# TODO: Attaching a Storage dynamically like this disables all type checking (and
# breaks code analysis tools) for code that uses current_app.storage; it should be refactored
app.storage = Storage(config.STORE_DIR,
config.TEMP_DIR,
config.JOURNALIST_KEY)
# TODO: Attaching a CryptoUtil dynamically like this disables all type checking (and
# breaks code analysis tools) for code that uses current_app.crypto_util; it should be refactored
app.crypto_util = CryptoUtil(
scrypt_params=config.SCRYPT_PARAMS,
scrypt_id_pepper=config.SCRYPT_ID_PEPPER,
scrypt_gpg_pepper=config.SCRYPT_GPG_PEPPER,
securedrop_root=config.SECUREDROP_ROOT,
word_list=config.WORD_LIST,
nouns_file=config.NOUNS,
adjectives_file=config.ADJECTIVES,
gpg_key_dir=config.GPG_KEY_DIR,
)
@app.errorhandler(CSRFError)
def handle_csrf_error(e: CSRFError) -> werkzeug.Response:
msg = render_template('session_timeout.html')
session.clear()
flash(Markup(msg), "important")
return redirect(url_for('main.index'))
assets = Environment(app)
app.config['assets'] = assets
i18n.setup_app(config, app)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.jinja_env.globals['version'] = version.__version__
# Exported to source templates for being included in instructions
app.jinja_env.globals['submission_key_fpr'] = config.JOURNALIST_KEY
app.jinja_env.filters['rel_datetime_format'] = \
template_filters.rel_datetime_format
app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)
app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat
for module in [main, info, api]:
app.register_blueprint(module.make_blueprint(config)) # type: ignore
@app.before_request
@ignore_static
def setup_i18n() -> None:
"""Store i18n-related values in Flask's special g object"""
g.locale = i18n.get_locale(config)
g.text_direction = i18n.get_text_direction(g.locale)
g.html_lang = i18n.locale_to_rfc_5646(g.locale)
g.locales = i18n.get_locale2name()
@app.before_request
@ignore_static
def check_tor2web() -> None:
# ignore_static here so we only flash a single message warning
# about Tor2Web, corresponding to the initial page load.
if 'X-tor2web' in request.headers:
flash(Markup(gettext(
'<strong>WARNING: </strong> '
'You appear to be using Tor2Web. '
'This <strong> does not </strong> '
'provide anonymity. '
'<a href="{url}">Why is this dangerous?</a>')
.format(url=url_for('info.tor2web_warning'))),
"banner-warning")
@app.before_request
@ignore_static
def load_instance_config() -> None:
app.instance_config = InstanceConfig.get_current()
@app.before_request
@ignore_static
def setup_g() -> Optional[werkzeug.Response]:
"""Store commonly used values in Flask's special g object"""
if 'expires' in session and datetime.utcnow() >= session['expires']:
msg = render_template('session_timeout.html')
# Show expiration message only if the user was in the codename generation flow or was logged in
show_expiration_message = any([
session.get('show_expiration_message'),
logged_in(),
was_in_generate_flow(),
])
# clear the session after we render the message so it's localized
session.clear()
# Persist this property across sessions to distinguish users whose sessions actually expired
# from users who never logged in or generated a codename
session['show_expiration_message'] = show_expiration_message
# Redirect to index with flashed message
if session['show_expiration_message']:
flash(Markup(msg), "important")
return redirect(url_for('main.index'))
session['expires'] = datetime.utcnow() + \
timedelta(minutes=getattr(config,
'SESSION_EXPIRATION_MINUTES',
120))
# ignore_static here because `crypto_util.hash_codename` is scrypt
# (very time consuming), and we don't need to waste time running if
# we're just serving a static resource that won't need to access
# these common values.
if logged_in():
g.codename = session['codename']
g.filesystem_id = app.crypto_util.hash_codename(g.codename)
try:
g.source = Source.query \
.filter(Source.filesystem_id == g.filesystem_id) \
.filter_by(deleted_at=None) \
.one()
except NoResultFound as e:
app.logger.error(
"Found no Sources when one was expected: %s" %
(e,))
del session['logged_in']
del session['codename']
return redirect(url_for('main.index'))
g.loc = app.storage.path(g.filesystem_id)
return None
@app.errorhandler(404)
def page_not_found(error: werkzeug.exceptions.HTTPException) -> Tuple[str, int]:
return render_template('notfound.html'), 404
@app.errorhandler(500)
def internal_error(error: werkzeug.exceptions.HTTPException) -> Tuple[str, int]:
return render_template('error.html'), 500
return app
Fix linting errors
from datetime import datetime, timedelta
from typing import Optional
import werkzeug
from flask import (Flask, render_template, flash, Markup, request, g, session,
url_for, redirect)
from flask_babel import gettext
from flask_assets import Environment
from flask_wtf.csrf import CSRFProtect, CSRFError
from jinja2 import evalcontextfilter
from os import path
from sqlalchemy.orm.exc import NoResultFound
from typing import Tuple
import i18n
import template_filters
import version
from crypto_util import CryptoUtil
from db import db
from models import InstanceConfig, Source
from request_that_secures_file_uploads import RequestThatSecuresFileUploads
from sdconfig import SDConfig
from source_app import main, info, api
from source_app.decorators import ignore_static
from source_app.utils import logged_in, was_in_generate_flow
from store import Storage
def create_app(config: SDConfig) -> Flask:
app = Flask(__name__,
template_folder=config.SOURCE_TEMPLATES_DIR,
static_folder=path.join(config.SECUREDROP_ROOT, 'static'))
app.request_class = RequestThatSecuresFileUploads
app.config.from_object(config.SOURCE_APP_FLASK_CONFIG_CLS)
# The default CSRF token expiration is 1 hour. Since large uploads can
# take longer than an hour over Tor, we increase the valid window to 24h.
app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24
CSRFProtect(app)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_URI
db.init_app(app)
# TODO: Attaching a Storage dynamically like this disables all type checking (and
# breaks code analysis tools) for code that uses current_app.storage; it should be refactored
app.storage = Storage(config.STORE_DIR,
config.TEMP_DIR,
config.JOURNALIST_KEY)
# TODO: Attaching a CryptoUtil dynamically like this disables all type checking (and
# breaks code analysis tools) for code that uses current_app.crypto_util; it should be refactored
app.crypto_util = CryptoUtil(
scrypt_params=config.SCRYPT_PARAMS,
scrypt_id_pepper=config.SCRYPT_ID_PEPPER,
scrypt_gpg_pepper=config.SCRYPT_GPG_PEPPER,
securedrop_root=config.SECUREDROP_ROOT,
word_list=config.WORD_LIST,
nouns_file=config.NOUNS,
adjectives_file=config.ADJECTIVES,
gpg_key_dir=config.GPG_KEY_DIR,
)
@app.errorhandler(CSRFError)
def handle_csrf_error(e: CSRFError) -> werkzeug.Response:
msg = render_template('session_timeout.html')
session.clear()
flash(Markup(msg), "important")
return redirect(url_for('main.index'))
assets = Environment(app)
app.config['assets'] = assets
i18n.setup_app(config, app)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.jinja_env.globals['version'] = version.__version__
# Exported to source templates for being included in instructions
app.jinja_env.globals['submission_key_fpr'] = config.JOURNALIST_KEY
app.jinja_env.filters['rel_datetime_format'] = \
template_filters.rel_datetime_format
app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)
app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat
for module in [main, info, api]:
app.register_blueprint(module.make_blueprint(config)) # type: ignore
@app.before_request
@ignore_static
def setup_i18n() -> None:
"""Store i18n-related values in Flask's special g object"""
g.locale = i18n.get_locale(config)
g.text_direction = i18n.get_text_direction(g.locale)
g.html_lang = i18n.locale_to_rfc_5646(g.locale)
g.locales = i18n.get_locale2name()
@app.before_request
@ignore_static
def check_tor2web() -> None:
# ignore_static here so we only flash a single message warning
# about Tor2Web, corresponding to the initial page load.
if 'X-tor2web' in request.headers:
flash(Markup(gettext(
'<strong>WARNING: </strong> '
'You appear to be using Tor2Web. '
'This <strong> does not </strong> '
'provide anonymity. '
'<a href="{url}">Why is this dangerous?</a>')
.format(url=url_for('info.tor2web_warning'))),
"banner-warning")
@app.before_request
@ignore_static
def load_instance_config() -> None:
app.instance_config = InstanceConfig.get_current()
@app.before_request
@ignore_static
def setup_g() -> Optional[werkzeug.Response]:
"""Store commonly used values in Flask's special g object"""
if 'expires' in session and datetime.utcnow() >= session['expires']:
msg = render_template('session_timeout.html')
# Show expiration message only if the user was
# either in the codename generation flow or logged in
show_expiration_message = any([
session.get('show_expiration_message'),
logged_in(),
was_in_generate_flow(),
])
# clear the session after we render the message so it's localized
session.clear()
# Persist this property across sessions to distinguish users whose sessions expired
# from users who never logged in or generated a codename
session['show_expiration_message'] = show_expiration_message
# Redirect to index with flashed message
if session['show_expiration_message']:
flash(Markup(msg), "important")
return redirect(url_for('main.index'))
session['expires'] = datetime.utcnow() + \
timedelta(minutes=getattr(config,
'SESSION_EXPIRATION_MINUTES',
120))
# ignore_static here because `crypto_util.hash_codename` is scrypt
# (very time consuming), and we don't need to waste time running if
# we're just serving a static resource that won't need to access
# these common values.
if logged_in():
g.codename = session['codename']
g.filesystem_id = app.crypto_util.hash_codename(g.codename)
try:
g.source = Source.query \
.filter(Source.filesystem_id == g.filesystem_id) \
.filter_by(deleted_at=None) \
.one()
except NoResultFound as e:
app.logger.error(
"Found no Sources when one was expected: %s" %
(e,))
del session['logged_in']
del session['codename']
return redirect(url_for('main.index'))
g.loc = app.storage.path(g.filesystem_id)
return None
@app.errorhandler(404)
def page_not_found(error: werkzeug.exceptions.HTTPException) -> Tuple[str, int]:
return render_template('notfound.html'), 404
@app.errorhandler(500)
def internal_error(error: werkzeug.exceptions.HTTPException) -> Tuple[str, int]:
return render_template('error.html'), 500
return app
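# Usage sketch (illustrative; whether SDConfig can be constructed like this is
# an assumption): the module is an app factory, so a caller wires it up as
#     from sdconfig import SDConfig
#     app = create_app(SDConfig())
#     app.run(host='127.0.0.1', port=8080)  # hypothetical dev invocation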
|
from __future__ import print_function
import sys
import select
import tty
import termios
import time
import theano
import pprint
import theano.tensor as T
#import cv2
import pickle
import copy
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from theano.tensor.shared_randomstreams import RandomStreams
import sklearn
from sklearn.ensemble import ExtraTreesClassifier
from DataLoader import DataLoader
import DeepLearningStack
from DeepLearningStack import FeedForwardNet
"""
This code implements the Deep Q-Learning model for active object recognition,
described in
Malmir M, Sikka K, Forster D, Movellan J, Cottrell GW.
Deep Q-learning for Active Recognition of GERMS:
Baseline performance on a standardized dataset for active learning.
In Proceedings of the British Machine Vision Conference (BMVC), 2015.
This code requires the following data files:
train-[ARM].pickle
test-[ARM].pickle
val-[ARM].pickle
These files contain the belief encoding of single images of GERMS,
using features obtained from VGG deep network trained on ImageNet.
Data files can be found in the VGG-Beliefs folder here:
https://drive.google.com/folderview?id=0BxZOUQHBUnwmQUdWRGlPMGw4WHM&usp=sharing
The code for VGG model is obtained from:
http://www.robots.ox.ac.uk/~vgg/software/deep_eval/
"""
batch_size = 128
D = 136#number of classes
arm = "left"
#load the data
print("##################################################")
print("loading train data")
data_files = ["train-"+arm+".pickle"]
train_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
train_data.shuffle_data()
C = np.unique(train_data.y).shape[0]
#print(train_data.y == np.argmax(train_data.x,axis=1)).mean()
print("data size:",train_data.x.shape)
print("number of classes:",C)
print("number of tracks:",np.unique(train_data.t).shape[0])
print("##################################################")
print("loading validation data")
data_files = ["val-"+arm+".pickle"]
val_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
val_data.shuffle_data()
C = np.unique(val_data.y).shape[0]
val_data.adapt_labels(train_data.obj2label)#data are already unified in their labels
print("data size:",val_data.x.shape)
print("number of classes:",C)
print("number of tracks:",np.unique(val_data.t).shape[0])
print("##################################################")
print("loading test data")
data_files = ["test-"+arm+".pickle"]
test_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
test_data.adapt_labels(train_data.obj2label)#data are already unified in their labels
test_data.shuffle_data()
print("data size:",test_data.x.shape)
print("number of classes:",np.unique(test_data.y).shape[0])
print("number of tracks:",np.unique(test_data.t).shape[0])
experiment_data = dict()
#train multiple models and report the mean accuracy across runs
for exp_num in range(2):
test_data.shuffle_data()
print( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print( "EXPERIMENT ", exp_num)
experiment_data[exp_num] = dict()
lr = 0.01#initial learning rate
lr_dec_step = 5000#learning rate decrease step
num_actions = 10#number of actions
gamma = 0.9#RL discount factor
alpha = 0.01#stochastic approximation coefficient
R = 10.0#reward
n_moves = 5#length of object inspection sequence
n_test_moves= 5#length of inspection sequence for test objects
epsilon = 1.#for e-greedy annealing
iter_cnt = 1
epsilon_dec_step = 100
n_epochs = 50
#create deep net
print("##################################################")
print("Creating deep net...")
input = T.matrix("data",dtype=theano.config.floatX)#the input is concatenation of action history and beliefs
config = "DQLArch.xml"
rng = RandomStreams(seed=int(time.time()))
train_net = FeedForwardNet.FeedForwardNet(rng,{"input":input},config)
test_net = FeedForwardNet.FeedForwardNet(rng,{"input":input},config,clone_from=train_net)
pprint.pprint(train_net.output_dims)
print("##################################################")
print("creating cost layer...")
input_shared = theano.shared(np.zeros([D,batch_size],dtype=theano.config.floatX),borrow=True)
rot_target_shared = theano.shared(np.zeros([batch_size,],dtype=theano.config.floatX),borrow=True)
rot_index_shared = theano.shared(np.zeros([batch_size,],dtype=np.int32),borrow=True)
learning_rate = T.scalar("learning_rate",dtype=theano.config.floatX)
#target value consists of the target for rotation values and the target for sign-of-rotation values
layer_action_value = "fc3"
layer_action = "act1"
cost = T.sqrt( T.mean( (train_net.name2layer[layer_action_value].output[rot_index_shared,T.arange(batch_size)] - rot_target_shared)**2 ) )
grads = [theano.grad(cost,param) for param in train_net.params]
updates = [ (param,param-learning_rate * grad) for param,grad in zip(train_net.params,grads)]
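# The cost above is the root-mean-squared error between the network's
# Q-value for the taken action (selected via rot_index_shared) and the
# externally computed target stored in rot_target_shared; the updates list
# implements plain SGD with the scalar learning_rate supplied at call time.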
fnx_action_selection = theano.function(inputs=[],outputs=[train_net.name2layer[layer_action].output,train_net.name2layer[layer_action_value].output,cost],
givens={
input:input_shared,
},
)
fnx_train = theano.function(inputs=[learning_rate],outputs=[train_net.name2layer[layer_action_value].output,cost],
givens={
input:input_shared,
},
updates=updates,
)
fnx_test = theano.function(inputs=[],outputs=[test_net.name2layer[layer_action].output,test_net.name2layer[layer_action_value].output],
givens={
input:input_shared,
},
)
print("##################################################")
print("sequential strategy")
track_indices = dict()
for t in np.unique(test_data.t):
idx = np.where(test_data.t == t)[0]
poses = test_data.p[idx]
sorted_idx = np.argsort(poses)
track_indices[t] = idx[sorted_idx]
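# Beliefs are fused across consecutive observations by elementwise
# multiplication followed by renormalisation (a naive-Bayes style update),
# i.e. belief <- belief * p(class | observation); belief /= belief.sum().
# The same update is reused by the random and DQL strategies below.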
accuracy = np.zeros(n_test_moves)
for t in track_indices.keys():
# print"for track:",t
# print"number of frames:",len(track_indices[t])
belief = np.ones(C)
# print
for i in range(n_test_moves):
next_idx = track_indices[t][i]
belief = belief * test_data.x[next_idx,:]
belief = belief / belief.sum()
lbl = np.argmax(belief)
accuracy[i] += lbl == test_data.y[next_idx]
print("test-sequential",accuracy / np.unique(test_data.t).shape[0])
seq_acc = accuracy / np.unique(test_data.t).shape[0]
experiment_data[exp_num]["test_seq_acc"] = seq_acc
print("##################################################")
print("##################################################")
print("random strategy")
test_data.reset_minibatch_counter()
corrects = np.zeros([n_test_moves,batch_size])
for i in range(int(test_data.x.shape[0] / batch_size) + 1):
x,y,p,t,rng = test_data.get_next_minibatch()
beliefs = x.copy()
for mv in range(n_test_moves):
pred_rslt = np.argmax(beliefs,axis=1)
corrects[mv,:] += (pred_rslt==y)
rot = np.random.randint(0,num_actions,[batch_size])
# rot = num_actions/2 * np.ones([batch_size])
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
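# The expression above maps each discrete action to a signed power-of-two
# pose offset: actions in the lower half give -2**(rot + num_actions/2) and
# actions in the upper half give +2**rot, so with num_actions = 10 the
# offsets range over roughly +/-32 up to +/-512 pose units.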
# printtgt+p
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
print("test-random:",corrects.sum(axis=1) / float(test_data.x.shape[0]))
rnd_acc = corrects.sum(axis=1) / float(test_data.x.shape[0])
experiment_data[exp_num]["test_rnd_acc"] = rnd_acc
print("##################################################")
print("##################################################")
print("DQL strategy")
print("training network...")
for epoch in range(n_epochs):
print("-----------------------------------------------")
print("Epoch:",epoch)
costs = []
val_costs = []
test_costs = []
train_data.reset_minibatch_counter()
corrects = np.zeros([n_moves,batch_size])
move_hist = np.zeros([num_actions,],dtype=np.int32)
poses_hist = []
test_data.reset_minibatch_counter()
test_move_hist = np.zeros([num_actions,],dtype=np.int32)
for i in range(int(train_data.x.shape[0] / batch_size )+ 1):
if i == 0:
print("iteration:",iter_cnt)
alpha = max(0.00, 1. - iter_cnt / 20000.)#1. / iter_cnt
x,y,p,t,rng = train_data.get_next_minibatch()
poses_hist.append(p)
beliefs = x.copy()
for mv in range(n_moves):
iter_cnt += 1
epsilon = max(0.1, 1.0 - iter_cnt / 20000.)
if iter_cnt%lr_dec_step==lr_dec_step-1:
lr = max(1.e-10,lr * 0.1)
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot,prot,_ = fnx_action_selection()
rot = rot.reshape(-1)
#epsilon-greedy exploration
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
idx_random = np.where(rand_mask == 1)[0]
idx_net_act = np.where(rand_mask != 1)[0]
temp111 = rot.copy()
temp111[idx_random] = rand_acts[idx_random]
temp111[idx_net_act]= rot[idx_net_act]
rot = temp111
rot_idx = rot.copy().astype(np.int32)
assert(rot_idx.shape[0]==batch_size)
hst = np.histogram(rot_idx,bins=range(0,num_actions))[0]
for kk in range(rot_idx.shape[0]):
move_hist[rot_idx[kk]] += 1
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
x1,y1,p1,t1,_ = train_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
assert((t1==t).sum()==batch_size)
assert((y1==y).sum()==batch_size)
x1 = x1 * beliefs
x1 = x1 / x1.sum(axis=1).reshape([-1,1])
input_shared.set_value(x1.T.astype(theano.config.floatX))
rot1,prot1,_ = fnx_action_selection()#calculate the Q(s,a) for all as in the next state
pred_rslt = np.argmax(x1,axis=1)#x1 should be 'beliefs' if we use Q = r(t) + gamma max_a Q(s,a').
prot_max = gamma * np.max(prot1,axis=0).reshape(-1).astype(theano.config.floatX)
#reward each move based on the amount of belief increase
srtd_beliefs = np.sort(x1,axis=1)#x1 should be 'beliefs' if we use Q = r(t) + gamma max_a Q(s,a').
#if mv == n_moves-1:
prot_max += R * (pred_rslt==y1)* (srtd_beliefs[:,-1] - srtd_beliefs[:,-2]).reshape(-1)
prot_max -= R * (pred_rslt!=y1)
prot_max = alpha * prot_max + (1-alpha) * prot[rot_idx,range(batch_size)].reshape(-1)
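# Reward shaping and target blending: a correct prediction is rewarded in
# proportion to the margin between the two largest beliefs, an incorrect
# one is penalised by R, and the resulting one-step target is mixed with
# the previous Q-estimate using the stochastic-approximation weight alpha.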
corrects[mv,:] += (pred_rslt==y)
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot_target_shared.set_value(prot_max.astype(theano.config.floatX))
rot_index_shared.set_value(rot_idx.reshape(-1))
prot2,c = fnx_train(lr)
costs.append(c)
x,y,p,t,_ = train_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
poses_hist.append(p)
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
#validation accuracy
x,y,p,t,rng = val_data.get_next_minibatch()
beliefs = x.copy()
prev_actval = np.zeros([batch_size,]).reshape(-1)
for mv in range(n_moves+1):
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot,prot = fnx_test()
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
rot = (1-rand_mask) * rot + rand_mask * rand_acts
if mv>0:
c = np.sqrt(np.mean((prot.max(axis=0).reshape(-1) - prev_actval)**2))
val_costs.append(c)
prev_actval = prot[rot,range(batch_size)].reshape(-1)
rot_idx = rot.copy().astype(np.int32)
for kk in range(rot_idx.shape[1]):
test_move_hist[rot_idx[0,kk]] += 1
rot = rot.reshape(-1)
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
pred_rslt = np.argmax(beliefs,axis=1)
# test_corrects[mv,:]+= (pred_rslt==y)
x,y,p,t,_ = val_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
print("epoch cost:",np.sum(costs))
print("validation cost:",np.sum(val_costs))
print("train accuracy:",corrects.sum(axis=1) / float(train_data.x.shape[0]))
print("learning rate:",lr," RL epsilon:",epsilon)
test_accuracies = []
test_poses_hist = []
corrects = np.zeros([n_test_moves,batch_size])
test_data.reset_minibatch_counter()
test_move_hist = np.zeros([num_actions,],dtype=np.int32)
for i in range(int(test_data.x.shape[0] / batch_size ) + 1):
x,y,p,t,rng = test_data.get_next_minibatch()
test_poses_hist.append(p)
beliefs = x.copy()
move_hist = np.zeros([n_test_moves,batch_size])
for mv in range(n_test_moves):
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot = fnx_test()[0]
rot = rot.reshape(-1)
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
idx_random = np.where(rand_mask == 1)[0]
idx_net_act = np.where(rand_mask != 1)[0]
temp111 = rot.copy()
temp111[idx_random] = rand_acts[idx_random]
temp111[idx_net_act]= rot[idx_net_act]
rot = temp111
rot_idx = rot.copy().astype(np.int32)
for kk in range(rot_idx.shape[0]):
test_move_hist[rot_idx[kk]] += 1
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
pred_rslt = np.argmax(beliefs,axis=1)
corrects[mv,:] += (pred_rslt==y)
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
test_poses_hist.append(p)
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
hst = np.histogram(move_hist.reshape(-1),bins=range(0,num_actions))[0]
print("test:",corrects.sum(axis=1) / float(test_data.x.shape[0]))
test_accuracies.append(corrects.sum(axis=1) / float(test_data.x.shape[0]))
print("##################################################")
experiment_data[exp_num]["test_dpq_acc"] = corrects.sum(axis=1) / float(test_data.x.shape[0])
experiment_data[exp_num]["val_RMSE"] = val_costs
experiment_data[exp_num]["train_RMSE"] = costs
experiment_data[exp_num]["train_net"] = copy.deepcopy(train_net)
experiment_data[exp_num]["test_net"] = test_net
experiment_data[exp_num]["train_poses_hist"] = poses_hist
experiment_data[exp_num]["test_poses_hist"] = test_poses_hist
print( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
colors = ["r","g","b","c","y","m","k"]
linestyle = ["-","--","-.",":"]
marker = ["o","v","^","<",">","*"]
i = 0
seq_acc = []
rnd_acc = []
dpq_acc = []
for i in experiment_data.keys():
seq_acc.append(experiment_data[i]["test_seq_acc"].reshape([1,-1]))
rnd_acc.append(experiment_data[i]["test_rnd_acc"].reshape([1,-1]))
dpq_acc.append(experiment_data[i]["test_dpq_acc"].reshape([1,-1]))
seq_acc = np.concatenate(seq_acc,axis=0)
rnd_acc = np.concatenate(rnd_acc,axis=0)
dpq_acc = np.concatenate(dpq_acc,axis=0)
plt.figure(1)
plt.hold(True)
i = 0
plt.errorbar(x=range(n_test_moves),y=dpq_acc.mean(axis=0),xerr=0,yerr=dpq_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="DeepQ"+str(i),marker=marker[i%len(marker)],linewidth=4.)
i += 1
plt.errorbar(x=range(n_test_moves),y=seq_acc.mean(axis=0),xerr=0,yerr=seq_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="sequential",marker=marker[i%len(marker)],linewidth=4.)
i += 1
plt.errorbar(x=range(n_test_moves),y=rnd_acc.mean(axis=0),xerr=0,yerr=rnd_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="random",marker=marker[i%len(marker)],linewidth=4.)
handles, labels = plt.gca().get_legend_handles_labels()
plt.gca().legend(handles[::-1], labels[::-1],loc=4)
plt.xlabel("Number of Actions")
plt.ylabel("Accuracy")
for axis in ['top','bottom','left','right']:
plt.gca().spines[axis].set_linewidth(2.0)
i += 1
plt.xticks(range(n_test_moves+1))
plt.figure(2)
i=0
plt.plot(np.log(experiment_data[i]["train_RMSE"]),c='b')
plt.hold(True)
plt.plot(np.log(experiment_data[i]["val_RMSE"]),c='r')
plt.show()
print("saving experiment results...")
f = open("expresults-"+arm+"-"+str(n_test_moves)+".pickle","wb")
pickle.dump(experiment_data,f,protocol=pickle.HIGHEST_PROTOCOL)
f.close()
reward update at each step
from __future__ import print_function
import sys
import select
import tty
import termios
import time
import theano
import pprint
import theano.tensor as T
#import cv2
import pickle
import copy
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from theano.tensor.shared_randomstreams import RandomStreams
import sklearn
from sklearn.ensemble import ExtraTreesClassifier
from DataLoader import DataLoader
import DeepLearningStack
from DeepLearningStack import FeedForwardNet
"""
This code implements the Deep Q-Learning model for active object recognition,
described in
Malmir M, Sikka K, Forster D, Movellan J, Cottrell GW.
Deep Q-learning for Active Recognition of GERMS:
Baseline performance on a standardized dataset for active learning.
In Proceedings of the British Machine Vision Conference (BMVC).
This code requires the following data files:
train-[ARM].pickle
test-[ARM].pickle
val-[ARM].pickle
These files contain the belief encoding of single images of GERMS,
using features obtained from VGG deep network trained on ImageNet.
Data files can be found here in VGG-Beliefs folder:
https://drive.google.com/folderview?id=0BxZOUQHBUnwmQUdWRGlPMGw4WHM&usp=sharing
The code for VGG model is obtained from:
http://www.robots.ox.ac.uk/~vgg/software/deep_eval/
"""
batch_size = 128
D = 136#number of classes
arm = "left"
#load the data
print("##################################################")
print("loading train data")
data_files = ["train-"+arm+".pickle"]
train_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
train_data.shuffle_data()
C = np.unique(train_data.y).shape[0]
#print(train_data.y == np.argmax(train_data.x,axis=1)).mean()
print("data size:",train_data.x.shape)
print("number of classes:",C)
print("number of tracks:",np.unique(train_data.t).shape[0])
print("##################################################")
print("loading validation data")
data_files = ["val-"+arm+".pickle"]
val_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
val_data.shuffle_data()
C = np.unique(val_data.y).shape[0]
val_data.adapt_labels(train_data.obj2label)#data are already unified in their labels
print("data size:",val_data.x.shape)
print("number of classes:",C)
print("number of tracks:",np.unique(val_data.t).shape[0])
print("##################################################")
print("loading test data")
data_files = ["test-"+arm+".pickle"]
test_data = DataLoader(data_files,"pkl",minibatch_size=batch_size)
test_data.adapt_labels(train_data.obj2label)#data are already unified in their labels
test_data.shuffle_data()
print("data size:",test_data.x.shape)
print("number of classes:",np.unique(test_data.y).shape[0])
print("number of tracks:",np.unique(test_data.t).shape[0])
experiment_data = dict()
#train multiple models (2 in this run), report the mean accuracy
for exp_num in range(2):
test_data.shuffle_data()
print( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print( "EXPERIMENT ", exp_num)
experiment_data[exp_num] = dict()
lr = 0.01#initial learning rate
lr_dec_step = 5000#learning rate decrease step
num_actions = 10#number of actions
gamma = 0.9#RL discount factor
alpha = 0.01#stochastic approximation coefficient
R = 10.0#reward
n_moves = 5#length of object inspection sequence
n_test_moves= 5#length of inspection sequence for test objects
epsilon = 1.#for e-greedy annealing
iter_cnt = 1
epsilon_dec_step = 100
n_epochs = 50
#create deep net
print("##################################################")
print("Creating deep net...")
input = T.matrix("data",dtype=theano.config.floatX)#the input is concatenation of action history and beliefs
config = "DQLArch.xml"
rng = RandomStreams(seed=int(time.time()))
train_net = FeedForwardNet.FeedForwardNet(rng,{"input":input},config)
test_net = FeedForwardNet.FeedForwardNet(rng,{"input":input},config,clone_from=train_net)
pprint.pprint(train_net.output_dims)
print("##################################################")
print("creating cost layer...")
input_shared = theano.shared(np.zeros([D,batch_size],dtype=theano.config.floatX),borrow=True)
rot_target_shared = theano.shared(np.zeros([batch_size,],dtype=theano.config.floatX),borrow=True)
rot_index_shared = theano.shared(np.zeros([batch_size,],dtype=np.int32),borrow=True)
learning_rate = T.scalar("learning_rate",dtype=theano.config.floatX)
#target value consists of the target for rotation values and the target for sign-of-rotation values
layer_action_value = "fc3"
layer_action = "act1"
cost = T.sqrt( T.mean( (train_net.name2layer[layer_action_value].output[rot_index_shared,T.arange(batch_size)] - rot_target_shared)**2 ) )
grads = [theano.grad(cost,param) for param in train_net.params]
updates = [ (param,param-learning_rate * grad) for param,grad in zip(train_net.params,grads)]
fnx_action_selection = theano.function(inputs=[],outputs=[train_net.name2layer[layer_action].output,train_net.name2layer[layer_action_value].output,cost],
givens={
input:input_shared,
},
)
fnx_train = theano.function(inputs=[learning_rate],outputs=[train_net.name2layer[layer_action_value].output,cost],
givens={
input:input_shared,
},
updates=updates,
)
fnx_test = theano.function(inputs=[],outputs=[test_net.name2layer[layer_action].output,test_net.name2layer[layer_action_value].output],
givens={
input:input_shared,
},
)
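# Three compiled functions are used below: fnx_action_selection returns the
# action-layer output, the per-action Q-values and the current cost for the
# training network; fnx_train additionally applies one SGD step at the given
# learning rate; fnx_test runs the forward pass on the cloned test network.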
print("##################################################")
print("sequential strategy")
track_indices = dict()
for t in np.unique(test_data.t):
idx = np.where(test_data.t == t)[0]
poses = test_data.p[idx]
sorted_idx = np.argsort(poses)
track_indices[t] = idx[sorted_idx]
accuracy = np.zeros(n_test_moves)
for t in track_indices.keys():
# print"for track:",t
# print"number of frames:",len(track_indices[t])
belief = np.ones(C)
# print
for i in range(n_test_moves):
next_idx = track_indices[t][i]
belief = belief * test_data.x[next_idx,:]
belief = belief / belief.sum()
lbl = np.argmax(belief)
accuracy[i] += lbl == test_data.y[next_idx]
print("test-sequential",accuracy / np.unique(test_data.t).shape[0])
seq_acc = accuracy / np.unique(test_data.t).shape[0]
experiment_data[exp_num]["test_seq_acc"] = seq_acc
print("##################################################")
print("##################################################")
print("random strategy")
test_data.reset_minibatch_counter()
corrects = np.zeros([n_test_moves,batch_size])
for i in range(int(test_data.x.shape[0] / batch_size) + 1):
x,y,p,t,rng = test_data.get_next_minibatch()
beliefs = x.copy()
for mv in range(n_test_moves):
pred_rslt = np.argmax(beliefs,axis=1)
corrects[mv,:] += (pred_rslt==y)
rot = np.random.randint(0,num_actions,[batch_size])
# rot = num_actions/2 * np.ones([batch_size])
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
# printtgt+p
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
print("test-random:",corrects.sum(axis=1) / float(test_data.x.shape[0]))
rnd_acc = corrects.sum(axis=1) / float(test_data.x.shape[0])
experiment_data[exp_num]["test_rnd_acc"] = rnd_acc
print("##################################################")
print("##################################################")
print("DQL strategy")
print("training network...")
for epoch in range(n_epochs):
print("-----------------------------------------------")
print("Epoch:",epoch)
costs = []
val_costs = []
test_costs = []
train_data.reset_minibatch_counter()
corrects = np.zeros([n_moves,batch_size])
move_hist = np.zeros([num_actions,],dtype=np.int32)
poses_hist = []
test_data.reset_minibatch_counter()
test_move_hist = np.zeros([num_actions,],dtype=np.int32)
for i in range(int(train_data.x.shape[0] / batch_size )+ 1):
if i == 0:
print("iteration:",iter_cnt)
alpha = max(0.00, 1. - iter_cnt / 20000.)#1. / iter_cnt
x,y,p,t,rng = train_data.get_next_minibatch()
poses_hist.append(p)
beliefs = x.copy()
for mv in range(n_moves):
iter_cnt += 1
epsilon = max(0.1, 1.0 - iter_cnt / 20000.)
if iter_cnt%lr_dec_step==lr_dec_step-1:
lr = max(1.e-4,lr * 0.1)
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot,prot,_ = fnx_action_selection()
rot = rot.reshape(-1)
#epsilon-greedy exploration
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
idx_random = np.where(rand_mask == 1)[0]
idx_net_act = np.where(rand_mask != 1)[0]
temp111 = rot.copy()
temp111[idx_random] = rand_acts[idx_random]
temp111[idx_net_act]= rot[idx_net_act]
rot = temp111
rot_idx = rot.copy().astype(np.int32)
assert(rot_idx.shape[0]==batch_size)
hst = np.histogram(rot_idx,bins=range(0,num_actions))[0]
for kk in range(rot_idx.shape[0]):
move_hist[rot_idx[kk]] += 1
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
x1,y1,p1,t1,_ = train_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
assert((t1==t).sum()==batch_size)
assert((y1==y).sum()==batch_size)
x1 = x1 * beliefs
x1 = x1 / x1.sum(axis=1).reshape([-1,1])
input_shared.set_value(x1.T.astype(theano.config.floatX))
rot1,prot1,_ = fnx_action_selection()#calculate the Q(s,a) for all as in the next state
pred_rslt = np.argmax(x1,axis=1)#x1 should be 'beliefs' if we use Q = r(t) + gamma max_a Q(s,a').
prot_max = gamma * np.max(prot1,axis=0).reshape(-1).astype(theano.config.floatX)
#reward each move based on the amount of belief increase
srtd_beliefs = np.sort(x1,axis=1)#x1 should be 'beliefs' if we use Q = r(t) + gamma max_a Q(s,a').
#if mv == n_moves-1:
prot_max += R * (pred_rslt==y1)* (srtd_beliefs[:,-1] - srtd_beliefs[:,-2]).reshape(-1)
prot_max -= R * (pred_rslt!=y1)
prot_max = alpha * prot_max + (1-alpha) * prot[rot_idx,range(batch_size)].reshape(-1)
corrects[mv,:] += (pred_rslt==y)
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot_target_shared.set_value(prot_max.astype(theano.config.floatX))
rot_index_shared.set_value(rot_idx.reshape(-1))
prot2,c = fnx_train(lr)
costs.append(c)
x,y,p,t,_ = train_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
poses_hist.append(p)
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
#validation accuracy
x,y,p,t,rng = val_data.get_next_minibatch()
beliefs = x.copy()
prev_actval = np.zeros([batch_size,]).reshape(-1)
for mv in range(n_moves+1):
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot,prot = fnx_test()
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
rot = (1-rand_mask) * rot + rand_mask * rand_acts
if mv>0:
c = np.sqrt(np.mean((prot.max(axis=0).reshape(-1) - prev_actval)**2))
val_costs.append(c)
prev_actval = prot[rot,range(batch_size)].reshape(-1)
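# The validation "cost" tracked here is not a supervised loss: it is the
# RMSE between the current maximum Q-value and the Q-value recorded for the
# action chosen on the previous move, i.e. a rough temporal-consistency
# check of the value estimates along the inspection sequence.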
rot_idx = rot.copy().astype(np.int32)
for kk in range(rot_idx.shape[1]):
test_move_hist[rot_idx[0,kk]] += 1
rot = rot.reshape(-1)
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
pred_rslt = np.argmax(beliefs,axis=1)
# test_corrects[mv,:]+= (pred_rslt==y)
x,y,p,t,_ = val_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
print("epoch cost:",np.sum(costs))
print("validation cost:",np.sum(val_costs))
print("train accuracy:",corrects.sum(axis=1) / float(train_data.x.shape[0]))
print("learning rate:",lr," RL epsilon:",epsilon)
test_accuracies = []
test_poses_hist = []
corrects = np.zeros([n_test_moves,batch_size])
test_data.reset_minibatch_counter()
test_move_hist = np.zeros([num_actions,],dtype=np.int32)
for i in range(int(test_data.x.shape[0] / batch_size ) + 1):
x,y,p,t,rng = test_data.get_next_minibatch()
test_poses_hist.append(p)
beliefs = x.copy()
move_hist = np.zeros([n_test_moves,batch_size])
for mv in range(n_test_moves):
input_shared.set_value(beliefs.T.astype(theano.config.floatX))
rot = fnx_test()[0]
rot = rot.reshape(-1)
rand_acts = np.random.randint(0,num_actions,[batch_size])
rand_mask = np.random.binomial(1,epsilon,[batch_size])
idx_random = np.where(rand_mask == 1)[0]
idx_net_act = np.where(rand_mask != 1)[0]
temp111 = rot.copy()
temp111[idx_random] = rand_acts[idx_random]
temp111[idx_net_act]= rot[idx_net_act]
rot = temp111
rot_idx = rot.copy().astype(np.int32)
for kk in range(rot_idx.shape[0]):
test_move_hist[rot_idx[kk]] += 1
tgt = -1 * ( rot < num_actions/2) * 2**(rot+num_actions/2) + ( rot >= num_actions/2) * 2**(rot)
pred_rslt = np.argmax(beliefs,axis=1)
corrects[mv,:] += (pred_rslt==y)
x,y,p,t,_ = test_data.get_data_for_pose(t,p + tgt)#get the data for the proposed set of rotations
test_poses_hist.append(p)
beliefs = beliefs * x
beliefs = beliefs / beliefs.sum(axis=1).reshape([-1,1])
hst = np.histogram(move_hist.reshape(-1),bins=range(0,num_actions))[0]
print("test:",corrects.sum(axis=1) / float(test_data.x.shape[0]))
test_accuracies.append(corrects.sum(axis=1) / float(test_data.x.shape[0]))
print("##################################################")
experiment_data[exp_num]["test_dpq_acc"] = corrects.sum(axis=1) / float(test_data.x.shape[0])
experiment_data[exp_num]["val_RMSE"] = val_costs
experiment_data[exp_num]["train_RMSE"] = costs
experiment_data[exp_num]["train_net"] = copy.deepcopy(train_net)
experiment_data[exp_num]["test_net"] = test_net
experiment_data[exp_num]["train_poses_hist"] = poses_hist
experiment_data[exp_num]["test_poses_hist"] = test_poses_hist
print( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
colors = ["r","g","b","c","y","m","k"]
linestyle = ["-","--","-.",":"]
marker = ["o","v","^","<",">","*"]
i = 0
seq_acc = []
rnd_acc = []
dpq_acc = []
for i in experiment_data.keys():
seq_acc.append(experiment_data[i]["test_seq_acc"].reshape([1,-1]))
rnd_acc.append(experiment_data[i]["test_rnd_acc"].reshape([1,-1]))
dpq_acc.append(experiment_data[i]["test_dpq_acc"].reshape([1,-1]))
seq_acc = np.concatenate(seq_acc,axis=0)
rnd_acc = np.concatenate(rnd_acc,axis=0)
dpq_acc = np.concatenate(dpq_acc,axis=0)
plt.figure(1)
plt.hold(True)
i = 0
plt.errorbar(x=range(n_test_moves),y=dpq_acc.mean(axis=0),xerr=0,yerr=dpq_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="DeepQ"+str(i),marker=marker[i%len(marker)],linewidth=4.)
i += 1
plt.errorbar(x=range(n_test_moves),y=seq_acc.mean(axis=0),xerr=0,yerr=seq_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="sequential",marker=marker[i%len(marker)],linewidth=4.)
i += 1
plt.errorbar(x=range(n_test_moves),y=rnd_acc.mean(axis=0),xerr=0,yerr=rnd_acc.std(axis=0),color=colors[i%len(colors)],linestyle=linestyle[i%len(linestyle)],label="random",marker=marker[i%len(marker)],linewidth=4.)
handles, labels = plt.gca().get_legend_handles_labels()
plt.gca().legend(handles[::-1], labels[::-1],loc=4)
plt.xlabel("Number of Actions")
plt.ylabel("Accuracy")
for axis in ['top','bottom','left','right']:
plt.gca().spines[axis].set_linewidth(2.0)
i += 1
plt.xticks(range(n_test_moves+1))
plt.figure(2)
i=0
plt.plot(np.log(experiment_data[i]["train_RMSE"]),c='b')
plt.hold(True)
plt.plot(np.log(experiment_data[i]["val_RMSE"]),c='r')
plt.show()
print("saving experiment results...")
f = open("expresults-"+arm+"-"+str(n_test_moves)+".pickle","wb")
pickle.dump(experiment_data,f,protocol=pickle.HIGHEST_PROTOCOL)
f.close()
|
import os
import yaml
import gettext
from copy import deepcopy
from xml.etree.ElementTree import ParseError
from mapproxy.client import http
from mapproxy.script.scales import scale_to_res, res_to_scale
from mapproxy.srs import SRS, generate_envelope_points, TransformationError
from mapproxy.grid import tile_grid, GridError
from . import bottle
from . import config
from . import storage
from .bottle import request, response, static_file, template, SimpleTemplate
from .utils import requires_json
from .capabilities import parse_capabilities_url
configuration = config.ConfigParser.from_file('./config.ini')
app = bottle.Bottle()
bottle.TEMPLATE_PATH = [os.path.join(os.path.dirname(__file__), 'templates')]
SimpleTemplate.defaults["get_url"] = app.get_url
SimpleTemplate.defaults["demo"] = configuration.get_bool('app', 'demo')
SimpleTemplate.defaults["language"] = configuration.get('app', 'language')
try:
translation = gettext.translation('messages', os.path.join(os.path.dirname(os.path.realpath(__file__)), 'locale'), ['de'])
translation.install()
SimpleTemplate.defaults["_"] = translation.gettext
except IOError as e:
print e
class RESTBase(object):
def __init__(self, section):
self.section = section
def list(self, project, storage):
return storage.get_all(self.section, project, with_id=True, with_manual=True, with_locked=True)
@requires_json
def add(self, project, storage):
data = request.json
manual = data.get('_manual', False)
locked = data.get('_locked', False)
id = storage.add(self.section, project, data)
response.status = 201
data['_id'] = id
data['_manual'] = manual
data['_locked'] = locked
return data
def get(self, project, id, storage):
data = storage.get(id, self.section,project)
if not data:
response.status = 404
else:
return data
@requires_json
def update(self, project, id, storage):
data = request.json
manual = data.get('_manual', False)
locked = data.get('_locked', False)
# used deepcopy cause storage.update modifies data
storage.update(id, self.section, project, deepcopy(data))
response.status = 200
data['_manual'] = manual
data['_locked'] = locked
return data
def delete(self, project, id, storage):
if storage.delete(id, self.section, project):
response.status = 204
else:
response.status = 404
def setup_routing(self, app):
app.route('/conf/<project>/%s' % self.section, 'GET', self.list)
app.route('/conf/<project>/%s' % self.section, 'POST', self.add)
app.route('/conf/<project>/%s/<id:int>' % self.section, 'GET', self.get)
app.route('/conf/<project>/%s/<id:int>' % self.section, 'PUT', self.update)
app.route('/conf/<project>/%s/<id:int>' % self.section, 'DELETE', self.delete)
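# Resulting REST routes for a section (e.g. 'sources'); <project> and
# <id:int> are bottle URL placeholders:
#   GET    /conf/<project>/<section>        -> list
#   POST   /conf/<project>/<section>        -> add
#   GET    /conf/<project>/<section>/<id>   -> get
#   PUT    /conf/<project>/<section>/<id>   -> update
#   DELETE /conf/<project>/<section>/<id>   -> delete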
class RESTWMSCapabilities(RESTBase):
def __init__(self):
RESTBase.__init__(self, 'wms_capabilities')
@requires_json
def add(self, project, storage):
url = request.json.get('data', {}).get('url')
cap = {}
if not url:
response.status = 400
return {'error': _('missing URL')}
try:
cap['data'] = parse_capabilities_url(url)
except ParseError:
response.status = 400
return {'error': _('no capabilities document found')}
except (http.HTTPClientError, ):
response.status = 400
# TODO
return {'error': _('invalid URL')}
search = """%%"url": "%s"%%""" % cap['data']['url']
id = storage.exists_in_data(self.section, project, search)
if id:
return self.update(project, id, storage)
id = storage.add(self.section, project, cap)
cap['_id'] = id
response.status = 201
return cap
@requires_json
def update(self, project, id, storage):
url = request.json.get('data', {}).get('url')
if not url:
response.status = 400
return {'error': _('missing URL')}
cap = {}
cap['data'] = parse_capabilities_url(url)
storage.update(id, self.section, project, cap)
response.status = 200
cap['_id'] = id
return cap
class RESTLayers(RESTBase):
def __init__(self):
RESTBase.__init__(self, 'layers')
def list(self, project, storage):
return storage.get_all(self.section, project, with_rank=True, with_id=True, with_manual=True, with_locked=True)
@requires_json
def update_tree(self, project, storage):
data = request.json
storage.updates(self.section, project, data['tree'])
response.status = 200
def setup_routing(self, app):
super(RESTLayers, self).setup_routing(app)
app.route('/conf/<project>/%s' % self.section, 'PUT', self.update_tree)
class RESTGrids(RESTBase):
def __init__(self):
RESTBase.__init__(self, 'grids')
def list(self, project, storage):
default_grids = {
'GLOBAL_GEODETIC': {'_id': 'GLOBAL_GEODETIC', 'default': True, 'data': {
'name': 'GLOBAL_GEODETIC',
'srs': 'EPSG:4326',
'bbox': [-180, -90, 180, 90],
'bbox_srs': 'EPSG:4326',
'origin': 'sw',
'res': [
1.40625,
0.703125,
0.3515625,
0.17578125,
0.087890625,
0.0439453125,
0.02197265625,
0.010986328125,
0.0054931640625,
0.00274658203125,
0.001373291015625,
0.0006866455078125,
0.00034332275390625,
0.000171661376953125,
0.0000858306884765625,
0.00004291534423828125,
0.000021457672119140625,
0.000010728836059570312,
0.000005364418029785156,
0.000002682209014892578,
]
}},
'GLOBAL_MERCATOR': {'_id': 'GLOBAL_MERCATOR', 'default': True, 'data': {
'name': 'GLOBAL_MERCATOR',
'srs': 'EPSG:900913',
'bbox': [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244],
'bbox_srs': 'EPSG:900913',
'origin': 'sw',
'res': [
156543.03392804097,
78271.51696402048,
39135.75848201024,
19567.87924100512,
9783.93962050256,
4891.96981025128,
2445.98490512564,
1222.99245256282,
611.49622628141,
305.748113140705,
152.8740565703525,
76.43702828517625,
38.21851414258813,
19.109257071294063,
9.554628535647032,
4.777314267823516,
2.388657133911758,
1.194328566955879,
0.5971642834779395,
0.29858214173896974,
]
}},
'GLOBAL_WEBMERCATOR': {'_id': 'GLOBAL_WEBMERCATOR', 'default': True, 'data': {
'name': 'GLOBAL_WEBMERCATOR',
'srs': 'EPSG:3857',
'bbox': [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244],
'bbox_srs': 'EPSG:3857',
'origin': 'nw',
'res': [
156543.03392804097,
78271.51696402048,
39135.75848201024,
19567.87924100512,
9783.93962050256,
4891.96981025128,
2445.98490512564,
1222.99245256282,
611.49622628141,
305.748113140705,
152.8740565703525,
76.43702828517625,
38.21851414258813,
19.109257071294063,
9.554628535647032,
4.777314267823516,
2.388657133911758,
1.194328566955879,
0.5971642834779395,
0.29858214173896974,
]
}}
}
default_grids.update(storage.get_all(self.section, project, with_id=True, with_manual=True, with_locked=True))
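# The built-in GLOBAL_* grids are merged with the grids stored for the
# project; because dict.update is used, a stored grid with the same key
# would override the corresponding default entry.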
return default_grids
RESTBase('sources').setup_routing(app)
RESTBase('caches').setup_routing(app)
RESTBase('globals').setup_routing(app)
RESTBase('services').setup_routing(app)
RESTBase('defaults').setup_routing(app)
RESTWMSCapabilities().setup_routing(app)
RESTLayers().setup_routing(app)
RESTGrids().setup_routing(app)
## other
@app.route('/', name='index')
def index():
return template('index')
@app.route('/projects', name='projects')
def projects(storage):
projects = {}
for project in storage.get_projects():
try:
mapproxy_conf = config.mapproxy_conf_from_storage(storage, project)
except config.ConfigError as e:
informal_only = False
errors = [e]
mapproxy_conf = False
if mapproxy_conf:
errors, informal_only = config.validate(mapproxy_conf)
projects[project] = {
'valid': informal_only,
'errors': errors
}
return template('projects', projects=projects)
@app.route('/project/<project>/conf', name='configuration')
def conf_index(project):
return template('config_index', project=project)
@app.route('/project/<project>', name='project_index')
def project_index(project):
return template('project_index', project=project)
@app.route('/project/<project>/conf/sources', name='sources')
def sources(project):
return template('sources', project=project)
@app.route('/project/<project>/conf/grids', name='grids')
def grids(project):
return template('grids', project=project)
@app.route('/project/<project>/conf/caches', name='caches')
def caches(project):
return template('caches', project=project)
@app.route('/project/<project>/conf/layers', name='layers')
def layers(project):
return template('layers', project=project)
@app.route('/project/<project>/conf/globals', name='globals')
def globals(project):
return template('globals', project=project)
@app.route('/project/<project>/conf/services', name='services')
def services(project):
return template('services', project=project)
@app.route('/conf/<project>/write_config', 'POST', name='write_config')
def write_config(project, storage):
mapproxy_conf = config.mapproxy_conf_from_storage(storage, project)
try:
config.write_mapproxy_yaml(mapproxy_conf, os.path.join(configuration.get('app', 'output_path'), project + '.yaml'))
return {'success': _('creating mapproxy config successful')}
except:
response.status = 400
return {'error': _('creating mapproxy config failed')}
@app.route('/static/<filepath:path>', name='static')
def static(filepath):
return static_file(filepath, root=os.path.join(os.path.dirname(__file__), 'static'))
@app.route('/template/<filename>', name='angular_template')
def angular_template(filename):
return template(os.path.join(os.path.dirname(__file__), 'templates/angular', filename))
@app.route('/resources/<filename>/<translated>', name='resource', translated=False)
def resources(filename, translated):
file_location = os.path.join(os.path.dirname(__file__), 'templates/resources')
if translated:
return template(os.path.join(file_location, filename))
else:
return static_file(filename, root=file_location)
@app.route('/yaml', 'POST', name='json_to_yaml')
def create_yaml():
data = request.json
try:
return yaml.safe_dump(data, default_flow_style=False)
except yaml.YAMLError:
response.status = 400
return {'error': _('creating yaml failed')}
@app.route('/json', 'POST', name='yaml_to_json')
def create_json():
data = request.json
try:
return yaml.safe_load(data['yaml'])
except yaml.YAMLError:
response.status = 400
return {'error': _('parsing yaml failed')}
@app.route('/res', 'POST', name='scales_to_res')
@app.route('/scales', 'POST', name='res_to_scales')
def convert_res_scales():
data = request.json.get('data', [])
mode = request.json.get('mode', 'to_scale')
dpi = float(request.json.get('dpi', (2.54/(0.00028 * 100))))
units = request.json.get('units', 'm')
data = [float(d) if d else None for d in data]
units = 1 if units == 'm' else 111319.4907932736
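# Constants used above: the default dpi of 2.54 / (0.00028 * 100) (~90.7)
# follows the OGC standard rendering pixel size of 0.28 mm, and
# 111319.4907932736 is the length of one degree at the equator in metres,
# used when the values are given in degrees rather than metres.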
convert = res_to_scale if mode == 'to_scale' else scale_to_res
result = []
for i, d in enumerate(data):
result.append(round(convert(d, dpi, units),9) if d else None)
return {'result': result}
@app.route('/calculate_tiles', 'POST', name='calculate_tiles')
def calculate_tiles():
data = request.json
origin = data.get('origin', None)
name = data.get('name', None)
srs = data.get('srs', None)
bbox = data.get('bbox', None)
bbox_srs = data.get('bbox_srs', None)
if bbox is not None and not all(bbox):
bbox = None
dpi = float(data.get('dpi', (2.54/(0.00028 * 100))))
units = 1 if data.get('units', 'm') == 'm' else 111319.4907932736
res = data.get('res', None)
if res:
res = [float(r) for r in res]
scales = data.get('scales', None)
if scales:
scales = [float(s) for s in scales]
if res is None and scales is not None:
res = [round(scale_to_res(scale, dpi, units), 9) for scale in scales]
tilegrid = tile_grid(srs=srs, bbox=bbox, bbox_srs=bbox_srs, res=res, origin=origin, name=name)
result = []
res_scale = 'resolution' if scales is None else 'scale'
for level, res in enumerate(tilegrid.resolutions):
tiles_in_x, tiles_in_y = tilegrid.grid_sizes[level]
total_tiles = tiles_in_x * tiles_in_y
result.append({
'level': level,
res_scale: res if scales is None else res_to_scale(res, dpi, units),
'tiles_in_x': tiles_in_x,
'tiles_in_y': tiles_in_y,
'total_tiles': total_tiles
})
return {'result': result}
@app.route('/get_max_extent', 'POST', name='get_max_extent')
def get_max_extent():
extent = request.json.get('extent', [-180, -90, 180, 90])
extent_srs = request.json.get('extent_srs', 'EPSG:4326')
fallback_extent = request.json.get('fallback_extent', None)
fallback_extent_srs = request.json.get('fallback_extent_srs', None)
fallback_with_buffer = request.json.get('fallback_with_buffer', True)
map_srs = request.json.get('map_srs')
bounds = extent
bounds_srs = extent_srs
extent_srs = SRS(extent_srs)
map_srs = SRS(map_srs)
extent = extent_srs.align_bbox(extent)
map_extent = transform_bbox(extent, extent_srs, map_srs)
if not map_extent and fallback_extent:
bounds = fallback_extent
bounds_srs = fallback_extent_srs
fallback_extent_srs = SRS(fallback_extent_srs)
fallback_extent = fallback_extent_srs.align_bbox(fallback_extent)
buffer_in_percent = 10 if fallback_with_buffer else 0
buffered_extent = map(lambda x: x + (x / 100 * buffer_in_percent), fallback_extent)
map_extent = transform_bbox(buffered_extent, fallback_extent_srs, map_srs)
while not map_extent and buffer_in_percent >= 0:
buffer_in_percent -= 1
buffered_extent = map(lambda x: x + (x / 100 * buffer_in_percent), fallback_extent)
map_extent = transform_bbox(buffered_extent, fallback_extent_srs, map_srs)
if map_extent:
return {'result': {
'maxExtent': map_extent
}}
else:
response.status = 400
return {'error': _('Can not show map in %(srs)s with bounds %(bounds)s (%(bounds_srs)s)') % ({
'srs': map_srs.srs_code,
'bounds': bounds,
'bounds_srs': bounds_srs
})}
def transform_bbox(bbox, source, dest):
if is_valid_transformation(bbox, source, dest):
transformed_bbox = source.transform_bbox_to(dest, bbox)
return transformed_bbox
else:
return False
def is_valid_transformation(bbox, source_srs, dest_srs):
"""
>>> source_srs = SRS(4326)
>>> dest_srs = SRS(25833)
>>> bbox = [8,54,10,56]
>>> is_valid_transformation(bbox, source_srs, dest_srs)
True
>>> source_srs = SRS(4326)
>>> dest_srs = SRS(25833)
>>> bbox = [-15,54,-13,56]
>>> is_valid_transformation(bbox, source_srs, dest_srs)
False
"""
# 1 m = 0.000009 deg
FACTOR = 0.000009
# delta in m
delta = 50
# delta in deg or m
delta = delta * FACTOR if source_srs.is_latlong else delta
x0, y0, x1, y1 = bbox
p1 = (x0, y0)
p2 = (x1, y1)
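# Round-trip check: transform both bbox corners to dest_srs and back; the
# transformation counts as valid only if no coordinate comes back as
# infinity and every coordinate returns to within `delta` of its original
# value (delta is ~50 m, scaled to degrees for lat/long systems).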
pd1, pd2 = list(source_srs.transform_to(dest_srs, [p1, p2]))
if not float('inf') in pd1 + pd2:
ps1, ps2 = list(dest_srs.transform_to(source_srs, [pd1, pd2]))
bbox_t = list(ps1 + ps2)
if not float('inf') in bbox_t:
for i in range(4):
if abs(bbox[i] - bbox_t[i]) > delta:
return False
return True
return False
@app.route('/transform_grid', 'POST', name='transform_grid')
def transform_grid():
def return_map_message(points, message):
return {"type":"FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [
points
]
},
"properties": {
"message": message
}
}]
}
request_bbox = request.forms.get('bbox', '').split(',')
if request_bbox:
request_bbox = map(float, request_bbox)
else:
request_bbox = None
grid_bbox =request.forms.get('grid_bbox', None)
if grid_bbox:
grid_bbox = grid_bbox.split(',')
if grid_bbox:
grid_bbox = map(float, grid_bbox)
else:
grid_bbox = None
level = request.forms.get('level', None)
if level:
level = int(level)
grid_srs = request.forms.get('srs', None)
if grid_srs:
grid_srs = SRS(grid_srs)
grid_bbox_srs = request.forms.get('bbox_srs', None)
if grid_bbox_srs:
grid_bbox_srs = SRS(grid_bbox_srs)
map_srs = request.forms.get('map_srs', None)
if map_srs:
map_srs = SRS(map_srs)
res = request.forms.get('res', None)
if res:
res = map(float, res.split(','))
scales = request.forms.get('scales', None)
if scales:
scales = map(float, scales.split(','))
units = 1 if request.forms.get('units', 'm') == 'm' else 111319.4907932736
dpi = float(request.forms.get('dpi', (2.54/(0.00028 * 100))))
res = [scale_to_res(scale, dpi, units) for scale in scales]
origin = request.forms.get('origin', 'll')
try:
tilegrid = tile_grid(srs=grid_srs, bbox=grid_bbox, bbox_srs=grid_bbox_srs, origin=origin, res=res)
except (ValueError, TransformationError):
x0, y0, x1, y1 = request_bbox
return return_map_message([[x0, y0], [x1, y0], [x1, y1], [x0, y1], [x0, y0]], _('Given bbox can not be used with given SRS'))
if grid_bbox is None:
grid_bbox = tilegrid.bbox
else:
grid_bbox = grid_bbox_srs.transform_bbox_to(grid_srs, grid_bbox) if grid_bbox_srs and grid_srs else grid_bbox
if map_srs and grid_srs:
if is_valid_transformation(request_bbox, map_srs, grid_srs):
view_bbox = map_srs.transform_bbox_to(grid_srs, map_srs.align_bbox(request_bbox))
else:
view_bbox = grid_bbox
else:
view_bbox = request_bbox
view_bbox = [
max(grid_bbox[0], view_bbox[0]),
max(grid_bbox[1], view_bbox[1]),
min(grid_bbox[2], view_bbox[2]),
min(grid_bbox[3], view_bbox[3])
]
try:
tiles_bbox, size, tiles = tilegrid.get_affected_level_tiles(bbox=view_bbox, level=level)
except GridError:
x0, y0, x1, y1 = request_bbox
return return_map_message([[x0, y0], [x1, y0], [x1, y1], [x0, y1], [x0, y0]], _('Given bbox can not be used with given SRS'))
feature_count = size[0] * size[1]
features = []
if feature_count > 1000:
polygon = generate_envelope_points(grid_srs.align_bbox(tiles_bbox), 128)
polygon = list(grid_srs.transform_to(map_srs, polygon)) if map_srs and grid_srs else list(polygon)
return return_map_message([list(point) for point in polygon] + [list(polygon[0])], _("Too many tiles. Please zoom in."))
else:
for tile in tiles:
if tile:
x, y, z = tile
tile_bbox = grid_srs.align_bbox(tilegrid.tile_bbox(tile))
polygon = generate_envelope_points(tile_bbox, 16)
polygon = list(grid_srs.transform_to(map_srs, polygon)) if map_srs and grid_srs else list(polygon)
features.append({
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [
[list(point) for point in polygon] + [list(polygon[0])]
]
}
})
if feature_count == 1:
xc0, yc0, xc1, yc1 = grid_srs.transform_bbox_to(map_srs, view_bbox) if map_srs and grid_srs else view_bbox
features.append({
"type": "Feature",
"properties": {
"x": x,
"y": y,
"z": z
},
"geometry": {
"type": "Point",
"coordinates": [xc0 + (xc1-xc0) /2, yc0 + (yc1-yc0)/2]
}
})
elif feature_count <= 100:
xc0, yc0, xc1, yc1 = tile_bbox
features.append({
"type": "Feature",
"properties": {
"x": x,
"y": y,
"z": z
},
"geometry": {
"type": "Point",
"coordinates": [xc0 + (xc1-xc0) /2, yc0 + (yc1-yc0)/2]
}
})
return {"type":"FeatureCollection",
"features": features
}
def init_app(storage_dir):
app.install(storage.SQLiteStorePlugin(os.path.join(configuration.get('app', 'storage_path'), configuration.get('app', 'sqlite_db'))))
return app
if __name__ == '__main__':
app.run(host='localhost', port=8080, debug=True, reloader=True)
Fixed buffered extent calculation
import os
import yaml
import gettext
from copy import deepcopy
from xml.etree.ElementTree import ParseError
from mapproxy.client import http
from mapproxy.script.scales import scale_to_res, res_to_scale
from mapproxy.srs import SRS, generate_envelope_points, TransformationError
from mapproxy.grid import tile_grid, GridError
from . import bottle
from . import config
from . import storage
from .bottle import request, response, static_file, template, SimpleTemplate
from .utils import requires_json
from .capabilities import parse_capabilities_url
configuration = config.ConfigParser.from_file('./config.ini')
app = bottle.Bottle()
bottle.TEMPLATE_PATH = [os.path.join(os.path.dirname(__file__), 'templates')]
SimpleTemplate.defaults["get_url"] = app.get_url
SimpleTemplate.defaults["demo"] = configuration.get_bool('app', 'demo')
SimpleTemplate.defaults["language"] = configuration.get('app', 'language')
try:
translation = gettext.translation('messages', os.path.join(os.path.dirname(os.path.realpath(__file__)), 'locale'), ['de'])
translation.install()
SimpleTemplate.defaults["_"] = translation.gettext
except IOError as e:
print e
class RESTBase(object):
def __init__(self, section):
self.section = section
def list(self, project, storage):
return storage.get_all(self.section, project, with_id=True, with_manual=True, with_locked=True)
@requires_json
def add(self, project, storage):
data = request.json
manual = data.get('_manual', False)
locked = data.get('_locked', False)
id = storage.add(self.section, project, data)
response.status = 201
data['_id'] = id
data['_manual'] = manual
data['_locked'] = locked
return data
def get(self, project, id, storage):
data = storage.get(id, self.section,project)
if not data:
response.status = 404
else:
return data
@requires_json
def update(self, project, id, storage):
data = request.json
manual = data.get('_manual', False)
locked = data.get('_locked', False)
# used deepcopy cause storage.update modifies data
storage.update(id, self.section, project, deepcopy(data))
response.status = 200
data['_manual'] = manual
data['_locked'] = locked
return data
def delete(self, project, id, storage):
if storage.delete(id, self.section, project):
response.status = 204
else:
response.status = 404
def setup_routing(self, app):
app.route('/conf/<project>/%s' % self.section, 'GET', self.list)
app.route('/conf/<project>/%s' % self.section, 'POST', self.add)
app.route('/conf/<project>/%s/<id:int>' % self.section, 'GET', self.get)
app.route('/conf/<project>/%s/<id:int>' % self.section, 'PUT', self.update)
app.route('/conf/<project>/%s/<id:int>' % self.section, 'DELETE', self.delete)
class RESTWMSCapabilities(RESTBase):
def __init__(self):
RESTBase.__init__(self, 'wms_capabilities')
@requires_json
def add(self, project, storage):
url = request.json.get('data', {}).get('url')
cap = {}
if not url:
response.status = 400
return {'error': _('missing URL')}
try:
cap['data'] = parse_capabilities_url(url)
except ParseError:
response.status = 400
return {'error': _('no capabilities document found')}
except (http.HTTPClientError, ):
response.status = 400
# TODO
return {'error': _('invalid URL')}
search = """%%"url": "%s"%%""" % cap['data']['url']
id = storage.exists_in_data(self.section, project, search)
if id:
return self.update(project, id, storage)
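# The %%-wrapped search string above appears to be a LIKE-style pattern over
# the stored JSON, so re-submitting a capabilities URL that already exists
# updates the existing record instead of adding a duplicate.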
id = storage.add(self.section, project, cap)
cap['_id'] = id
response.status = 201
return cap
@requires_json
def update(self, project, id, storage):
url = request.json.get('data', {}).get('url')
if not url:
response.status = 400
return {'error': _('missing URL')}
cap = {}
cap['data'] = parse_capabilities_url(url)
storage.update(id, self.section, project, cap)
response.status = 200
cap['_id'] = id
return cap
class RESTLayers(RESTBase):
def __init__(self):
RESTBase.__init__(self, 'layers')
def list(self, project, storage):
return storage.get_all(self.section, project, with_rank=True, with_id=True, with_manual=True, with_locked=True)
@requires_json
def update_tree(self, project, storage):
data = request.json
storage.updates(self.section, project, data['tree'])
response.status = 200
def setup_routing(self, app):
super(RESTLayers, self).setup_routing(app)
app.route('/conf/<project>/%s' % self.section, 'PUT', self.update_tree)
class RESTGrids(RESTBase):
def __init__(self):
RESTBase.__init__(self, 'grids')
def list(self, project, storage):
default_grids = {
'GLOBAL_GEODETIC': {'_id': 'GLOBAL_GEODETIC', 'default': True, 'data': {
'name': 'GLOBAL_GEODETIC',
'srs': 'EPSG:4326',
'bbox': [-180, -90, 180, 90],
'bbox_srs': 'EPSG:4326',
'origin': 'sw',
'res': [
1.40625,
0.703125,
0.3515625,
0.17578125,
0.087890625,
0.0439453125,
0.02197265625,
0.010986328125,
0.0054931640625,
0.00274658203125,
0.001373291015625,
0.0006866455078125,
0.00034332275390625,
0.000171661376953125,
0.0000858306884765625,
0.00004291534423828125,
0.000021457672119140625,
0.000010728836059570312,
0.000005364418029785156,
0.000002682209014892578,
]
}},
'GLOBAL_MERCATOR': {'_id': 'GLOBAL_MERCATOR', 'default': True, 'data': {
'name': 'GLOBAL_MERCATOR',
'srs': 'EPSG:900913',
'bbox': [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244],
'bbox_srs': 'EPSG:900913',
'origin': 'sw',
'res': [
156543.03392804097,
78271.51696402048,
39135.75848201024,
19567.87924100512,
9783.93962050256,
4891.96981025128,
2445.98490512564,
1222.99245256282,
611.49622628141,
305.748113140705,
152.8740565703525,
76.43702828517625,
38.21851414258813,
19.109257071294063,
9.554628535647032,
4.777314267823516,
2.388657133911758,
1.194328566955879,
0.5971642834779395,
0.29858214173896974,
]
}},
'GLOBAL_WEBMERCATOR': {'_id': 'GLOBAL_WEBMERCATOR', 'default': True, 'data': {
'name': 'GLOBAL_WEBMERCATOR',
'srs': 'EPSG:3857',
'bbox': [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244],
'bbox_srs': 'EPSG:3857',
'origin': 'nw',
'res': [
156543.03392804097,
78271.51696402048,
39135.75848201024,
19567.87924100512,
9783.93962050256,
4891.96981025128,
2445.98490512564,
1222.99245256282,
611.49622628141,
305.748113140705,
152.8740565703525,
76.43702828517625,
38.21851414258813,
19.109257071294063,
9.554628535647032,
4.777314267823516,
2.388657133911758,
1.194328566955879,
0.5971642834779395,
0.29858214173896974,
]
}}
}
default_grids.update(storage.get_all(self.section, project, with_id=True, with_manual=True, with_locked=True))
return default_grids
RESTBase('sources').setup_routing(app)
RESTBase('caches').setup_routing(app)
RESTBase('globals').setup_routing(app)
RESTBase('services').setup_routing(app)
RESTBase('defaults').setup_routing(app)
RESTWMSCapabilities().setup_routing(app)
RESTLayers().setup_routing(app)
RESTGrids().setup_routing(app)
## other
@app.route('/', name='index')
def index():
return template('index')
@app.route('/projects', name='projects')
def projects(storage):
projects = {}
for project in storage.get_projects():
try:
mapproxy_conf = config.mapproxy_conf_from_storage(storage, project)
except config.ConfigError as e:
informal_only = False
errors = [e]
mapproxy_conf = False
if mapproxy_conf:
errors, informal_only = config.validate(mapproxy_conf)
projects[project] = {
'valid': informal_only,
'errors': errors
}
return template('projects', projects=projects)
@app.route('/project/<project>/conf', name='configuration')
def conf_index(project):
return template('config_index', project=project)
@app.route('/project/<project>', name='project_index')
def project_index(project):
return template('project_index', project=project)
@app.route('/project/<project>/conf/sources', name='sources')
def sources(project):
return template('sources', project=project)
@app.route('/project/<project>/conf/grids', name='grids')
def grids(project):
return template('grids', project=project)
@app.route('/project/<project>/conf/caches', name='caches')
def caches(project):
return template('caches', project=project)
@app.route('/project/<project>/conf/layers', name='layers')
def layers(project):
return template('layers', project=project)
@app.route('/project/<project>/conf/globals', name='globals')
def globals(project):
return template('globals', project=project)
@app.route('/project/<project>/conf/services', name='services')
def services(project):
return template('services', project=project)
@app.route('/conf/<project>/write_config', 'POST', name='write_config')
def write_config(project, storage):
mapproxy_conf = config.mapproxy_conf_from_storage(storage, project)
try:
config.write_mapproxy_yaml(mapproxy_conf, os.path.join(configuration.get('app', 'output_path'), project + '.yaml'))
return {'success': _('creating mapproxy config successful')}
except:
response.status = 400
return {'error': _('creating mapproxy config failed')}
@app.route('/static/<filepath:path>', name='static')
def static(filepath):
return static_file(filepath, root=os.path.join(os.path.dirname(__file__), 'static'))
@app.route('/template/<filename>', name='angular_template')
def angular_template(filename):
return template(os.path.join(os.path.dirname(__file__), 'templates/angular', filename))
@app.route('/resources/<filename>/<translated>', name='resource', translated=False)
def resources(filename, translated):
file_location = os.path.join(os.path.dirname(__file__), 'templates/resources')
if translated:
return template(os.path.join(file_location, filename))
else:
return static_file(filename, root=file_location)
@app.route('/yaml', 'POST', name='json_to_yaml')
def create_yaml():
data = request.json
try:
return yaml.safe_dump(data, default_flow_style=False)
except yaml.YAMLError:
response.status = 400
return {'error': _('creating yaml failed')}
@app.route('/json', 'POST', name='yaml_to_json')
def create_json():
data = request.json
try:
return yaml.safe_load(data['yaml'])
except yaml.YAMLError:
response.status = 400
return {'error': _('parsing yaml failed')}
@app.route('/res', 'POST', name='scales_to_res')
@app.route('/scales', 'POST', name='res_to_scales')
def convert_res_scales():
data = request.json.get('data', [])
mode = request.json.get('mode', 'to_scale')
dpi = float(request.json.get('dpi', (2.54/(0.00028 * 100))))
units = request.json.get('units', 'm')
data = [float(d) if d else None for d in data]
units = 1 if units == 'm' else 111319.4907932736
convert = res_to_scale if mode == 'to_scale' else scale_to_res
result = []
for i, d in enumerate(data):
result.append(round(convert(d, dpi, units),9) if d else None)
return {'result': result}
@app.route('/calculate_tiles', 'POST', name='calculate_tiles')
def calculate_tiles():
data = request.json
origin = data.get('origin', None)
name = data.get('name', None)
srs = data.get('srs', None)
bbox = data.get('bbox', None)
bbox_srs = data.get('bbox_srs', None)
if bbox is not None and not all(bbox):
bbox = None
dpi = float(data.get('dpi', (2.54/(0.00028 * 100))))
units = 1 if data.get('units', 'm') == 'm' else 111319.4907932736
res = data.get('res', None)
if res:
res = [float(r) for r in res]
scales = data.get('scales', None)
if scales:
scales = [float(s) for s in scales]
if res is None and scales is not None:
res = [round(scale_to_res(scale, dpi, units), 9) for scale in scales]
tilegrid = tile_grid(srs=srs, bbox=bbox, bbox_srs=bbox_srs, res=res, origin=origin, name=name)
result = []
res_scale = 'resolution' if scales is None else 'scale'
for level, res in enumerate(tilegrid.resolutions):
tiles_in_x, tiles_in_y = tilegrid.grid_sizes[level]
total_tiles = tiles_in_x * tiles_in_y
result.append({
'level': level,
res_scale: res if scales is None else res_to_scale(res, dpi, units),
'tiles_in_x': tiles_in_x,
'tiles_in_y': tiles_in_y,
'total_tiles': total_tiles
})
return {'result': result}
@app.route('/get_max_extent', 'POST', name='get_max_extent')
def get_max_extent():
extent = request.json.get('extent', [-180, -90, 180, 90])
extent_srs = request.json.get('extent_srs', 'EPSG:4326')
fallback_extent = request.json.get('fallback_extent', None)
fallback_extent_srs = request.json.get('fallback_extent_srs', None)
fallback_with_buffer = request.json.get('fallback_with_buffer', True)
map_srs = request.json.get('map_srs')
bounds = extent
bounds_srs = extent_srs
extent_srs = SRS(extent_srs)
map_srs = SRS(map_srs)
extent = extent_srs.align_bbox(extent)
map_extent = transform_bbox(extent, extent_srs, map_srs)
if not map_extent and fallback_extent:
bounds = fallback_extent
bounds_srs = fallback_extent_srs
fallback_extent_srs = SRS(fallback_extent_srs)
fallback_extent = fallback_extent_srs.align_bbox(fallback_extent)
buffer_in_percent = 10 if fallback_with_buffer else 0
map_extent = transform_bbox(buffered_extent(fallback_extent, buffer_in_percent), fallback_extent_srs, map_srs)
while not map_extent and buffer_in_percent >= 0:
buffer_in_percent -= 1
map_extent = transform_bbox(buffered_extent(fallback_extent, buffer_in_percent), fallback_extent_srs, map_srs)
if map_extent:
return {'result': {
'maxExtent': map_extent
}}
else:
response.status = 400
return {'error': _('Can not show map in %(srs)s with bounds %(bounds)s (%(bounds_srs)s)') % ({
'srs': map_srs.srs_code,
'bounds': bounds,
'bounds_srs': bounds_srs
})}
def buffered_extent(extent, buffer):
x0, y0, x1, y1 = extent
xb0 = x0 - abs(x0 / 100 * buffer)
yb0 = y0 - abs(y0 / 100 * buffer)
xb1 = x1 + abs(x1 / 100 * buffer)
yb1 = y1 + abs(y1 / 100 * buffer)
return [xb0, yb0, xb1, yb1]
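# Quick illustration of buffered_extent (hypothetical values): a 10 percent buffer grows each
# coordinate by 10 percent of its own absolute value (coordinate-relative, not width/height-
# relative), so buffered_extent([8.0, 54.0, 10.0, 56.0], 10) is approximately [7.2, 48.6, 11.0, 61.6].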
def transform_bbox(bbox, source, dest):
if is_valid_transformation(bbox, source, dest):
transformed_bbox = source.transform_bbox_to(dest, bbox)
return transformed_bbox
else:
return False
def is_valid_transformation(bbox, source_srs, dest_srs):
"""
>>> source_srs = SRS(4326)
>>> dest_srs = SRS(25833)
>>> bbox = [8,54,10,56]
>>> is_valid_transformation(bbox, source_srs, dest_srs)
True
>>> source_srs = SRS(4326)
>>> dest_srs = SRS(25833)
>>> bbox = [-15,54,-13,56]
>>> is_valid_transformation(bbox, source_srs, dest_srs)
False
"""
# 1 m = 0.000009 deg
FACTOR = 0.000009
# delta in m
delta = 50
# delta in deg or m
delta = delta * FACTOR if source_srs.is_latlong else delta
x0, y0, x1, y1 = bbox
p1 = (x0, y0)
p2 = (x1, y1)
pd1, pd2 = list(source_srs.transform_to(dest_srs, [p1, p2]))
if not float('inf') in pd1 + pd2:
ps1, ps2 = list(dest_srs.transform_to(source_srs, [pd1, pd2]))
bbox_t = list(ps1 + ps2)
if not float('inf') in bbox_t:
for i in range(4):
if abs(bbox[i] - bbox_t[i]) > delta:
return False
return True
return False
@app.route('/transform_grid', 'POST', name='transform_grid')
def transform_grid():
def return_map_message(points, message):
return {"type":"FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [
points
]
},
"properties": {
"message": message
}
}]
}
request_bbox = request.forms.get('bbox', '')
if request_bbox:
request_bbox = map(float, request_bbox.split(','))
else:
request_bbox = None
grid_bbox = request.forms.get('grid_bbox', None)
if grid_bbox:
grid_bbox = grid_bbox.split(',')
if grid_bbox:
grid_bbox = map(float, grid_bbox)
else:
grid_bbox = None
level = request.forms.get('level', None)
if level:
level = int(level)
grid_srs = request.forms.get('srs', None)
if grid_srs:
grid_srs = SRS(grid_srs)
grid_bbox_srs = request.forms.get('bbox_srs', None)
if grid_bbox_srs:
grid_bbox_srs = SRS(grid_bbox_srs)
map_srs = request.forms.get('map_srs', None)
if map_srs:
map_srs = SRS(map_srs)
res = request.forms.get('res', None)
if res:
res = map(float, res.split(','))
scales = request.forms.get('scales', None)
if scales:
scales = map(float, scales.split(','))
units = 1 if request.forms.get('units', 'm') == 'm' else 111319.4907932736
dpi = float(request.forms.get('dpi', (2.54/(0.00028 * 100))))
if scales:
res = [scale_to_res(scale, dpi, units) for scale in scales]
origin = request.forms.get('origin', 'll')
try:
tilegrid = tile_grid(srs=grid_srs, bbox=grid_bbox, bbox_srs=grid_bbox_srs, origin=origin, res=res)
except (ValueError, TransformationError):
x0, y0, x1, y1 = request_bbox
return return_map_message([[x0, y0], [x1, y0], [x1, y1], [x0, y1], [x0, y0]], _('Given bbox can not be used with given SRS'))
if grid_bbox is None:
grid_bbox = tilegrid.bbox
else:
grid_bbox = grid_bbox_srs.transform_bbox_to(grid_srs, grid_bbox) if grid_bbox_srs and grid_srs else grid_bbox
if map_srs and grid_srs:
if is_valid_transformation(request_bbox, map_srs, grid_srs):
view_bbox = map_srs.transform_bbox_to(grid_srs, map_srs.align_bbox(request_bbox))
else:
view_bbox = grid_bbox
else:
view_bbox = request_bbox
view_bbox = [
max(grid_bbox[0], view_bbox[0]),
max(grid_bbox[1], view_bbox[1]),
min(grid_bbox[2], view_bbox[2]),
min(grid_bbox[3], view_bbox[3])
]
try:
tiles_bbox, size, tiles = tilegrid.get_affected_level_tiles(bbox=view_bbox, level=level)
except GridError:
x0, y0, x1, y1 = request_bbox
return return_map_message([[x0, y0], [x1, y0], [x1, y1], [x0, y1], [x0, y0]], _('Given bbox can not be used with given SRS'))
feature_count = size[0] * size[1]
features = []
if feature_count > 1000:
polygon = generate_envelope_points(grid_srs.align_bbox(tiles_bbox), 128)
polygon = list(grid_srs.transform_to(map_srs, polygon)) if map_srs and grid_srs else list(polygon)
return return_map_message([list(point) for point in polygon] + [list(polygon[0])], _("Too many tiles. Please zoom in."))
else:
for tile in tiles:
if tile:
x, y, z = tile
tile_bbox = grid_srs.align_bbox(tilegrid.tile_bbox(tile))
polygon = generate_envelope_points(tile_bbox, 16)
polygon = list(grid_srs.transform_to(map_srs, polygon)) if map_srs and grid_srs else list(polygon)
features.append({
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [
[list(point) for point in polygon] + [list(polygon[0])]
]
}
})
if feature_count == 1:
xc0, yc0, xc1, yc1 = grid_srs.transform_bbox_to(map_srs, view_bbox) if map_srs and grid_srs else view_bbox
features.append({
"type": "Feature",
"properties": {
"x": x,
"y": y,
"z": z
},
"geometry": {
"type": "Point",
"coordinates": [xc0 + (xc1-xc0) /2, yc0 + (yc1-yc0)/2]
}
})
elif feature_count <= 100:
xc0, yc0, xc1, yc1 = tile_bbox
features.append({
"type": "Feature",
"properties": {
"x": x,
"y": y,
"z": z
},
"geometry": {
"type": "Point",
"coordinates": [xc0 + (xc1-xc0) /2, yc0 + (yc1-yc0)/2]
}
})
return {"type":"FeatureCollection",
"features": features
}
def init_app(storage_dir):
app.install(storage.SQLiteStorePlugin(os.path.join(configuration.get('app', 'storage_path'), configuration.get('app', 'sqlite_db'))))
return app
if __name__ == '__main__':
app.run(host='localhost', port=8080, debug=True, reloader=True)
'''
flaskext.seasurf
----------------
A Flask extension providing fairly good protection against cross-site
request forgery (CSRF), otherwise known as "sea surf".
:copyright: (c) 2011 by Max Countryman.
:license: BSD, see LICENSE for more details.
'''
from __future__ import absolute_import
__version_info__ = ('0', '1', '17')
__version__ = '.'.join(__version_info__)
__author__ = 'Max Countryman'
__license__ = 'BSD'
__copyright__ = '(c) 2011 by Max Countryman'
__all__ = ['SeaSurf']
import hashlib
import random
import urlparse
from datetime import timedelta
from flask import g, request, abort
from werkzeug.security import safe_str_cmp
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
_MAX_CSRF_KEY = 18446744073709551616L # 2 << 63
REASON_NO_REFERER = 'Referer checking failed: no referer.'
REASON_BAD_REFERER = 'Referer checking failed: %s does not match %s.'
REASON_NO_CSRF_TOKEN = 'CSRF token not set.'
REASON_BAD_TOKEN = 'CSRF token missing or incorrect.'
def csrf(app):
'''Helper function to wrap the SeaSurf class.'''
SeaSurf(app)
def xsrf(app):
'''Helper function to wrap the SeaSurf class.'''
SeaSurf(app)
def _same_origin(url1, url2):
'''Determine if two URLs share the same origin.'''
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
origin1 = p1.scheme, p1.hostname, p1.port
origin2 = p2.scheme, p2.hostname, p2.port
return origin1 == origin2
class SeaSurf(object):
'''Primary class container for CSRF validation logic. The main function of
this extension is to generate and validate CSRF tokens. The design and
implementation of this extension is influenced by Django's CSRF middleware.
Tokens are generated using a salted SHA1 hash. The salt is based on
a random range. The OS's SystemRandom is used if available, otherwise
the core random.randrange is used.
You might initialize :class:`SeaSurf` something like this::
csrf = SeaSurf(app)
Validation will now be active for all requests whose methods are not GET,
HEAD, OPTIONS, or TRACE.
When using other request methods, such as POST for instance, you will need
to provide the CSRF token as a parameter. This can be achieved by making
use of the Jinja global. In your template::
<form method="POST">
...
<input type="hidden" name="_csrf_token" value="{{ csrf_token() }}">
</form>
This will assign a token to both the session cookie and the rendered HTML
which will then be validated on the backend. POST requests missing this
field will fail unless the header X-CSRFToken is specified.
.. admonition:: Excluding Views From Validation
For views that use methods subject to validation but which you do not
want validated, you may use the :class:`exempt` decorator to indicate
that they should not be checked.
:param app: The Flask application object, defaults to None.
'''
def __init__(self, app=None):
self._exempt_views = set()
self._include_views = set()
if app is not None:
self.init_app(app)
def init_app(self, app):
'''Initializes a Flask object `app`, binds CSRF validation to
app.before_request, and assigns `csrf_token` as a Jinja global.
:param app: The Flask application object.
'''
self.app = app
app.before_request(self._before_request)
app.after_request(self._after_request)
# expose the CSRF token to the template
app.jinja_env.globals['csrf_token'] = self._get_token
self._csrf_name = app.config.get('CSRF_COOKIE_NAME', '_csrf_token')
self._csrf_header_name = app.config.get('CSRF_HEADER_NAME', 'X-CSRFToken')
self._csrf_disable = app.config.get('CSRF_DISABLE',
app.config.get('TESTING', False))
self._csrf_timeout = app.config.get('CSRF_COOKIE_TIMEOUT',
timedelta(days=5))
self._type = app.config.get('SEASURF_INCLUDE_OR_EXEMPT_VIEWS',
'exempt')
def exempt(self, view):
'''A decorator that can be used to exclude a view from CSRF validation.
Example usage of :class:`exempt` might look something like this::
csrf = SeaSurf(app)
@csrf.exempt
@app.route('/some_view')
def some_view():
"""This view is exempt from CSRF validation."""
return render_template('some_view.html')
:param view: The view to be wrapped by the decorator.
'''
self._exempt_views.add(view)
return view
def include(self, view):
'''A decorator that can be used to include a view in CSRF validation.
Example usage of :class:`include` might look something like this::
csrf = SeaSurf(app)
@csrf.include
@app.route('/some_view')
def some_view():
"""This view is include from CSRF validation."""
return render_template('some_view.html')
:param view: The view to be wrapped by the decorator.
'''
self._include_views.add(view)
return view
def _should_use_token(self, view_func):
'''Given a view function, determine whether or not we should
deliver a CSRF token to this view through the response and
validate CSRF tokens upon requests to this view.'''
if self._type == 'exempt':
if view_func in self._exempt_views:
return False
elif self._type == 'include':
if view_func not in self._include_views:
return False
else:
raise NotImplementedError
return True
def _before_request(self):
'''Determine if a view is exempt from CSRF validation and if not
then ensure the validity of the CSRF token. This method is bound to
the Flask `before_request` decorator.
If a request is determined to be secure, i.e. using HTTPS, then we
use strict referer checking to prevent a man-in-the-middle attack
from being plausible.
Validation is suspended if `TESTING` is True in your application's
configuration.
'''
if self._csrf_disable:
return # don't validate for testing
csrf_token = request.cookies.get(self._csrf_name, None)
if not csrf_token:
setattr(g, self._csrf_name, self._generate_token())
else:
setattr(g, self._csrf_name, csrf_token)
# Always set this to let the response know whether or not to set the CSRF token
g._view_func = self.app.view_functions.get(request.endpoint)
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
# Retrieve the view function based on the request endpoint and
# then compare it to the set of exempted views
if not self._should_use_token(g._view_func):
return
if request.is_secure:
referer = request.headers.get('Referer')
if referer is None:
error = (REASON_NO_REFERER, request.path)
self.app.logger.warning('Forbidden (%s): %s' % error)
return abort(403)
# By setting the Access-Control-Allow-Origin header, browsers will
# let you send cross-domain AJAX requests. If there is an Origin
# header, the browser has already decided that it trusts this domain;
# otherwise it would have blocked the request before it got here.
allowed_referer = request.headers.get('Origin') or request.url_root
if not _same_origin(referer, allowed_referer):
error = REASON_BAD_REFERER % (referer, allowed_referer)
error = (error, request.path)
self.app.logger.warning('Forbidden (%s): %s' % error)
return abort(403)
request_csrf_token = request.form.get(self._csrf_name, '')
if request_csrf_token == '':
# As per the Django middleware, this makes AJAX easier and
# PUT and DELETE possible
request_csrf_token = request.headers.get(self._csrf_header_name, '')
some_none = None in (request_csrf_token, csrf_token)
if some_none or not safe_str_cmp(request_csrf_token, csrf_token):
error = (REASON_BAD_TOKEN, request.path)
self.app.logger.warning('Forbidden (%s): %s' % error)
return abort(403)
def _after_request(self, response):
'''Checks if the flask.g object contains the CSRF token, and if
the view in question has CSRF protection enabled. If both, returns
the response with a cookie containing the token. If not then we just
return the response unaltered. Bound to the Flask `after_request`
decorator.'''
if getattr(g, self._csrf_name, None) is None:
return response
_view_func = getattr(g, '_view_func', False)
if not (_view_func and self._should_use_token(_view_func)):
return response
response.set_cookie(self._csrf_name,
getattr(g, self._csrf_name),
max_age=self._csrf_timeout)
response.vary.add('Cookie')
return response
def _get_token(self):
'''Attempts to get a token from the request cookies.'''
return getattr(g, self._csrf_name, None)
def _generate_token(self):
'''Generates a token with randomly salted SHA1. Returns a string.'''
salt = randrange(0, _MAX_CSRF_KEY)
return str(hashlib.sha1(str(salt)).hexdigest())
version 0.1.18
'''
flaskext.seasurf
----------------
A Flask extension providing fairly good protection against cross-site
request forgery (CSRF), otherwise known as "sea surf".
:copyright: (c) 2011 by Max Countryman.
:license: BSD, see LICENSE for more details.
'''
from __future__ import absolute_import
__version_info__ = ('0', '1', '18')
__version__ = '.'.join(__version_info__)
__author__ = 'Max Countryman'
__license__ = 'BSD'
__copyright__ = '(c) 2011 by Max Countryman'
__all__ = ['SeaSurf']
import hashlib
import random
import urlparse
from datetime import timedelta
from flask import g, request, abort
from werkzeug.security import safe_str_cmp
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
_MAX_CSRF_KEY = 18446744073709551616L # 2 << 63
REASON_NO_REFERER = 'Referer checking failed: no referer.'
REASON_BAD_REFERER = 'Referer checking failed: %s does not match %s.'
REASON_NO_CSRF_TOKEN = 'CSRF token not set.'
REASON_BAD_TOKEN = 'CSRF token missing or incorrect.'
def csrf(app):
'''Helper function to wrap the SeaSurf class.'''
SeaSurf(app)
def xsrf(app):
'''Helper function to wrap the SeaSurf class.'''
SeaSurf(app)
def _same_origin(url1, url2):
'''Determine if two URLs share the same origin.'''
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
origin1 = p1.scheme, p1.hostname, p1.port
origin2 = p2.scheme, p2.hostname, p2.port
return origin1 == origin2
class SeaSurf(object):
'''Primary class container for CSRF validation logic. The main function of
this extension is to generate and validate CSRF tokens. The design and
implementation of this extension is influenced by Django's CSRF middleware.
Tokens are generated using a salted SHA1 hash. The salt is based on
a random range. The OS's SystemRandom is used if available, otherwise
the core random.randrange is used.
You might initialize :class:`SeaSurf` something like this::
csrf = SeaSurf(app)
Validation will now be active for all requests whose methods are not GET,
HEAD, OPTIONS, or TRACE.
When using other request methods, such as POST for instance, you will need
to provide the CSRF token as a parameter. This can be achieved by making
use of the Jinja global. In your template::
<form method="POST">
...
<input type="hidden" name="_csrf_token" value="{{ csrf_token() }}">
</form>
This will assign a token to both the session cookie and the rendered HTML
which will then be validated on the backend. POST requests missing this
field will fail unless the header X-CSRFToken is specified.
.. admonition:: Excluding Views From Validation
For views that use methods subject to validation but which you do not
want validated, you may use the :class:`exempt` decorator to indicate
that they should not be checked.
:param app: The Flask application object, defaults to None.
'''
def __init__(self, app=None):
self._exempt_views = set()
self._include_views = set()
if app is not None:
self.init_app(app)
def init_app(self, app):
'''Initializes a Flask object `app`, binds CSRF validation to
app.before_request, and assigns `csrf_token` as a Jinja global.
:param app: The Flask application object.
'''
self.app = app
app.before_request(self._before_request)
app.after_request(self._after_request)
# expose the CSRF token to the template
app.jinja_env.globals['csrf_token'] = self._get_token
self._csrf_name = app.config.get('CSRF_COOKIE_NAME', '_csrf_token')
self._csrf_header_name = app.config.get('CSRF_HEADER_NAME', 'X-CSRFToken')
self._csrf_disable = app.config.get('CSRF_DISABLE',
app.config.get('TESTING', False))
self._csrf_timeout = app.config.get('CSRF_COOKIE_TIMEOUT',
timedelta(days=5))
self._type = app.config.get('SEASURF_INCLUDE_OR_EXEMPT_VIEWS',
'exempt')
def exempt(self, view):
'''A decorator that can be used to exclude a view from CSRF validation.
Example usage of :class:`exempt` might look something like this::
csrf = SeaSurf(app)
@csrf.exempt
@app.route('/some_view')
def some_view():
"""This view is exempt from CSRF validation."""
return render_template('some_view.html')
:param view: The view to be wrapped by the decorator.
'''
self._exempt_views.add(view)
return view
def include(self, view):
'''A decorator that can be used to include a view in CSRF validation.
Example usage of :class:`include` might look something like this::
csrf = SeaSurf(app)
@csrf.include
@app.route('/some_view')
def some_view():
"""This view is include from CSRF validation."""
return render_template('some_view.html')
:param view: The view to be wrapped by the decorator.
'''
self._include_views.add(view)
return view
def _should_use_token(self, view_func):
'''Given a view function, determine whether or not we should
deliver a CSRF token to this view through the response and
validate CSRF tokens upon requests to this view.'''
if self._type == 'exempt':
if view_func in self._exempt_views:
return False
elif self._type == 'include':
if view_func not in self._include_views:
return False
else:
raise NotImplementedError
return True
def _before_request(self):
'''Determine if a view is exempt from CSRF validation and if not
then ensure the validity of the CSRF token. This method is bound to
the Flask `before_request` decorator.
If a request is determined to be secure, i.e. using HTTPS, then we
use strict referer checking to prevent a man-in-the-middle attack
from being plausible.
Validation is suspended if `TESTING` is True in your application's
configuration.
'''
if self._csrf_disable:
return # don't validate for testing
csrf_token = request.cookies.get(self._csrf_name, None)
if not csrf_token:
setattr(g, self._csrf_name, self._generate_token())
else:
setattr(g, self._csrf_name, csrf_token)
# Always set this to let the response know whether or not to set the CSRF token
g._view_func = self.app.view_functions.get(request.endpoint)
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
# Retrieve the view function based on the request endpoint and
# then compare it to the set of exempted views
if not self._should_use_token(g._view_func):
return
if request.is_secure:
referer = request.headers.get('Referer')
if referer is None:
error = (REASON_NO_REFERER, request.path)
self.app.logger.warning('Forbidden (%s): %s' % error)
return abort(403)
# By setting the Access-Control-Allow-Origin header, browsers will
# let you send cross-domain AJAX requests. If there is an Origin
# header, the browser has already decided that it trusts this domain;
# otherwise it would have blocked the request before it got here.
allowed_referer = request.headers.get('Origin') or request.url_root
if not _same_origin(referer, allowed_referer):
error = REASON_BAD_REFERER % (referer, allowed_referer)
error = (error, request.path)
self.app.logger.warning('Forbidden (%s): %s' % error)
return abort(403)
request_csrf_token = request.form.get(self._csrf_name, '')
if request_csrf_token == '':
# As per the Django middleware, this makes AJAX easier and
# PUT and DELETE possible
request_csrf_token = request.headers.get(self._csrf_header_name, '')
some_none = None in (request_csrf_token, csrf_token)
if some_none or not safe_str_cmp(request_csrf_token, csrf_token):
error = (REASON_BAD_TOKEN, request.path)
self.app.logger.warning('Forbidden (%s): %s' % error)
return abort(403)
def _after_request(self, response):
'''Checks if the flask.g object contains the CSRF token, and if
the view in question has CSRF protection enabled. If both, returns
the response with a cookie containing the token. If not then we just
return the response unaltered. Bound to the Flask `after_request`
decorator.'''
if getattr(g, self._csrf_name, None) is None:
return response
_view_func = getattr(g, '_view_func', False)
if not (_view_func and self._should_use_token(_view_func)):
return response
response.set_cookie(self._csrf_name,
getattr(g, self._csrf_name),
max_age=self._csrf_timeout)
response.vary.add('Cookie')
return response
def _get_token(self):
'''Attempts to get a token from the request cookies.'''
return getattr(g, self._csrf_name, None)
def _generate_token(self):
'''Generates a token with randomly salted SHA1. Returns a string.'''
salt = randrange(0, _MAX_CSRF_KEY)
return str(hashlib.sha1(str(salt)).hexdigest())
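# A minimal usage sketch of the extension defined above (the example_app object and the
# '/ping' endpoint are made up for illustration; everything else mirrors the docstrings).
if __name__ == '__main__':
    from flask import Flask

    example_app = Flask(__name__)
    csrf = SeaSurf(example_app)

    @csrf.exempt                      # skip CSRF validation for this view
    @example_app.route('/ping', methods=['POST'])
    def ping():
        return 'pong'

    # All other non-GET/HEAD/OPTIONS/TRACE requests must send the token, either as the
    # '_csrf_token' form field or in the 'X-CSRFToken' header (names configurable via
    # CSRF_COOKIE_NAME / CSRF_HEADER_NAME).
    example_app.run(debug=True)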
"""
This file is part of flatlib - (C) FlatAngle
Author: João Ventura (flatangleweb@gmail.com)
--------------------------------------------
This module defines the names of signs, objects, houses and fixed-stars.
It also includes the definition of some ordered lists for iterations.
"""
# === Base constants === */
# Four primitive qualities
QUALITY_HOT = 'Hot'
QUALITY_COLD = 'Cold'
QUALITY_DRY = 'Dry'
QUALITY_HUMID = 'Humid'
# Four Elements
ELEMENT_FIRE = 'Fire'
ELEMENT_EARTH = 'Earth'
ELEMENT_AIR = 'Air'
ELEMENT_WATER = 'Water'
# Four Temperaments
TEMPERAMENT_CHOLERIC = 'Choleric'
TEMPERAMENT_MELANCHOLIC = 'Melancholic'
TEMPERAMENT_SANGUINE = 'Sanguine'
TEMPERAMENT_PHLEGMATIC = 'Phlegmatic'
# Genders
GENDER_MASCULINE = 'Masculine'
GENDER_FEMININE = 'Feminine'
GENDER_NEUTRAL = 'Neutral'
# Factions
FACTION_DIURNAL = 'Diurnal'
FACTION_NOCTURNAL = 'Nocturnal'
FACTION_NEUTRAL = 'Neutral'
# Moon Quarters
MOON_FIRST_QUARTER = 'First Quarter'
MOON_SECOND_QUARTER = 'Second Quarter'
MOON_THIRD_QUARTER = 'Third Quarter'
MOON_LAST_QUARTER = 'Last Quarter'
# Sun seasons
SUN_SPRING = 'Spring'
SUN_SUMMER = 'Summer'
SUN_AUTUMN = 'Autumn'
SUN_WINTER = 'Winter'
# === Signs === */
ARIES = 'Aries'
TAURUS = 'Taurus'
GEMINI = 'Gemini'
CANCER = 'Cancer'
LEO = 'Leo'
VIRGO = 'Virgo'
LIBRA = 'Libra'
SCORPIO = 'Scorpio'
SAGITTARIUS = 'Sagittarius'
CAPRICORN = 'Capricorn'
AQUARIUS = 'Aquarius'
PISCES = 'Pisces'
# Sign modes
MODE_CARDINAL = 'Cardinal'
MODE_FIXED = 'Fixed'
MODE_MUTABLE = 'Mutable'
# Sign figures
SIGN_FIGURE_NONE = 'None'
SIGN_FIGURE_BEAST = 'Beast'
SIGN_FIGURE_HUMAN = 'Human'
SIGN_FIGURE_WILD = 'Wild'
# Sign fertilities
SIGN_FERTILE = 'Fertile'
SIGN_MODERATELY_FERTILE = 'Moderately Fertile'
SIGN_MODERATELY_STERILE = 'Moderately Sterile'
SIGN_STERILE = 'Sterile'
# === Objects === */
# Names
SUN = 'Sun'
MOON = 'Moon'
MERCURY = 'Mercury'
VENUS = 'Venus'
MARS = 'Mars'
JUPITER = 'Jupiter'
SATURN = 'Saturn'
URANUS = 'Uranus'
NEPTUNE = 'Neptune'
PLUTO = 'Pluto'
CHIRON = 'Chiron'
NORTH_NODE = 'North Node'
SOUTH_NODE = 'South Node'
SYZYGY = 'Syzygy'
PARS_FORTUNA = 'Pars Fortuna'
NO_PLANET = 'None'
# Mean daily motions
MEAN_MOTION_SUN = 0.9833
MEAN_MOTION_MOON = 13.1833
# Object types
OBJ_PLANET = 'Planet'
OBJ_HOUSE = 'House'
OBJ_MOON_NODE = 'Moon Node'
OBJ_ARABIC_PART = 'Arabic Part'
OBJ_FIXED_STAR = 'Fixed Star'
OBJ_ASTEROID = 'Asteroid'
OBJ_LUNATION = 'Lunation'
OBJ_GENERIC = 'Generic'
# Movements
MOV_DIRECT = 'Direct'
MOV_RETROGRADE = 'Retrogade'
MOV_STATIONARY = 'Stationary'
# === Houses === */
HOUSE1 = 'House1'
HOUSE2 = 'House2'
HOUSE3 = 'House3'
HOUSE4 = 'House4'
HOUSE5 = 'House5'
HOUSE6 = 'House6'
HOUSE7 = 'House7'
HOUSE8 = 'House8'
HOUSE9 = 'House9'
HOUSE10 = 'House10'
HOUSE11 = 'House11'
HOUSE12 = 'House12'
# House conditions
HOUSE_ANGULAR = 'Angular'
HOUSE_SUCCEDENT = 'Succedent'
HOUSE_CADENT = 'Cadent'
# Benefic/Malefic houses
HOUSES_BENEFIC = [HOUSE1, HOUSE5, HOUSE11]
HOUSES_MALEFIC = [HOUSE6, HOUSE12]
# House Systems
HOUSES_PLACIDUS = 'Placidus'
HOUSES_KOCH = 'Koch'
HOUSES_PORPHYRIUS = 'Porphyrius'
HOUSES_REGIOMONTANUS = 'Regiomontanus'
HOUSES_CAMPANUS = 'Campanus'
HOUSES_EQUAL = 'Equal'
HOUSES_EQUAL_2 = 'Equal 2'
HOUSES_VEHLOW_EQUAL = 'Vehlow Equal'
HOUSES_WHOLE_SIGN = 'Whole Sign'
HOUSES_MERIDIAN = 'Meridian'
HOUSES_AZIMUTHAL = 'Azimuthal'
HOUSES_POLICH_PAGE = 'Polich Page'
HOUSES_ALCABITUS = 'Alcabitus'
HOUSES_MORINUS = 'Morinus'
HOUSES_DEFAULT = HOUSES_ALCABITUS
# === Angles === */
ANGLE_ASC = 'Asc'
ANGLE_MC = 'MC'
ANGLE_DESC = 'Desc'
ANGLE_IC = 'IC'
# === Fixed Stars === */
STAR_ALGENIB = 'Algenib'
STAR_ALPHERATZ = 'Alpheratz'
STAR_ALGOL = 'Algol'
STAR_ALCYONE = 'Alcyone'
STAR_PLEIADES = STAR_ALCYONE
STAR_ALDEBARAN = 'Aldebaran'
STAR_RIGEL = 'Rigel'
STAR_CAPELLA = 'Capella'
STAR_BETELGEUSE = 'Betelgeuse'
STAR_SIRIUS = 'Sirius'
STAR_CANOPUS = 'Canopus'
STAR_CASTOR = 'Castor'
STAR_POLLUX = 'Pollux'
STAR_PROCYON = 'Procyon'
STAR_ASELLUS_BOREALIS = 'Asellus Borealis'
STAR_ASELLUS_AUSTRALIS = 'Asellus Australis'
STAR_ALPHARD = 'Alphard'
STAR_REGULUS = 'Regulus'
STAR_DENEBOLA = 'Denebola'
STAR_ALGORAB = 'Algorab'
STAR_SPICA = 'Spica'
STAR_ARCTURUS = 'Arcturus'
STAR_ALPHECCA = 'Alphecca'
STAR_ZUBEN_ELGENUBI = 'Zuben Elgenubi'
STAR_ZUBEN_ELSCHEMALI = 'Zuben Eshamali'
STAR_UNUKALHAI = 'Unukalhai'
STAR_AGENA = 'Agena'
STAR_RIGEL_CENTAURUS = 'Rigel Kentaurus'
STAR_ANTARES = 'Antares'
STAR_LESATH = 'Lesath'
STAR_VEGA = 'Vega'
STAR_ALTAIR = 'Altair'
STAR_DENEB_ALGEDI = 'Deneb Algedi'
STAR_FOMALHAUT = 'Fomalhaut'
STAR_DENEB_ADIGE = 'Deneb' # Alpha-Cygnus
STAR_ACHERNAR = 'Achernar'
# === Aspects === */
# Major Aspects
ASP_NO_ASPECT = -1
ASP_CONJUNCTION = 0
ASP_SEXTILE = 60
ASP_SQUARE = 90
ASP_TRINE = 120
ASP_OPPOSITION = 180
# Minor Aspects
ASP_SEMISEXTILE = 30
ASP_SEMIQUINTILE = 36
ASP_SEMISQUARE = 45
ASP_QUINTILE = 72
ASP_SESQUIQUINTILE = 108
ASP_SESQUISQUARE = 135
ASP_BIQUINTILE = 144
ASP_QUINCUNX = 150
# Useful lists
ASP_MAJOR_ASPECTS = [0, 60, 90, 120, 180]
ASP_MINOR_ASPECTS = [30, 36, 45, 72, 108, 135, 144, 150]
ASP_ALL_ASPECTS = ASP_MAJOR_ASPECTS + ASP_MINOR_ASPECTS
# Aspect movements
ASP_APPLICATIVE = 'Applicative'
ASP_SEPARATIVE = 'Separative'
ASP_EXACT = 'Exact'
ASP_STATIONARY = 'Stationary'
# Aspect direction
ASP_DEXTER = 'Dexter' # Right side
ASP_SINISTER = 'Sinister' # Left side
# Aspect properties
ASP_ASSOCIATE = 'Associate'
ASP_DISSOCIATE = 'Dissociate'
ASP_NOT_APPLICABLE = 'Not Applicable'
# === Some Lists === */
LIST_SIGNS = [
ARIES, TAURUS, GEMINI, CANCER, LEO, VIRGO,
LIBRA, SCORPIO, SAGITTARIUS, CAPRICORN, AQUARIUS,
PISCES,
]
LIST_OBJECTS = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER,
SATURN, URANUS, NEPTUNE, PLUTO, CHIRON, NORTH_NODE,
SOUTH_NODE, SYZYGY, PARS_FORTUNA,
]
LIST_OBJECTS_TRADITIONAL = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN,
NORTH_NODE, SOUTH_NODE, SYZYGY, PARS_FORTUNA
]
LIST_SEVEN_PLANETS = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN
]
LIST_HOUSES = [
HOUSE1, HOUSE2, HOUSE3, HOUSE4, HOUSE5, HOUSE6,
HOUSE7, HOUSE8, HOUSE9, HOUSE10, HOUSE11, HOUSE12,
]
LIST_FIXED_STARS = [
STAR_ALGENIB, STAR_ALPHERATZ, STAR_ALGOL, STAR_ALCYONE,
STAR_PLEIADES, STAR_ALDEBARAN, STAR_RIGEL, STAR_CAPELLA,
STAR_BETELGEUSE, STAR_SIRIUS, STAR_CANOPUS, STAR_CASTOR,
STAR_POLLUX, STAR_PROCYON, STAR_ASELLUS_BOREALIS,
STAR_ASELLUS_AUSTRALIS, STAR_ALPHARD, STAR_REGULUS,
STAR_DENEBOLA, STAR_ALGORAB, STAR_SPICA, STAR_ARCTURUS,
STAR_ALPHECCA, STAR_ZUBEN_ELSCHEMALI, STAR_UNUKALHAI,
STAR_AGENA, STAR_RIGEL_CENTAURUS, STAR_ANTARES,
STAR_LESATH, STAR_VEGA, STAR_ALTAIR, STAR_DENEB_ALGEDI,
STAR_FOMALHAUT, STAR_DENEB_ADIGE, STAR_ACHERNAR,
]
Fix pep8 violations
"""
This file is part of flatlib - (C) FlatAngle
Author: João Ventura (flatangleweb@gmail.com)
--------------------------------------------
This module defines the names of signs, objects, houses and fixed-stars.
It also includes the definition of some ordered lists for iterations.
"""
# === Base constants === */
# Four primitive qualities
QUALITY_HOT = 'Hot'
QUALITY_COLD = 'Cold'
QUALITY_DRY = 'Dry'
QUALITY_HUMID = 'Humid'
# Four Elements
ELEMENT_FIRE = 'Fire'
ELEMENT_EARTH = 'Earth'
ELEMENT_AIR = 'Air'
ELEMENT_WATER = 'Water'
# Four Temperaments
TEMPERAMENT_CHOLERIC = 'Choleric'
TEMPERAMENT_MELANCHOLIC = 'Melancholic'
TEMPERAMENT_SANGUINE = 'Sanguine'
TEMPERAMENT_PHLEGMATIC = 'Phlegmatic'
# Genders
GENDER_MASCULINE = 'Masculine'
GENDER_FEMININE = 'Feminine'
GENDER_NEUTRAL = 'Neutral'
# Factions
FACTION_DIURNAL = 'Diurnal'
FACTION_NOCTURNAL = 'Nocturnal'
FACTION_NEUTRAL = 'Neutral'
# Moon Quarters
MOON_FIRST_QUARTER = 'First Quarter'
MOON_SECOND_QUARTER = 'Second Quarter'
MOON_THIRD_QUARTER = 'Third Quarter'
MOON_LAST_QUARTER = 'Last Quarter'
# Sun seasons
SUN_SPRING = 'Spring'
SUN_SUMMER = 'Summer'
SUN_AUTUMN = 'Autumn'
SUN_WINTER = 'Winter'
# === Signs === */
ARIES = 'Aries'
TAURUS = 'Taurus'
GEMINI = 'Gemini'
CANCER = 'Cancer'
LEO = 'Leo'
VIRGO = 'Virgo'
LIBRA = 'Libra'
SCORPIO = 'Scorpio'
SAGITTARIUS = 'Sagittarius'
CAPRICORN = 'Capricorn'
AQUARIUS = 'Aquarius'
PISCES = 'Pisces'
# Sign modes
MODE_CARDINAL = 'Cardinal'
MODE_FIXED = 'Fixed'
MODE_MUTABLE = 'Mutable'
# Sign figures
SIGN_FIGURE_NONE = 'None'
SIGN_FIGURE_BEAST = 'Beast'
SIGN_FIGURE_HUMAN = 'Human'
SIGN_FIGURE_WILD = 'Wild'
# Sign fertilities
SIGN_FERTILE = 'Fertile'
SIGN_MODERATELY_FERTILE = 'Moderately Fertile'
SIGN_MODERATELY_STERILE = 'Moderately Sterile'
SIGN_STERILE = 'Sterile'
# === Objects === */
# Names
SUN = 'Sun'
MOON = 'Moon'
MERCURY = 'Mercury'
VENUS = 'Venus'
MARS = 'Mars'
JUPITER = 'Jupiter'
SATURN = 'Saturn'
URANUS = 'Uranus'
NEPTUNE = 'Neptune'
PLUTO = 'Pluto'
CHIRON = 'Chiron'
NORTH_NODE = 'North Node'
SOUTH_NODE = 'South Node'
SYZYGY = 'Syzygy'
PARS_FORTUNA = 'Pars Fortuna'
NO_PLANET = 'None'
# Mean daily motions
MEAN_MOTION_SUN = 0.9833
MEAN_MOTION_MOON = 13.1833
# Object types
OBJ_PLANET = 'Planet'
OBJ_HOUSE = 'House'
OBJ_MOON_NODE = 'Moon Node'
OBJ_ARABIC_PART = 'Arabic Part'
OBJ_FIXED_STAR = 'Fixed Star'
OBJ_ASTEROID = 'Asteroid'
OBJ_LUNATION = 'Lunation'
OBJ_GENERIC = 'Generic'
# Movements
MOV_DIRECT = 'Direct'
MOV_RETROGRADE = 'Retrogade'
MOV_STATIONARY = 'Stationary'
# === Houses === */
HOUSE1 = 'House1'
HOUSE2 = 'House2'
HOUSE3 = 'House3'
HOUSE4 = 'House4'
HOUSE5 = 'House5'
HOUSE6 = 'House6'
HOUSE7 = 'House7'
HOUSE8 = 'House8'
HOUSE9 = 'House9'
HOUSE10 = 'House10'
HOUSE11 = 'House11'
HOUSE12 = 'House12'
# House conditions
HOUSE_ANGULAR = 'Angular'
HOUSE_SUCCEDENT = 'Succedent'
HOUSE_CADENT = 'Cadent'
# Benefic/Malefic houses
HOUSES_BENEFIC = [HOUSE1, HOUSE5, HOUSE11]
HOUSES_MALEFIC = [HOUSE6, HOUSE12]
# House Systems
HOUSES_PLACIDUS = 'Placidus'
HOUSES_KOCH = 'Koch'
HOUSES_PORPHYRIUS = 'Porphyrius'
HOUSES_REGIOMONTANUS = 'Regiomontanus'
HOUSES_CAMPANUS = 'Campanus'
HOUSES_EQUAL = 'Equal'
HOUSES_EQUAL_2 = 'Equal 2'
HOUSES_VEHLOW_EQUAL = 'Vehlow Equal'
HOUSES_WHOLE_SIGN = 'Whole Sign'
HOUSES_MERIDIAN = 'Meridian'
HOUSES_AZIMUTHAL = 'Azimuthal'
HOUSES_POLICH_PAGE = 'Polich Page'
HOUSES_ALCABITUS = 'Alcabitus'
HOUSES_MORINUS = 'Morinus'
HOUSES_DEFAULT = HOUSES_ALCABITUS
# === Angles === */
ANGLE_ASC = 'Asc'
ANGLE_MC = 'MC'
ANGLE_DESC = 'Desc'
ANGLE_IC = 'IC'
# === Fixed Stars === */
STAR_ALGENIB = 'Algenib'
STAR_ALPHERATZ = 'Alpheratz'
STAR_ALGOL = 'Algol'
STAR_ALCYONE = 'Alcyone'
STAR_PLEIADES = STAR_ALCYONE
STAR_ALDEBARAN = 'Aldebaran'
STAR_RIGEL = 'Rigel'
STAR_CAPELLA = 'Capella'
STAR_BETELGEUSE = 'Betelgeuse'
STAR_SIRIUS = 'Sirius'
STAR_CANOPUS = 'Canopus'
STAR_CASTOR = 'Castor'
STAR_POLLUX = 'Pollux'
STAR_PROCYON = 'Procyon'
STAR_ASELLUS_BOREALIS = 'Asellus Borealis'
STAR_ASELLUS_AUSTRALIS = 'Asellus Australis'
STAR_ALPHARD = 'Alphard'
STAR_REGULUS = 'Regulus'
STAR_DENEBOLA = 'Denebola'
STAR_ALGORAB = 'Algorab'
STAR_SPICA = 'Spica'
STAR_ARCTURUS = 'Arcturus'
STAR_ALPHECCA = 'Alphecca'
STAR_ZUBEN_ELGENUBI = 'Zuben Elgenubi'
STAR_ZUBEN_ELSCHEMALI = 'Zuben Eshamali'
STAR_UNUKALHAI = 'Unukalhai'
STAR_AGENA = 'Agena'
STAR_RIGEL_CENTAURUS = 'Rigel Kentaurus'
STAR_ANTARES = 'Antares'
STAR_LESATH = 'Lesath'
STAR_VEGA = 'Vega'
STAR_ALTAIR = 'Altair'
STAR_DENEB_ALGEDI = 'Deneb Algedi'
STAR_FOMALHAUT = 'Fomalhaut'
STAR_DENEB_ADIGE = 'Deneb' # Alpha-Cygnus
STAR_ACHERNAR = 'Achernar'
# === Aspects === */
# Major Aspects
ASP_NO_ASPECT = -1
ASP_CONJUNCTION = 0
ASP_SEXTILE = 60
ASP_SQUARE = 90
ASP_TRINE = 120
ASP_OPPOSITION = 180
# Minor Aspects
ASP_SEMISEXTILE = 30
ASP_SEMIQUINTILE = 36
ASP_SEMISQUARE = 45
ASP_QUINTILE = 72
ASP_SESQUIQUINTILE = 108
ASP_SESQUISQUARE = 135
ASP_BIQUINTILE = 144
ASP_QUINCUNX = 150
# Useful lists
ASP_MAJOR_ASPECTS = [0, 60, 90, 120, 180]
ASP_MINOR_ASPECTS = [30, 36, 45, 72, 108, 135, 144, 150]
ASP_ALL_ASPECTS = ASP_MAJOR_ASPECTS + ASP_MINOR_ASPECTS
# Aspect movements
ASP_APPLICATIVE = 'Applicative'
ASP_SEPARATIVE = 'Separative'
ASP_EXACT = 'Exact'
ASP_STATIONARY = 'Stationary'
# Aspect direction
ASP_DEXTER = 'Dexter' # Right side
ASP_SINISTER = 'Sinister' # Left side
# Aspect properties
ASP_ASSOCIATE = 'Associate'
ASP_DISSOCIATE = 'Dissociate'
ASP_NOT_APPLICABLE = 'Not Applicable'
# === Some Lists === */
LIST_SIGNS = [
ARIES, TAURUS, GEMINI, CANCER, LEO, VIRGO,
LIBRA, SCORPIO, SAGITTARIUS, CAPRICORN, AQUARIUS,
PISCES,
]
LIST_OBJECTS = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER,
SATURN, URANUS, NEPTUNE, PLUTO, CHIRON, NORTH_NODE,
SOUTH_NODE, SYZYGY, PARS_FORTUNA,
]
LIST_OBJECTS_TRADITIONAL = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN,
NORTH_NODE, SOUTH_NODE, SYZYGY, PARS_FORTUNA
]
LIST_SEVEN_PLANETS = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN
]
LIST_HOUSES = [
HOUSE1, HOUSE2, HOUSE3, HOUSE4, HOUSE5, HOUSE6,
HOUSE7, HOUSE8, HOUSE9, HOUSE10, HOUSE11, HOUSE12,
]
LIST_FIXED_STARS = [
STAR_ALGENIB, STAR_ALPHERATZ, STAR_ALGOL, STAR_ALCYONE,
STAR_PLEIADES, STAR_ALDEBARAN, STAR_RIGEL, STAR_CAPELLA,
STAR_BETELGEUSE, STAR_SIRIUS, STAR_CANOPUS, STAR_CASTOR,
STAR_POLLUX, STAR_PROCYON, STAR_ASELLUS_BOREALIS,
STAR_ASELLUS_AUSTRALIS, STAR_ALPHARD, STAR_REGULUS,
STAR_DENEBOLA, STAR_ALGORAB, STAR_SPICA, STAR_ARCTURUS,
STAR_ALPHECCA, STAR_ZUBEN_ELSCHEMALI, STAR_UNUKALHAI,
STAR_AGENA, STAR_RIGEL_CENTAURUS, STAR_ANTARES,
STAR_LESATH, STAR_VEGA, STAR_ALTAIR, STAR_DENEB_ALGEDI,
STAR_FOMALHAUT, STAR_DENEB_ADIGE, STAR_ACHERNAR,
]
|
#!/usr/bin/env python
'''Prints all window events to stdout.
'''
from pyglet import window
from pyglet.window.event import WindowEventLogger
win = window.Window()
win.push_handlers(WindowEventLogger())
while not win.has_exit:
win.dispatch_events()
win.clear()
win.flip()
Make events example window resizable.
#!/usr/bin/env python
'''Prints all window events to stdout.
'''
from pyglet import window
from pyglet.window.event import WindowEventLogger
win = window.Window(resizable=True)
win.push_handlers(WindowEventLogger())
while not win.has_exit:
win.dispatch_events()
win.clear()
win.flip()
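# Alternative sketch (not part of the original example): since pyglet 1.1 the idiomatic way
# to run the same logger is to let pyglet.app.run() drive the event loop instead of the
# manual dispatch_events()/clear()/flip() cycle shown above.
from pyglet import app

logger_win = window.Window(resizable=True)
logger_win.push_handlers(WindowEventLogger())

@logger_win.event
def on_draw():
    # clearing in on_draw replaces the explicit clear()/flip() calls of the manual loop
    logger_win.clear()

app.run()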
"""
This file is part of flatlib - (C) FlatAngle
Author: João Ventura (flatangleweb@gmail.com)
This module defines the names of signs, objects, angles,
houses and fixed-stars used in the library.
"""
# === Base constants === */
# Four primitive qualities
HOT = 'Hot'
COLD = 'Cold'
DRY = 'Dry'
HUMID = 'Humid'
# Four Elements
FIRE = 'Fire'
EARTH = 'Earth'
AIR = 'Air'
WATER = 'Water'
# Four Temperaments
CHOLERIC = 'Choleric'
MELANCHOLIC = 'Melancholic'
SANGUINE = 'Sanguine'
PHLEGMATIC = 'Phlegmatic'
# Genders
MASCULINE = 'Masculine'
FEMININE = 'Feminine'
NEUTRAL = 'Neutral'
# Factions
DIURNAL = 'Diurnal'
NOCTURNAL = 'Nocturnal'
# Sun seasons
SPRING = 'Spring'
SUMMER = 'Summer'
AUTUMN = 'Autumn'
WINTER = 'Winter'
# Moon Quarters
MOON_FIRST_QUARTER = 'First Quarter'
MOON_SECOND_QUARTER = 'Second Quarter'
MOON_THIRD_QUARTER = 'Third Quarter'
MOON_LAST_QUARTER = 'Last Quarter'
# === Signs === */
ARIES = 'Aries'
TAURUS = 'Taurus'
GEMINI = 'Gemini'
CANCER = 'Cancer'
LEO = 'Leo'
VIRGO = 'Virgo'
LIBRA = 'Libra'
SCORPIO = 'Scorpio'
SAGITTARIUS = 'Sagittarius'
CAPRICORN = 'Capricorn'
AQUARIUS = 'Aquarius'
PISCES = 'Pisces'
# Sign modes
CARDINAL = 'Cardinal'
FIXED = 'Fixed'
MUTABLE = 'Mutable'
# Sign figures
SIGN_FIGURE_NONE = 'None'
SIGN_FIGURE_BEAST = 'Beast'
SIGN_FIGURE_HUMAN = 'Human'
SIGN_FIGURE_WILD = 'Wild'
# Sign fertilities
SIGN_FERTILE = 'Fertile'
SIGN_STERILE = 'Sterile'
SIGN_MODERATELY_FERTILE = 'Moderately Fertile'
SIGN_MODERATELY_STERILE = 'Moderately Sterile'
# === Objects === */
# Names
SUN = 'Sun'
MOON = 'Moon'
MERCURY = 'Mercury'
VENUS = 'Venus'
MARS = 'Mars'
JUPITER = 'Jupiter'
SATURN = 'Saturn'
URANUS = 'Uranus'
NEPTUNE = 'Neptune'
PLUTO = 'Pluto'
CHIRON = 'Chiron'
NORTH_NODE = 'North Node'
SOUTH_NODE = 'South Node'
SYZYGY = 'Syzygy'
PARS_FORTUNA = 'Pars Fortuna'
NO_PLANET = 'None'
# Object movement
DIRECT = 'Direct'
RETROGRADE = 'Retrogade'
STATIONARY = 'Stationary'
# Mean daily motions
MEAN_MOTION_SUN = 0.9833
MEAN_MOTION_MOON = 13.1833
# Object type
OBJ_PLANET = 'Planet'
OBJ_HOUSE = 'House'
OBJ_MOON_NODE = 'Moon Node'
OBJ_ARABIC_PART = 'Arabic Part'
OBJ_FIXED_STAR = 'Fixed Star'
OBJ_ASTEROID = 'Asteroid'
OBJ_LUNATION = 'Lunation'
OBJ_GENERIC = 'Generic'
# === Houses === */
HOUSE1 = 'House1'
HOUSE2 = 'House2'
HOUSE3 = 'House3'
HOUSE4 = 'House4'
HOUSE5 = 'House5'
HOUSE6 = 'House6'
HOUSE7 = 'House7'
HOUSE8 = 'House8'
HOUSE9 = 'House9'
HOUSE10 = 'House10'
HOUSE11 = 'House11'
HOUSE12 = 'House12'
# House conditions
ANGULAR = 'Angular'
SUCCEDENT = 'Succedent'
CADENT = 'Cadent'
# Benefic/Malefic houses
HOUSES_BENEFIC = [HOUSE1, HOUSE5, HOUSE11]
HOUSES_MALEFIC = [HOUSE6, HOUSE12]
# House Systems
HOUSES_PLACIDUS = 'Placidus'
HOUSES_KOCH = 'Koch'
HOUSES_PORPHYRIUS = 'Porphyrius'
HOUSES_REGIOMONTANUS = 'Regiomontanus'
HOUSES_CAMPANUS = 'Campanus'
HOUSES_EQUAL = 'Equal'
HOUSES_EQUAL_2 = 'Equal 2'
HOUSES_VEHLOW_EQUAL = 'Vehlow Equal'
HOUSES_WHOLE_SIGN = 'Whole Sign'
HOUSES_MERIDIAN = 'Meridian'
HOUSES_AZIMUTHAL = 'Azimuthal'
HOUSES_POLICH_PAGE = 'Polich Page'
HOUSES_ALCABITUS = 'Alcabitus'
HOUSES_MORINUS = 'Morinus'
HOUSES_DEFAULT = HOUSES_ALCABITUS
# === Angles === */
ASC = 'Asc'
DESC = 'Desc'
MC = 'MC'
IC = 'IC'
# === Fixed Stars === */
STAR_ALGENIB = 'Algenib'
STAR_ALPHERATZ = 'Alpheratz'
STAR_ALGOL = 'Algol'
STAR_ALCYONE = 'Alcyone'
STAR_PLEIADES = STAR_ALCYONE
STAR_ALDEBARAN = 'Aldebaran'
STAR_RIGEL = 'Rigel'
STAR_CAPELLA = 'Capella'
STAR_BETELGEUSE = 'Betelgeuse'
STAR_SIRIUS = 'Sirius'
STAR_CANOPUS = 'Canopus'
STAR_CASTOR = 'Castor'
STAR_POLLUX = 'Pollux'
STAR_PROCYON = 'Procyon'
STAR_ASELLUS_BOREALIS = 'Asellus Borealis'
STAR_ASELLUS_AUSTRALIS = 'Asellus Australis'
STAR_ALPHARD = 'Alphard'
STAR_REGULUS = 'Regulus'
STAR_DENEBOLA = 'Denebola'
STAR_ALGORAB = 'Algorab'
STAR_SPICA = 'Spica'
STAR_ARCTURUS = 'Arcturus'
STAR_ALPHECCA = 'Alphecca'
STAR_ZUBEN_ELGENUBI = 'Zuben Elgenubi'
STAR_ZUBEN_ELSCHEMALI = 'Zuben Eshamali'
STAR_UNUKALHAI = 'Unukalhai'
STAR_AGENA = 'Agena'
STAR_RIGEL_CENTAURUS = 'Rigel Kentaurus'
STAR_ANTARES = 'Antares'
STAR_LESATH = 'Lesath'
STAR_VEGA = 'Vega'
STAR_ALTAIR = 'Altair'
STAR_DENEB_ALGEDI = 'Deneb Algedi'
STAR_FOMALHAUT = 'Fomalhaut'
STAR_DENEB_ADIGE = 'Deneb' # Alpha-Cygnus
STAR_ACHERNAR = 'Achernar'
# === Aspects === */
# Major Aspects
NO_ASPECT = -1
CONJUNCTION = 0
SEXTILE = 60
SQUARE = 90
TRINE = 120
OPPOSITION = 180
# Minor Aspects
SEMISEXTILE = 30
SEMIQUINTILE = 36
SEMISQUARE = 45
QUINTILE = 72
SESQUIQUINTILE = 108
SESQUISQUARE = 135
BIQUINTILE = 144
QUINCUNX = 150
# Aspect movement
APPLICATIVE = 'Applicative'
SEPARATIVE = 'Separative'
EXACT = 'Exact'
NO_MOVEMENT = 'None'
# Aspect direction
DEXTER = 'Dexter' # Right side
SINISTER = 'Sinister' # Left side
# Aspect properties
ASSOCIATE = 'Associate'
DISSOCIATE = 'Dissociate'
# Aspect lists
MAJOR_ASPECTS = [0, 60, 90, 120, 180]
MINOR_ASPECTS = [30, 36, 45, 72, 108, 135, 144, 150]
ALL_ASPECTS = MAJOR_ASPECTS + MINOR_ASPECTS
# === Some Lists === */
LIST_SIGNS = [
ARIES, TAURUS, GEMINI, CANCER, LEO, VIRGO, LIBRA,
SCORPIO, SAGITTARIUS, CAPRICORN, AQUARIUS, PISCES
]
LIST_OBJECTS = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN,
URANUS, NEPTUNE, PLUTO, CHIRON, NORTH_NODE,
SOUTH_NODE, SYZYGY, PARS_FORTUNA,
]
LIST_OBJECTS_TRADITIONAL = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN,
NORTH_NODE, SOUTH_NODE, SYZYGY, PARS_FORTUNA
]
LIST_SEVEN_PLANETS = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN
]
LIST_HOUSES = [
HOUSE1, HOUSE2, HOUSE3, HOUSE4, HOUSE5, HOUSE6,
HOUSE7, HOUSE8, HOUSE9, HOUSE10, HOUSE11, HOUSE12,
]
LIST_ANGLES = [
ASC, MC, DESC, IC
]
LIST_FIXED_STARS = [
STAR_ALGENIB, STAR_ALPHERATZ, STAR_ALGOL, STAR_ALCYONE,
STAR_PLEIADES, STAR_ALDEBARAN, STAR_RIGEL, STAR_CAPELLA,
STAR_BETELGEUSE, STAR_SIRIUS, STAR_CANOPUS, STAR_CASTOR,
STAR_POLLUX, STAR_PROCYON, STAR_ASELLUS_BOREALIS,
STAR_ASELLUS_AUSTRALIS, STAR_ALPHARD, STAR_REGULUS,
STAR_DENEBOLA, STAR_ALGORAB, STAR_SPICA, STAR_ARCTURUS,
STAR_ALPHECCA, STAR_ZUBEN_ELSCHEMALI, STAR_UNUKALHAI,
STAR_AGENA, STAR_RIGEL_CENTAURUS, STAR_ANTARES,
STAR_LESATH, STAR_VEGA, STAR_ALTAIR, STAR_DENEB_ALGEDI,
STAR_FOMALHAUT, STAR_DENEB_ADIGE, STAR_ACHERNAR,
]
Fixed typo "Retrogade".
"""
This file is part of flatlib - (C) FlatAngle
Author: João Ventura (flatangleweb@gmail.com)
This module defines the names of signs, objects, angles,
houses and fixed-stars used in the library.
"""
# === Base constants === */
# Four primitive qualities
HOT = 'Hot'
COLD = 'Cold'
DRY = 'Dry'
HUMID = 'Humid'
# Four Elements
FIRE = 'Fire'
EARTH = 'Earth'
AIR = 'Air'
WATER = 'Water'
# Four Temperaments
CHOLERIC = 'Choleric'
MELANCHOLIC = 'Melancholic'
SANGUINE = 'Sanguine'
PHLEGMATIC = 'Phlegmatic'
# Genders
MASCULINE = 'Masculine'
FEMININE = 'Feminine'
NEUTRAL = 'Neutral'
# Factions
DIURNAL = 'Diurnal'
NOCTURNAL = 'Nocturnal'
# Sun seasons
SPRING = 'Spring'
SUMMER = 'Summer'
AUTUMN = 'Autumn'
WINTER = 'Winter'
# Moon Quarters
MOON_FIRST_QUARTER = 'First Quarter'
MOON_SECOND_QUARTER = 'Second Quarter'
MOON_THIRD_QUARTER = 'Third Quarter'
MOON_LAST_QUARTER = 'Last Quarter'
# === Signs === */
ARIES = 'Aries'
TAURUS = 'Taurus'
GEMINI = 'Gemini'
CANCER = 'Cancer'
LEO = 'Leo'
VIRGO = 'Virgo'
LIBRA = 'Libra'
SCORPIO = 'Scorpio'
SAGITTARIUS = 'Sagittarius'
CAPRICORN = 'Capricorn'
AQUARIUS = 'Aquarius'
PISCES = 'Pisces'
# Sign modes
CARDINAL = 'Cardinal'
FIXED = 'Fixed'
MUTABLE = 'Mutable'
# Sign figures
SIGN_FIGURE_NONE = 'None'
SIGN_FIGURE_BEAST = 'Beast'
SIGN_FIGURE_HUMAN = 'Human'
SIGN_FIGURE_WILD = 'Wild'
# Sign fertilities
SIGN_FERTILE = 'Fertile'
SIGN_STERILE = 'Sterile'
SIGN_MODERATELY_FERTILE = 'Moderately Fertile'
SIGN_MODERATELY_STERILE = 'Moderately Sterile'
# === Objects === */
# Names
SUN = 'Sun'
MOON = 'Moon'
MERCURY = 'Mercury'
VENUS = 'Venus'
MARS = 'Mars'
JUPITER = 'Jupiter'
SATURN = 'Saturn'
URANUS = 'Uranus'
NEPTUNE = 'Neptune'
PLUTO = 'Pluto'
CHIRON = 'Chiron'
NORTH_NODE = 'North Node'
SOUTH_NODE = 'South Node'
SYZYGY = 'Syzygy'
PARS_FORTUNA = 'Pars Fortuna'
NO_PLANET = 'None'
# Object movement
DIRECT = 'Direct'
RETROGRADE = 'Retrograde'
STATIONARY = 'Stationary'
# Mean daily motions
MEAN_MOTION_SUN = 0.9833
MEAN_MOTION_MOON = 13.1833
# Object type
OBJ_PLANET = 'Planet'
OBJ_HOUSE = 'House'
OBJ_MOON_NODE = 'Moon Node'
OBJ_ARABIC_PART = 'Arabic Part'
OBJ_FIXED_STAR = 'Fixed Star'
OBJ_ASTEROID = 'Asteroid'
OBJ_LUNATION = 'Lunation'
OBJ_GENERIC = 'Generic'
# === Houses === */
HOUSE1 = 'House1'
HOUSE2 = 'House2'
HOUSE3 = 'House3'
HOUSE4 = 'House4'
HOUSE5 = 'House5'
HOUSE6 = 'House6'
HOUSE7 = 'House7'
HOUSE8 = 'House8'
HOUSE9 = 'House9'
HOUSE10 = 'House10'
HOUSE11 = 'House11'
HOUSE12 = 'House12'
# House conditions
ANGULAR = 'Angular'
SUCCEDENT = 'Succedent'
CADENT = 'Cadent'
# Benefic/Malefic houses
HOUSES_BENEFIC = [HOUSE1, HOUSE5, HOUSE11]
HOUSES_MALEFIC = [HOUSE6, HOUSE12]
# House Systems
HOUSES_PLACIDUS = 'Placidus'
HOUSES_KOCH = 'Koch'
HOUSES_PORPHYRIUS = 'Porphyrius'
HOUSES_REGIOMONTANUS = 'Regiomontanus'
HOUSES_CAMPANUS = 'Campanus'
HOUSES_EQUAL = 'Equal'
HOUSES_EQUAL_2 = 'Equal 2'
HOUSES_VEHLOW_EQUAL = 'Vehlow Equal'
HOUSES_WHOLE_SIGN = 'Whole Sign'
HOUSES_MERIDIAN = 'Meridian'
HOUSES_AZIMUTHAL = 'Azimuthal'
HOUSES_POLICH_PAGE = 'Polich Page'
HOUSES_ALCABITUS = 'Alcabitus'
HOUSES_MORINUS = 'Morinus'
HOUSES_DEFAULT = HOUSES_ALCABITUS
# === Angles === */
ASC = 'Asc'
DESC = 'Desc'
MC = 'MC'
IC = 'IC'
# === Fixed Stars === */
STAR_ALGENIB = 'Algenib'
STAR_ALPHERATZ = 'Alpheratz'
STAR_ALGOL = 'Algol'
STAR_ALCYONE = 'Alcyone'
STAR_PLEIADES = STAR_ALCYONE
STAR_ALDEBARAN = 'Aldebaran'
STAR_RIGEL = 'Rigel'
STAR_CAPELLA = 'Capella'
STAR_BETELGEUSE = 'Betelgeuse'
STAR_SIRIUS = 'Sirius'
STAR_CANOPUS = 'Canopus'
STAR_CASTOR = 'Castor'
STAR_POLLUX = 'Pollux'
STAR_PROCYON = 'Procyon'
STAR_ASELLUS_BOREALIS = 'Asellus Borealis'
STAR_ASELLUS_AUSTRALIS = 'Asellus Australis'
STAR_ALPHARD = 'Alphard'
STAR_REGULUS = 'Regulus'
STAR_DENEBOLA = 'Denebola'
STAR_ALGORAB = 'Algorab'
STAR_SPICA = 'Spica'
STAR_ARCTURUS = 'Arcturus'
STAR_ALPHECCA = 'Alphecca'
STAR_ZUBEN_ELGENUBI = 'Zuben Elgenubi'
STAR_ZUBEN_ELSCHEMALI = 'Zuben Eshamali'
STAR_UNUKALHAI = 'Unukalhai'
STAR_AGENA = 'Agena'
STAR_RIGEL_CENTAURUS = 'Rigel Kentaurus'
STAR_ANTARES = 'Antares'
STAR_LESATH = 'Lesath'
STAR_VEGA = 'Vega'
STAR_ALTAIR = 'Altair'
STAR_DENEB_ALGEDI = 'Deneb Algedi'
STAR_FOMALHAUT = 'Fomalhaut'
STAR_DENEB_ADIGE = 'Deneb' # Alpha-Cygnus
STAR_ACHERNAR = 'Achernar'
# === Aspects === */
# Major Aspects
NO_ASPECT = -1
CONJUNCTION = 0
SEXTILE = 60
SQUARE = 90
TRINE = 120
OPPOSITION = 180
# Minor Aspects
SEMISEXTILE = 30
SEMIQUINTILE = 36
SEMISQUARE = 45
QUINTILE = 72
SESQUIQUINTILE = 108
SESQUISQUARE = 135
BIQUINTILE = 144
QUINCUNX = 150
# Aspect movement
APPLICATIVE = 'Applicative'
SEPARATIVE = 'Separative'
EXACT = 'Exact'
NO_MOVEMENT = 'None'
# Aspect direction
DEXTER = 'Dexter' # Right side
SINISTER = 'Sinister' # Left side
# Aspect properties
ASSOCIATE = 'Associate'
DISSOCIATE = 'Dissociate'
# Aspect lists
MAJOR_ASPECTS = [0, 60, 90, 120, 180]
MINOR_ASPECTS = [30, 36, 45, 72, 108, 135, 144, 150]
ALL_ASPECTS = MAJOR_ASPECTS + MINOR_ASPECTS
# === Some Lists === */
LIST_SIGNS = [
ARIES, TAURUS, GEMINI, CANCER, LEO, VIRGO, LIBRA,
SCORPIO, SAGITTARIUS, CAPRICORN, AQUARIUS, PISCES
]
LIST_OBJECTS = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN,
URANUS, NEPTUNE, PLUTO, CHIRON, NORTH_NODE,
SOUTH_NODE, SYZYGY, PARS_FORTUNA,
]
LIST_OBJECTS_TRADITIONAL = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN,
NORTH_NODE, SOUTH_NODE, SYZYGY, PARS_FORTUNA
]
LIST_SEVEN_PLANETS = [
SUN, MOON, MERCURY, VENUS, MARS, JUPITER, SATURN
]
LIST_HOUSES = [
HOUSE1, HOUSE2, HOUSE3, HOUSE4, HOUSE5, HOUSE6,
HOUSE7, HOUSE8, HOUSE9, HOUSE10, HOUSE11, HOUSE12,
]
LIST_ANGLES = [
ASC, MC, DESC, IC
]
LIST_FIXED_STARS = [
STAR_ALGENIB, STAR_ALPHERATZ, STAR_ALGOL, STAR_ALCYONE,
STAR_PLEIADES, STAR_ALDEBARAN, STAR_RIGEL, STAR_CAPELLA,
STAR_BETELGEUSE, STAR_SIRIUS, STAR_CANOPUS, STAR_CASTOR,
STAR_POLLUX, STAR_PROCYON, STAR_ASELLUS_BOREALIS,
STAR_ASELLUS_AUSTRALIS, STAR_ALPHARD, STAR_REGULUS,
STAR_DENEBOLA, STAR_ALGORAB, STAR_SPICA, STAR_ARCTURUS,
STAR_ALPHECCA, STAR_ZUBEN_ELSCHEMALI, STAR_UNUKALHAI,
STAR_AGENA, STAR_RIGEL_CENTAURUS, STAR_ANTARES,
STAR_LESATH, STAR_VEGA, STAR_ALTAIR, STAR_DENEB_ALGEDI,
STAR_FOMALHAUT, STAR_DENEB_ADIGE, STAR_ACHERNAR,
]
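# A minimal sketch of the "ordered lists for iterations" mentioned in the module docstring
# (illustrative only; pairing signs with houses below just demonstrates zipping two of the
# ordered lists, it is not an astrological statement).
if __name__ == '__main__':
    for sign, house in zip(LIST_SIGNS, LIST_HOUSES):
        print('%s <-> %s' % (sign, house))
    print('%d fixed stars, %d aspects' % (len(LIST_FIXED_STARS), len(ALL_ASPECTS)))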
from __future__ import unicode_literals, division, absolute_import
from builtins import *
from future.utils import PY2, native_str
import copy
import functools
import logging
from flexget.plugin import PluginError
from flexget.utils.lazy_dict import LazyDict, LazyLookup
from flexget.utils.template import render_from_entry
log = logging.getLogger('entry')
class EntryUnicodeError(Exception):
"""This exception is thrown when trying to set non-unicode compatible field value to entry."""
def __init__(self, key, value):
self.key = key
self.value = value
def __str__(self):
return 'Entry strings must be unicode: %s (%r)' % (self.key, self.value)
class Entry(LazyDict):
"""
Represents one item in a task. Must have *url* and *title* fields.
Automatically stores the *original_url* key, which is necessary because
plugins (e.g. urlrewriters) may change *url* into something else
and otherwise that information would be lost.
Entry will also transparently convert all ascii strings into unicode
and raise :class:`EntryUnicodeError` if conversion fails on any value
being set. Such failures are caught by :class:`~flexget.task.Task`
and trigger :meth:`~flexget.task.Task.abort`.
"""
def __init__(self, *args, **kwargs):
super(Entry, self).__init__()
self.traces = []
self.snapshots = {}
self._state = 'undecided'
self._hooks = {'accept': [], 'reject': [], 'fail': [], 'complete': []}
self.task = None
if len(args) == 2:
kwargs['title'] = args[0]
kwargs['url'] = args[1]
args = []
# Make sure constructor does not escape our __setitem__ enforcement
self.update(*args, **kwargs)
def trace(self, message, operation=None, plugin=None):
"""
Adds a trace message to the entry, which should contain useful information about why
a plugin did not operate on the entry. Accept and Reject messages are added to the trace automatically.
:param string message: Message to add into entry trace.
:param string operation: None, reject, accept or fail
:param plugin: Uses task.current_plugin by default, pass value to override
"""
if operation not in (None, 'accept', 'reject', 'fail'):
raise ValueError('Unknown operation %s' % operation)
item = (plugin, operation, message)
if item not in self.traces:
self.traces.append(item)
def run_hooks(self, action, **kwargs):
"""
Run hooks that have been registered for given ``action``.
:param action: Name of action to run hooks for
:param kwargs: Keyword arguments that should be passed to the registered functions
"""
for func in self._hooks[action]:
func(self, **kwargs)
def add_hook(self, action, func, **kwargs):
"""
Add a hook for ``action`` to this entry.
:param string action: One of: 'accept', 'reject', 'fail', 'complete'
:param func: Function to execute when event occurs
:param kwargs: Keyword arguments that should be passed to ``func``
:raises: ValueError when given an invalid ``action``
"""
try:
self._hooks[action].append(functools.partial(func, **kwargs))
except KeyError:
raise ValueError('`%s` is not a valid entry action' % action)
def on_accept(self, func, **kwargs):
"""
Register a function to be called when this entry is accepted.
:param func: The function to call
:param kwargs: Keyword arguments that should be passed to the registered function
"""
self.add_hook('accept', func, **kwargs)
def on_reject(self, func, **kwargs):
"""
Register a function to be called when this entry is rejected.
:param func: The function to call
:param kwargs: Keyword arguments that should be passed to the registered function
"""
self.add_hook('reject', func, **kwargs)
def on_fail(self, func, **kwargs):
"""
Register a function to be called when this entry is failed.
:param func: The function to call
:param kwargs: Keyword arguments that should be passed to the registered function
"""
self.add_hook('fail', func, **kwargs)
def on_complete(self, func, **kwargs):
"""
Register a function to be called when a :class:`Task` has finished processing this entry.
:param func: The function to call
:param kwargs: Keyword arguments that should be passed to the registered function
"""
self.add_hook('complete', func, **kwargs)
def accept(self, reason=None, **kwargs):
if self.rejected:
log.debug('tried to accept rejected %r' % self)
elif not self.accepted:
self._state = 'accepted'
self.trace(reason, operation='accept')
# Run entry on_accept hooks
self.run_hooks('accept', reason=reason, **kwargs)
def reject(self, reason=None, **kwargs):
# ignore rejections on immortal entries
if self.get('immortal'):
reason_str = '(%s)' % reason if reason else ''
log.info('Tried to reject immortal %s %s' % (self['title'], reason_str))
self.trace('Tried to reject immortal %s' % reason_str)
return
if not self.rejected:
self._state = 'rejected'
self.trace(reason, operation='reject')
# Run entry on_reject hooks
self.run_hooks('reject', reason=reason, **kwargs)
def fail(self, reason=None, **kwargs):
log.debug('Marking entry \'%s\' as failed' % self['title'])
if not self.failed:
self._state = 'failed'
self.trace(reason, operation='fail')
log.error('Failed %s (%s)' % (self['title'], reason))
# Run entry on_fail hooks
self.run_hooks('fail', reason=reason, **kwargs)
def complete(self, **kwargs):
# Run entry on_complete hooks
self.run_hooks('complete', **kwargs)
@property
def state(self):
return self._state
@property
def accepted(self):
return self._state == 'accepted'
@property
def rejected(self):
return self._state == 'rejected'
@property
def failed(self):
return self._state == 'failed'
@property
def undecided(self):
return self._state == 'undecided'
def __setitem__(self, key, value):
# Enforce unicode compatibility.
if PY2 and isinstance(value, native_str):
# Allow Python 2's implicit string decoding, but fail now instead of when entry fields are used.
# If the encoding is anything but ascii, it should be decoded to text before setting an entry field
try:
value = value.decode('ascii')
except UnicodeDecodeError:
raise EntryUnicodeError(key, value)
# url and original_url handling
if key == 'url':
if not isinstance(value, (str, LazyLookup)):
raise PluginError('Tried to set %r url to %r' % (self.get('title'), value))
self.setdefault('original_url', value)
# title handling
if key == 'title':
if not isinstance(value, (str, LazyLookup)):
raise PluginError('Tried to set title to %r' % value)
try:
log.trace('ENTRY SET: %s = %r' % (key, value))
except Exception as e:
log.debug('trying to debug key `%s` value threw exception: %s' % (key, e))
super(Entry, self).__setitem__(key, value)
def safe_str(self):
return '%s | %s' % (self['title'], self['url'])
# TODO: this is too manual, maybe we should somehow check this internally and throw some exception if
# application is trying to operate on invalid entry
def isvalid(self):
"""
:return: True if entry is valid. Return False if this cannot be used.
:rtype: bool
"""
if 'title' not in self:
return False
if 'url' not in self:
return False
if not isinstance(self['url'], str):
return False
if not isinstance(self['title'], str):
return False
return True
def take_snapshot(self, name):
"""
Takes a snapshot of the entry under *name*. Snapshots can be accessed via :attr:`.snapshots`.
:param string name: Snapshot name
"""
snapshot = {}
for field, value in self.items():
try:
snapshot[field] = copy.deepcopy(value)
except TypeError:
log.warning('Unable to take `%s` snapshot for field `%s` in `%s`' % (name, field, self['title']))
if snapshot:
if name in self.snapshots:
log.warning('Snapshot `%s` is being overwritten for `%s`' % (name, self['title']))
self.snapshots[name] = snapshot
def update_using_map(self, field_map, source_item, ignore_none=False):
"""
Populates entry fields from a source object using a dictionary that maps from entry field names to
attributes (or keys) in the source object.
:param dict field_map:
A dictionary mapping entry field names to attributes in source_item (or keys,
if source_item is a dict). Nested attributes/keys are also supported, separated by a dot.
A value may also be a function that takes source_item as an argument.
:param source_item:
Source of information to be used by the map
:param ignore_none:
Ignore any None values; do not record them on the Entry
"""
func = dict.get if isinstance(source_item, dict) else getattr
for field, value in field_map.items():
if isinstance(value, str):
v = functools.reduce(func, value.split('.'), source_item)
else:
v = value(source_item)
if ignore_none and v is None:
continue
self[field] = v
def render(self, template):
"""
Renders a template string based on fields in the entry.
:param string template: A template string that uses jinja2 or python string replacement format.
:return: The result of the rendering.
:rtype: string
:raises RenderError: If there is a problem.
"""
if not isinstance(template, str):
raise ValueError('Trying to render non string template, got %s' % repr(template))
log.trace('rendering: %s' % template)
return render_from_entry(template, self)
def __eq__(self, other):
return self.get('title') == other.get('title') and self.get('original_url') == other.get('original_url')
def __hash__(self):
return hash(self.get('title', '') + self.get('original_url', ''))
def __repr__(self):
return '<Entry(title=%s,state=%s)>' % (self['title'], self._state)
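# Illustrative sketch (not part of FlexGet itself): a minimal demonstration of the
# hook/state machinery defined above, using only the Entry API in this module.
if __name__ == '__main__':  # pragma: no cover - example only
    def _report_accept(entry, reason=None, **kwargs):
        # Hooks receive the entry plus any keyword arguments passed to accept().
        print('accepted %s because %s' % (entry['title'], reason))

    example = Entry('Example.Title.S01E01', 'http://example.com/example')
    example.on_accept(_report_accept)
    example.accept(reason='matched a filter')  # state -> 'accepted', hook fires once
    assert example.accepted and not example.undecided
    example.accept(reason='again')  # no-op: already accepted, hook does not fire again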
Outright ban 'bytes' type on entry fields
from __future__ import unicode_literals, division, absolute_import
from builtins import *
from future.utils import PY2, native_str
import copy
import functools
import logging
from flexget.plugin import PluginError
from flexget.utils.lazy_dict import LazyDict, LazyLookup
from flexget.utils.template import render_from_entry
log = logging.getLogger('entry')
class EntryUnicodeError(Exception):
"""This exception is thrown when trying to set non-unicode compatible field value to entry."""
def __init__(self, key, value):
self.key = key
self.value = value
def __str__(self):
return 'Entry strings must be unicode: %s (%r)' % (self.key, self.value)
class Entry(LazyDict):
"""
Represents one item in a task. Must have *url* and *title* fields.
Automatically stores an *original_url* key, which is necessary because
plugins (e.g. urlrewriters) may change *url* into something else,
and otherwise that information would be lost.
Entry will also transparently convert all ascii strings into unicode
and raises :class:`EntryUnicodeError` if conversion fails on any value
being set. Such failures are caught by :class:`~flexget.task.Task`
and trigger :meth:`~flexget.task.Task.abort`.
"""
def __init__(self, *args, **kwargs):
super(Entry, self).__init__()
self.traces = []
self.snapshots = {}
self._state = 'undecided'
self._hooks = {'accept': [], 'reject': [], 'fail': [], 'complete': []}
self.task = None
if len(args) == 2:
kwargs['title'] = args[0]
kwargs['url'] = args[1]
args = []
# Make sure constructor does not escape our __setitem__ enforcement
self.update(*args, **kwargs)
def trace(self, message, operation=None, plugin=None):
"""
Adds a trace message to the entry, which should contain useful information about why
a plugin did not operate on the entry. Accept and Reject messages are added to the trace automatically.
:param string message: Message to add into entry trace.
:param string operation: None, reject, accept or fail
:param plugin: Uses task.current_plugin by default, pass value to override
"""
if operation not in (None, 'accept', 'reject', 'fail'):
raise ValueError('Unknown operation %s' % operation)
item = (plugin, operation, message)
if item not in self.traces:
self.traces.append(item)
def run_hooks(self, action, **kwargs):
"""
Run hooks that have been registered for given ``action``.
:param action: Name of action to run hooks for
:param kwargs: Keyword arguments that should be passed to the registered functions
"""
for func in self._hooks[action]:
func(self, **kwargs)
def add_hook(self, action, func, **kwargs):
"""
Add a hook for ``action`` to this entry.
:param string action: One of: 'accept', 'reject', 'fail', 'complete'
:param func: Function to execute when event occurs
:param kwargs: Keyword arguments that should be passed to ``func``
:raises: ValueError when given an invalid ``action``
"""
try:
self._hooks[action].append(functools.partial(func, **kwargs))
except KeyError:
raise ValueError('`%s` is not a valid entry action' % action)
def on_accept(self, func, **kwargs):
"""
Register a function to be called when this entry is accepted.
:param func: The function to call
:param kwargs: Keyword arguments that should be passed to the registered function
"""
self.add_hook('accept', func, **kwargs)
def on_reject(self, func, **kwargs):
"""
Register a function to be called when this entry is rejected.
:param func: The function to call
:param kwargs: Keyword arguments that should be passed to the registered function
"""
self.add_hook('reject', func, **kwargs)
def on_fail(self, func, **kwargs):
"""
Register a function to be called when this entry is failed.
:param func: The function to call
:param kwargs: Keyword arguments that should be passed to the registered function
"""
self.add_hook('fail', func, **kwargs)
def on_complete(self, func, **kwargs):
"""
Register a function to be called when a :class:`Task` has finished processing this entry.
:param func: The function to call
:param kwargs: Keyword arguments that should be passed to the registered function
"""
self.add_hook('complete', func, **kwargs)
def accept(self, reason=None, **kwargs):
if self.rejected:
log.debug('tried to accept rejected %r' % self)
elif not self.accepted:
self._state = 'accepted'
self.trace(reason, operation='accept')
# Run entry on_accept hooks
self.run_hooks('accept', reason=reason, **kwargs)
def reject(self, reason=None, **kwargs):
# ignore rejections on immortal entries
if self.get('immortal'):
reason_str = '(%s)' % reason if reason else ''
log.info('Tried to reject immortal %s %s' % (self['title'], reason_str))
self.trace('Tried to reject immortal %s' % reason_str)
return
if not self.rejected:
self._state = 'rejected'
self.trace(reason, operation='reject')
# Run entry on_reject hooks
self.run_hooks('reject', reason=reason, **kwargs)
def fail(self, reason=None, **kwargs):
log.debug('Marking entry \'%s\' as failed' % self['title'])
if not self.failed:
self._state = 'failed'
self.trace(reason, operation='fail')
log.error('Failed %s (%s)' % (self['title'], reason))
# Run entry on_fail hooks
self.run_hooks('fail', reason=reason, **kwargs)
def complete(self, **kwargs):
# Run entry on_complete hooks
self.run_hooks('complete', **kwargs)
@property
def state(self):
return self._state
@property
def accepted(self):
return self._state == 'accepted'
@property
def rejected(self):
return self._state == 'rejected'
@property
def failed(self):
return self._state == 'failed'
@property
def undecided(self):
return self._state == 'undecided'
def __setitem__(self, key, value):
# Enforce unicode compatibility.
if PY2 and isinstance(value, native_str):
# Allow Python 2's implicit string decoding, but fail now instead of when entry fields are used.
# If encoding is anything but ascii, it should be decoded to text before setting an entry field
try:
value = value.decode('ascii')
except UnicodeDecodeError:
raise EntryUnicodeError(key, value)
elif isinstance(value, bytes):
raise EntryUnicodeError(key, value)
# url and original_url handling
if key == 'url':
if not isinstance(value, (str, LazyLookup)):
raise PluginError('Tried to set %r url to %r' % (self.get('title'), value))
self.setdefault('original_url', value)
# title handling
if key == 'title':
if not isinstance(value, (str, LazyLookup)):
raise PluginError('Tried to set title to %r' % value)
try:
log.trace('ENTRY SET: %s = %r' % (key, value))
except Exception as e:
log.debug('trying to debug key `%s` value threw exception: %s' % (key, e))
super(Entry, self).__setitem__(key, value)
def safe_str(self):
return '%s | %s' % (self['title'], self['url'])
# TODO: this is too manual, maybe we should somehow check this internally and throw some exception if
# application is trying to operate on invalid entry
def isvalid(self):
"""
:return: True if the entry is valid, False if it cannot be used.
:rtype: bool
"""
if 'title' not in self:
return False
if 'url' not in self:
return False
if not isinstance(self['url'], str):
return False
if not isinstance(self['title'], str):
return False
return True
def take_snapshot(self, name):
"""
Takes a snapshot of the entry under *name*. Snapshots can be accessed via :attr:`.snapshots`.
:param string name: Snapshot name
"""
snapshot = {}
for field, value in self.items():
try:
snapshot[field] = copy.deepcopy(value)
except TypeError:
log.warning('Unable to take `%s` snapshot for field `%s` in `%s`' % (name, field, self['title']))
if snapshot:
if name in self.snapshots:
log.warning('Snapshot `%s` is being overwritten for `%s`' % (name, self['title']))
self.snapshots[name] = snapshot
def update_using_map(self, field_map, source_item, ignore_none=False):
"""
Populates entry fields from a source object using a dictionary that maps from entry field names to
attributes (or keys) in the source object.
:param dict field_map:
A dictionary mapping entry field names to attributes in source_item (or keys,
if source_item is a dict). Nested attributes/keys are also supported, separated by a dot.
A value may also be a function that takes source_item as an argument.
:param source_item:
Source of information to be used by the map
:param ignore_none:
Ignore any None values; do not record them on the Entry
"""
func = dict.get if isinstance(source_item, dict) else getattr
for field, value in field_map.items():
if isinstance(value, str):
v = functools.reduce(func, value.split('.'), source_item)
else:
v = value(source_item)
if ignore_none and v is None:
continue
self[field] = v
def render(self, template):
"""
Renders a template string based on fields in the entry.
:param string template: A template string that uses jinja2 or python string replacement format.
:return: The result of the rendering.
:rtype: string
:raises RenderError: If there is a problem.
"""
if not isinstance(template, str):
raise ValueError('Trying to render non string template, got %s' % repr(template))
log.trace('rendering: %s' % template)
return render_from_entry(template, self)
def __eq__(self, other):
return self.get('title') == other.get('title') and self.get('original_url') == other.get('original_url')
def __hash__(self):
return hash(self.get('title', '') + self.get('original_url', ''))
def __repr__(self):
return '<Entry(title=%s,state=%s)>' % (self['title'], self._state)
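# Illustrative sketch (not part of FlexGet itself): with the explicit `bytes` check
# added to __setitem__ above, assigning raw bytes on Python 3 fails immediately with
# EntryUnicodeError instead of surfacing later, and *original_url* survives url rewrites.
if __name__ == '__main__':  # pragma: no cover - example only
    e = Entry(title='Example.Title', url='http://example.com/one')
    e['url'] = 'http://mirror.example.com/one'  # urlrewriter-style change
    assert e['original_url'] == 'http://example.com/one'
    try:
        e['description'] = b'raw bytes are rejected'
    except EntryUnicodeError as err:
        print('refused: %s' % err)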
|
#!/usr/bin/env python
# Mrs Fulton -- run Mrs programs on Marylou4 (BYU's supercomputer)
# Copyright 2008-2012 Brigham Young University
#
# This file is part of Mrs.
#
# Mrs is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Mrs is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Mrs. If not, see <http://www.gnu.org/licenses/>.
#
# Inquiries regarding any further use of Mrs, please contact the Copyright
# Licensing Office, Brigham Young University, 3760 HBLL, Provo, UT 84602,
# (801) 422-9339 or 422-3821, e-mail copyright@byu.edu.
"""Script for submitting jobs to a PBS scheduler.
This file is not meant to be a completely working example; we've used it to
submit MapReduce jobs to Brigham Young University's Fulton Supercomputing Lab.
It is included just as a helpful example.
"""
# TODO: make the number of slaves per job default to '0' (all-in-one) and
# also include the master in the same job as the first batch of slaves.
from __future__ import division
import math
import os
from subprocess import Popen, PIPE
import sys
DEFAULT_INTERPRETER = "/usr/bin/python"
INTERFACES = "ib0 eth1 eth0"
RUN_DIRECTORY = "$HOME/compute/run"
QSUB_NAME_DEFAULT = "mrs_fulton"
QSUB_NAME_MAXLEN = 15
def main():
parser = create_parser()
opts, args = parser.parse_args()
if opts.slaves_per_job > 0:
total_jobs = math.ceil((1 + opts.nslaves) / opts.slaves_per_job)
suffix_len = len('_') + int(1 + math.log10(total_jobs))
else:
suffix_len = len('_') + 1
if len(opts.name) + suffix_len <= QSUB_NAME_MAXLEN:
name = opts.name
else:
parser.error('NAME is too long.')
if opts.output is None:
parser.error('OUTPUT file must be specified')
if opts.time is None:
parser.error('TIME must be specified')
if opts.memory is None:
parser.error('MEMORY must be specified')
# Extract Mrs program and its command-line arguments/options
if len(args) >= 1:
mrs_program = args[0]
mrs_args = args[1:]
else:
parser.error('MRS_PROGRAM not specified.')
if not opts.force and os.path.exists(opts.output):
print >>sys.stderr, "Output file already exists:", opts.output
sys.exit(-1)
# Set up the job directory for output, etc.
jobdir_raw = os.path.join(RUN_DIRECTORY, name)
jobdir = os.path.expandvars(jobdir_raw)
try:
os.makedirs(jobdir)
except OSError:
# The directory might already exist.
pass
# Common command line arguments to qsub:
time = walltime(opts.time)
# TODO: when each slave is able to use multiple processors (with multiple
# worker subprocesses), change `ppn` accordingly.
nodespec = 'nodes=%s:ppn=1'
if opts.nodespec:
nodespec = '%s:%s' % (nodespec, opts.nodespec)
resources = '%s,walltime=%s,pmem=%smb' % (nodespec, time, opts.memory)
if opts.resource_list:
resources += ',%s' % opts.resource_list
# Variables for the job script:
current_dir = os.getcwd()
quoted_args = ['"%s"' % arg.replace('"', r'\"') for arg in mrs_args]
arg_array = "(%s)" % " ".join(quoted_args)
script_vars = {
'python': opts.interpreter,
'program': mrs_program,
'arg_array': arg_array,
'interfaces': INTERFACES,
'jobdir': jobdir,
'current_dir': current_dir,
'output': opts.output,
'master_jobid': '',
}
if opts.slaves_per_job > 0:
nodes = min(1 + opts.nslaves, opts.slaves_per_job)
else:
nodes = 1 + opts.nslaves
print "Submitting master job...",
jobid = submit_job('%s_0' % name, script_vars, jobdir, resources % nodes)
print " done."
nodes_left = 1 + opts.nslaves - nodes
print "Master jobid:", jobid
script_vars['master_jobid'] = jobid
attrs = 'depend=after:%s' % jobid
print "Submitting slave jobs...",
i = 1
while nodes_left > 0:
nodes = min(nodes_left, opts.slaves_per_job)
submit_job('%s_%s' % (name, i), script_vars, jobdir,
resources % nodes, attrs)
nodes_left -= nodes
i += 1
def submit_job(name, script_vars, jobdir, resources, attrs=''):
"""Submit a single job to PBS using qsub.
Returns the jobid of the newly created job. If `master_jobid` (in the
`script_vars`) is an empty string, then the new job will include a master.
"""
script = r'''#!/bin/bash
. $HOME/.bashrc
# Output redirection will fail if the file already exists:
set noclobber
cd "%(current_dir)s"
JOBDIR="%(jobdir)s"
PYTHON="%(python)s"
MRS_PROGRAM="%(program)s"
ARGS=%(arg_array)s
MASTER_JOBID="%(master_jobid)s"
INTERFACES="%(interfaces)s"
OUTPUT="%(output)s"
if [[ -z $MASTER_JOBID ]]; then
HOST_FILE="$JOBDIR/host.$PBS_JOBID"
PORT_FILE="$JOBDIR/port.$PBS_JOBID"
STDERR_FILE="$JOBDIR/master-stderr.$PBS_JOBID"
# Run /sbin/ip and extract everything between "inet " and "/"
# (i.e. the IP address but not the netmask). Note that we use a
# semi-colon instead of / in the sed expression to make it easier
# on the eyes.
for iface in $INTERFACES; do
if /sbin/ip -o -4 addr list |grep -q "$iface\$"; then
IP_ADDRESS=$(/sbin/ip -o -4 addr list "$iface" \
|sed -e 's;^.*inet \(.*\)/.*$;\1;')
echo $IP_ADDRESS >$HOST_FILE
break
fi
done
if [[ -z $IP_ADDRESS ]]; then
echo "No valid IP address found!"
exit 1
fi
# Start the master.
mkdir -p $(dirname "$OUTPUT")
$PYTHON $MRS_PROGRAM --mrs=Master --mrs-runfile="$PORT_FILE" \
${ARGS[@]} >$OUTPUT 2>$STDERR_FILE &
else
HOST_FILE="$JOBDIR/host.$PBS_MASTER_JOBID"
PORT_FILE="$JOBDIR/port.$PBS_MASTER_JOBID"
fi
# Find the port used by the master.
while true; do
if [[ -e $PORT_FILE ]]; then
PORT=$(cat $PORT_FILE)
if [[ $PORT = "-" ]]; then
echo "The master quit prematurely."
exit
elif [[ ! -z $PORT ]]; then
break;
fi
fi
sleep 0.05;
done
HOST=$(cat $HOST_FILE)
echo "Connecting to master on '$HOST:$PORT'"
# Start the slaves.
SLAVE_CMD="$PYTHON $MRS_PROGRAM --mrs=Slave --mrs-master='$HOST:$PORT'"
if [[ -z $MASTER_JOBID ]]; then
# Don't start a slave on the master's node.
SLAVE_CMD="[[ \$PBS_VNODENUM != 0 ]] && $SLAVE_CMD"
fi
pbsdsh bash -i -c "$SLAVE_CMD"
# Wait for the master (backgrounded) to complete.
wait
''' % script_vars
cmdline = ['qsub', '-l', resources, '-N', name]
if attrs:
cmdline += ['-W', attrs]
outfile = os.path.join(jobdir, '%s.out' % name)
errfile = os.path.join(jobdir, '%s.err' % name)
cmdline += ['-o', outfile, '-e', errfile]
# Submit
qsub_proc = Popen(cmdline, stdin=PIPE, stdout=PIPE)
stdout, stderr = qsub_proc.communicate(script)
if qsub_proc.returncode != 0:
print >>sys.stderr, "Couldn't submit master job to queue!"
sys.exit(-1)
jobid = stdout.strip()
return jobid
def walltime(time):
"""Return a qsub-style walltime string for the given time (in hours)."""
hours = int(time)
time -= hours
minutes = int(time * 60)
time -= minutes / 60
seconds = int(time * 3600)
return ":".join(map(str, (hours, minutes, seconds)))
USAGE = ("""%prog [OPTIONS] -- MRS_PROGRAM [PROGRAM_OPTIONS]
Mrs Fulton uses qsub to submit a Mrs program to the supercomputer. The given
MRS_PROGRAM runs with the given PROGRAM_OPTIONS. These options should not
include master or slave subcommands, since Mrs Fulton will take care of these
details.""")
def create_parser():
from optparse import OptionParser
parser = OptionParser(usage=USAGE)
# We don't want options intended for the Mrs Program to go to Mrs Fulton.
parser.disable_interspersed_args()
parser.add_option('-n', dest='nslaves', type='int',
help='Number of slaves')
parser.add_option('-N', '--name', dest='name', help='Name of job')
parser.add_option('-o', '--output', dest='output', help='Output file')
parser.add_option('-t', '--time', dest='time', type='float',
help='Wallclock time (in hours)')
parser.add_option('-m', '--memory', dest='memory', type='int',
help='Amount of memory per node (in MB)')
parser.add_option('-s', dest='slaves_per_job', type='int',
help='Number of slaves in each PBS job', default=0)
parser.add_option('--interpreter', dest='interpreter', action='store',
help='Python interpreter to run', default=DEFAULT_INTERPRETER)
parser.add_option('-f', dest='force', action='store_true',
help='Force output, even if the output file already exists')
parser.add_option('--nodespec', dest='nodespec',
help='Extra node spec options (colon-separated PBS syntax)')
parser.add_option('-l', '--resource-list', dest='resource_list',
help='Extra resource requests (comma-separated PBS syntax)')
parser.set_defaults(nslaves=1, name=QSUB_NAME_DEFAULT)
return parser
if __name__ == '__main__':
main()
# vim: et sw=4 sts=4
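# Illustrative sketch (not part of the script): for a hypothetical invocation with
# -n 4 -t 2 -m 1024 -N myjob and the default slaves_per_job of 0, main() builds
#   resources -> 'nodes=5:ppn=1,walltime=2:0:0,pmem=1024mb'   (master + 4 slaves)
#   cmdline   -> ['qsub', '-l', resources, '-N', 'myjob_0',
#                 '-o', '<jobdir>/myjob_0.out', '-e', '<jobdir>/myjob_0.err']
# and the generated bash job script is fed to qsub on stdin via communicate().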
Allow the fulton script to accept a filename to use for master's stderr
#!/usr/bin/env python
# Mrs Fulton -- run Mrs programs on Marylou4 (BYU's supercomputer)
# Copyright 2008-2012 Brigham Young University
#
# This file is part of Mrs.
#
# Mrs is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Mrs is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Mrs. If not, see <http://www.gnu.org/licenses/>.
#
# Inquiries regarding any further use of Mrs, please contact the Copyright
# Licensing Office, Brigham Young University, 3760 HBLL, Provo, UT 84602,
# (801) 422-9339 or 422-3821, e-mail copyright@byu.edu.
"""Script for submitting jobs to a PBS scheduler.
This file is not meant to be a completely working example; we've used it to
submit MapReduce jobs to Brigham Young University's Fulton Supercomputing Lab.
It is included just as a helpful example.
"""
# TODO: make the number of slaves per job default to '0' (all-in-one) and
# also include the master in the same job as the first batch of slaves.
from __future__ import division
import math
import os
from subprocess import Popen, PIPE
import sys
DEFAULT_INTERPRETER = "/usr/bin/python"
INTERFACES = "ib0 eth1 eth0"
RUN_DIRECTORY = "$HOME/compute/run"
QSUB_NAME_DEFAULT = "mrs_fulton"
QSUB_NAME_MAXLEN = 15
def main():
parser = create_parser()
opts, args = parser.parse_args()
if opts.slaves_per_job > 0:
total_jobs = math.ceil((1 + opts.nslaves) / opts.slaves_per_job)
suffix_len = len('_') + int(1 + math.log10(total_jobs))
else:
suffix_len = len('_') + 1
if len(opts.name) + suffix_len <= QSUB_NAME_MAXLEN:
name = opts.name
else:
parser.error('NAME is too long.')
if opts.output is None:
parser.error('OUTPUT file must be specified')
if opts.time is None:
parser.error('TIME must be specified')
if opts.memory is None:
parser.error('MEMORY must be specified')
# Extract Mrs program and its command-line arguments/options
if len(args) >= 1:
mrs_program = args[0]
mrs_args = args[1:]
else:
parser.error('MRS_PROGRAM not specified.')
if not opts.force and os.path.exists(opts.output):
print >>sys.stderr, "Output file already exists:", opts.output
sys.exit(-1)
# Set up the job directory for output, etc.
jobdir_raw = os.path.join(RUN_DIRECTORY, name)
jobdir = os.path.expandvars(jobdir_raw)
try:
os.makedirs(jobdir)
except OSError:
# The directory might already exist.
pass
# Common command line arguments to qsub:
time = walltime(opts.time)
# TODO: when each slave is able to use multiple processors (with multiple
# worker subprocesses), change `ppn` accordingly.
nodespec = 'nodes=%s:ppn=1'
if opts.nodespec:
nodespec = '%s:%s' % (nodespec, opts.nodespec)
resources = '%s,walltime=%s,pmem=%smb' % (nodespec, time, opts.memory)
if opts.resource_list:
resources += ',%s' % opts.resource_list
# Variables for the job script:
current_dir = os.getcwd()
quoted_args = ['"%s"' % arg.replace('"', r'\"') for arg in mrs_args]
arg_array = "(%s)" % " ".join(quoted_args)
script_vars = {
'python': opts.interpreter,
'program': mrs_program,
'arg_array': arg_array,
'interfaces': INTERFACES,
'jobdir': jobdir,
'current_dir': current_dir,
'output': opts.output,
'stderr': opts.stderr,
'master_jobid': '',
}
if opts.slaves_per_job > 0:
nodes = min(1 + opts.nslaves, opts.slaves_per_job)
else:
nodes = 1 + opts.nslaves
print "Submitting master job...",
jobid = submit_job('%s_0' % name, script_vars, jobdir, resources % nodes)
print " done."
nodes_left = 1 + opts.nslaves - nodes
print "Master jobid:", jobid
script_vars['master_jobid'] = jobid
attrs = 'depend=after:%s' % jobid
print "Submitting slave jobs...",
i = 1
while nodes_left > 0:
nodes = min(nodes_left, opts.slaves_per_job)
submit_job('%s_%s' % (name, i), script_vars, jobdir,
resources % nodes, attrs)
nodes_left -= nodes
i += 1
def submit_job(name, script_vars, jobdir, resources, attrs=''):
"""Submit a single job to PBS using qsub.
Returns the jobid of the newly created job. If `master_jobid` (in the
`script_vars`) is an empty string, then the new job will include a master.
"""
script = r'''#!/bin/bash
. $HOME/.bashrc
# Output redirection will fail if the file already exists:
set noclobber
cd "%(current_dir)s"
JOBDIR="%(jobdir)s"
PYTHON="%(python)s"
MRS_PROGRAM="%(program)s"
ARGS=%(arg_array)s
MASTER_JOBID="%(master_jobid)s"
INTERFACES="%(interfaces)s"
OUTPUT="%(output)s"
STDERR="%(stderr)s"
if [[ -z $MASTER_JOBID ]]; then
HOST_FILE="$JOBDIR/host.$PBS_JOBID"
PORT_FILE="$JOBDIR/port.$PBS_JOBID"
# Run /sbin/ip and extract everything between "inet " and "/"
# (i.e. the IP address but not the netmask). Note that we use a
# semi-colon instead of / in the sed expression to make it easier
# on the eyes.
for iface in $INTERFACES; do
if /sbin/ip -o -4 addr list |grep -q "$iface\$"; then
IP_ADDRESS=$(/sbin/ip -o -4 addr list "$iface" \
|sed -e 's;^.*inet \(.*\)/.*$;\1;')
echo $IP_ADDRESS >$HOST_FILE
break
fi
done
if [[ -z $IP_ADDRESS ]]; then
echo "No valid IP address found!"
exit 1
fi
# Start the master.
mkdir -p $(dirname "$OUTPUT")
if [[ -n $STDERR ]]; then
mkdir -p $(dirname "$STDERR")
else
STDERR="$JOBDIR/master-stderr.$PBS_JOBID"
fi
$PYTHON $MRS_PROGRAM --mrs=Master --mrs-runfile="$PORT_FILE" \
${ARGS[@]} >$OUTPUT 2>$STDERR &
else
HOST_FILE="$JOBDIR/host.$PBS_MASTER_JOBID"
PORT_FILE="$JOBDIR/port.$PBS_MASTER_JOBID"
fi
# Find the port used by the master.
while true; do
if [[ -e $PORT_FILE ]]; then
PORT=$(cat $PORT_FILE)
if [[ $PORT = "-" ]]; then
echo "The master quit prematurely."
exit
elif [[ ! -z $PORT ]]; then
break;
fi
fi
sleep 0.05;
done
HOST=$(cat $HOST_FILE)
echo "Connecting to master on '$HOST:$PORT'"
# Start the slaves.
SLAVE_CMD="$PYTHON $MRS_PROGRAM --mrs=Slave --mrs-master='$HOST:$PORT'"
if [[ -z $MASTER_JOBID ]]; then
# Don't start a slave on the master's node.
SLAVE_CMD="[[ \$PBS_VNODENUM != 0 ]] && $SLAVE_CMD"
fi
pbsdsh bash -i -c "$SLAVE_CMD"
# Wait for the master (backgrounded) to complete.
wait
''' % script_vars
cmdline = ['qsub', '-l', resources, '-N', name]
if attrs:
cmdline += ['-W', attrs]
outfile = os.path.join(jobdir, '%s.out' % name)
errfile = os.path.join(jobdir, '%s.err' % name)
cmdline += ['-o', outfile, '-e', errfile]
# Submit
qsub_proc = Popen(cmdline, stdin=PIPE, stdout=PIPE)
stdout, stderr = qsub_proc.communicate(script)
if qsub_proc.returncode != 0:
print >>sys.stderr, "Couldn't submit master job to queue!"
sys.exit(-1)
jobid = stdout.strip()
return jobid
def walltime(time):
"""Return a qsub-style walltime string for the given time (in hours)."""
hours = int(time)
time -= hours
minutes = int(time * 60)
time -= minutes / 60
seconds = int(time * 3600)
return ":".join(map(str, (hours, minutes, seconds)))
USAGE = ("""%prog [OPTIONS] -- MRS_PROGRAM [PROGRAM_OPTIONS]
Mrs Fulton uses qsub to submit a Mrs program to the supercomputer. The given
MRS_PROGRAM runs with the given PROGRAM_OPTIONS. These options should not
include master or slave subcommands, since Mrs Fulton will take care of these
details.""")
def create_parser():
from optparse import OptionParser
parser = OptionParser(usage=USAGE)
# We don't want options intended for the Mrs Program to go to Mrs Fulton.
parser.disable_interspersed_args()
parser.add_option('-n', dest='nslaves', type='int',
help='Number of slaves')
parser.add_option('-N', '--name', dest='name', help='Name of job')
parser.add_option('-o', '--output', dest='output',
help='Output (stdout) file')
parser.add_option('-e', '--stderr', dest='stderr', default='',
help='Output (stderr) file')
parser.add_option('-t', '--time', dest='time', type='float',
help='Wallclock time (in hours)')
parser.add_option('-m', '--memory', dest='memory', type='int',
help='Amount of memory per node (in MB)')
parser.add_option('-s', dest='slaves_per_job', type='int',
help='Number of slaves in each PBS job', default=0)
parser.add_option('--interpreter', dest='interpreter', action='store',
help='Python interpreter to run', default=DEFAULT_INTERPRETER)
parser.add_option('-f', dest='force', action='store_true',
help='Force output, even if the output file already exists')
parser.add_option('--nodespec', dest='nodespec',
help='Extra node spec options (colon-separated PBS syntax)')
parser.add_option('-l', '--resource-list', dest='resource_list',
help='Extra resource requests (comma-separated PBS syntax)')
parser.set_defaults(nslaves=1, name=QSUB_NAME_DEFAULT)
return parser
if __name__ == '__main__':
main()
# vim: et sw=4 sts=4
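# Example invocation (sketch; the script filename, paths and the Mrs program are
# placeholders):
#   python fulton.py -n 8 -N wordcount -t 1.5 -m 2048 \
#       -o $HOME/results/wc.out -e $HOME/results/wc.err -- wordcount.py input.txt
# Everything after '--' is passed to the Mrs program unchanged; -e/--stderr is the
# option added in this revision for redirecting the master's stderr to a chosen file.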
|
#
# This file is part of flubber. See the NOTICE for more information.
#
import flubber
from flubber.timeout import Timeout
__all__ = ['Semaphore', 'BoundedSemaphore', 'RLock', 'Condition']
class Semaphore(object):
def __init__(self, value=1):
if value < 0:
raise ValueError("Semaphore must be initialized with a positive number, got %s" % value)
self._counter = value
self._waiters = set()
def __repr__(self):
params = (self.__class__.__name__, hex(id(self)),
self._counter, len(self._waiters))
return '<%s at %s c=%s _w[%s]>' % params
__str__ = __repr__
def acquire(self, blocking=True, timeout=None):
if self._counter > 0:
self._counter -= 1
return True
elif not blocking:
return False
else:
current = flubber.current.task
self._waiters.add(current)
timer = Timeout(timeout)
timer.start()
loop = flubber.current.loop
try:
while self._counter <= 0:
loop.switch()
except Timeout as e:
if e is timer:
return False
raise
else:
self._counter -= 1
return True
finally:
timer.cancel()
self._waiters.discard(current)
def release(self):
self._counter += 1
if self._waiters:
flubber.current.loop.call_soon(self._notify_waiters)
def _notify_waiters(self):
if self._waiters and self._counter > 0:
waiter = self._waiters.pop()
waiter.switch()
def __enter__(self):
self.acquire()
def __exit__(self, typ, val, tb):
self.release()
class BoundedSemaphore(Semaphore):
def __init__(self, value=1):
super(BoundedSemaphore, self).__init__(value)
self._initial_counter = value
def release(self, blocking=True):
if self._counter >= self._initial_counter:
raise ValueError, "Semaphore released too many times"
return super(BoundedSemaphore, self).release()
class RLock(object):
def __init__(self):
self._block = Semaphore()
self._count = 0
self._owner = None
def acquire(self, blocking=True, timeout=None):
me = flubber.current.task
if self._owner is me:
self._count += 1
return True
r = self._block.acquire(blocking, timeout)
if r:
self._owner = me
self._count = 1
return r
def release(self):
if self._owner is not flubber.current.task:
raise RuntimeError('cannot release un-acquired lock')
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __enter__(self):
return self.acquire()
def __exit__(self, typ, value, tb):
self.release()
# Needed by condition
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
state = (self._count, self._owner)
self._count = 0
self._owner = None
self._block.release()
return state
def _is_owned(self):
return self._owner is flubber.current.task
class Condition(object):
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
self._waiters = []
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
def wait(self, timeout=None):
if not self._is_owned():
raise RuntimeError('cannot wait on un-acquired lock')
waiter = Semaphore()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
try:
return waiter.acquire(timeout=timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
if not self._is_owned():
raise RuntimeError('cannot notify on un-acquired lock')
__waiters = self._waiters
waiters = __waiters[:n]
if not waiters:
return
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
self.notify(len(self._waiters))
def _acquire_restore(self, state):
self._lock.acquire()
def _release_save(self):
self._lock.release()
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self._lock.acquire(False):
self._lock.release()
return False
else:
return True
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
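# Illustrative sketch (not part of flubber): the uncontended fast path of Semaphore
# never touches the event loop, since acquire() only switches to the loop when the
# counter is exhausted and release() only schedules a wake-up when waiters exist.
#   sem = Semaphore(2)
#   with sem:                                   # __enter__ -> acquire(), counter 2 -> 1
#       assert sem.acquire(blocking=False)      # counter 1 -> 0
#       assert not sem.acquire(blocking=False)  # exhausted; non-blocking acquire fails
#       sem.release()                           # counter 0 -> 1, no waiters to notify
#   # __exit__ -> release(), counter back to 2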
Removed bogus function argument
#
# This file is part of flubber. See the NOTICE for more information.
#
import flubber
from flubber.timeout import Timeout
__all__ = ['Semaphore', 'BoundedSemaphore', 'RLock', 'Condition']
class Semaphore(object):
def __init__(self, value=1):
if value < 0:
raise ValueError("Semaphore must be initialized with a positive number, got %s" % value)
self._counter = value
self._waiters = set()
def __repr__(self):
params = (self.__class__.__name__, hex(id(self)),
self._counter, len(self._waiters))
return '<%s at %s c=%s _w[%s]>' % params
__str__ = __repr__
def acquire(self, blocking=True, timeout=None):
if self._counter > 0:
self._counter -= 1
return True
elif not blocking:
return False
else:
current = flubber.current.task
self._waiters.add(current)
timer = Timeout(timeout)
timer.start()
loop = flubber.current.loop
try:
while self._counter <= 0:
loop.switch()
except Timeout as e:
if e is timer:
return False
raise
else:
self._counter -= 1
return True
finally:
timer.cancel()
self._waiters.discard(current)
def release(self):
self._counter += 1
if self._waiters:
flubber.current.loop.call_soon(self._notify_waiters)
def _notify_waiters(self):
if self._waiters and self._counter > 0:
waiter = self._waiters.pop()
waiter.switch()
def __enter__(self):
self.acquire()
def __exit__(self, typ, val, tb):
self.release()
class BoundedSemaphore(Semaphore):
def __init__(self, value=1):
super(BoundedSemaphore, self).__init__(value)
self._initial_counter = value
def release(self):
if self._counter >= self._initial_counter:
raise ValueError, "Semaphore released too many times"
return super(BoundedSemaphore, self).release()
class RLock(object):
def __init__(self):
self._block = Semaphore()
self._count = 0
self._owner = None
def acquire(self, blocking=True, timeout=None):
me = flubber.current.task
if self._owner is me:
self._count += 1
return True
r = self._block.acquire(blocking, timeout)
if r:
self._owner = me
self._count = 1
return r
def release(self):
if self._owner is not flubber.current.task:
raise RuntimeError('cannot release un-acquired lock')
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __enter__(self):
return self.acquire()
def __exit__(self, typ, value, tb):
self.release()
# Needed by condition
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
state = (self._count, self._owner)
self._count = 0
self._owner = None
self._block.release()
return state
def _is_owned(self):
return self._owner is flubber.current.task
class Condition(object):
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
self._waiters = []
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
def wait(self, timeout=None):
if not self._is_owned():
raise RuntimeError('cannot wait on un-acquired lock')
waiter = Semaphore()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
try:
return waiter.acquire(timeout=timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
if not self._is_owned():
raise RuntimeError('cannot notify on un-acquired lock')
__waiters = self._waiters
waiters = __waiters[:n]
if not waiters:
return
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
self.notify(len(self._waiters))
def _acquire_restore(self, state):
self._lock.acquire()
def _release_save(self):
self._lock.release()
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self._lock.acquire(False):
self._lock.release()
return False
else:
return True
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
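# Illustrative sketch (not part of flubber): BoundedSemaphore refuses to be released
# above its initial value, which catches unbalanced acquire/release pairs. The revision
# above simply drops the stray `blocking` parameter that release() never used.
#   bsem = BoundedSemaphore(1)
#   bsem.acquire()   # counter 1 -> 0
#   bsem.release()   # counter 0 -> 1
#   bsem.release()   # counter already at its initial value -> ValueError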
|
#!/usr/bin/python
import sys, traceback
import cv2
import numpy as np
import argparse
import string
import plantcv as pcv
### Parse command-line arguments
def options():
parser = argparse.ArgumentParser(description="Imaging processing with opencv")
parser.add_argument("-i1", "--fdark", help="Input image file.", required=True)
parser.add_argument("-i2", "--fmin", help="Input image file.", required=True)
parser.add_argument("-i3", "--fmax", help="Input image file.", required=True)
parser.add_argument("-m", "--track", help="Input region of interest file.", required=False)
parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=True)
parser.add_argument("-D", "--debug", help="Turn on debug, prints intermediate images.", action="store_true")
args = parser.parse_args()
return args
### Main pipeline
def main():
# Get options
args = options()
# Read image (converting fmax and track to 8 bit just to create a mask, use 16-bit for all the math)
mask = cv2.imread(args.fmax)
track = cv2.imread(args.track)
mask1, mask2, mask3= cv2.split(mask)
# Pipeline step
device = 0
# Mask pesky track autofluor
device, track1= pcv.rgb2gray_hsv(track, 'v', device, args.debug)
device, track_thresh = pcv.binary_threshold(track1, 0, 255, 'light', device, args.debug)
device, track_inv=pcv.invert(track_thresh, device, args.debug)
device, track_masked = pcv.apply_mask(mask1, track_inv, 'black', device, args.debug)
# Threshold the Saturation image
device, fmax_thresh = pcv.binary_threshold(track_masked, 20, 255, 'light', device, args.debug)
# Median Filter
device, s_mblur = pcv.median_blur(fmax_thresh, 5, device, args.debug)
device, s_cnt = pcv.median_blur(fmax_thresh, 5, device, args.debug)
# Fill small objects
device, s_fill = pcv.fill(s_mblur, s_cnt, 110, device, args.debug)
device, sfill_cnt = pcv.fill(s_mblur, s_cnt, 110, device, args.debug)
# Identify objects
device, id_objects,obj_hierarchy = pcv.find_objects(mask, sfill_cnt, device, args.debug)
# Define ROI
device, roi1, roi_hierarchy= pcv.define_roi(mask,'circle', device, None, 'default', args.debug,True, 0,0,-50,-50)
# Decide which objects to keep
device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(mask,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug)
# Object combine kept objects
device, obj, masked = pcv.object_composition(mask, roi_objects, hierarchy3, device, args.debug)
################ Analysis ################
# Find shape properties, output shape image (optional)
device, shape_header,shape_data,shape_img = pcv.analyze_object(mask, args.fmax, obj, masked, device,args.debug,True)
# Fluorescence Measurement (read in 16-bit images)
fdark=cv2.imread(args.fdark, -1)
fmin=cv2.imread(args.fmin, -1)
fmax=cv2.imread(args.fmax, -1)
device, fvfm_header, fvfm_data=pcv.fluor_fvfm(fdark,fmin,fmax,kept_mask, device, args.fmax,1000, args.debug)
# Output shape and color data
pcv.print_results(args.fmax, shape_header, shape_data)
pcv.print_results(args.fmax, fvfm_header, fvfm_data)
if __name__ == '__main__':
main()
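# Example invocation (sketch; the script filename and image paths are placeholders):
#   python fluor_pipeline.py -i1 fdark.tif -i2 fmin.tif -i3 fmax.tif \
#       -m track_mask.png -o ./output -D
# fmax and the track image are read as 8-bit only to build the plant mask; the
# fdark/fmin/fmax images are re-read unchanged (cv2.imread flag -1) for the Fv/Fm math.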
fluor executable
#!/usr/bin/python
import sys, traceback
import cv2
import numpy as np
import argparse
import string
import plantcv as pcv
### Parse command-line arguments
def options():
parser = argparse.ArgumentParser(description="Imaging processing with opencv")
parser.add_argument("-i1", "--fdark", help="Input image file.", required=True)
parser.add_argument("-i2", "--fmin", help="Input image file.", required=True)
parser.add_argument("-i3", "--fmax", help="Input image file.", required=True)
parser.add_argument("-m", "--track", help="Input region of interest file.", required=False)
parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=True)
parser.add_argument("-D", "--debug", help="Turn on debug, prints intermediate images.", action="store_true")
args = parser.parse_args()
return args
### Main pipeline
def main():
# Get options
args = options()
# Read image (converting fmax and track to 8 bit just to create a mask, use 16-bit for all the math)
mask = cv2.imread(args.fmax)
track = cv2.imread(args.track)
mask1, mask2, mask3= cv2.split(mask)
# Pipeline step
device = 0
# Mask pesky track autofluor
device, track1= pcv.rgb2gray_hsv(track, 'v', device, args.debug)
device, track_thresh = pcv.binary_threshold(track1, 0, 255, 'light', device, args.debug)
device, track_inv=pcv.invert(track_thresh, device, args.debug)
device, track_masked = pcv.apply_mask(mask1, track_inv, 'black', device, args.debug)
# Threshold the Saturation image
device, fmax_thresh = pcv.binary_threshold(track_masked, 20, 255, 'light', device, args.debug)
# Median Filter
device, s_mblur = pcv.median_blur(fmax_thresh, 5, device, args.debug)
device, s_cnt = pcv.median_blur(fmax_thresh, 5, device, args.debug)
# Fill small objects
device, s_fill = pcv.fill(s_mblur, s_cnt, 110, device, args.debug)
device, sfill_cnt = pcv.fill(s_mblur, s_cnt, 110, device, args.debug)
# Identify objects
device, id_objects,obj_hierarchy = pcv.find_objects(mask, sfill_cnt, device, args.debug)
# Define ROI
device, roi1, roi_hierarchy= pcv.define_roi(mask,'circle', device, None, 'default', args.debug,True, 0,0,-50,-50)
# Decide which objects to keep
device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(mask,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug)
# Object combine kept objects
device, obj, masked = pcv.object_composition(mask, roi_objects, hierarchy3, device, args.debug)
################ Analysis ################
# Find shape properties, output shape image (optional)
device, shape_header,shape_data,shape_img = pcv.analyze_object(mask, args.fmax, obj, masked, device,args.debug,True)
# Fluorescence Measurement (read in 16-bit images)
fdark=cv2.imread(args.fdark, -1)
fmin=cv2.imread(args.fmin, -1)
fmax=cv2.imread(args.fmax, -1)
device, fvfm_header, fvfm_data=pcv.fluor_fvfm(fdark,fmin,fmax,kept_mask, device, args.fmax,1000, args.debug)
# Output shape and color data
pcv.print_results(args.fmax, shape_header, shape_data)
pcv.print_results(args.fmax, fvfm_header, fvfm_data)
if __name__ == '__main__':
main()
|
"""Server class for API connections"""
import json
import importlib.util
import warnings
from typing import (List, Dict, Optional, Any, IO, Tuple, Union, Iterator,
Callable)
from functools import wraps
import requests
from .utils import request, build_portal_params, build_script_params, filename_from_url
from .const import API_PATH, PORTAL_PREFIX, FMSErrorCode
from .exceptions import BadJSON, FileMakerError, RecordError
from .record import Record
from .foundset import Foundset
class Server(object):
"""The server class provides easy access to the FileMaker Data API
Get an instance of this class, login, get a record, logout:
import fmrest
fms = fmrest.Server('https://server-address.com',
user='db user name',
password='db password',
database='db name',
layout='db layout'
)
fms.login()
fms.get_record(1)
fms.logout()
Or use as with statement, logging out automatically:
with fms as my_server:
my_server.login()
# do stuff
"""
def __init__(self, url: str, user: str,
password: str, database: str, layout: str,
data_sources: Optional[List[Dict]] = None,
verify_ssl: Union[bool, str] = True,
type_conversion: bool = False,
auto_relogin: bool = False,
proxies: Optional[Dict] = None) -> None:
"""Initialize the Server class.
Parameters
----------
url : str
Address of the FileMaker Server, e.g. https://my-server.com or https://127.0.0.1
Note: Data API must use https.
user : str
Username to log into your database
Note: make sure it belongs to a privilege set that has fmrest extended privileges.
password : str
Password to log into your database
database : str
Name of database without extension, e.g. Contacts
layout : str
Layout to work with. Can be changed between calls by setting the layout attribute again,
e.g.: fmrest_instance.layout = 'new_layout'.
data_sources : list, optional
List of dicts in format
[{'database': 'db_file', 'username': 'admin', 'password': 'admin'}]
Use this if your actions require authentication against multiple DB files.
verify_ssl : bool or str, optional
Switch to set if certificate should be verified.
Use False to disable verification. Default True.
Use string path to a root cert pem file, if you work with a custom CA.
type_conversion : bool, optional
If True, attempt to convert string values into their potential original types.
In previous versions of the FileMaker Data API only strings were returned and there was
no way of knowing the correct type of a requested field value.
Be cautious with this parameter, as results may be different from what you expect!
Values will be converted into int, float, datetime, timedelta, string. This happens
on a record level, not on a foundset level.
auto_relogin : bool, optional
If True, tries to automatically get a new token (re-login) when a
request comes back with a 952 (invalid token) error. Defaults to
False.
proxies : dict, optional
Pass requests through a proxy, configure like so:
{ 'https': 'http://127.0.0.1:8080' }
"""
self.url = url
self.user = user
self.password = password
self.database = database
self.layout = layout
self.data_sources = [] if data_sources is None else data_sources
self.verify_ssl = verify_ssl
self.auto_relogin = auto_relogin
self.proxies = proxies
self.type_conversion = type_conversion
if type_conversion and not importlib.util.find_spec("dateutil"):
warnings.warn('Turning on type_conversion needs the dateutil module, which '
'does not seem to be present on your system.')
if url[:5] != 'https':
raise ValueError('Please make sure to use https, otherwise calls to the Data '
'API will not work.')
self._token: Optional[str] = None
self._last_fm_error: Optional[int] = None
self._last_script_result: Optional[Dict[str, List]] = None
self._headers: Dict[str, str] = {}
self._set_content_type()
def __enter__(self) -> 'Server':
return self
def __exit__(self, exc_type, exc_val, exc_traceback) -> None:
self.logout()
def __repr__(self) -> str:
return '<Server logged_in={} database={} layout={}>'.format(
bool(self._token), self.database, self.layout
)
def _with_auto_relogin(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.auto_relogin:
return f(self, *args, **kwargs)
try:
return f(self, *args, **kwargs)
except FileMakerError:
if self.last_error == FMSErrorCode.INVALID_DAPI_TOKEN.value:
# got invalid token error; try to get a new token
self._token = None
self.login()
# ... now perform original request again
return f(self, *args, **kwargs)
raise # if another error occurred, re-raise the exception
return wrapper
def login(self) -> Optional[str]:
"""Logs into FMServer and returns access token.
Authentication happens via HTTP Basic Auth. Subsequent calls to the API will then use
the returned session token.
Note that OAuth is currently not supported.
"""
path = API_PATH['auth'].format(database=self.database, token='')
data = {'fmDataSource': self.data_sources}
response = self._call_filemaker('POST', path, data, auth=(self.user, self.password))
self._token = response.get('token', None)
return self._token
def logout(self) -> bool:
"""Logs out of current session. Returns True if successful.
Note: this method is also called by __exit__
"""
# token is expected in endpoint for logout
path = API_PATH['auth'].format(database=self.database, token=self._token)
# remove token, so that the Authorization header is not sent for logout
# (_call_filemaker() will update the headers)
self._token = ''
self._call_filemaker('DELETE', path)
return self.last_error == FMSErrorCode.SUCCESS.value
def create(self, record: Record) -> Optional[int]:
"""Shortcut to create_record method. Takes record instance and calls create_record."""
# TODO: support for handling foundset instances inside record instance
return self.create_record(record.to_dict(ignore_portals=True, ignore_internal_ids=True))
@_with_auto_relogin
def create_record(self, field_data: Dict[str, Any],
portals: Optional[Dict[str, Any]] = None,
scripts: Optional[Dict[str, List]] = None) -> Optional[int]:
"""Creates a new record with given field data and returns new internal record id.
Parameters
-----------
field_data : dict
Dict of field names as defined in FileMaker: E.g.: {'name': 'David', 'drink': 'Coffee'}
scripts : dict, optional
Specify which scripts should run, when, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
portals : dict
Specify the records that should be created via a portal (must allow creation of records)
Example: {'my_portal': [
{'TO::field': 'hello', 'TO::field2': 'world'},
{'TO::field': 'another record'}
]}
"""
path = API_PATH['record'].format(
database=self.database,
layout=self.layout,
)
request_data: Dict = {'fieldData': field_data}
if portals:
request_data['portalData'] = portals
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
request_data.update(script_params)
response = self._call_filemaker('POST', path, request_data)
record_id = response.get('recordId')
return int(record_id) if record_id else None
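# Usage sketch (assumes a logged-in Server instance `fms`; field, portal and script
# names are placeholders):
#   new_id = fms.create_record(
#       {'name': 'David', 'drink': 'Coffee'},
#       portals={'my_portal': [{'TO::field': 'hello'}]},
#       scripts={'after': ['log_creation', 'created via API']},
#   )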
def edit(self, record: Record, validate_mod_id: bool = False) -> bool:
"""Shortcut to edit_record method. Takes (modified) record instance and calls edit_record"""
mod_id = record.modification_id if validate_mod_id else None
return self.edit_record(record.record_id, record.modifications(), mod_id)
@_with_auto_relogin
def edit_record(self, record_id: int, field_data: Dict[str, Any],
mod_id: Optional[int] = None, portals: Optional[Dict[str, Any]] = None,
scripts: Optional[Dict[str, List]] = None) -> bool:
"""Edits the record with the given record_id and field_data. Return True on success.
Parameters
-----------
record_id : int
FileMaker's internal record id.
field_data: dict
Dict of field names as defined in FileMaker: E.g.: {'name': 'David', 'drink': 'Coffee'}
To delete related records, use {'deleteRelated': 'Orders.2'}, where 2 is the record id
of the related record.
mod_id: int, optional
Pass a modification id to only edit the record when mod_id matches the current mod_id of
the server. This is only supported for records in the current table, not related
records.
portals : dict
Specify the records that should be edited via a portal.
If recordId is not specified, a new record will be created.
Example: {'my_portal': [
{'TO::field': 'hello', 'recordId': '42'}
]}
scripts : dict, optional
Specify which scripts should run, when, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
"""
path = API_PATH['record_action'].format(
database=self.database,
layout=self.layout,
record_id=record_id
)
request_data: Dict = {'fieldData': field_data}
if mod_id:
request_data['modId'] = str(mod_id)
if portals:
request_data['portalData'] = portals
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
request_data.update(script_params)
self._call_filemaker('PATCH', path, request_data)
return self.last_error == FMSErrorCode.SUCCESS.value
def delete(self, record: Record) -> bool:
"""Shortcut to delete_record method. Takes record instance and calls delete_record."""
try:
record_id = record.record_id
except AttributeError:
raise RecordError('Not a valid record instance. record_id is missing.') from None
return self.delete_record(record_id)
@_with_auto_relogin
def delete_record(self, record_id: int, scripts: Optional[Dict[str, List]] = None):
"""Deletes a record for the given record_id. Returns True on success.
Parameters
-----------
record_id : int
FileMaker's internal record id.
scripts : dict, optional
Specify which scripts should run, when, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
"""
path = API_PATH['record_action'].format(
database=self.database,
layout=self.layout,
record_id=record_id
)
params = build_script_params(scripts) if scripts else None
self._call_filemaker('DELETE', path, params=params)
return self.last_error == FMSErrorCode.SUCCESS.value
@_with_auto_relogin
def get_record(self, record_id: int, portals: Optional[List[Dict]] = None,
scripts: Optional[Dict[str, List]] = None,
layout: Optional[str] = None) -> Record:
"""Fetches record with given ID and returns Record instance
Parameters
-----------
record_id : int
The FileMaker record id. Be aware that record ids CAN change (e.g. in cloned databases)
portals : list
A list of dicts in format [{'name':'objectName', 'offset':1, 'limit':50}]
Use this if you want to limit the amount of data returned. Offset and limit are optional
with default values of 1 and 50, respectively.
All portals will be returned when portals==None. Default None.
scripts : dict, optional
Specify which scripts should run, when, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
layout : str, optional
Passing a layout name allows you to set the response (!) layout.
This is helpful, for example, if you want to limit the number of fields/portals being
returned and have a dedicated response layout.
"""
path = API_PATH['record_action'].format(
database=self.database,
layout=self.layout,
record_id=record_id
)
params = build_portal_params(portals, True) if portals else {}
params['layout.response'] = layout
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
params.update(script_params)
response = self._call_filemaker('GET', path, params=params)
# pass response to foundset generator function. As we are only requesting one record though,
# we only re-use the code and immediately consume the first (and only) record via next().
return next(self._process_foundset_response(response))
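# Usage sketch (assumes a logged-in Server instance `fms`; the portal name is a
# placeholder):
#   record = fms.get_record(42, portals=[{'name': 'my_portal', 'limit': 10}])
#   print(record.record_id)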
@_with_auto_relogin
def perform_script(self, name: str,
param: Optional[str] = None) -> Tuple[Optional[int], Optional[str]]:
"""Performs a script with the given name and parameter.
Returns tuple containing script error and result.
Parameters:
--------
name : str
The script name as defined in FileMaker Pro
param: str
Optional script parameter
"""
path = API_PATH['script'].format(
database=self.database,
layout=self.layout,
script_name=name
)
response = self._call_filemaker('GET', path, params={'script.param': param})
script_error = response.get('scriptError', None)
script_error = int(script_error) if script_error else None
script_result = response.get('scriptResult', None)
return script_error, script_result
@_with_auto_relogin
def upload_container(self, record_id: int, field_name: str, file_: IO) -> bool:
"""Uploads the given binary data for the given record id and returns True on success.
Parameters
-----------
record_id : int
The FileMaker record id
field_name : str
Name of the container field on the current layout without TO name. E.g.: my_container
file_ : fileobj
File object as returned by open() in binary mode.
"""
path = API_PATH['record_action'].format(
database=self.database,
layout=self.layout,
record_id=record_id
) + '/containers/' + field_name + '/1'
# requests library handles content type for multipart/form-data incl. boundary
self._set_content_type(False)
self._call_filemaker('POST', path, files={'upload': file_})
return self.last_error == FMSErrorCode.SUCCESS.value
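# Usage sketch (field and file names are placeholders):
#   with open('photo.jpg', 'rb') as f:
#       ok = fms.upload_container(42, 'my_container', f)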
@_with_auto_relogin
def get_records(self, offset: int = 1, limit: int = 100,
sort: Optional[List[Dict[str, str]]] = None,
portals: Optional[List[Dict[str, Any]]] = None,
scripts: Optional[Dict[str, List]] = None,
layout: Optional[str] = None) -> Foundset:
"""Requests all records with given offset and limit and returns result as
(sorted) Foundset instance.
Parameters
-----------
offset : int, optional
Offset for the query, starting at 1, default 1
limit : int, optional
Limit the amount of returned records. Defaults to 100
sort : list of dicts, optional
A list of sort criteria. Example:
[{'fieldName': 'name', 'sortOrder': 'descend'}]
portals : list of dicts, optional
Define which portals you want to include in the result.
Example: [{'name':'objectName', 'offset':1, 'limit':50}]
Defaults to None, which then returns all portals with default offset and limit.
scripts : dict, optional
Specify which scripts should run, at which stage, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
layout : str, optional
Passing a layout name allows you to set the response (!) layout.
This is helpful, for example, if you want to limit the number of fields/portals being
returned and have a dedicated response layout.
"""
path = API_PATH['record'].format(
database=self.database,
layout=self.layout
)
params = build_portal_params(portals, True) if portals else {}
params['_offset'] = offset
params['_limit'] = limit
params['layout.response'] = layout
if sort:
params['_sort'] = json.dumps(sort)
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
params.update(script_params)
response = self._call_filemaker('GET', path, params=params)
info = response.get('dataInfo', {})
return Foundset(self._process_foundset_response(response), info)
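# Example (illustrative sketch): paging through records with a sort order and a
# limited portal. Field and portal names are assumptions.
#
#   foundset = fms.get_records(
#       offset=1, limit=50,
#       sort=[{'fieldName': 'name', 'sortOrder': 'ascend'}],
#       portals=[{'name': 'notes', 'offset': 1, 'limit': 10}]
#   )
#   for record in foundset:
#       print(record.name)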
@_with_auto_relogin
def find(self, query: List[Dict[str, Any]],
sort: Optional[List[Dict[str, str]]] = None,
offset: int = 1, limit: int = 100,
portals: Optional[List[Dict[str, Any]]] = None,
scripts: Optional[Dict[str, List]] = None,
layout: Optional[str] = None) -> Foundset:
"""Finds all records matching query and returns result as a Foundset instance.
Parameters
-----------
query : list of dicts
A list of find queries, specified as 'field_name': 'field_value'
Example:
[{'drink': 'Coffee'}, {'drink': 'Dr. Pepper'}] will find matches for either Coffee
or Dr. Pepper.
You can also negate find requests by adding a key "omit" with value "true".
Generally, all FileMaker Pro operators are supported. So, wildcard finds with "*" or
exact matches with "==" should all work like in Pro.
sort : list of dicts, optional
A list of sort criteria. Example:
[{'fieldName': 'name', 'sortOrder': 'descend'}]
offset : int, optional
Offset for the query, starting at 1, default 1
limit : int, optional
Limit the amount of returned records. Defaults to 100
portals : list of dicts, optional
Define which portals you want to include in the result.
Example: [{'name':'objectName', 'offset':1, 'limit':50}]
Defaults to None, which then returns all portals with default offset and limit.
scripts : dict, optional
Specify which scripts should run, at which stage, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
layout : str, optional
Passing a layout name allows you to set the response (!) layout.
Your find will still be performed based on the Server.layout attribute.
This is helpful, for example, if you want to limit the number of fields/portals being
returned and have a dedicated response layout.
"""
path = API_PATH['find'].format(
database=self.database,
layout=self.layout
)
data = {
'query': query,
'sort': sort,
'limit': str(limit),
'offset': str(offset),
'layout.response': layout
}
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
data.update(script_params)
# build portal param object in FMSDAPI style
portal_params = build_portal_params(portals) if portals else None
if portal_params:
data.update(portal_params)
# FM Data API from v17 cannot handle null values, so we remove all Nones from data
data = {k:v for k, v in data.items() if v is not None}
response = self._call_filemaker('POST', path, data=data)
info = response.get('dataInfo', {})
return Foundset(self._process_foundset_response(response), info)
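# Example (illustrative sketch): an "or" find with an omit request, mirroring the
# docstring above. Field names and values are assumptions.
#
#   foundset = fms.find(
#       query=[{'drink': 'Coffee'}, {'drink': 'Dr. Pepper'}, {'name': 'Bob', 'omit': 'true'}],
#       sort=[{'fieldName': 'name', 'sortOrder': 'descend'}],
#       limit=20
#   )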
def fetch_file(self, file_url: str,
stream: bool = False) -> Tuple[str, Optional[str], Optional[str], requests.Response]:
"""Fetches the file from the given url.
Returns a tuple of filename (unique identifier), content type (e.g. image/png), length,
and a requests response object. You can access contents by response.content.
Example:
url = record.container_field
name, type_, length, content = fms.fetch_file(url)
Parameters
-----------
file_url : str
URL to file as returned by FMS.
Example:
https://address/Streaming_SSL/MainDB/unique-identifier.png?RCType=EmbeddedRCFileProcessor
stream : bool, optional
Set this to True if you don't want the file to immediately be loaded into memory.
This lets you decide how you want to handle large files before downloading them.
Access to headers is given before downloading.
If you are not consuming all data, make sure to close the connection after use by
calling response.close().
"""
name = filename_from_url(file_url)
response = request(method='get',
url=file_url,
verify=self.verify_ssl,
stream=stream,
proxies=self.proxies)
return (name,
response.headers.get('Content-Type'),
response.headers.get('Content-Length'),
response)
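# Example (illustrative sketch): streaming a container download so the body is
# not loaded into memory up front; close the response if the content is not
# fully consumed. The container field name is an assumption.
#
#   url = record.portrait  # container field on a previously fetched record
#   name, type_, length, response = fms.fetch_file(url, stream=True)
#   with open(name, 'wb') as out:
#       for chunk in response.iter_content(chunk_size=8192):
#           out.write(chunk)
#   response.close()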
@_with_auto_relogin
def set_globals(self, globals_: Dict[str, Any]) -> bool:
"""Set global fields for the currently active session. Returns True on success.
Global fields do not need to be placed on the layout and can be used for establishing
relationships in which the global is a match field.
Parameters
-----------
globals_ : dict
Dict of { field name : value }
Note that field names must be fully qualified, i.e. contain the TO name
Example:
{ 'Table::myField': 'whatever' }
"""
path = API_PATH['global'].format(database=self.database)
data = {'globalFields': globals_}
self._call_filemaker('PATCH', path, data=data)
return self.last_error == FMSErrorCode.SUCCESS.value
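# Example (illustrative sketch): setting a global match field for the current
# session before performing a related find. Table and field names are
# assumptions; note the fully qualified 'TO::field' form.
#
#   fms.set_globals({'Globals::g_customer_id': '1234'})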
@property
def last_error(self) -> Optional[int]:
"""Returns last error number returned by FileMaker Server as int.
Error is set by _call_filemaker method. If error == -1, the previous request failed
and no FM error code is available. If no request was made yet, last_error will be None.
"""
error: Optional[int]
if self._last_fm_error:
error = int(self._last_fm_error)
else:
error = None
return error
@property
def last_script_result(self) -> Dict:
"""Returns last script results as returned by FMS as dict in format {type: [error, result]}
Only returns keys that have a value from the last call. I.e. 'presort' will
only be present if the last call performed a presort script.
The returned error (0th element in list) will always be converted to int.
"""
result: Dict = {}
if self._last_script_result:
result = {
k:[int(v[0]), v[1]] for k, v in self._last_script_result.items() if v[0] is not None
}
return result
# 'meta': {
# 'productinfo': '/fmi/data/v1/productInfo',
# 'databasenames': '/fmi/data/v1/databases',
# 'layoutnames': '/fmi/data/v1/databases/{database}/layouts',
# 'scriptnames': '/fmi/data/v1/databases/{database}/scripts',
# 'layoutmetadata': '/fmi/data/v1/databases/{database}/layouts/{layout}'
# },
@_with_auto_relogin
def get_productinfo(self) -> Dict:
"""Fetches product info and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['productinfo']
response = self._call_filemaker('GET', path)
return response
@_with_auto_relogin
def get_databasenames(self) -> Dict:
"""Fetches database names and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['databasenames']
response = self._call_filemaker('GET', path)
# return list of "databases"?
#return response.get('databases')
return response
@_with_auto_relogin
def get_layoutnames(self) -> Dict:
"""Fetches database layout names and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['layoutnames'].format(
database=self.database
)
response = self._call_filemaker('GET', path)
# return list of database "layouts"?
#return response.get('layouts')
return response
@_with_auto_relogin
def get_scriptnames(self) -> Dict:
"""Fetches database script names and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['scriptnames'].format(
database=self.database
)
response = self._call_filemaker('GET', path)
# return list of database "scripts"?
#return response.get('scripts')
return response
@_with_auto_relogin
def get_layoutmetadata(self) -> Dict:
"""Fetches layout metadata and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['layoutmetadata'].format(
database=self.database,
layout=self.layout
)
response = self._call_filemaker('GET', path)
return response
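# Example (illustrative sketch): the metadata helpers return the raw response
# dict from the Data API; the keys shown here follow the commented-out hints
# above and are assumptions about the server's reply shape.
#
#   info = fms.get_productinfo()
#   layouts = fms.get_layoutnames().get('layouts', [])
#   scripts = fms.get_scriptnames().get('scripts', [])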
def _call_filemaker(self, method: str, path: str,
data: Optional[Dict] = None,
params: Optional[Dict] = None,
**kwargs: Any) -> Dict:
"""Calls a FileMaker Server Data API path and returns the parsed fms response data
Parameters
-----------
method : str
The http request method, e.g. POST
path : str
The API path, /fmi/data/v1/databases/:database/...
data : dict of str : str, optional
Dict of parameter data for http request
Can be None if API expects no data, e.g. for logout
params : dict of str : str, optional
Dict of get parameters for http request
Can be None if API expects no params
auth : tuple of str, str, optional
Tuple containing user and password for HTTP basic
auth
"""
url = self.url + path
request_data = json.dumps(data) if data else None
# if we have a token, make sure it's included in the header
# if not, the Authorization header gets removed (necessary for example
# for logout)
self._update_token_header()
response = request(method=method,
headers=self._headers,
url=url,
data=request_data,
verify=self.verify_ssl,
params=params,
proxies=self.proxies,
**kwargs)
try:
response_data = response.json()
except json.decoder.JSONDecodeError as ex:
raise BadJSON(ex, response) from None
fms_messages = response_data.get('messages')
fms_response = response_data.get('response')
self._update_script_result(fms_response)
self._last_fm_error = fms_messages[0].get('code', -1)
if self.last_error != FMSErrorCode.SUCCESS.value:
raise FileMakerError(self._last_fm_error,
fms_messages[0].get('message', 'Unknown error'))
self._set_content_type() # reset content type
return fms_response
def _update_script_result(self, response: Dict) -> Dict[str, List]:
"""Extracts script result data from fms response and updates script result attribute"""
self._last_script_result = {
'prerequest': [
response.get('scriptError.prerequest', None),
response.get('scriptResult.prerequest', None)
],
'presort': [
response.get('scriptError.presort', None),
response.get('scriptResult.presort', None)
],
'after': [
response.get('scriptError', None),
response.get('scriptResult', None)
]
}
return self._last_script_result
def _update_token_header(self) -> Dict[str, str]:
"""Update header to include access token (if available) for subsequent calls."""
if self._token:
self._headers['Authorization'] = 'Bearer ' + self._token
else:
self._headers.pop('Authorization', None)
return self._headers
def _set_content_type(self, type_: Union[str, bool] = 'application/json') -> Dict[str, str]:
"""Set the Content-Type header and returns the updated _headers dict.
Parameters
-----------
type_ : str, boolean
String defining the content type for the HTTP header or False to remove the
Content-Type key from _headers (i.e. let the requests lib handle the Content-Type.)
"""
if isinstance(type_, str):
self._headers['Content-Type'] = type_
elif not type_:
self._headers.pop('Content-Type')
else:
raise ValueError
return self._headers
def _process_foundset_response(self, response: Dict) -> Iterator[Record]:
"""Generator function that takes a response object, brings it into a Foundset/Record
structure and yields processed Records.
Lazily processing and yielding the results is slightly faster than building a list upfront
when you deal with big foundsets containing records that each have many portal records.
It won't save us much memory as we still hold the response, but initial processing time goes
down, and we only need to build the records when we actually use them.
(may think of another approach if it proves to be more pain than gain though)
Parameters
-----------
response : dict
FMS response from a _call_filemaker request
"""
data = response['data']
for record in data:
field_data = record['fieldData']
# Add meta fields to record.
# TODO: this can clash with fields that have the same name. Find a better
# way (maybe prefix?).
# Note that portal foundsets have the recordId field included by default
# (without the related table prefix).
field_data['recordId'] = record.get('recordId')
field_data['modId'] = record.get('modId')
keys = list(field_data)
values = list(field_data.values())
portal_info = {}
for entry in record.get('portalDataInfo', []):
# a portal is identified by its object name, or, if not available, its TO name
portal_identifier = entry.get('portalObjectName', entry['table'])
portal_info[portal_identifier] = entry
for portal_name, rows in record['portalData'].items():
keys.append(PORTAL_PREFIX + portal_name)
# further delay creation of portal record instances
related_records = (
Record(list(row), list(row.values()),
in_portal=True, type_conversion=self.type_conversion
) for row in rows
)
# add portal foundset to record
values.append(Foundset(related_records, portal_info.get(portal_name, {})))
yield Record(keys, values, type_conversion=self.type_conversion)
Update server.py
fix tabs
"""Server class for API connections"""
import json
import importlib.util
import warnings
from typing import (List, Dict, Optional, Any, IO, Tuple, Union, Iterator,
Callable)
from functools import wraps
import requests
from .utils import request, build_portal_params, build_script_params, filename_from_url
from .const import API_PATH, PORTAL_PREFIX, FMSErrorCode
from .exceptions import BadJSON, FileMakerError, RecordError
from .record import Record
from .foundset import Foundset
class Server(object):
"""The server class provides easy access to the FileMaker Data API
Get an instance of this class, login, get a record, logout:
import fmrest
fms = fmrest.Server('https://server-address.com',
user='db user name',
password='db password',
database='db name',
layout='db layout'
)
fms.login()
fms.get_record(1)
fms.logout()
Or use as with statement, logging out automatically:
with fms as my_server:
my_server.login()
# do stuff
"""
def __init__(self, url: str, user: str,
password: str, database: str, layout: str,
data_sources: Optional[List[Dict]] = None,
verify_ssl: Union[bool, str] = True,
type_conversion: bool = False,
auto_relogin: bool = False,
proxies: Optional[Dict] = None) -> None:
"""Initialize the Server class.
Parameters
----------
url : str
Address of the FileMaker Server, e.g. https://my-server.com or https://127.0.0.1
Note: Data API must use https.
user : str
Username to log into your database
Note: make sure it belongs to a privilege set that has fmrest extended privileges.
password : str
Password to log into your database
database : str
Name of database without extension, e.g. Contacts
layout : str
Layout to work with. Can be changed between calls by setting the layout attribute again,
e.g.: fmrest_instance.layout = 'new_layout'.
data_sources : list, optional
List of dicts in format
[{'database': 'db_file', 'username': 'admin', 'password': 'admin'}]
Use this if for your actions you need to be authenticated to multiple DB files.
verify_ssl : bool or str, optional
Switch to set if certificate should be verified.
Use False to disable verification. Default True.
Use string path to a root cert pem file, if you work with a custom CA.
type_conversion : bool, optional
If True, attempt to convert string values into their potential original types.
In previous versions of the FileMaker Data API only strings were returned and there was
no way of knowing the correct type of a requested field value.
Be cautious with this parameter, as results may be different from what you expect!
Values will be converted into int, float, datetime, timedelta, string. This happens
on a record level, not on a foundset level.
auto_relogin : bool, optional
If True, tries to automatically get a new token (re-login) when a
request comes back with a 952 (invalid token) error. Defaults to
False.
proxies : dict, optional
Pass requests through a proxy, configure like so:
{ 'https': 'http://127.0.0.1:8080' }
"""
self.url = url
self.user = user
self.password = password
self.database = database
self.layout = layout
self.data_sources = [] if data_sources is None else data_sources
self.verify_ssl = verify_ssl
self.auto_relogin = auto_relogin
self.proxies = proxies
self.type_conversion = type_conversion
if type_conversion and not importlib.util.find_spec("dateutil"):
warnings.warn('Turning on type_conversion needs the dateutil module, which '
'does not seem to be present on your system.')
if url[:5] != 'https':
raise ValueError('Please make sure to use https, otherwise calls to the Data '
'API will not work.')
self._token: Optional[str] = None
self._last_fm_error: Optional[int] = None
self._last_script_result: Optional[Dict[str, List]] = None
self._headers: Dict[str, str] = {}
self._set_content_type()
def __enter__(self) -> 'Server':
return self
def __exit__(self, exc_type, exc_val, exc_traceback) -> None:
self.logout()
def __repr__(self) -> str:
return '<Server logged_in={} database={} layout={}>'.format(
bool(self._token), self.database, self.layout
)
def _with_auto_relogin(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.auto_relogin:
return f(self, *args, **kwargs)
try:
return f(self, *args, **kwargs)
except FileMakerError:
if self.last_error == FMSErrorCode.INVALID_DAPI_TOKEN.value:
# got invalid token error; try to get a new token
self._token = None
self.login()
# ... now perform original request again
return f(self, *args, **kwargs)
raise # if another error occurred, re-raise the exception
return wrapper
def login(self) -> Optional[str]:
"""Logs into FMServer and returns access token.
Authentication happens via HTTP Basic Auth. Subsequent calls to the API will then use
the return session token.
Note that OAuth is currently not supported.
"""
path = API_PATH['auth'].format(database=self.database, token='')
data = {'fmDataSource': self.data_sources}
response = self._call_filemaker('POST', path, data, auth=(self.user, self.password))
self._token = response.get('token', None)
return self._token
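# Example (illustrative sketch): logging in explicitly, or letting the context
# manager log out automatically as shown in the class docstring. Credentials and
# names are assumptions.
#
#   with fmrest.Server('https://fms.example.com', user='admin', password='secret',
#                      database='Contacts', layout='Contacts') as fms:
#       fms.login()
#       record = fms.get_record(1)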
def logout(self) -> bool:
"""Logs out of current session. Returns True if successful.
Note: this method is also called by __exit__
"""
# token is expected in endpoint for logout
path = API_PATH['auth'].format(database=self.database, token=self._token)
# remove token, so that the Authorization header is not sent for logout
# (_call_filemaker() will update the headers)
self._token = ''
self._call_filemaker('DELETE', path)
return self.last_error == FMSErrorCode.SUCCESS.value
def create(self, record: Record) -> Optional[int]:
"""Shortcut to create_record method. Takes record instance and calls create_record."""
# TODO: support for handling foundset instances inside record instance
return self.create_record(record.to_dict(ignore_portals=True, ignore_internal_ids=True))
@_with_auto_relogin
def create_record(self, field_data: Dict[str, Any],
portals: Optional[Dict[str, Any]] = None,
scripts: Optional[Dict[str, List]] = None) -> Optional[int]:
"""Creates a new record with given field data and returns new internal record id.
Parameters
-----------
field_data : dict
Dict of field names as defined in FileMaker: E.g.: {'name': 'David', 'drink': 'Coffee'}
scripts : dict, optional
Specify which scripts should run when with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
portals : dict
Specify the records that should be created via a portal (must allow creation of records)
Example: {'my_portal': [
{'TO::field': 'hello', 'TO::field2': 'world'},
{'TO::field': 'another record'}
]
"""
path = API_PATH['record'].format(
database=self.database,
layout=self.layout,
)
request_data: Dict = {'fieldData': field_data}
if portals:
request_data['portalData'] = portals
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
request_data.update(script_params)
response = self._call_filemaker('POST', path, request_data)
record_id = response.get('recordId')
return int(record_id) if record_id else None
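# Example (illustrative sketch): creating a record together with related portal
# rows (the portal must allow creation of records). Field and portal names are
# assumptions.
#
#   new_id = fms.create_record(
#       {'name': 'David', 'drink': 'Coffee'},
#       portals={'orders': [{'Orders::item': 'Beans', 'Orders::quantity': '2'}]}
#   )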
def edit(self, record: Record, validate_mod_id: bool = False) -> bool:
"""Shortcut to edit_record method. Takes (modified) record instance and calls edit_record"""
mod_id = record.modification_id if validate_mod_id else None
return self.edit_record(record.record_id, record.modifications(), mod_id)
@_with_auto_relogin
def edit_record(self, record_id: int, field_data: Dict[str, Any],
mod_id: Optional[int] = None, portals: Optional[Dict[str, Any]] = None,
scripts: Optional[Dict[str, List]] = None) -> bool:
"""Edits the record with the given record_id and field_data. Return True on success.
Parameters
-----------
record_id : int
FileMaker's internal record id.
field_data: dict
Dict of field names as defined in FileMaker: E.g.: {'name': 'David', 'drink': 'Coffee'}
To delete related records, use {'deleteRelated': 'Orders.2'}, where 2 is the record id
of the related record.
mod_id: int, optional
Pass a modification id to only edit the record when mod_id matches the current mod_id of
the server. This is only supported for records in the current table, not related
records.
portals : dict
Specify the records that should be edited via a portal.
If recordId is not specified, a new record will be created.
Example: {'my_portal': [
{'TO::field': 'hello', 'recordId': '42'}
]
scripts : dict, optional
Specify which scripts should run, at which stage, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
"""
path = API_PATH['record_action'].format(
database=self.database,
layout=self.layout,
record_id=record_id
)
request_data: Dict = {'fieldData': field_data}
if mod_id:
request_data['modId'] = str(mod_id)
if portals:
request_data['portalData'] = portals
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
request_data.update(script_params)
self._call_filemaker('PATCH', path, request_data)
return self.last_error == FMSErrorCode.SUCCESS.value
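# Example (illustrative sketch): editing a record only while the modification id
# still matches, and deleting a related record in the same call, as described in
# the docstring. Ids and field names are assumptions.
#
#   fms.edit_record(42, {'drink': 'Tea', 'deleteRelated': 'Orders.2'}, mod_id=7)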
def delete(self, record: Record) -> bool:
"""Shortcut to delete_record method. Takes record instance and calls delete_record."""
try:
record_id = record.record_id
except AttributeError:
raise RecordError('Not a valid record instance. record_id is missing.') from None
return self.delete_record(record_id)
@_with_auto_relogin
def delete_record(self, record_id: int, scripts: Optional[Dict[str, List]] = None):
"""Deletes a record for the given record_id. Returns True on success.
Parameters
-----------
record_id : int
FileMaker's internal record id.
scripts : dict, optional
Specify which scripts should run, at which stage, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
"""
path = API_PATH['record_action'].format(
database=self.database,
layout=self.layout,
record_id=record_id
)
params = build_script_params(scripts) if scripts else None
self._call_filemaker('DELETE', path, params=params)
return self.last_error == FMSErrorCode.SUCCESS.value
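# Example (illustrative sketch): deleting a record by its internal id and running
# a script afterwards. The id and script name are assumptions.
#
#   fms.delete_record(42, scripts={'after': ['log_deletion', 'record 42']})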
@_with_auto_relogin
def get_record(self, record_id: int, portals: Optional[List[Dict]] = None,
scripts: Optional[Dict[str, List]] = None,
layout: Optional[str] = None) -> Record:
"""Fetches record with given ID and returns Record instance
Parameters
-----------
record_id : int
The FileMaker record id. Be aware that record ids CAN change (e.g. in cloned databases)
portals : list
A list of dicts in format [{'name':'objectName', 'offset':1, 'limit':50}]
Use this if you want to limit the amount of data returned. Offset and limit are optional
with default values of 1 and 50, respectively.
All portals will be returned when portals==None. Default None.
scripts : dict, optional
Specify which scripts should run, at which stage, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
layout : str, optional
Passing a layout name allows you to set the response (!) layout.
This is helpful, for example, if you want to limit the number of fields/portals being
returned and have a dedicated response layout.
"""
path = API_PATH['record_action'].format(
database=self.database,
layout=self.layout,
record_id=record_id
)
params = build_portal_params(portals, True) if portals else {}
params['layout.response'] = layout
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
params.update(script_params)
response = self._call_filemaker('GET', path, params=params)
# pass response to foundset generator function. As we are only requesting one record though,
# we only re-use the code and immediately consume the first (and only) record via next().
return next(self._process_foundset_response(response))
@_with_auto_relogin
def perform_script(self, name: str,
param: Optional[str] = None) -> Tuple[Optional[int], Optional[str]]:
"""Performs a script with the given name and parameter.
Returns tuple containing script error and result.
Parameters
-----------
name : str
The script name as defined in FileMaker Pro
param: str
Optional script parameter
"""
path = API_PATH['script'].format(
database=self.database,
layout=self.layout,
script_name=name
)
response = self._call_filemaker('GET', path, params={'script.param': param})
script_error = response.get('scriptError', None)
script_error = int(script_error) if script_error else None
script_result = response.get('scriptResult', None)
return script_error, script_result
@_with_auto_relogin
def upload_container(self, record_id: int, field_name: str, file_: IO) -> bool:
"""Uploads the given binary data for the given record id and returns True on success.
Parameters
-----------
record_id : int
The FileMaker record id
field_name : str
Name of the container field on the current layout without TO name. E.g.: my_container
file_ : fileobj
File object as returned by open() in binary mode.
"""
path = API_PATH['record_action'].format(
database=self.database,
layout=self.layout,
record_id=record_id
) + '/containers/' + field_name + '/1'
# requests library handles content type for multipart/form-data incl. boundary
self._set_content_type(False)
self._call_filemaker('POST', path, files={'upload': file_})
return self.last_error == FMSErrorCode.SUCCESS.value
@_with_auto_relogin
def get_records(self, offset: int = 1, limit: int = 100,
sort: Optional[List[Dict[str, str]]] = None,
portals: Optional[List[Dict[str, Any]]] = None,
scripts: Optional[Dict[str, List]] = None,
layout: Optional[str] = None) -> Foundset:
"""Requests all records with given offset and limit and returns result as
(sorted) Foundset instance.
Parameters
-----------
offset : int, optional
Offset for the query, starting at 1, default 1
limit : int, optional
Limit the amount of returned records. Defaults to 100
sort : list of dicts, optional
A list of sort criteria. Example:
[{'fieldName': 'name', 'sortOrder': 'descend'}]
portals : list of dicts, optional
Define which portals you want to include in the result.
Example: [{'name':'objectName', 'offset':1, 'limit':50}]
Defaults to None, which then returns all portals with default offset and limit.
scripts : dict, optional
Specify which scripts should run, at which stage, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
layout : str, optional
Passing a layout name allows you to set the response (!) layout.
This is helpful, for example, if you want to limit the number of fields/portals being
returned and have a dedicated response layout.
"""
path = API_PATH['record'].format(
database=self.database,
layout=self.layout
)
params = build_portal_params(portals, True) if portals else {}
params['_offset'] = offset
params['_limit'] = limit
params['layout.response'] = layout
if sort:
params['_sort'] = json.dumps(sort)
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
params.update(script_params)
response = self._call_filemaker('GET', path, params=params)
info = response.get('dataInfo', {})
return Foundset(self._process_foundset_response(response), info)
@_with_auto_relogin
def find(self, query: List[Dict[str, Any]],
sort: Optional[List[Dict[str, str]]] = None,
offset: int = 1, limit: int = 100,
portals: Optional[List[Dict[str, Any]]] = None,
scripts: Optional[Dict[str, List]] = None,
layout: Optional[str] = None) -> Foundset:
"""Finds all records matching query and returns result as a Foundset instance.
Parameters
-----------
query : list of dicts
A list of find queries, specified as 'field_name': 'field_value'
Example:
[{'drink': 'Coffee'}, {'drink': 'Dr. Pepper'}] will find matches for either Coffee
or Dr. Pepper.
You can also negate find requests by adding a key "omit" with value "true".
Generally, all FileMaker Pro operators are supported. So, wildcard finds with "*" or
exact matches with "==" should all work like in Pro.
sort : list of dicts, optional
A list of sort criteria. Example:
[{'fieldName': 'name', 'sortOrder': 'descend'}]
offset : int, optional
Offset for the query, starting at 1, default 1
limit : int, optional
Limit the amount of returned records. Defaults to 100
portals : list of dicts, optional
Define which portals you want to include in the result.
Example: [{'name':'objectName', 'offset':1, 'limit':50}]
Defaults to None, which then returns all portals with default offset and limit.
scripts : dict, optional
Specify which scripts should run, at which stage, and with which parameters
Example: {'prerequest': ['my_script', 'my_param']}
Allowed types: 'prerequest', 'presort', 'after'
List should have length of 2 (both script name and parameter are required.)
layout : str, optional
Passing a layout name allows you to set the response (!) layout.
Your find will still be performed based on the Server.layout attribute.
This is helpful, for example, if you want to limit the number of fields/portals being
returned and have a dedicated response layout.
"""
path = API_PATH['find'].format(
database=self.database,
layout=self.layout
)
data = {
'query': query,
'sort': sort,
'limit': str(limit),
'offset': str(offset),
'layout.response': layout
}
# build script param object in FMSDAPI style
script_params = build_script_params(scripts) if scripts else None
if script_params:
data.update(script_params)
# build portal param object in FMSDAPI style
portal_params = build_portal_params(portals) if portals else None
if portal_params:
data.update(portal_params)
# FM Data API from v17 cannot handle null values, so we remove all Nones from data
data = {k:v for k, v in data.items() if v is not None}
response = self._call_filemaker('POST', path, data=data)
info = response.get('dataInfo', {})
return Foundset(self._process_foundset_response(response), info)
def fetch_file(self, file_url: str,
stream: bool = False) -> Tuple[str, Optional[str], Optional[str], requests.Response]:
"""Fetches the file from the given url.
Returns a tuple of filename (unique identifier), content type (e.g. image/png), length,
and a requests response object. You can access contents by response.content.
Example:
url = record.container_field
name, type_, length, content = fms.fetch_file(url)
Parameters
-----------
file_url : str
URL to file as returned by FMS.
Example:
https://address/Streaming_SSL/MainDB/unique-identifier.png?RCType=EmbeddedRCFileProcessor
stream : bool, optional
Set this to True if you don't want the file to immediately be loaded into memory.
This lets you decide how you want to handle large files before downloading them.
Access to headers is given before downloading.
If you are not consuming all data, make sure to close the connection after use by
calling response.close().
"""
name = filename_from_url(file_url)
response = request(method='get',
url=file_url,
verify=self.verify_ssl,
stream=stream,
proxies=self.proxies)
return (name,
response.headers.get('Content-Type'),
response.headers.get('Content-Length'),
response)
@_with_auto_relogin
def set_globals(self, globals_: Dict[str, Any]) -> bool:
"""Set global fields for the currently active session. Returns True on success.
Global fields do not need to be placed on the layout and can be used for establishing
relationships in which the global is a match field.
Parameters
-----------
globals_ : dict
Dict of { field name : value }
Note that field names must be fully qualified, i.e. contain the TO name
Example:
{ 'Table::myField': 'whatever' }
"""
path = API_PATH['global'].format(database=self.database)
data = {'globalFields': globals_}
self._call_filemaker('PATCH', path, data=data)
return self.last_error == FMSErrorCode.SUCCESS.value
@property
def last_error(self) -> Optional[int]:
"""Returns last error number returned by FileMaker Server as int.
Error is set by _call_filemaker method. If error == -1, the previous request failed
and no FM error code is available. If no request was made yet, last_error will be None.
"""
error: Optional[int]
if self._last_fm_error:
error = int(self._last_fm_error)
else:
error = None
return error
@property
def last_script_result(self) -> Dict:
"""Returns last script results as returned by FMS as dict in format {type: [error, result]}
Only returns keys that have a value from the last call. I.e. 'presort' will
only be present if the last call performed a presort script.
The returned error (0th element in list) will always be converted to int.
"""
result: Dict = {}
if self._last_script_result:
result = {
k:[int(v[0]), v[1]] for k, v in self._last_script_result.items() if v[0] is not None
}
return result
# 'meta': {
# 'productinfo': '/fmi/data/v1/productInfo',
# 'databasenames': '/fmi/data/v1/databases',
# 'layoutnames': '/fmi/data/v1/databases/{database}/layouts',
# 'scriptnames': '/fmi/data/v1/databases/{database}/scripts',
# 'layoutmetadata': '/fmi/data/v1/databases/{database}/layouts/{layout}'
# },
@_with_auto_relogin
def get_productinfo(self) -> Dict:
"""Fetches product info and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['productinfo']
response = self._call_filemaker('GET', path)
return response
@_with_auto_relogin
def get_databasenames(self) -> Dict:
"""Fetches database names and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['databasenames']
response = self._call_filemaker('GET', path)
# return list of "databases"?
#return response.get('databases')
return response
@_with_auto_relogin
def get_layoutnames(self) -> Dict:
"""Fetches database layout names and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['layoutnames'].format(
database=self.database
)
response = self._call_filemaker('GET', path)
# return list of database "layouts"?
#return response.get('layouts')
return response
@_with_auto_relogin
def get_scriptnames(self) -> Dict:
"""Fetches database script names and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['scriptnames'].format(
database=self.database
)
response = self._call_filemaker('GET', path)
# return list of database "scripts"?
#return response.get('scripts')
return response
@_with_auto_relogin
def get_layoutmetadata(self) -> Dict:
"""Fetches layout metadata and returns Dict instance
Parameters
-----------
none
"""
path = API_PATH['meta']['layoutmetadata'].format(
database=self.database,
layout=self.layout
)
response = self._call_filemaker('GET', path)
return response
def _call_filemaker(self, method: str, path: str,
data: Optional[Dict] = None,
params: Optional[Dict] = None,
**kwargs: Any) -> Dict:
"""Calls a FileMaker Server Data API path and returns the parsed fms response data
Parameters
-----------
method : str
The http request method, e.g. POST
path : str
The API path, /fmi/data/v1/databases/:database/...
data : dict of str : str, optional
Dict of parameter data for http request
Can be None if API expects no data, e.g. for logout
params : dict of str : str, optional
Dict of get parameters for http request
Can be None if API expects no params
auth : tuple of str, str, optional
Tuple containing user and password for HTTP basic
auth
"""
url = self.url + path
request_data = json.dumps(data) if data else None
# if we have a token, make sure it's included in the header
# if not, the Authorization header gets removed (necessary for example
# for logout)
self._update_token_header()
response = request(method=method,
headers=self._headers,
url=url,
data=request_data,
verify=self.verify_ssl,
params=params,
proxies=self.proxies,
**kwargs)
try:
response_data = response.json()
except json.decoder.JSONDecodeError as ex:
raise BadJSON(ex, response) from None
fms_messages = response_data.get('messages')
fms_response = response_data.get('response')
self._update_script_result(fms_response)
self._last_fm_error = fms_messages[0].get('code', -1)
if self.last_error != FMSErrorCode.SUCCESS.value:
raise FileMakerError(self._last_fm_error,
fms_messages[0].get('message', 'Unknown error'))
self._set_content_type() # reset content type
return fms_response
def _update_script_result(self, response: Dict) -> Dict[str, List]:
"""Extracts script result data from fms response and updates script result attribute"""
self._last_script_result = {
'prerequest': [
response.get('scriptError.prerequest', None),
response.get('scriptResult.prerequest', None)
],
'presort': [
response.get('scriptError.presort', None),
response.get('scriptResult.presort', None)
],
'after': [
response.get('scriptError', None),
response.get('scriptResult', None)
]
}
return self._last_script_result
def _update_token_header(self) -> Dict[str, str]:
"""Update header to include access token (if available) for subsequent calls."""
if self._token:
self._headers['Authorization'] = 'Bearer ' + self._token
else:
self._headers.pop('Authorization', None)
return self._headers
def _set_content_type(self, type_: Union[str, bool] = 'application/json') -> Dict[str, str]:
"""Set the Content-Type header and returns the updated _headers dict.
Parameters
-----------
type_ : str, boolean
String defining the content type for the HTTP header or False to remove the
Content-Type key from _headers (i.e. let the requests lib handle the Content-Type.)
"""
if isinstance(type_, str):
self._headers['Content-Type'] = type_
elif not type_:
self._headers.pop('Content-Type')
else:
raise ValueError
return self._headers
def _process_foundset_response(self, response: Dict) -> Iterator[Record]:
"""Generator function that takes a response object, brings it into a Foundset/Record
structure and yields processed Records.
Lazily processing and yielding the results is slightly faster than building a list upfront
when you deal with big foundsets containing records that each have many portal records.
It won't save us much memory as we still hold the response, but initial processing time goes
down, and we only need to build the records when we actually use them.
(may think of another approach if it proves to be more pain than gain though)
Parameters
-----------
response : dict
FMS response from a _call_filemaker request
"""
data = response['data']
for record in data:
field_data = record['fieldData']
# Add meta fields to record.
# TODO: this can clash with fields that have the same name. Find a better
# way (maybe prefix?).
# Note that portal foundsets have the recordId field included by default
# (without the related table prefix).
field_data['recordId'] = record.get('recordId')
field_data['modId'] = record.get('modId')
keys = list(field_data)
values = list(field_data.values())
portal_info = {}
for entry in record.get('portalDataInfo', []):
# a portal is identified by its object name, or, if not available, its TO name
portal_identifier = entry.get('portalObjectName', entry['table'])
portal_info[portal_identifier] = entry
for portal_name, rows in record['portalData'].items():
keys.append(PORTAL_PREFIX + portal_name)
# further delay creation of portal record instances
related_records = (
Record(list(row), list(row.values()),
in_portal=True, type_conversion=self.type_conversion
) for row in rows
)
# add portal foundset to record
values.append(Foundset(related_records, portal_info.get(portal_name, {})))
yield Record(keys, values, type_conversion=self.type_conversion)
r"""Colored strings that behave mostly like strings
>>> s = fmtstr("Hey there!", 'red')
>>> s
red("Hey there!")
>>> s[4:7]
red("the")
>>> red_on_blue = fmtstr('hello', 'red', 'on_blue')
>>> blue_on_red = fmtstr('there', fg='blue', bg='red')
>>> green = fmtstr('!', 'green')
>>> full = red_on_blue + ' ' + blue_on_red + green
>>> full
on_blue(red("hello"))+" "+on_red(blue("there"))+green("!")
>>> str(full)
'\x1b[31m\x1b[44mhello\x1b[49m\x1b[39m \x1b[34m\x1b[41mthere\x1b[49m\x1b[39m\x1b[32m!\x1b[39m'
>>> fmtstr(', ').join(['a', fmtstr('b'), fmtstr('c', 'blue')])
"a"+", "+"b"+", "+blue("c")
"""
#TODO add a way to composite text without losing original formatting information
import sys
import re
from .escseqparse import parse
from .termformatconstants import FG_COLORS, BG_COLORS, STYLES
from .termformatconstants import FG_NUMBER_TO_COLOR, BG_NUMBER_TO_COLOR
from .termformatconstants import RESET_ALL, RESET_BG, RESET_FG
from .termformatconstants import seq
PY3 = sys.version_info[0] >= 3
if PY3:
unicode = str
xforms = {
'fg' : lambda x, v: '%s%s%s' % (seq(v), x, seq(RESET_FG)),
'bg' : lambda x, v: seq(v)+x+seq(RESET_BG),
'bold' : lambda x: seq(STYLES['bold'])+x+seq(RESET_ALL),
'underline' : lambda x: seq(STYLES['underline'])+x+seq(RESET_ALL),
'blink' : lambda x: seq(STYLES['blink'])+x+seq(RESET_ALL),
'invert' : lambda x: seq(STYLES['invert'])+x+seq(RESET_ALL),
}
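# Example (illustrative sketch): each transform wraps a string in the escape
# sequence for its attribute plus the matching reset. Assuming seq(n) renders
# '\x1b[<n>m' and RESET_FG is the standard ANSI 39:
#
#   xforms['fg']('hi', 31)   # -> '\x1b[31mhi\x1b[39m'  (red text, then reset fg)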
class FrozenDict(dict):
"""Immutable dictionary class"""
def __setitem__(self, key, value):
raise Exception("Cannot change value.")
class BaseFmtStr(object):
"""Formatting annotations on a string"""
def __init__(self, string, atts=None):
self._s = string
self._atts = tuple(atts.items()) if atts else tuple()
def _get_atts(self):
return FrozenDict(self._atts)
atts = property(_get_atts, None, None,
'A copy of the current attributes dictionary')
s = property(lambda self: self._s) #makes self.s immutable
def __len__(self):
return len(self.s)
#TODO cache this if immutable
@property
def color_str(self):
s = self.s
for k, v in sorted(self.atts.items()):
if k not in xforms: continue
if v is True:
s = xforms[k](s)
elif v is False:
continue
else:
s = xforms[k](s, v)
return s
def __unicode__(self):
value = self.color_str
if isinstance(value, bytes):
return value.decode('utf8')
return value
def __eq__(self, other):
return self.s == other.s and self.atts == other.atts
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode(self).encode('utf8')
def __getitem__(self, index):
return self.color_str[index]
def __repr__(self):
def pp_att(att):
if att == 'fg': return FG_NUMBER_TO_COLOR[self.atts[att]]
elif att == 'bg': return 'on_' + BG_NUMBER_TO_COLOR[self.atts[att]]
else: return att
return (''.join(
pp_att(att)+'('
for att in sorted(self.atts)) +
('"%s"' % self.s) + ')'*len(self.atts))
# TODO
# Copy with atts
# Splice, insert as a special case thereof?
class FmtStr(object):
def __init__(self, *components):
# The assertions below could be useful for debugging, but slow things down considerably
#assert all([len(x) > 0 for x in components])
#self.basefmtstrs = [x for x in components if len(x) > 0]
self.basefmtstrs = list(components)
# caching these leads to a significant speedup
self._str = None
self._unicode = None
self._len = None
self._s = None
def splice(self, start, end, string):
"""Replaces the characters at self.s[start:end] with the input string.
"""
pass
def insert(self, index, string):
"""Inserts the input string at the given index of the fmtstr by creating
a new list of basefmtstrs. If the insertion occurs within an existing
basefmtstr, said basefmtstr is divided into two new basefmtstrs.
"""
new_components = []
for bfs in self.basefmtstrs:
# TODO: don't want to iterate through new_components every time
new_str = ''.join(new_components)
cur_len = len(new_str)
if cur_len >= index or len(''.join((new_str, bfs.s))) <= index:
# Either done inserting or not yet at the insertion index; keep component as-is
new_components.append(bfs.s)
else:
divide = index - cur_len
new_components.extend([bfs.s[:divide], string, bfs.s[divide:]])
# should be a fmtstr, not a regular string
return ''.join(new_components)
@classmethod
def from_str(cls, s):
r"""
>>> fmtstr("|"+fmtstr("hey", fg='red', bg='blue')+"|")
"|"+on_blue(red("hey"))+"|"
>>> fmtstr('|\x1b[31m\x1b[44mhey\x1b[49m\x1b[39m|')
"|"+on_blue(red("hey"))+"|"
"""
if '\x1b[' in s:
tokens_and_strings = parse(s)
bases = []
cur_fmt = {}
for x in tokens_and_strings:
if isinstance(x, dict):
cur_fmt.update(x)
elif isinstance(x, (bytes, unicode)):
atts = parse_args('', dict((k, v) for k,v in cur_fmt.items() if v is not None))
bases.append(BaseFmtStr(x, atts=atts))
else:
raise Exception("logic error")
return FmtStr(*bases)
else:
return FmtStr(BaseFmtStr(s))
def copy_with_new_atts(self, **attributes):
self._unicode = None
self._str = None
# Copy original basefmtstrs, but with new attributes
new_basefmtstrs = []
for bfs in self.basefmtstrs:
new_atts = bfs.atts
new_atts.update(attributes)
new_basefmtstrs.append(BaseFmtStr(bfs.s, new_atts))
# self.basefmtstrs = new_basefmtstrs
return FmtStr(*new_basefmtstrs)
def join(self, iterable):
iterable = list(iterable)
basefmtstrs = []
for i, s in enumerate(iterable):
if isinstance(s, FmtStr):
basefmtstrs.extend(s.basefmtstrs)
elif isinstance(s, (bytes, unicode)):
basefmtstrs.extend(fmtstr(s).basefmtstrs) #TODO just make a basefmtstr directly
else:
raise TypeError("expected str or FmtStr, %r found" % type(s))
if i < len(iterable) - 1:
basefmtstrs.extend(self.basefmtstrs)
return FmtStr(*basefmtstrs)
#TODO make this split work like str.split
def split(self, on_char):
s = self.s
matches = list(re.finditer(on_char, s))
return [self[start:end] for start, end in zip(
[0] + [m.end() for m in matches],
[m.start() for m in matches] + [len(s)])]
def __unicode__(self):
if self._unicode is not None:
return self._unicode
self._unicode = ''.join(unicode(fs) for fs in self.basefmtstrs)
return self._unicode
def __str__(self):
if self._str is not None:
return self._str
self._str = ''.join(str(fs) for fs in self.basefmtstrs)
return self._str
def __len__(self):
if self._len is not None:
return self._len
self._len = sum(len(fs) for fs in self.basefmtstrs)
return self._len
def __repr__(self):
return '+'.join(repr(fs) for fs in self.basefmtstrs)
def __eq__(self, other):
return str(self) == str(other)
def __add__(self, other):
if isinstance(other, FmtStr):
return FmtStr(*(self.basefmtstrs + other.basefmtstrs))
elif isinstance(other, (bytes, unicode)):
return FmtStr(*(self.basefmtstrs + [BaseFmtStr(other)]))
else:
raise TypeError('Can\'t add %r and %r' % (self, other))
def __radd__(self, other):
if isinstance(other, FmtStr):
return FmtStr(*(x for x in (other.basefmtstrs + self.basefmtstrs)))
elif isinstance(other, (bytes, unicode)):
return FmtStr(*(x for x in ([BaseFmtStr(other)] + self.basefmtstrs)))
else:
raise TypeError('Can\'t add those')
def __mul__(self, other):
if isinstance(other, int):
return sum([FmtStr(*(x for x in self.basefmtstrs)) for _ in range(other)], FmtStr())
raise TypeError('Can\'t multiply those')
#TODO ensure empty FmtStr isn't a problem
@property
def shared_atts(self):
"""Gets atts shared among all nonzero length component BaseFmtStrs"""
#TODO cache this, could get ugly for large FmtStrs
atts = {}
first = self.basefmtstrs[0]
for att in sorted(first.atts):
#TODO how to write this without the '???'?
if all(fs.atts.get(att, '???') == first.atts[att] for fs in self.basefmtstrs if len(fs) > 0):
atts[att] = first.atts[att]
return atts
def __getattr__(self, att):
# thanks to @aerenchyma/@jczetta
def func_help(*args, **kwargs):
result = getattr(self.s, att)(*args, **kwargs)
if isinstance(result, (bytes, unicode)):
return fmtstr(result, **self.shared_atts)
elif isinstance(result, list):
return [fmtstr(x, **self.shared_atts) for x in result]
else:
return result
return func_help
@property
def s(self):
if self._s is not None:
return self._s
self._s = "".join(fs.s for fs in self.basefmtstrs)
return self._s
def __getitem__(self, index):
index = normalize_slice(len(self), index)
counter = 0
parts = []
for fs in self.basefmtstrs:
if index.start < counter + len(fs) and index.stop > counter:
start = max(0, index.start - counter)
end = index.stop - counter
if end - start == len(fs):
parts.append(fs)
else:
s_part = fs.s[max(0, index.start - counter):index.stop - counter]
parts.append(BaseFmtStr(s_part, fs.atts))
counter += len(fs)
if index.stop < counter:
break
return FmtStr(*parts)
def _getitem_normalized(self, index):
"""Builds the more compact fmtstrs by using fromstr( of the control sequences)"""
index = normalize_slice(len(self), index)
counter = 0
output = ''
for fs in self.basefmtstrs:
if index.start < counter + len(fs) and index.stop > counter:
s_part = fs.s[max(0, index.start - counter):index.stop - counter]
piece = BaseFmtStr(s_part, fs.atts).color_str
output += piece
counter += len(fs)
if index.stop < counter:
break
return fmtstr(output)
def __setitem__(self, index, value):
raise Exception("No!")
self._unicode = None
self._str = None
self._len = None
index = normalize_slice(len(self), index)
if isinstance(value, (bytes, unicode)):
value = FmtStr(BaseFmtStr(value))
elif not isinstance(value, FmtStr):
raise ValueError('Should be str or FmtStr')
counter = 0
old_basefmtstrs = self.basefmtstrs[:]
self.basefmtstrs = []
inserted = False
for fs in old_basefmtstrs:
if index.start < counter + len(fs) and index.stop > counter:
start = max(0, index.start - counter)
end = index.stop - counter
front = BaseFmtStr(fs.s[:start], fs.atts)
# stuff
new = value
back = BaseFmtStr(fs.s[end:], fs.atts)
if len(front) > 0:
self.basefmtstrs.append(front)
if len(new) > 0 and not inserted:
self.basefmtstrs.extend(new.basefmtstrs)
inserted = True
if len(back) > 0:
self.basefmtstrs.append(back)
else:
self.basefmtstrs.append(fs)
counter += len(fs)
def copy(self):
return FmtStr(*self.basefmtstrs)
def normalize_slice(length, index):
is_int = False
if isinstance(index, int):
is_int = True
index = slice(index, index+1)
if index.start is None:
index = slice(0, index.stop, index.step)
if index.stop is None:
index = slice(index.start, length, index.step)
if index.start < -1:
index = slice(length - index.start, index.stop, index.step)
if index.stop < -1:
index = slice(index.start, length - index.stop, index.step)
if index.step is not None:
raise NotImplementedError("You can't use steps with slicing yet")
if is_int:
if index.start < 0 or index.start > length:
raise IndexError("index out of bounds")
return index
def parse_args(args, kwargs):
"""Returns a kwargs dictionary by turning args into kwargs"""
if 'style' in kwargs:
args += (kwargs['style'],)
del kwargs['style']
for arg in args:
if not isinstance(arg, (bytes, unicode)):
raise ValueError("args must be strings:" + repr(args))
if arg.lower() in FG_COLORS:
if 'fg' in kwargs: raise ValueError("fg specified twice")
kwargs['fg'] = FG_COLORS[arg]
elif arg.lower().startswith('on_') and arg[3:].lower() in BG_COLORS:
if 'bg' in kwargs: raise ValueError("bg specified twice")
kwargs['bg'] = BG_COLORS[arg[3:]]
elif arg.lower() in STYLES:
kwargs[arg] = True
else:
raise ValueError("couldn't process arg: "+repr(arg))
for k in kwargs:
if k not in ['fg', 'bg'] + list(STYLES.keys()):
raise ValueError("Can't apply that transformation")
if 'fg' in kwargs:
if kwargs['fg'] in FG_COLORS:
kwargs['fg'] = FG_COLORS[kwargs['fg']]
if kwargs['fg'] not in list(FG_COLORS.values()):
raise ValueError("Bad fg value: %r" % (kwargs['fg'],))
if 'bg' in kwargs:
if kwargs['bg'] in BG_COLORS:
kwargs['bg'] = BG_COLORS[kwargs['bg']]
if kwargs['bg'] not in list(BG_COLORS.values()):
raise ValueError("Bad bg value: %r" % (kwargs['bg'],))
return kwargs
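# Example (illustrative sketch): parse_args turns positional colour/style names
# into the fg/bg/style kwargs used by fmtstr. With the standard ANSI codes from
# termformatconstants this would give roughly:
#
#   parse_args(('blue', 'on_red'), {'bold': True})
#   # -> {'fg': 34, 'bg': 41, 'bold': True}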
def fmtstr(string, *args, **kwargs):
"""
Convenience function for creating a FmtStr
>>> fmtstr('asdf', 'blue', 'on_red')
on_red(blue("asdf"))
"""
atts = parse_args(args, kwargs)
if isinstance(string, FmtStr):
new_str = string.copy_with_new_atts(**atts)
return new_str
elif isinstance(string, (bytes, unicode)):
string = FmtStr.from_str(string)
new_str = string.copy_with_new_atts(**atts)
return new_str
else:
raise ValueError("Bad Args: %r (of type %s), %r, %r" % (string, type(string), args, kwargs))
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
#f = FmtStr.from_str(str(fmtstr('tom', 'blue')))
#print((repr(f)))
#f = fmtstr('stuff', fg='blue', bold=True)
#print((repr(f)))
Working insert method for fmtstrs with optional ending index argument?
r"""Colored strings that behave mostly like strings
>>> s = fmtstr("Hey there!", 'red')
>>> s
red("Hey there!")
>>> s[4:7]
red("the")
>>> red_on_blue = fmtstr('hello', 'red', 'on_blue')
>>> blue_on_red = fmtstr('there', fg='blue', bg='red')
>>> green = fmtstr('!', 'green')
>>> full = red_on_blue + ' ' + blue_on_red + green
>>> full
on_blue(red("hello"))+" "+on_red(blue("there"))+green("!")
>>> str(full)
'\x1b[31m\x1b[44mhello\x1b[49m\x1b[39m \x1b[34m\x1b[41mthere\x1b[49m\x1b[39m\x1b[32m!\x1b[39m'
>>> fmtstr(', ').join(['a', fmtstr('b'), fmtstr('c', 'blue')])
"a"+", "+"b"+", "+blue("c")
"""
#TODO add a way to composite text without losing original formatting information
import sys
import re
from .escseqparse import parse
from .termformatconstants import FG_COLORS, BG_COLORS, STYLES
from .termformatconstants import FG_NUMBER_TO_COLOR, BG_NUMBER_TO_COLOR
from .termformatconstants import RESET_ALL, RESET_BG, RESET_FG
from .termformatconstants import seq
PY3 = sys.version_info[0] >= 3
if PY3:
unicode = str
xforms = {
'fg' : lambda x, v: '%s%s%s' % (seq(v), x, seq(RESET_FG)),
'bg' : lambda x, v: seq(v)+x+seq(RESET_BG),
'bold' : lambda x: seq(STYLES['bold'])+x+seq(RESET_ALL),
'underline' : lambda x: seq(STYLES['underline'])+x+seq(RESET_ALL),
'blink' : lambda x: seq(STYLES['blink'])+x+seq(RESET_ALL),
'invert' : lambda x: seq(STYLES['invert'])+x+seq(RESET_ALL),
}
class FrozenDict(dict):
"""Immutable dictionary class"""
def __setitem__(self, key, value):
raise Exception("Cannot change value.")
class BaseFmtStr(object):
"""Formatting annotations on a string"""
def __init__(self, string, atts=None):
self._s = string
self._atts = tuple(atts.items()) if atts else tuple()
def _get_atts(self):
return FrozenDict(self._atts)
atts = property(_get_atts, None, None,
'A copy of the current attributes dictionary')
s = property(lambda self: self._s) #makes self.s immutable
def __len__(self):
return len(self.s)
#TODO cache this if immutable
@property
def color_str(self):
s = self.s
for k, v in sorted(self.atts.items()):
if k not in xforms: continue
if v is True:
s = xforms[k](s)
elif v is False:
continue
else:
s = xforms[k](s, v)
return s
def __unicode__(self):
value = self.color_str
if isinstance(value, bytes):
return value.decode('utf8')
return value
def __eq__(self, other):
return self.s == other.s and self.atts == other.atts
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode(self).encode('utf8')
def __getitem__(self, index):
return self.color_str[index]
def __repr__(self):
def pp_att(att):
if att == 'fg': return FG_NUMBER_TO_COLOR[self.atts[att]]
elif att == 'bg': return 'on_' + BG_NUMBER_TO_COLOR[self.atts[att]]
else: return att
return (''.join(
pp_att(att)+'('
for att in sorted(self.atts)) +
('"%s"' % self.s) + ')'*len(self.atts))
# TODO
# Copy with atts
# Splice, insert as a special case thereof?
class FmtStr(object):
def __init__(self, *components):
# The assertions below could be useful for debugging, but slow things down considerably
#assert all([len(x) > 0 for x in components])
#self.basefmtstrs = [x for x in components if len(x) > 0]
self.basefmtstrs = list(components)
# caching these leads to a significant speedup
self._str = None
self._unicode = None
self._len = None
self._s = None
# def splice(self, start, end, string):
# """Replaces the characters at self.s[start:end] with the input string.
# """
# pass
def insert(self, string, start, end=None):
"""Inserts the input string at the given index of the fmtstr by creating
a new list of basefmtstrs. If the insertion occurs within an existing
basefmtstr, said basefmtstr is divided into two new basefmtstrs.
"""
new_components = []
for bfs in self.basefmtstrs:
# TODO: don't want to iterate through new_components every time
new_str = ''.join(new_components)
cur_len = len(new_str)
if cur_len >= start or len(''.join((new_str, bfs.s))) <= start:
# Either done inserting or have not yet reached the starting index
new_components.append(bfs.s)
else:
divide = start - cur_len
head = bfs.s[:divide]
tail = bfs.s[end:] if end else bfs.s[divide:]
new_components.extend([head, string, tail])
# should be a fmtstr, not a regular string
return ''.join(new_components)
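# Usage sketch for insert() as currently written (illustrative, mid-string insertion only;
# note the TODO above -- the result is a plain str and any formatting is dropped):
#   fmtstr('ab', 'red').insert('X', 1)  ->  'aXb'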
@classmethod
def from_str(cls, s):
r"""
>>> fmtstr("|"+fmtstr("hey", fg='red', bg='blue')+"|")
"|"+on_blue(red("hey"))+"|"
>>> fmtstr('|\x1b[31m\x1b[44mhey\x1b[49m\x1b[39m|')
"|"+on_blue(red("hey"))+"|"
"""
if '\x1b[' in s:
tokens_and_strings = parse(s)
bases = []
cur_fmt = {}
for x in tokens_and_strings:
if isinstance(x, dict):
cur_fmt.update(x)
elif isinstance(x, (bytes, unicode)):
atts = parse_args('', dict((k, v) for k,v in cur_fmt.items() if v is not None))
bases.append(BaseFmtStr(x, atts=atts))
else:
raise Exception("logic error")
return FmtStr(*bases)
else:
return FmtStr(BaseFmtStr(s))
def copy_with_new_atts(self, **attributes):
self._unicode = None
self._str = None
# Copy original basefmtstrs, but with new attributes
new_basefmtstrs = []
for bfs in self.basefmtstrs:
new_atts = bfs.atts
new_atts.update(attributes)
new_basefmtstrs.append(BaseFmtStr(bfs.s, new_atts))
# self.basefmtstrs = new_basefmtstrs
return FmtStr(*new_basefmtstrs)
def join(self, iterable):
iterable = list(iterable)
basefmtstrs = []
for i, s in enumerate(iterable):
if isinstance(s, FmtStr):
basefmtstrs.extend(s.basefmtstrs)
elif isinstance(s, (bytes, unicode)):
basefmtstrs.extend(fmtstr(s).basefmtstrs) #TODO just make a basefmtstr directly
else:
raise TypeError("expected str or FmtStr, %r found" % type(s))
if i < len(iterable) - 1:
basefmtstrs.extend(self.basefmtstrs)
return FmtStr(*basefmtstrs)
#TODO make this split work like str.split
def split(self, on_char):
s = self.s
matches = list(re.finditer(on_char, s))
return [self[start:end] for start, end in zip(
[0] + [m.end() for m in matches],
[m.start() for m in matches] + [len(s)])]
def __unicode__(self):
if self._unicode is not None:
return self._unicode
self._unicode = ''.join(unicode(fs) for fs in self.basefmtstrs)
return self._unicode
def __str__(self):
if self._str is not None:
return self._str
self._str = ''.join(str(fs) for fs in self.basefmtstrs)
return self._str
def __len__(self):
if self._len is not None:
return self._len
self._len = sum(len(fs) for fs in self.basefmtstrs)
return self._len
def __repr__(self):
return '+'.join(repr(fs) for fs in self.basefmtstrs)
def __eq__(self, other):
return str(self) == str(other)
def __add__(self, other):
if isinstance(other, FmtStr):
return FmtStr(*(self.basefmtstrs + other.basefmtstrs))
elif isinstance(other, (bytes, unicode)):
return FmtStr(*(self.basefmtstrs + [BaseFmtStr(other)]))
else:
raise TypeError('Can\'t add %r and %r' % (self, other))
def __radd__(self, other):
if isinstance(other, FmtStr):
return FmtStr(*(x for x in (other.basefmtstrs + self.basefmtstrs)))
elif isinstance(other, (bytes, unicode)):
return FmtStr(*(x for x in ([BaseFmtStr(other)] + self.basefmtstrs)))
else:
raise TypeError('Can\'t add those')
def __mul__(self, other):
if isinstance(other, int):
return sum([FmtStr(*(x for x in self.basefmtstrs)) for _ in range(other)], FmtStr())
raise TypeError('Can\'t multiply those')
#TODO ensure empty FmtStr isn't a problem
@property
def shared_atts(self):
"""Gets atts shared among all nonzero length component BaseFmtStrs"""
#TODO cache this, could get ugly for large FmtStrs
atts = {}
first = self.basefmtstrs[0]
for att in sorted(first.atts):
#TODO how to write this without the '???'?
if all(fs.atts.get(att, '???') == first.atts[att] for fs in self.basefmtstrs if len(fs) > 0):
atts[att] = first.atts[att]
return atts
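# Illustrative example of shared_atts: for
#   f = fmtstr('a', 'red', 'on_blue') + fmtstr('b', 'red')
# only the foreground attribute is common to every non-empty piece, so f.shared_atts
# would contain just the 'fg' entry (the 'on_blue' background is not shared).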
def __getattr__(self, att):
# thanks to @aerenchyma/@jczetta
def func_help(*args, **kwargs):
result = getattr(self.s, att)(*args, **kwargs)
if isinstance(result, (bytes, unicode)):
return fmtstr(result, **self.shared_atts)
elif isinstance(result, list):
return [fmtstr(x, **self.shared_atts) for x in result]
else:
return result
return func_help
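# Example of the proxying above (illustrative): fmtstr('hello', 'red').upper() runs
# str.upper on the plain text and re-applies the shared attributes, so it returns
# red("HELLO") rather than a bare str.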
@property
def s(self):
if self._s is not None:
return self._s
self._s = "".join(fs.s for fs in self.basefmtstrs)
return self._s
def __getitem__(self, index):
index = normalize_slice(len(self), index)
counter = 0
parts = []
for fs in self.basefmtstrs:
if index.start < counter + len(fs) and index.stop > counter:
start = max(0, index.start - counter)
end = index.stop - counter
if end - start == len(fs):
parts.append(fs)
else:
s_part = fs.s[max(0, index.start - counter):index.stop - counter]
parts.append(BaseFmtStr(s_part, fs.atts))
counter += len(fs)
if index.stop < counter:
break
return FmtStr(*parts)
def _getitem_normalized(self, index):
"""Builds the more compact fmtstrs by using fromstr( of the control sequences)"""
index = normalize_slice(len(self), index)
counter = 0
output = ''
for fs in self.basefmtstrs:
if index.start < counter + len(fs) and index.stop > counter:
s_part = fs.s[max(0, index.start - counter):index.stop - counter]
piece = BaseFmtStr(s_part, fs.atts).color_str
output += piece
counter += len(fs)
if index.stop < counter:
break
return fmtstr(output)
def __setitem__(self, index, value):
raise Exception("No!")
self._unicode = None
self._str = None
self._len = None
index = normalize_slice(len(self), index)
if isinstance(value, (bytes, unicode)):
value = FmtStr(BaseFmtStr(value))
elif not isinstance(value, FmtStr):
raise ValueError('Should be str or FmtStr')
counter = 0
old_basefmtstrs = self.basefmtstrs[:]
self.basefmtstrs = []
inserted = False
for fs in old_basefmtstrs:
if index.start < counter + len(fs) and index.stop > counter:
start = max(0, index.start - counter)
end = index.stop - counter
front = BaseFmtStr(fs.s[:start], fs.atts)
# stuff
new = value
back = BaseFmtStr(fs.s[end:], fs.atts)
if len(front) > 0:
self.basefmtstrs.append(front)
if len(new) > 0 and not inserted:
self.basefmtstrs.extend(new.basefmtstrs)
inserted = True
if len(back) > 0:
self.basefmtstrs.append(back)
else:
self.basefmtstrs.append(fs)
counter += len(fs)
def copy(self):
return FmtStr(*self.basefmtstrs)
def normalize_slice(length, index):
is_int = False
if isinstance(index, int):
is_int = True
index = slice(index, index+1)
if index.start is None:
index = slice(0, index.stop, index.step)
if index.stop is None:
index = slice(index.start, length, index.step)
if index.start < -1:
index = slice(length - index.start, index.stop, index.step)
if index.stop < -1:
index = slice(index.start, length - index.stop, index.step)
if index.step is not None:
raise NotImplementedError("You can't use steps with slicing yet")
if is_int:
if index.start < 0 or index.start > length:
raise IndexError("index out of bounds")
return index
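# Worked examples (illustrative):
#   normalize_slice(10, 3)               -> slice(3, 4, None)   # an int becomes a one-char slice
#   normalize_slice(10, slice(None, 4))  -> slice(0, 4, None)
#   normalize_slice(10, slice(2, None))  -> slice(2, 10, None)
# Slices with a step raise NotImplementedError; an out-of-range int raises IndexError.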
def parse_args(args, kwargs):
"""Returns a kwargs dictionary by turning args into kwargs"""
if 'style' in kwargs:
args += (kwargs['style'],)
del kwargs['style']
for arg in args:
if not isinstance(arg, (bytes, unicode)):
raise ValueError("args must be strings:" + repr(args))
if arg.lower() in FG_COLORS:
if 'fg' in kwargs: raise ValueError("fg specified twice")
kwargs['fg'] = FG_COLORS[arg]
elif arg.lower().startswith('on_') and arg[3:].lower() in BG_COLORS:
if 'bg' in kwargs: raise ValueError("bg specified twice")
kwargs['bg'] = BG_COLORS[arg[3:]]
elif arg.lower() in STYLES:
kwargs[arg] = True
else:
raise ValueError("couldn't process arg: "+repr(arg))
for k in kwargs:
if k not in ['fg', 'bg'] + list(STYLES.keys()):
raise ValueError("Can't apply that transformation")
if 'fg' in kwargs:
if kwargs['fg'] in FG_COLORS:
kwargs['fg'] = FG_COLORS[kwargs['fg']]
if kwargs['fg'] not in list(FG_COLORS.values()):
raise ValueError("Bad fg value: %s", kwargs['fg'])
if 'bg' in kwargs:
if kwargs['bg'] in BG_COLORS:
kwargs['bg'] = BG_COLORS[kwargs['bg']]
if kwargs['bg'] not in list(BG_COLORS.values()):
raise ValueError("Bad bg value: %s", kwargs['bg'])
return kwargs
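# Illustrative call, assuming the usual colour tables (FG_COLORS['blue'] == 34 and
# BG_COLORS['red'] == 41 are assumptions here):
#   parse_args(('blue', 'on_red'), {'bold': True})  ->  {'fg': 34, 'bg': 41, 'bold': True}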
def fmtstr(string, *args, **kwargs):
"""
Convenience function for creating a FmtStr
>>> fmtstr('asdf', 'blue', 'on_red')
on_red(blue("asdf"))
"""
atts = parse_args(args, kwargs)
if isinstance(string, FmtStr):
new_str = string.copy_with_new_atts(**atts)
return new_str
elif isinstance(string, (bytes, unicode)):
string = FmtStr.from_str(string)
new_str = string.copy_with_new_atts(**atts)
return new_str
else:
raise ValueError("Bad Args: %r (of type %s), %r, %r" % (string, type(string), args, kwargs))
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
#f = FmtStr.from_str(str(fmtstr('tom', 'blue')))
#print((repr(f)))
#f = fmtstr('stuff', fg='blue', bold=True)
#print((repr(f)))
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
remove_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication: we treat anything under 1 MB as incomplete.
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
''' Notify the user that we are in a transient state and the application
is still converging. We may be waiting on a remote unit, or sitting in a
detached loop-wait state. '''
# During deployment the worker has to start kubelet without cluster dns
# configured, since this may be the first unit online in a service pool that
# is waiting to self-host the dns pod; it will later configure itself to query
# the dns service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
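# Sketch of the two paths above: on a Juju with network spaces, network_get() is expected
# to return a dict such as {'ingress-addresses': ['10.1.2.3'], ...} (shape assumed here)
# and the first address wins; older Jujus fall through to the unit's private-address.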
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
ingress_ip = get_ingress_address(kube_control)
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
ingress_ip = get_ingress_address(kube_control)
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_kubelet(dns, ingress_ip)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
apply_node_labels()
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node.
'''
# scrub and try to format an array from the configuration option
config = hookenv.config()
user_labels = _parse_labels(config.get('labels'))
# For diffing sake, iterate the previous label set
if config.previous('labels'):
previous_labels = _parse_labels(config.previous('labels'))
hookenv.log('previous labels: {}'.format(previous_labels))
else:
# this handles first time run if there is no previous labels config
previous_labels = _parse_labels("")
# Calculate label removal
for label in previous_labels:
if label not in user_labels:
hookenv.log('Deleting node label {}'.format(label))
_apply_node_label(label, delete=True)
# if the label is in user labels we do nothing here, it will get set
# during the atomic update below.
# Atomically set a label
for label in user_labels:
_apply_node_label(label, overwrite=True)
# Set label for application name
_apply_node_label('juju-application={}'.format(hookenv.service_name()),
overwrite=True)
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args')
def extra_args_changed():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
"""Set a flag to handle new docker login options.
If docker daemon options have also changed, set a flag to ensure the
daemon is restarted prior to running docker login.
"""
config = hookenv.config()
if data_changed('docker-opts', config['docker-opts']):
hookenv.log('Found new docker daemon options. Requesting a restart.')
# State will be removed by layer-docker after restart
set_state('docker.restart')
set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
"""Login to a docker registry with configured credentials."""
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
remove_state('kubernetes-worker.docker-login')
set_state('kubernetes-worker.restart-needed')
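# Behaviour sketch with made-up values: if docker-logins changes from
#   '[{"server": "registry.a", "username": "u", "password": "p"}]'
# to
#   '[{"server": "registry.b", "username": "u", "password": "p"}]'
# the handler runs `docker logout registry.a`, then `docker login registry.b -u u -p p`,
# and finally requests a worker service restart.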
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
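# Illustrative behaviour (config value is made up): with kubelet-extra-args set to
# "v=3 fail-swap-on=false profiling", parse_extra_args('kubelet-extra-args') returns
# {'v': '3', 'fail-swap-on': 'false', 'profiling': 'true'}.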
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
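# Example of the resulting command (values illustrative): if the previous args were
# {'v': '4', 'profiling': 'true'} and the new args only contain v=0, the call is
# roughly `snap set kubelet profiling=null v=0` -- args that disappeared are explicitly
# nulled so stale snap settings do not linger between reconfigurations.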
def configure_kubelet(dns, ingress_ip):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['port'] = '10250'
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['node-ip'] = ingress_ip
if (dns['enable-kube-dns']):
kubelet_opts['cluster-dns'] = dns['sdn-ip']
privileged = is_state('kubernetes-worker.privileged')
kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
if is_state('kubernetes-worker.gpu.enabled'):
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts['experimental-nvidia-gpus'] = '1'
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts['feature-gates'] = 'Accelerators=true'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
kube_proxy_opts['hostname-override'] = get_node_name()
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
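# Illustrative invocation (paths and token are made-up values):
#   create_kubeconfig('/root/cdk/kubeconfig', 'https://10.0.0.1:6443', '/root/cdk/ca.crt',
#                     token='abc123', user='kubelet')
# runs, in order: `kubectl config set-cluster`, `unset users`, `set-credentials --token=abc123`,
# `set-context` and `use-context`, all against the given kubeconfig file.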
@when_any('config.changed.default-backend-image',
'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
config = hookenv.config()
# need to test this in case we get in
# here from a config change to the image
if not config.get('ingress'):
return
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = config.get('default-backend-image')
if (context['defaultbackend_image'] == "" or
context['defaultbackend_image'] == "auto"):
if context['arch'] == 's390x':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-s390x:1.4"
else:
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend:1.4"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ingress_image'] = config.get('nginx-image')
if context['ingress_image'] == "" or context['ingress_image'] == "auto":
if context['arch'] == 's390x':
context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
else:
context['ingress_image'] = \
"k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
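# e.g. a relation exposing one unit with hostname 10.0.0.5 and port 6443 yields
# ['https://10.0.0.5:6443'] (values illustrative).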
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
# If we already have the definition, it's probably safe to assume
# creation was true.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
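# Behaviour sketch:
#   kubectl_manifest('delete', m) -> kubectl delete -f <m> --now (immediate removal)
#   kubectl_manifest('create', m) first tries `kubectl get -f <m>` and, if the resource
#   already exists, skips the create and reports success.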
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged').lower()
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
# `kubectl get nodes -o yaml`)
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
# Apply node labels
_apply_node_label('gpu=true', overwrite=True)
_apply_node_label('cuda=true', overwrite=True)
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
_apply_node_label('gpu', delete=True)
_apply_node_label('cuda', delete=True)
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
# The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(gethostname().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(gethostname().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds \
and data_changed('kube-control.creds', creds) \
and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
# master changes (master leader dies and replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
If deploying via bundle this won't happen, but if the operator is upgrading
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
kubelet_extra_args = parse_extra_args('kubelet-extra-args')
cloud_provider = kubelet_extra_args.get('cloud-provider', '')
if cloud_provider == 'aws':
return getfqdn()
else:
return gethostname()
class ApplyNodeLabelFailed(Exception):
pass
def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes '''
nodename = get_node_name()
# TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True:
label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, nodename, label_key)
cmd = cmd + '-'
else:
cmd = cmd_base.format(kubeconfig_path, nodename, label)
if overwrite:
cmd = '{} --overwrite'.format(cmd)
cmd = cmd.split()
deadline = time.time() + 180
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
break
hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
label, code))
time.sleep(1)
else:
msg = 'Failed to apply label %s' % label
raise ApplyNodeLabelFailed(msg)
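# Resulting commands (node name comes from get_node_name(); values illustrative):
#   _apply_node_label('gpu=true', overwrite=True) -> kubectl ... label node <node> gpu=true --overwrite
#   _apply_node_label('gpu', delete=True)         -> kubectl ... label node <node> gpu-
# Each command is retried for up to 180 seconds before ApplyNodeLabelFailed is raised.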
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
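# e.g. _parse_labels("env=prod rack") keeps ['env=prod'] and logs 'rack' as malformed.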
Avoid hook errors when effecting label changes.
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
remove_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication: we treat anything under 1 MB as incomplete.
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
''' Notify the user that we are in a transient state and the application
is still converging. We may be waiting on a remote unit, or sitting in a
detached loop-wait state. '''
# During deployment the worker has to start kubelet without cluster dns
# configured, since this may be the first unit online in a service pool that
# is waiting to self-host the dns pod; it will later configure itself to query
# the dns service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
ingress_ip = get_ingress_address(kube_control)
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
ingress_ip = get_ingress_address(kube_control)
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_kubelet(dns, ingress_ip)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
set_state('kubernetes-worker.label-config-required')
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the
node. '''
# Get the user's configured labels.
config = hookenv.config()
user_labels = {}
for item in config.get('labels').split(' '):
if '=' in item:
key, val = item.split('=')
user_labels[key] = val
else:
hookenv.log('Skipping malformed option: {}.'.format(item))
# Collect the current label state.
current_labels = db.get('current_labels') or {}
# Remove any labels that the user has removed from the config.
for key in list(current_labels.keys()):
if key not in user_labels:
try:
remove_label(key)
del current_labels[key]
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Add any new labels.
for key, val in user_labels.items():
try:
set_label(key, val)
current_labels[key] = val
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Set the juju-application label.
try:
set_label('juju-application', hookenv.service_name())
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Label configuration complete.
remove_state('kubernetes-worker.label-config-required')
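# Reconciliation sketch (values illustrative): if a previous run stored
# current_labels = {'env': 'prod', 'rack': 'a1'} in unitdata and the labels config is now
# "env=prod gpu=true", this handler removes 'rack' from the node, adds 'gpu=true', and
# re-applies 'juju-application=<app>'. Any ApplyNodeLabelFailed logs and returns early,
# leaving kubernetes-worker.label-config-required set so the work is retried on a later hook.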
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args')
def extra_args_changed():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
"""Set a flag to handle new docker login options.
If docker daemon options have also changed, set a flag to ensure the
daemon is restarted prior to running docker login.
"""
config = hookenv.config()
if data_changed('docker-opts', config['docker-opts']):
hookenv.log('Found new docker daemon options. Requesting a restart.')
# State will be removed by layer-docker after restart
set_state('docker.restart')
set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
"""Login to a docker registry with configured credentials."""
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
remove_state('kubernetes-worker.docker-login')
set_state('kubernetes-worker.restart-needed')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
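# Editor's illustrative sketch (not part of the charm): the splitting rule
# parse_extra_args() applies, shown on a literal string. '=' pairs become
# key/value and bare flags become 'true'. The option names below are made up.
def _example_extra_args_split(raw='cloud-provider=aws fail-swap-on'):
    args = {}
    for element in raw.split():
        if '=' in element:
            key, _, value = element.partition('=')
            args[key] = value
        else:
            args[element] = 'true'
    return args  # {'cloud-provider': 'aws', 'fail-swap-on': 'true'}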
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
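# Editor's illustrative sketch (not part of the charm): how the
# previous/base/extra merge in configure_kubernetes_service() collapses into a
# single `snap set` invocation. Keys seen only in the previous run are set to
# 'null' so the snap forgets them. The argument names below are made up.
def _example_snap_set_merge():
    prev_args = {'v': '0', 'stale-flag': 'x'}
    base_args = {'v': '4', 'kubeconfig': '/root/cdk/kubeconfig'}
    extra_args = {'feature-gates': 'Accelerators=true'}
    merged = {key: 'null' for key in prev_args}
    merged.update(base_args)
    merged.update(extra_args)
    # e.g. ['snap', 'set', 'kubelet', 'v=4', 'stale-flag=null', ...]
    return ['snap', 'set', 'kubelet'] + ['%s=%s' % item for item in merged.items()]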
def configure_kubelet(dns, ingress_ip):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['port'] = '10250'
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['node-ip'] = ingress_ip
if (dns['enable-kube-dns']):
kubelet_opts['cluster-dns'] = dns['sdn-ip']
privileged = is_state('kubernetes-worker.privileged')
kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
if is_state('kubernetes-worker.gpu.enabled'):
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts['experimental-nvidia-gpus'] = '1'
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts['feature-gates'] = 'Accelerators=true'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
kube_proxy_opts['hostname-override'] = get_node_name()
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
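# Editor's usage sketch (not part of the charm): how the worker might call
# create_kubeconfig() for the kubelet. The server address, CA path and token
# below are placeholders, not values taken from the charm.
def _example_kubelet_kubeconfig():
    create_kubeconfig('/root/cdk/kubeconfig',
                      'https://192.168.0.10:6443',
                      '/root/cdk/ca.crt',
                      token='example-token',
                      user='kubelet')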
@when_any('config.changed.default-backend-image',
'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
config = hookenv.config()
# Check the 'ingress' option here because this handler can also fire from a
# config change to the image.
if not config.get('ingress'):
return
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = config.get('default-backend-image')
if (context['defaultbackend_image'] == "" or
context['defaultbackend_image'] == "auto"):
if context['arch'] == 's390x':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-s390x:1.4"
else:
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend:1.4"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ingress_image'] = config.get('nginx-image')
if context['ingress_image'] == "" or context['ingress_image'] == "auto":
if context['arch'] == 's390x':
context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
else:
context['ingress_image'] = \
"k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
# If we already have the definition, it's probably safe to assume
# creation succeeded.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
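# Editor's usage sketch (hypothetical manifest path): kubectl_manifest('create', ...)
# is idempotent thanks to the 'get' guard above, and 'delete' passes --now for
# immediate removal.
def _example_manifest_roundtrip(path='/root/cdk/addons/example.yaml'):
    created = kubectl_manifest('create', path)
    deleted = kubectl_manifest('delete', path)
    return created, deleted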
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic, so
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged').lower()
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
# `kubectl get nodes -o yaml`).
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
# Apply node labels
set_label('gpu', 'true')
set_label('cuda', 'true')
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
remove_label('gpu')
remove_label('cuda')
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
# The kube-cotrol interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(gethostname().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(gethostname().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds \
and data_changed('kube-control.creds', creds) \
and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
# master changes (master leader dies and replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
If deploying via bundle this won't happen, but if the operator is upgrading
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
kubelet_extra_args = parse_extra_args('kubelet-extra-args')
cloud_provider = kubelet_extra_args.get('cloud-provider', '')
if cloud_provider == 'aws':
return getfqdn()
else:
return gethostname()
class ApplyNodeLabelFailed(Exception):
pass
def persistent_call(cmd, retry_message):
deadline = time.time() + 180
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
return True
hookenv.log(retry_message)
time.sleep(1)
else:
return False
def set_label(label, value):
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
cmd = cmd.format(kubeconfig_path, nodename, label, value)
cmd = cmd.split()
retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
def remove_label(label):
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'
cmd = cmd.format(kubeconfig_path, nodename, label)
cmd = cmd.split()
retry = 'Failed to remove label {0}. Will retry.'.format(label)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
import apikeys
import psycopg2
import utils
import logging
from mutagen.mp3 import MP3
_conn = psycopg2.connect(apikeys.db_connect_string)
log = logging.getLogger(__name__)
class Track(object):
def __init__(self, id, filename, artist, title, length):
log.info("Rendering Track(%r, %r, %r, %r, %r)", id, filename, artist, title, length)
self.id = id
self.filename = filename
# Add some stubby metadata (in an attribute that desperately
# wants to be renamed to something mildly useful)
self.track_details = {
'id': id,
'artist': artist,
'title': title,
'length': length,
}
def get_mp3(some_specifier):
with _conn, _conn.cursor():
# TODO: Fetch an MP3 and return its raw data
pass
def get_many_mp3():
"""Get a list of many (possibly all) the tracks in the database.
Returns a list, guaranteed to be fully realized prior to finishing
with the database cursor, for safety.
"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id,filename,artist,title,length FROM tracks WHERE status = 1 ORDER BY length")
return [Track(*row) for row in cur.fetchall()]
def enqueue_tracks(queue):
"""Repeatedly enumerate tracks and enqueue them.
Designed to be daemonized.
"""
while True:
for track in get_many_mp3():
queue.put(track)
def get_complete_length():
"""Get the sum of length of all active tracks."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT sum(length) FROM tracks WHERE status = 1")
return cur.fetchone()
def create_track(mp3data, filename, info):
"""Save a blob of MP3 data to the specified file and registers it in the database.
Note that this function breaks encapsulation horribly. The third argument is
assumed to be a request object dictionary, with all its quirks. The file is saved
to disk as well as being registered with the database. TODO: Clean me up."""
with _conn, _conn.cursor() as cur:
# We have a chicken-and-egg problem here. We can't (AFAIK) get the ID3 data
# until we have a file, and we want to name the file based on the track ID.
# Resolution: Either save the file to a temporary name and then rename it,
# or insert a dummy row and then update it. Using the latter approach.
cur.execute("""INSERT INTO tracks (artist, title, filename, artwork, length, submitter, submitteremail, lyrics, story)
VALUES ('', '', '', '', 0, %s, %s, %s, %s) RETURNING id""",
(info.get("SubmitterName",[""])[0], info.get("Email",[""])[0], info.get("Lyrics",[""])[0], info.get("Story",[""])[0]))
id = cur.fetchone()[0]
filename = "audio/%d %s"%(id, filename)
with open(filename, "wb") as f: f.write(mp3data)
track = MP3(filename)
pic=next((k for k in track if k.startswith("APIC:")), None)
pic = pic and track[pic].data
if pic: print("length of pic: {}".format(len(pic)))
try: artist = u', '.join(track['TPE1'].text)
except KeyError: artist = u'(unknown artist)'
try: title = u', '.join(track['TIT2'].text)
except KeyError: title = u'(unknown title)'
cur.execute("UPDATE tracks SET artist=%s, title=%s, filename=%s, artwork=%s, length=%s WHERE id=%s",
(artist,
title,
track.filename[6:],
pic and memoryview(pic),
track.info.length,
id)
)
return id
def show_all_mp3():
"""Get a list of all the tracks in the database.
Returns a list, guaranteed to be fully realized prior to finishing
with the database cursor, for safety.
"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id,filename,artist,title,length FROM tracks ORDER BY id")
return [Track(*row) for row in cur.fetchall()]
abstract get_many_mp3 slightly
import apikeys
import psycopg2
import utils
import logging
from mutagen.mp3 import MP3
_conn = psycopg2.connect(apikeys.db_connect_string)
log = logging.getLogger(__name__)
class Track(object):
def __init__(self, id, filename, artist, title, length):
log.info("Rendering Track(%r, %r, %r, %r, %r)", id, filename, artist, title, length)
self.id = id
self.filename = filename
# Add some stubby metadata (in an attribute that desperately
# wants to be renamed to something mildly useful)
self.track_details = {
'id': id,
'artist': artist,
'title': title,
'length': length,
}
def get_mp3(some_specifier):
with _conn, _conn.cursor():
# TODO: Fetch an MP3 and return its raw data
pass
def get_many_mp3(status=1, order_by='length'):
"""Get a list of many (possibly all) the tracks in the database.
Returns a list, guaranteed to be fully realized prior to finishing
with the database cursor, for safety.
"""
query_clause = {'columns': 'id,filename,artist,title,length',
'where_clause': 'status = ' + str(status),
'order_choice': order_by
}
query = """SELECT {columns}
FROM tracks
WHERE {where_clause} ORDER BY {order_choice}""".format(**query_clause)
with _conn, _conn.cursor() as cur:
cur.execute(query)
return [Track(*row) for row in cur.fetchall()]
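# Editor's sketch (assumption: status or order_by could one day carry user
# input). A whitelisted variant of the query builder above keeps the
# interpolated ORDER BY safe while status stays a bound parameter; this is an
# alternative sketch, not the project's actual code.
_ALLOWED_ORDER = ('length', 'id', 'artist', 'title')
def _get_many_mp3_whitelisted(status=1, order_by='length'):
    if order_by not in _ALLOWED_ORDER:
        raise ValueError('unsupported order_by: %r' % (order_by,))
    query = ('SELECT id,filename,artist,title,length FROM tracks '
             'WHERE status = %s ORDER BY ' + order_by)
    with _conn, _conn.cursor() as cur:
        cur.execute(query, (status,))
        return [Track(*row) for row in cur.fetchall()]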
def enqueue_tracks(queue):
"""Repeatedly enumerate tracks and enqueue them.
Designed to be daemonized.
"""
while True:
for track in get_many_mp3():
queue.put(track)
def get_complete_length():
"""Get the sum of length of all active tracks."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT sum(length) FROM tracks WHERE status = 1")
return cur.fetchone()
def create_track(mp3data, filename, info):
"""Save a blob of MP3 data to the specified file and registers it in the database.
Note that this function breaks encapsulation horribly. The third argument is
assumed to be a request object dictionary, with all its quirks. The file is saved
to disk as well as being registered with the database. TODO: Clean me up."""
with _conn, _conn.cursor() as cur:
# We have a chicken-and-egg problem here. We can't (AFAIK) get the ID3 data
# until we have a file, and we want to name the file based on the track ID.
# Resolution: Either save the file to a temporary name and then rename it,
# or insert a dummy row and then update it. Using the latter approach.
cur.execute("""INSERT INTO tracks (artist, title, filename, artwork, length, submitter, submitteremail, lyrics, story)
VALUES ('', '', '', '', 0, %s, %s, %s, %s) RETURNING id""",
(info.get("SubmitterName",[""])[0], info.get("Email",[""])[0], info.get("Lyrics",[""])[0], info.get("Story",[""])[0]))
id = cur.fetchone()[0]
filename = "audio/%d %s"%(id, filename)
with open(filename, "wb") as f: f.write(mp3data)
track = MP3(filename)
pic=next((k for k in track if k.startswith("APIC:")), None)
pic = pic and track[pic].data
if pic: print("length of pic: {}".format(len(pic)))
try: artist = u', '.join(track['TPE1'].text)
except KeyError: artist = u'(unknown artist)'
try: title = u', '.join(track['TIT2'].text)
except KeyError: title = u'(unknown title)'
cur.execute("UPDATE tracks SET artist=%s, title=%s, filename=%s, artwork=%s, length=%s WHERE id=%s",
(artist,
title,
track.filename[6:],
pic and memoryview(pic),
track.info.length,
id)
)
return id
def show_all_mp3():
"""Get a list of all the tracks in the database.
Returns a list, guaranteed to be fully realized prior to finishing
with the database cursor, for safety.
"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id,filename,artist,title,length FROM tracks ORDER BY id")
return [Track(*row) for row in cur.fetchall()]
"""Database operations for Appension
Executable using 'python -m fore.database' - use --help for usage.
"""
import apikeys
import psycopg2
import utils
import logging
import Queue
import multiprocessing
import os
import re
import hashlib
from mutagen.mp3 import MP3
from time import sleep
from docstringargs import cmdline
_conn = psycopg2.connect(apikeys.db_connect_string)
log = logging.getLogger(__name__)
# Enable Unicode return values for all database queries
# This would be the default in Python 3, but in Python 2, we
# need to enable these two extensions.
# http://initd.org/psycopg/docs/usage.html#unicode-handling
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
class Track(object):
# Select these from the tracks table to construct a track object.
columns = "id,filename,artist,title,length,status,submitter,submitteremail,submitted,lyrics,story,comments,xfade,itrim,otrim,sequence,keywords,url"
def __init__(self, id, filename, artist, title, length, status,
submitter, submitteremail, submitted, lyrics, story, comments, xfade, itrim, otrim, sequence, keywords, url):
log.info("Rendering Track(%r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r)", id, filename, artist, title, \
length, status, story, comments, xfade, itrim, otrim)
if len(artist.split(',')) > 1:
the_artist = artist.split(',')
artist_exact = artist
artist = ' '.join([the_artist[1], the_artist[0]])
else: artist_exact = artist
self.id = id
self.filename = filename
# Add some stubby metadata (in an attribute that desperately
# wants to be renamed to something mildly useful)
self.track_details = {
'id': id,
'artist': artist,
'artist_exact': artist_exact,
'title': title,
'length': length,
'status': status,
'story': story,
'lyrics': lyrics,
'xfade': xfade,
'itrim': itrim,
'otrim': otrim,
'comments': comments,
'sequence': sequence,
'url': url,
}
self.full_track_details = {
'status': status,
'submitted': submitted,
'submitter': submitter,
'submitteremail': submitteremail,
'lyrics': lyrics,
'story': story,
'comments': comments,
'keywords': keywords,
}
class Submitter(object):
def __init__(self,username,email,userid,artist,track_id,filename,lyrics,story ):
self.userid = userid
self.username = username
self.email = email
self.submitted = {
'artist': artist,
'track_id': track_id,
'filename': filename,
'lyrics': lyrics,
'story': story
}
class Artist(object):
def __init__(self, artist_from_db):
if len(artist_from_db.split(',')) > 1:
name_list = artist_from_db.split(',')
display_name = ' '.join([name_list[1], name_list[0]])
else:
display_name = artist_from_db
name_list = ['', artist_from_db]
self.name = {
'display_name': display_name,
'name_list': name_list
}
class Lyric(object):
# Select these from the tracks table to construct a track object.
columns = "id,artist,lyrics"
def __init__(self, id, artist, lyrics):
couplets = [block for block in re.split(r'(?:\r\n){2,}', lyrics) if block.count('\r\n') == 1]
couplet_count = len(couplets)
lyrics = self.get_couplets(lyrics)
an_artist = Artist(artist)
self.track_lyrics = {
'id': id,
'artist': an_artist,
'lyrics': lyrics,
# TODO: ignore lyrics that exceed sets of two (but allow for 1/2 couplets)
'couplet_count': couplet_count,
'couplets': couplets
}
def get_couplets(self, lyrics):
return lyrics.splitlines(True)
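# Editor's illustrative sketch (sample lyric is made up): the couplet filter in
# Lyric.__init__ keeps only blank-line-separated blocks that contain exactly one
# internal '\r\n', i.e. two-line blocks.
def _example_couplet_filter():
    sample = 'line one\r\nline two\r\n\r\nverse a\r\nverse b\r\nverse c'
    blocks = re.split(r'(?:\r\n){2,}', sample)
    return [block for block in blocks if block.count('\r\n') == 1]
    # -> ['line one\r\nline two']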
def get_mp3(some_specifier):
with _conn, _conn.cursor():
# TODO: Fetch an MP3 and return its raw data
pass
def get_many_mp3(status=1, order_by='length'):
"""Get a list of many (possibly all) the tracks in the database.
Returns a list, guaranteed to be fully realized prior to finishing
with the database cursor, for safety.
"""
query = "SELECT {cols} FROM tracks WHERE {col}=%s ORDER BY {ord}""".format(cols=Track.columns, col=("'all'" if status=='all' else 'status'), ord=order_by)
with _conn, _conn.cursor() as cur:
cur.execute(query, (status,))
return [Track(*row) for row in cur.fetchall()]
_track_queue = multiprocessing.Queue()
def get_track_to_play():
"""Get a track from the database with presumption that it will be played.
If something has been enqueued with enqueue_track(), that will be the one
returned; otherwise, one is picked by magic.
"""
with _conn, _conn.cursor() as cur:
try:
track=_track_queue.get(False)
log.info("Using enqueued track %s.", track.id)
except Queue.Empty:
cur.execute("SELECT "+Track.columns+" FROM tracks WHERE status=1 ORDER BY played,random()")
row=cur.fetchone()
if not row: raise ValueError("Database is empty, cannot enqueue track")
track=Track(*row)
log.info("Automatically picking track %s.", track.id)
# Record that a track has been played.
# Currently simply increments the counter; may later keep track of how long since played, etc.
cur.execute("UPDATE tracks SET played=played+1 WHERE id=%s", (track.id,))
return track
def enqueue_track(id):
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE tracks SET enqueued=enqueued+1 WHERE ID=%s RETURNING "+Track.columns, (id,))
# Assumes the ID is actually valid (will raise TypeError if not)
_track_queue.put(Track(*cur.fetchone()))
def get_single_track(track_id):
"""Get details for a single track by its ID"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT "+Track.columns+" FROM tracks WHERE id=%s", (track_id,))
return Track(*cur.fetchone())
def get_complete_length():
"""Get the sum of length of all active tracks."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT coalesce(sum(length),0) FROM tracks WHERE status = 1")
return cur.fetchone()[0]
def get_all_lyrics():
"""Get the lyrics from all active tracks.."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, artist, lyrics FROM tracks WHERE status = 1 AND lyrics != ''")
return [Lyric(*row) for row in cur.fetchall()]
def match_lyrics(word):
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, artist, lyrics FROM tracks WHERE lyrics ILIKE %s", ('%'+word+'%',))
return [Lyric(*row) for row in cur.fetchall()]
def match_keywords(word):
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, artist, lyrics FROM tracks WHERE keywords ILIKE %s", ('%'+word+'%',))
return [Lyric(*row) for row in cur.fetchall()]
def random_lyrics():
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, artist, lyrics FROM tracks WHERE lyrics != '' ORDER BY random() limit 1")
return [Lyric(*row) for row in cur.fetchall()]
def get_track_artwork(id):
"""Get the artwork for one track, or None if no track, or '' if no artwork."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT artwork FROM tracks WHERE id=%s", (id,))
row = cur.fetchone()
return row and row[0]
def create_track(mp3data, filename, imagefile, info, user_name):
"""Save a blob of MP3 data to the specified file and registers it in the database.
Note that this function breaks encapsulation horribly. The third argument is
assumed to be a request object dictionary, with all its quirks. The file is saved
to disk as well as being registered with the database. TODO: Clean me up."""
log.info(info)
with _conn, _conn.cursor() as cur:
# We have a chicken-and-egg problem here. We can't (AFAIK) get the ID3 data
# until we have a file, and we want to name the file based on the track ID.
# Resolution: Either save the file to a temporary name and then rename it,
# or insert a dummy row and then update it. Using the latter approach.
cur.execute("""INSERT INTO tracks (userid, lyrics, story, comments, url)
VALUES ((
select id from users where username = %s
), %s, %s, %s, %s) RETURNING id""",
(user_name, info.get("lyrics",[""])[0], info.get("story",[""])[0], info.get("comments",[""])[0],
info.get("url",[""])[0]))
id = cur.fetchone()[0]
filename = "audio/%d %s"%(id, filename)
with open(filename, "wb") as f: f.write(mp3data)
track = MP3(filename)
if imagefile:
pic = imagefile
else:
pic=next((k for k in track if k.startswith("APIC:")), None)
pic = pic and track[pic].data
# Note: These need to fold absent and blank both to the given string.
try: artist = u', '.join(track['TPE1'].text)
except KeyError: artist = info.get("artist",[""])[0] or u'(unknown artist)'
try: title = u', '.join(track['TIT2'].text)
except KeyError: title = info.get("track_title",[""])[0] or u'(unknown title)'
cur.execute("UPDATE tracks SET artist=%s, title=%s, filename=%s, artwork=%s, length=%s WHERE id=%s",
(artist,
title,
track.filename[6:],
memoryview(pic) if pic else "",
track.info.length,
id)
)
return id
def delete_track(input):
"""Delete the given track ID - no confirmation"""
with _conn, _conn.cursor() as cur:
cur.execute("""DELETE FROM tracks WHERE id = %s""", (input,))
def reset_played():
"""Reset played for all tracks to 0"""
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE tracks SET played = 0")
def update_track(id, info, artwork):
"""Update the given track ID based on the info mapping.
This breaks encapsulation just as create_track() does."""
print('****************')
log.info(info)
with _conn, _conn.cursor() as cur:
# Enumerate all updateable fields. If they're not provided, they won't be updated;
# any other fields will be ignored. This is basically set intersection on a dict.
fields = ("artist", "status", "lyrics", "story", "xfade", "otrim", "itrim", "keywords", "artwork", "url")
param = {k:info[k][0] for k in fields if k in info}
if not artwork == None:
param['artwork'] = memoryview(artwork)
else:
del param['artwork']
cur.execute("UPDATE tracks SET "+",".join(x+"=%("+x+")s" for x in param)+" WHERE id="+str(id),param)
def sequence_tracks(sequence_object):
for id, sequence in sequence_object.iteritems():
seq = sequence_object.get(id,'')[0]
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE tracks SET sequence = "+str(seq)+", played = 0 WHERE id="+str(id))
def get_track_submitter_info():
with _conn, _conn.cursor() as cur:
query = '''SELECT a.username, a.email, a.id as userid, b.artist, b.id as track_id, b.filename,
CASE WHEN b.lyrics !='' THEN 1
ELSE 0
END as lyrics,
CASE WHEN b.story !='' THEN 1
ELSE 0
END as story
FROM tracks b
join users a
on a.id=b.userid GROUP by a.username, a.email, a.id, b.artist, b.id'''
cur.execute(query)
return [Submitter(*row) for row in cur.fetchall()]
def update_track_submitter_info(submitter_object):
'''We may not need track id, but it may prove useful at some point.'''
for track_grouping in zip(submitter_object['track_id'],submitter_object['user_id'],submitter_object['username'],submitter_object['email']):
userid = track_grouping[1]
name = track_grouping[2]
email = track_grouping[3]
track_id = track_grouping[0]
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE users SET username = '"+str(name)+"', email = '"+str(email)+"' WHERE id = "+str(userid))
def add_dummy_users():
start_default_email_number = 0
with _conn, _conn.cursor() as cur:
cur.execute("SELECT artist FROM tracks WHERE userid = 0 GROUP BY artist;")
artists = cur.fetchall()
for artist in artists:
start_default_email_number += 1
start_default_email = str(start_default_email_number) + '@infiniteglitch.net'
cur.execute("INSERT INTO users (username, email) VALUES (%s, %s) RETURNING id", (artist[0], start_default_email))
userid = cur.fetchone()
print(artist[0], userid)
cur.execute("UPDATE tracks SET userid = %s WHERE artist LIKE %s", (userid, artist[0]))
def create_outreach_message(message):
with _conn, _conn.cursor() as cur:
cur.execute("INSERT INTO outreach (message) VALUES (%s) RETURNING id, message", \
(message,))
return [row for row in cur.fetchone()]
def update_outreach_message(message, id=1):
if retrieve_outreach_message()[0] == '':
return create_outreach_message(message)
query = "UPDATE outreach SET message = (message) WHERE id = 1 RETURNING id, message"
data = (message,)
with _conn, _conn.cursor() as cur:
cur.execute(query, data)
return [row for row in cur.fetchone()]
def retrieve_outreach_message():
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, message FROM outreach ORDER BY id LIMIT 1")
try:
return [row for row in cur.fetchone()]
except TypeError:
return ['', '']
def get_subsequent_track(track_id):
"""Return Track Object for next track in sequence."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT sequence FROM tracks WHERE id = "+str(track_id))
sequence = cur.fetchone()[0]
query = "SELECT {cols} FROM tracks WHERE sequence > {seq} ORDER BY sequence limit 1".format(cols=Track.columns, seq=str(sequence))
cur.execute(query)
try:
return Track(*cur.fetchone())
except TypeError:
query = "SELECT {cols} FROM tracks WHERE sequence >= {seq} ORDER BY sequence limit 1".format(cols=Track.columns, seq=str(sequence))
cur.execute(query)
return Track(*cur.fetchone())
def get_track_filename(track_id):
"""Return filename for a specific track, or None"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT filename FROM tracks WHERE id = %s", (track_id,))
for row in cur: return row[0]
def browse_tracks(letter):
"""Return artist, id for tracks, where artist name starts with letter in expression or higher, limit 20."""
query = "SELECT DISTINCT artist FROM tracks WHERE status = 1 AND (case when artist ilike 'The %' then substr(upper(artist), 5, 100) else upper(artist) end) >= '{letter}' ORDER BY artist LIMIT 20".format(cols=Track.columns, letter=letter)
with _conn, _conn.cursor() as cur:
cur.execute(query)
return [row for row in cur.fetchall()]
def get_recent_tracks(number):
"""Retrieve [number] number of most recently activated tracks"""
query = "SELECT DISTINCT artist, submitted FROM tracks WHERE status = 1 ORDER BY submitted DESC LIMIT {number}".format(cols=Track.columns, number=number)
with _conn, _conn.cursor() as cur:
cur.execute(query)
return [row for row in cur.fetchall()]
def tracks_by(artist):
"""Return artist, id for tracks, where artist name starts with letter in expression"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT {cols} FROM tracks WHERE status = 1 AND trim(artist) = '{artist}' ORDER BY title LIMIT 20".format(cols=Track.columns, artist=artist))
return [Track(*row) for row in cur.fetchall()]
@cmdline
def create_user(username, email, password):
"""Create a new user, return the newly-created ID
username: Name for the new user
email: Email address (must be unique)
password: Clear-text password
"""
username = username.lower(); email = email.lower();
if not isinstance(password, bytes): password=password.encode("utf-8")
print password
print email
hex_key = utils.random_hex()
with _conn, _conn.cursor() as cur:
salt = os.urandom(16)
hash = hashlib.sha256(salt+password).hexdigest()
pwd = salt.encode("hex")+"-"+hash
try:
cur.execute("INSERT INTO users (username, email, password, hex_key) VALUES (%s, %s, %s, %s) RETURNING id, hex_key", \
(username, email, pwd, hex_key))
return cur.fetchone()
except psycopg2.IntegrityError as e:
return "That didn't work too well because: <br/>%s<br/> Maybe you already have an account or \
someone else is using the name you requested."%e
@cmdline
def confirm_user(id, hex_key):
"""Attempt to confirm a user's email address
id: Numeric user ID (not user name or email)
hex_key: Matching key to the one stored, else the confirmation fails
"""
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE users SET status = 1, hex_key = '' WHERE id = %s AND hex_key = %s RETURNING username, email", (id, hex_key))
try:
return cur.fetchone()[0]
except TypeError:
return [None, None]
def test_reset_permissions(id, hex_key):
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, username, email FROM users WHERE id = %s AND hex_key = %s", (id, hex_key))
try:
return cur.fetchone()
except TypeError:
return [None, None]
def set_user_password(user_or_email, password):
"""Change a user's password (administratively) - returns None on success, or error message"""
user_or_email = user_or_email.lower()
if not isinstance(password, bytes): password=password.encode("utf-8")
with _conn, _conn.cursor() as cur:
salt = os.urandom(16)
hash = hashlib.sha256(salt+password).hexdigest()
pwd = salt.encode("hex")+"-"+hash
cur.execute("SELECT id FROM users WHERE username=%s OR email=%s AND status=1", (user_or_email, user_or_email))
rows=cur.fetchall()
if len(rows)!=1: return "There is already an account for that email."
cur.execute("update users set password=%s where id=%s", (pwd, rows[0][0]))
def check_db_for_user(user_or_email):
"""Change a user's password (administratively) - returns None on success, or error message"""
user_or_email = user_or_email.lower()
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, status FROM users WHERE username=%s OR email=%s", (user_or_email, user_or_email))
rows=cur.fetchall()
print(rows)
if not len(rows)>=1: return "No account found."
else: return "There is already an account for that email."
def verify_user(user_or_email, password):
"""Verify a user name/email and password, returns the ID if valid or None if not"""
user_or_email = user_or_email.lower()
if not isinstance(password, bytes): password=password.encode("utf-8")
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id,password FROM users WHERE username=%s OR email=%s AND status=1", (user_or_email, user_or_email))
for id, pwd in cur:
if "-" not in pwd: continue
salt, hash = pwd.split("-", 1)
if hashlib.sha256(salt.decode("hex")+password).hexdigest()==hash:
# Successful match.
return id
# If we fall through without finding anything that matches, return None.
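# Editor's illustrative sketch of the stored password format used by
# create_user() and verify_user(): hex(salt) + '-' + sha256(salt + password).
# The sample password is made up; str.encode('hex') is Python 2 only.
def _example_password_roundtrip():
    password = b's3cret-example'
    salt = os.urandom(16)
    stored = salt.encode('hex') + '-' + hashlib.sha256(salt + password).hexdigest()
    salt_hex, digest = stored.split('-', 1)
    return hashlib.sha256(salt_hex.decode('hex') + password).hexdigest() == digest  # True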
def get_user_info(id):
"""Return the user name and permissions level for a given UID, or (None,0) if not logged in"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT username, user_level FROM users WHERE id=%s", (id,))
row = cur.fetchone()
return row or (None, 0)
def hex_user_password(email, hex_key):
"""Return username and id that matches email."""
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE users set hex_key = %s WHERE email=%s RETURNING username, id", (hex_key, email))
row = cur.fetchone()
return row or (None, 0)
def reset_user_password(id, hex_key, password):
with _conn, _conn.cursor() as cur:
salt = os.urandom(16)
hash = hashlib.sha256(salt+password).hexdigest()
pwd = salt.encode("hex")+"-"+hash
cur.execute("update users set password=%s, hex_key='' where id=%s and hex_key=%s", (pwd, id, hex_key))
def get_analysis(id):
with _conn, _conn.cursor() as cur:
cur.execute("select analysis from tracks where id=%s", (id,))
return cur.fetchone()[0]
def save_analysis(id, analysis):
with _conn, _conn.cursor() as cur:
cur.execute("update tracks set analysis=%s where id=%s", (analysis, id))
@cmdline
def importmp3(filename, submitter="Bulk import", submitteremail="bulk@import.invalid"):
"""Bulk-import MP3 files into the appension database
filename+: MP3 file(s) to import
--submitter: Name of submitter
--submitteremail: Email address of submitter
"""
# Build up a form-like dictionary for the info mapping. This is the downside of
# the breach of encapsulation in create_track().
info = {"SubmitterName": [submitter], "Email": [submitteremail]}
for fn in filename:
print("Importing %s"%fn)
with open(fn, "rb") as f: data = f.read()
# create_track() now also takes an imagefile and a user name (see above).
id = create_track(data, os.path.split(fn)[-1], None, info, submitter)
print("Saved as track #%d."%id)
@cmdline
def tables(confirm=False):
"""Update tables based on create_table.sql
--confirm: If omitted, will do a dry run.
"""
tb = None; cols = set(); coldefs = []
with _conn, _conn.cursor() as cur:
def finish():
if cols: coldefs.extend("drop "+col for col in cols)
if tb and coldefs:
if is_new: query = "create table "+tb+" ("+", ".join(coldefs)+")"
else: query = "alter table "+tb+" add "+", add ".join(coldefs)
if confirm: cur.execute(query)
else: print(query)
for line in open("create_table.sql"):
line = line.rstrip()
if line == "" or line.startswith("--"): continue
# Flush-left lines are table names
if line == line.lstrip():
finish()
tb = line; cols = set(); coldefs = []
cur.execute("select column_name from information_schema.columns where table_name=%s", (tb,))
cols = {row[0] for row in cur}
is_new = not cols
continue
# Otherwise, it should be a column definition, starting (after whitespace) with the column name.
colname, defn = line.strip().split(" ", 1)
if colname in cols:
# Column already exists. Currently, we assume there's nothing to change.
cols.remove(colname)
else:
# Column doesn't exist. Add it!
# Note that we include a newline here so that a comment will be properly terminated.
# If you look at the query, it'll have all its commas oddly placed, but that's okay.
coldefs.append("%s %s\n"%(colname,defn))
finish()
@cmdline
def testfiles():
"""Test all audio files"""
import pyechonest.track
for file in get_many_mp3(status=0):
if file.track_details['length'] < 700:
print("Name: {} Length: {}".format(file.filename, file.track_details['length']))
# pyechonest is imported as a package above, so qualify the call fully.
track = pyechonest.track.track_from_filename('audio/'+file.filename, force_upload=True)
print(track.id)
else:
print("BIG ONE - Name: {} Length: {}".format(file.filename, file.track_details['length']))
if __name__ == "__main__": print(cmdline.main() or "")
Only delete artwork param if it exists.
"""Database operations for Appension
Executable using 'python -m fore.database' - use --help for usage.
"""
import apikeys
import psycopg2
import utils
import logging
import Queue
import multiprocessing
import os
import re
import hashlib
from mutagen.mp3 import MP3
from time import sleep
from docstringargs import cmdline
_conn = psycopg2.connect(apikeys.db_connect_string)
log = logging.getLogger(__name__)
# Enable Unicode return values for all database queries
# This would be the default in Python 3, but in Python 2, we
# need to enable these two extensions.
# http://initd.org/psycopg/docs/usage.html#unicode-handling
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
class Track(object):
# Select these from the tracks table to construct a track object.
columns = "id,filename,artist,title,length,status,submitter,submitteremail,submitted,lyrics,story,comments,xfade,itrim,otrim,sequence,keywords,url"
def __init__(self, id, filename, artist, title, length, status,
submitter, submitteremail, submitted, lyrics, story, comments, xfade, itrim, otrim, sequence, keywords, url):
log.info("Rendering Track(%r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r)", id, filename, artist, title, \
length, status, story, comments, xfade, itrim, otrim)
if len(artist.split(',')) > 1:
the_artist = artist.split(',')
artist_exact = artist
artist = ' '.join([the_artist[1], the_artist[0]])
else: artist_exact = artist
self.id = id
self.filename = filename
# Add some stubby metadata (in an attribute that desperately
# wants to be renamed to something mildly useful)
self.track_details = {
'id': id,
'artist': artist,
'artist_exact': artist_exact,
'title': title,
'length': length,
'status': status,
'story': story,
'lyrics': lyrics,
'xfade': xfade,
'itrim': itrim,
'otrim': otrim,
'comments': comments,
'sequence': sequence,
'url': url,
}
self.full_track_details = {
'status': status,
'submitted': submitted,
'submitter': submitter,
'submitteremail': submitteremail,
'lyrics': lyrics,
'story': story,
'comments': comments,
'keywords': keywords,
}
class Submitter(object):
def __init__(self,username,email,userid,artist,track_id,filename,lyrics,story ):
self.userid = userid
self.username = username
self.email = email
self.submitted = {
'artist': artist,
'track_id': track_id,
'filename': filename,
'lyrics': lyrics,
'story': story
}
class Artist(object):
def __init__(self, artist_from_db):
if len(artist_from_db.split(',')) > 1:
name_list = artist_from_db.split(',')
display_name = ' '.join([name_list[1], name_list[0]])
else:
display_name = artist_from_db
name_list = ['', artist_from_db]
self.name = {
'display_name': display_name,
'name_list': name_list
}
class Lyric(object):
# Select these from the tracks table to construct a track object.
columns = "id,artist,lyrics"
def __init__(self, id, artist, lyrics):
couplets = [block for block in re.split(r'(?:\r\n){2,}', lyrics) if block.count('\r\n') == 1]
couplet_count = len(couplets)
lyrics = self.get_couplets(lyrics)
an_artist = Artist(artist)
self.track_lyrics = {
'id': id,
'artist': an_artist,
'lyrics': lyrics,
# TODO: ignore lyrics that exceed sets of two (but allow for 1/2 couplets)
'couplet_count': couplet_count,
'couplets': couplets
}
def get_couplets(self, lyrics):
return lyrics.splitlines(True)
def get_mp3(some_specifier):
with _conn, _conn.cursor():
# TODO: Fetch an MP3 and return its raw data
pass
def get_many_mp3(status=1, order_by='length'):
"""Get a list of many (possibly all) the tracks in the database.
Returns a list, guaranteed to be fully realized prior to finishing
with the database cursor, for safety.
"""
query = "SELECT {cols} FROM tracks WHERE {col}=%s ORDER BY {ord}""".format(cols=Track.columns, col=("'all'" if status=='all' else 'status'), ord=order_by)
with _conn, _conn.cursor() as cur:
cur.execute(query, (status,))
return [Track(*row) for row in cur.fetchall()]
_track_queue = multiprocessing.Queue()
def get_track_to_play():
"""Get a track from the database with presumption that it will be played.
If something has been enqueued with enqueue_track(), that will be the one
returned; otherwise, one is picked by magic.
"""
with _conn, _conn.cursor() as cur:
try:
track=_track_queue.get(False)
log.info("Using enqueued track %s.", track.id)
except Queue.Empty:
cur.execute("SELECT "+Track.columns+" FROM tracks WHERE status=1 ORDER BY played,random()")
row=cur.fetchone()
if not row: raise ValueError("Database is empty, cannot enqueue track")
track=Track(*row)
log.info("Automatically picking track %s.", track.id)
# Record that a track has been played.
# Currently simply increments the counter; may later keep track of how long since played, etc.
cur.execute("UPDATE tracks SET played=played+1 WHERE id=%s", (track.id,))
return track
def enqueue_track(id):
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE tracks SET enqueued=enqueued+1 WHERE ID=%s RETURNING "+Track.columns, (id,))
# Assumes the ID is actually valid (will raise TypeError if not)
_track_queue.put(Track(*cur.fetchone()))
def get_single_track(track_id):
"""Get details for a single track by its ID"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT "+Track.columns+" FROM tracks WHERE id=%s", (track_id,))
return Track(*cur.fetchone())
def get_complete_length():
"""Get the sum of length of all active tracks."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT coalesce(sum(length),0) FROM tracks WHERE status = 1")
return cur.fetchone()[0]
def get_all_lyrics():
"""Get the lyrics from all active tracks.."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, artist, lyrics FROM tracks WHERE status = 1 AND lyrics != ''")
return [Lyric(*row) for row in cur.fetchall()]
def match_lyrics(word):
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, artist, lyrics FROM tracks WHERE lyrics ILIKE %s", ('%'+word+'%',))
return [Lyric(*row) for row in cur.fetchall()]
def match_keywords(word):
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, artist, lyrics FROM tracks WHERE keywords ILIKE %s", ('%'+word+'%',))
return [Lyric(*row) for row in cur.fetchall()]
def random_lyrics():
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, artist, lyrics FROM tracks WHERE lyrics != '' ORDER BY random() limit 1")
return [Lyric(*row) for row in cur.fetchall()]
def get_track_artwork(id):
"""Get the artwork for one track, or None if no track, or '' if no artwork."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT artwork FROM tracks WHERE id=%s", (id,))
row = cur.fetchone()
return row and row[0]
def create_track(mp3data, filename, imagefile, info, user_name):
"""Save a blob of MP3 data to the specified file and registers it in the database.
Note that this function breaks encapsulation horribly. The third argument is
assumed to be a request object dictionary, with all its quirks. The file is saved
to disk as well as being registered with the database. TODO: Clean me up."""
log.info(info)
with _conn, _conn.cursor() as cur:
# We have a chicken-and-egg problem here. We can't (AFAIK) get the ID3 data
# until we have a file, and we want to name the file based on the track ID.
# Resolution: Either save the file to a temporary name and then rename it,
# or insert a dummy row and then update it. Using the latter approach.
cur.execute("""INSERT INTO tracks (userid, lyrics, story, comments, url)
VALUES ((
select id from users where username = %s
), %s, %s, %s, %s) RETURNING id""",
(user_name, info.get("lyrics",[""])[0], info.get("story",[""])[0], info.get("comments",[""])[0],
info.get("url",[""])[0]))
id = cur.fetchone()[0]
filename = "audio/%d %s"%(id, filename)
with open(filename, "wb") as f: f.write(mp3data)
track = MP3(filename)
if imagefile:
pic = imagefile
else:
pic=next((k for k in track if k.startswith("APIC:")), None)
pic = pic and track[pic].data
# Note: These need to fold absent and blank both to the given string.
try: artist = u', '.join(track['TPE1'].text)
except KeyError: artist = info.get("artist",[""])[0] or u'(unknown artist)'
try: title = u', '.join(track['TIT2'].text)
except KeyError: title = info.get("track_title",[""])[0] or u'(unknown title)'
cur.execute("UPDATE tracks SET artist=%s, title=%s, filename=%s, artwork=%s, length=%s WHERE id=%s",
(artist,
title,
track.filename[6:],
memoryview(pic) if pic else "",
track.info.length,
id)
)
return id
def delete_track(input):
"""Delete the given track ID - no confirmation"""
with _conn, _conn.cursor() as cur:
cur.execute("""DELETE FROM tracks WHERE id = %s""", (input,))
def reset_played():
"""Reset played for all tracks to 0"""
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE tracks SET played = 0")
def update_track(id, info, artwork):
"""Update the given track ID based on the info mapping.
This breaks encapsulation just as create_track() does."""
print('****************')
log.info(info)
with _conn, _conn.cursor() as cur:
# Enumerate all updateable fields. If they're not provided, they won't be updated;
# any other fields will be ignored. This is basically set intersection on a dict.
fields = ("artist", "status", "lyrics", "story", "xfade", "otrim", "itrim", "keywords", "artwork", "url")
param = {k:info[k][0] for k in fields if k in info}
if not artwork == None:
param['artwork'] = memoryview(artwork)
else:
try:
del param['artwork']
except KeyError:
pass
cur.execute("UPDATE tracks SET "+",".join(x+"=%("+x+")s" for x in param)+" WHERE id="+str(id),param)
def sequence_tracks(sequence_object):
for id, sequence in sequence_object.iteritems():
seq = sequence_object.get(id,'')[0]
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE tracks SET sequence = "+str(seq)+", played = 0 WHERE id="+str(id))
def get_track_submitter_info():
with _conn, _conn.cursor() as cur:
query = '''SELECT a.username, a.email, a.id as userid, b.artist, b.id as track_id, b.filename,
CASE WHEN b.lyrics !='' THEN 1
ELSE 0
END as lyrics,
CASE WHEN b.story !='' THEN 1
ELSE 0
END as story
FROM tracks b
join users a
on a.id=b.userid GROUP by a.username, a.email, a.id, b.artist, b.id'''
cur.execute(query)
return [Submitter(*row) for row in cur.fetchall()]
def update_track_submitter_info(submitter_object):
'''We may not need track id, but it may prove useful at some point.'''
for track_grouping in zip(submitter_object['track_id'],submitter_object['user_id'],submitter_object['username'],submitter_object['email']):
userid = track_grouping[1]
name = track_grouping[2]
email = track_grouping[3]
track_id = track_grouping[0]
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE users SET username = '"+str(name)+"', email = '"+str(email)+"' WHERE id = "+str(userid))
def add_dummy_users():
start_default_email_number = 0
with _conn, _conn.cursor() as cur:
cur.execute("SELECT artist FROM tracks WHERE userid = 0 GROUP BY artist;")
artists = cur.fetchall()
for artist in artists:
start_default_email_number += 1
start_default_email = str(start_default_email_number) + '@infiniteglitch.net'
cur.execute("INSERT INTO users (username, email) VALUES (%s, %s) RETURNING id", (artist[0], start_default_email))
            userid = cur.fetchone()[0]
print(artist[0], userid)
cur.execute("UPDATE tracks SET userid = %s WHERE artist LIKE %s", (userid, artist[0]))
def create_outreach_message(message):
with _conn, _conn.cursor() as cur:
cur.execute("INSERT INTO outreach (message) VALUES (%s) RETURNING id, message", \
(message,))
return [row for row in cur.fetchone()]
def update_outreach_message(message, id=1):
if retrieve_outreach_message()[0] == '':
return create_outreach_message(message)
query = "UPDATE outreach SET message = (message) WHERE id = 1 RETURNING id, message"
data = (message,)
with _conn, _conn.cursor() as cur:
cur.execute(query, data)
return [row for row in cur.fetchone()]
def retrieve_outreach_message():
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, message FROM outreach ORDER BY id LIMIT 1")
try:
return [row for row in cur.fetchone()]
except TypeError:
return ['', '']
def get_subsequent_track(track_id):
"""Return Track Object for next track in sequence."""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT sequence FROM tracks WHERE id = "+str(track_id))
sequence = cur.fetchone()[0]
query = "SELECT {cols} FROM tracks WHERE sequence > {seq} ORDER BY sequence limit 1".format(cols=Track.columns, seq=str(sequence))
cur.execute(query)
try:
return Track(*cur.fetchone())
except TypeError:
query = "SELECT {cols} FROM tracks WHERE sequence >= {seq} ORDER BY sequence limit 1".format(cols=Track.columns, seq=str(sequence))
cur.execute(query)
return Track(*cur.fetchone())
def get_track_filename(track_id):
"""Return filename for a specific track, or None"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT filename FROM tracks WHERE id = %s", (track_id,))
for row in cur: return row[0]
def browse_tracks(letter):
"""Return artist, id for tracks, where artist name starts with letter in expression or higher, limit 20."""
query = "SELECT DISTINCT artist FROM tracks WHERE status = 1 AND (case when artist ilike 'The %' then substr(upper(artist), 5, 100) else upper(artist) end) >= '{letter}' ORDER BY artist LIMIT 20".format(cols=Track.columns, letter=letter)
with _conn, _conn.cursor() as cur:
        cur.execute(query, (letter,))
return [row for row in cur.fetchall()]
def get_recent_tracks(number):
"""Retrieve [number] number of most recently activated tracks"""
query = "SELECT DISTINCT artist, submitted FROM tracks WHERE status = 1 ORDER BY submitted DESC LIMIT {number}".format(cols=Track.columns, number=number)
with _conn, _conn.cursor() as cur:
        cur.execute(query, (number,))
return [row for row in cur.fetchall()]
def tracks_by(artist):
"""Return artist, id for tracks, where artist name starts with letter in expression"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT {cols} FROM tracks WHERE status = 1 AND trim(artist) = '{artist}' ORDER BY title LIMIT 20".format(cols=Track.columns, artist=artist))
return [Track(*row) for row in cur.fetchall()]
@cmdline
def create_user(username, email, password):
"""Create a new user, return the newly-created ID
username: Name for the new user
email: Email address (must be unique)
password: Clear-text password
"""
username = username.lower(); email = email.lower();
if not isinstance(password, bytes): password=password.encode("utf-8")
hex_key = utils.random_hex()
with _conn, _conn.cursor() as cur:
salt = os.urandom(16)
hash = hashlib.sha256(salt+password).hexdigest()
pwd = salt.encode("hex")+"-"+hash
try:
cur.execute("INSERT INTO users (username, email, password, hex_key) VALUES (%s, %s, %s, %s) RETURNING id, hex_key", \
(username, email, pwd, hex_key))
return cur.fetchone()
except psycopg2.IntegrityError as e:
return "That didn't work too well because: <br/>%s<br/> Maybe you already have an account or \
someone else is using the name you requested."%e
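# Password storage note: the users.password column holds "<hex salt>-<sha256 hexdigest>",
# where the digest is sha256(salt + cleartext password); verify_user() below splits on
# the first "-" and recomputes the digest to check a login attempt.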
@cmdline
def confirm_user(id, hex_key):
"""Attempt to confirm a user's email address
id: Numeric user ID (not user name or email)
hex_key: Matching key to the one stored, else the confirmation fails
"""
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE users SET status = 1, hex_key = '' WHERE id = %s AND hex_key = %s RETURNING username, email", (id, hex_key))
try:
return cur.fetchone()[0]
except TypeError:
return [None, None]
def test_reset_permissions(id, hex_key):
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, username, email FROM users WHERE id = %s AND hex_key = %s", (id, hex_key))
        row = cur.fetchone()
        return row if row else [None, None]
def set_user_password(user_or_email, password):
"""Change a user's password (administratively) - returns None on success, or error message"""
user_or_email = user_or_email.lower()
if not isinstance(password, bytes): password=password.encode("utf-8")
with _conn, _conn.cursor() as cur:
salt = os.urandom(16)
hash = hashlib.sha256(salt+password).hexdigest()
pwd = salt.encode("hex")+"-"+hash
cur.execute("SELECT id FROM users WHERE username=%s OR email=%s AND status=1", (user_or_email, user_or_email))
rows=cur.fetchall()
if len(rows)!=1: return "There is already an account for that email."
cur.execute("update users set password=%s where id=%s", (pwd, rows[0][0]))
def check_db_for_user(user_or_email):
"""Change a user's password (administratively) - returns None on success, or error message"""
user_or_email = user_or_email.lower()
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id, status FROM users WHERE username=%s OR email=%s", (user_or_email, user_or_email))
rows=cur.fetchall()
print(rows)
        if not rows: return "No account found."
else: return "There is already an account for that email."
def verify_user(user_or_email, password):
"""Verify a user name/email and password, returns the ID if valid or None if not"""
user_or_email = user_or_email.lower()
if not isinstance(password, bytes): password=password.encode("utf-8")
with _conn, _conn.cursor() as cur:
cur.execute("SELECT id,password FROM users WHERE username=%s OR email=%s AND status=1", (user_or_email, user_or_email))
for id, pwd in cur:
if "-" not in pwd: continue
salt, hash = pwd.split("-", 1)
if hashlib.sha256(salt.decode("hex")+password).hexdigest()==hash:
# Successful match.
return id
# If we fall through without finding anything that matches, return None.
def get_user_info(id):
"""Return the user name and permissions level for a given UID, or (None,0) if not logged in"""
with _conn, _conn.cursor() as cur:
cur.execute("SELECT username, user_level FROM users WHERE id=%s", (id,))
row = cur.fetchone()
return row or (None, 0)
def hex_user_password(email, hex_key):
"""Return username and id that matches email."""
with _conn, _conn.cursor() as cur:
cur.execute("UPDATE users set hex_key = %s WHERE email=%s RETURNING username, id", (hex_key, email))
row = cur.fetchone()
return row or (None, 0)
def reset_user_password(id, hex_key, password):
with _conn, _conn.cursor() as cur:
salt = os.urandom(16)
hash = hashlib.sha256(salt+password).hexdigest()
pwd = salt.encode("hex")+"-"+hash
cur.execute("update users set password=%s, hex_key='' where id=%s and hex_key=%s", (pwd, id, hex_key))
def get_analysis(id):
with _conn, _conn.cursor() as cur:
cur.execute("select analysis from tracks where id=%s", (id,))
return cur.fetchone()[0]
def save_analysis(id, analysis):
with _conn, _conn.cursor() as cur:
cur.execute("update tracks set analysis=%s where id=%s", (analysis, id))
@cmdline
def importmp3(filename, submitter="Bulk import", submitteremail="bulk@import.invalid"):
"""Bulk-import MP3 files into the appension database
filename+: MP3 file(s) to import
--submitter: Name of submitter
--submitteremail: Email address of submitter
"""
# Build up a form-like dictionary for the info mapping. This is the downside of
# the breaching of encapsulation in database.create_track().
info = {"SubmitterName": [submitter], "Email": [submitteremail]}
for fn in filename:
print("Importing %s"%fn)
with open(fn, "rb") as f: data = f.read()
id = database.create_track(data, os.path.split(fn)[-1], info)
print("Saved as track #%d."%id)
@cmdline
def tables(confirm=False):
"""Update tables based on create_table.sql
--confirm: If omitted, will do a dry run.
"""
tb = None; cols = set(); coldefs = []
with _conn, _conn.cursor() as cur:
def finish():
if cols: coldefs.extend("drop "+col for col in cols)
if tb and coldefs:
if is_new: query = "create table "+tb+" ("+", ".join(coldefs)+")"
else: query = "alter table "+tb+" add "+", add ".join(coldefs)
if confirm: cur.execute(query)
else: print(query)
for line in open("create_table.sql"):
line = line.rstrip()
if line == "" or line.startswith("--"): continue
# Flush-left lines are table names
if line == line.lstrip():
finish()
tb = line; cols = set(); coldefs = []
cur.execute("select column_name from information_schema.columns where table_name=%s", (tb,))
cols = {row[0] for row in cur}
is_new = not cols
continue
# Otherwise, it should be a column definition, starting (after whitespace) with the column name.
colname, defn = line.strip().split(" ", 1)
if colname in cols:
# Column already exists. Currently, we assume there's nothing to change.
cols.remove(colname)
else:
# Column doesn't exist. Add it!
# Note that we include a newline here so that a comment will be properly terminated.
# If you look at the query, it'll have all its commas oddly placed, but that's okay.
coldefs.append("%s %s\n"%(colname,defn))
finish()
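# tables() above expects create_table.sql to follow a simple layout: table names flush
# against the left margin, one indented column definition per line beneath them, with
# blank lines and flush-left "--" comments skipped.  A hypothetical sketch:
#
#   tracks
#       id serial primary key
#       artist text
#       played integer default 0
#
# Existing columns are left untouched; columns listed in the file but missing from the
# database are added to the generated ALTER/CREATE statement.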
@cmdline
def testfiles():
"""Test all audio files"""
import pyechonest.track
for file in get_many_mp3(status=0):
if file.track_details['length'] < 700:
print("Name: {} Length: {}".format(file.filename, file.track_details['length']))
            track = pyechonest.track.track_from_filename('audio/'+file.filename, force_upload=True)
print(track.id)
else:
print("BIG ONE - Name: {} Length: {}".format(file.filename, file.track_details['length']))
if __name__ == "__main__": print(cmdline.main() or "")
"""
Wrapper for biopython Fasta, add option to parse sequence headers
"""
import sys
import os
import os.path as op
import shutil
import logging
import string
from optparse import OptionParser
from itertools import groupby, izip_longest
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from jcvi.formats.base import BaseFile, DictFile, must_open
from jcvi.formats.bed import Bed
from jcvi.apps.base import ActionDispatcher, debug, set_outfile
from jcvi.apps.console import red, green
debug()
class Fasta (BaseFile, dict):
def __init__(self, filename, index=False, key_function=None, lazy=False):
super(Fasta, self).__init__(filename)
self.key_function = key_function
if lazy: # do not incur the overhead
return
if index:
self.index = SeqIO.index(filename, "fasta",
key_function=key_function)
else:
# SeqIO.to_dict expects a different key_function that operates on
# the SeqRecord instead of the raw string
_key_function = (lambda rec: key_function(rec.description)) if \
key_function else None
self.index = SeqIO.to_dict(SeqIO.parse(must_open(filename), "fasta"),
key_function=_key_function)
def _key_function(self, key):
return self.key_function(key) if self.key_function else key
def __len__(self):
return len(self.index)
def __contains__(self, key):
key = self._key_function(key)
return key in self.index
def __getitem__(self, key):
key = self._key_function(key)
rec = self.index[key]
return rec
def keys(self):
return self.index.keys()
def iterkeys(self):
for k in self.index.iterkeys():
yield k
def iteritems(self):
for k in self.iterkeys():
yield k, self[k]
def itersizes(self):
for k in self.iterkeys():
yield k, len(self[k])
def iteritems_ordered(self):
for rec in SeqIO.parse(must_open(self.filename), "fasta"):
yield rec.name, rec
def iterdescriptions_ordered(self):
for k, rec in self.iteritems_ordered():
yield rec.description, rec
def iterkeys_ordered(self):
for k, rec in self.iteritems_ordered():
yield k
def itersizes_ordered(self):
for k, rec in self.iteritems_ordered():
yield k, len(rec)
@property
def totalsize(self):
return sum(size for k, size in self.itersizes())
@classmethod
def subseq(cls, fasta, start=None, stop=None, strand=None):
"""
        Take a Bio.SeqRecord and slice "start:stop" from it, with proper
        index and error handling
"""
start = start - 1 if start is not None else 0
stop = stop if stop is not None else len(fasta)
        assert start >= 0, "start (%d) must be > 0" % (start + 1)
assert stop <= len(fasta), \
("stop (%d) must be <= " + \
"length of `%s` (%d)") % (stop, fasta.id, len(fasta))
seq = fasta.seq[start:stop]
if strand in (-1, '-1', '-'):
seq = seq.reverse_complement()
return seq
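    # subseq() coordinates are 1-based and inclusive, e.g. Fasta.subseq(rec, 1, 10)
    # returns the first ten bases; strand -1/'-1'/'-' returns the reverse complement.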
def sequence(self, f, asstring=True):
"""
        Emulate brentp's pyfasta/fasta.py sequence() method:
        take a feature and use the start/stop or exon_keys to return
        the sequence from the associated fasta file:
f: a feature
asstring: if true, return the sequence as a string
: if false, return as a biopython Seq
>>> f = Fasta('tests/data/three_chrs.fasta')
>>> f.sequence({'start':1, 'stop':2, 'strand':1, 'chr': 'chr1'})
'AC'
>>> f.sequence({'start':1, 'stop':2, 'strand': -1, 'chr': 'chr1'})
'GT'
"""
assert 'chr' in f, "`chr` field required"
name = f['chr']
assert name in self, "feature: %s not in `%s`" % \
(f, self.filename)
fasta = self[f['chr']]
seq = Fasta.subseq(fasta,
f.get('start'), f.get('stop'), f.get('strand'))
if asstring:
return str(seq)
return seq
"""
Class derived from https://gist.github.com/933737
Original code written by David Winter (https://github.com/dwinter)
Code written to answer this challenge at Biostar:
http://biostar.stackexchange.com/questions/5902/
(Code includes improvements from Brad Chapman)
"""
class ORFFinder:
"""Find the longest ORF in a given sequence
"seq" is a string, if "start" is not provided any codon can be the start of
and ORF. If muliple ORFs have the longest length the first one encountered
is printed
"""
def __init__(self, seq, start=[], stop=["TAG", "TAA", "TGA"]):
self.seq = seq.tostring().upper()
self.start = start
self.stop = stop
# strand, frame, start, end, length; coordinates are 1-based
self.result = ["+", 0, 0, 0, 0]
self.longest = 0
self.size = len(seq)
def __str__(self):
# Format similar to getorf
strand, frame, start, end, length = self.result
start += 1 # 1-based coordinates
if strand == '-':
start, end = end, start
return "[{0} - {1}]".format(start, end)
@property
def info(self):
strand, frame, start, end, length = self.result
return "\t".join(str(x) for x in (strand, frame, start, end))
def codons(self, frame):
""" A generator that yields DNA in one codon blocks
"frame" counts for 0. This function yields a tuple (triplet, index) with
index relative to the original DNA sequence
"""
start = frame
while start + 3 <= self.size:
yield self.sequence[start : start + 3], start
start += 3
def scan_sequence(self, frame, direction):
""" Search in one reading frame """
orf_start = None
for c, index in self.codons(frame):
if (c not in self.stop and (c in self.start or not self.start)
and orf_start is None):
orf_start = index
elif c in self.stop and orf_start is not None:
self._update_longest(orf_start, index + 3, direction, frame)
orf_start = None
if orf_start is not None:
self._update_longest(orf_start, index + 3, direction, frame)
def _update_longest(self, orf_start, index, direction, frame):
orf_end = index
L = orf_end - orf_start
if L > self.longest:
self.longest = L
self.result = [direction, frame, orf_start, orf_end, L]
def get_longest_orf(self):
dirs = ("+", "-")
for direction in dirs:
self.sequence = self.seq
if direction == "-":
self.sequence = rc(self.sequence)
for frame in xrange(3):
self.scan_sequence(frame, direction)
strand, frame, start, end, length = self.result
size = self.size
if strand == '-':
start, end = size - end, size - start
self.result[2 : 4] = start, end
assert start < end, self.result
orf = self.seq[start : end]
if strand == '-':
orf = rc(orf)
assert len(orf) % 3 == 0
return orf
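# Usage sketch (assumes a Bio.Seq-like object, since __init__ calls seq.tostring()):
#   longest = ORFFinder(rec.seq).get_longest_orf()
# With the default start=[], any non-stop codon may open an ORF, so the longest stretch
# up to a stop codon (or frame end) across all six frames wins; .result then holds
# [strand, frame, start, end, length] with 0-based, slice-style start/end on the
# forward strand (__str__ prints the start as 1-based).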
def rc(s):
_complement = string.maketrans('ATCGatcgNnXx', 'TAGCtagcNnXx')
cs = s.translate(_complement)
return cs[::-1]
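# e.g. rc("ATCGn") -> "nCGAT"; characters outside ATCGatcgNnXx pass through unchanged.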
def main():
actions = (
('extract', 'given fasta file and seq id, retrieve the sequence ' + \
'in fasta format'),
('longestorf', 'find longest orf for CDS fasta'),
('translate', 'translate CDS to proteins'),
('summary', "report the real no of bases and N's in fastafiles"),
('uniq', 'remove records that are the same'),
('ids', 'generate a list of headers'),
('format', 'trim accession id to the first space or switch id ' + \
'based on 2-column mapping file'),
('pool', 'pool a bunch of fastafiles together and add prefix'),
('random', 'randomly take some records'),
('diff', 'check if two fasta records contain same information'),
('identical', 'given 2 fasta files, find all exactly identical records'),
('trim', 'given a cross_match screened fasta, trim the sequence'),
('sort', 'sort the records by IDs, sizes, etc.'),
('filter', 'filter the records by size'),
('pair', 'sort paired reads to .pairs, rest to .fragments'),
('pairinplace', 'starting from fragment.fasta, find if ' +\
"adjacent records can form pairs"),
('fastq', 'combine fasta and qual to create fastq file'),
('tidy', 'normalize gap sizes and remove small components in fasta'),
('sequin', 'generate a gapped fasta file for sequin submission'),
('gaps', 'print out a list of gap sizes within sequences'),
('join', 'concatenate a list of seqs and add gaps in between'),
('some', 'include or exclude a list of records (also performs on ' + \
'.qual file if available)'),
('clean', 'remove irregular chars in FASTA seqs'),
('ispcr', 'reformat paired primers into isPcr query format'),
('fromtab', 'convert 2-column sequence file to FASTA format'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def fromtab(args):
"""
%prog fromtab tabfile fastafile
Convert 2-column sequence file to FASTA format. One usage for this is to
    generate an `adapters.fasta` for TRIMMOMATIC.
"""
p = OptionParser(fromtab.__doc__)
p.add_option("--sep",
help="Separator in the tabfile [default: %default]")
p.add_option("--replace",
help="Replace spaces in name to char [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tabfile, fastafile = args
sep = opts.sep
replace = opts.replace
fp = must_open(tabfile)
fw = must_open(fastafile, "w")
nseq = 0
for row in fp:
row = row.strip()
if not row or row[0] == '#':
continue
name, seq = row.rsplit(sep, 1)
if replace:
name = name.replace(" ", replace)
print >> fw, ">{0}\n{1}".format(name, seq)
nseq += 1
fw.close()
logging.debug("A total of {0} sequences written to `{1}`.".\
format(nseq, fastafile))
def longestorf(args):
"""
%prog longestorf fastafile
Find longest ORF for each sequence in fastafile.
"""
from jcvi.utils.cbook import percentage
p = OptionParser(longestorf.__doc__)
p.add_option("--ids", action="store_true",
help="Generate table with ORF info [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
pf = fastafile.rsplit(".", 1)[0]
orffile = pf + ".orf.fasta"
idsfile = None
if opts.ids:
idsfile = pf + ".orf.ids"
fwids = open(idsfile, "w")
f = Fasta(fastafile, lazy=True)
fw = must_open(orffile, "w")
before, after = 0, 0
for name, rec in f.iteritems_ordered():
cds = rec.seq
before += len(cds)
# Try all six frames
orf = ORFFinder(cds)
lorf = orf.get_longest_orf()
newcds = Seq(lorf)
after += len(newcds)
newrec = SeqRecord(newcds, id=name, description=rec.description)
SeqIO.write([newrec], fw, "fasta")
if idsfile:
print >> fwids, "\t".join((name, orf.info))
fw.close()
if idsfile:
fwids.close()
logging.debug("Longest ORFs written to `{0}` ({1}).".\
format(orffile, percentage(after, before)))
return orffile
def ispcr(args):
"""
%prog ispcr fastafile
Reformat paired primers into isPcr query format, which is three column
format: name, forward, reverse
"""
from jcvi.utils.iter import grouper
p = OptionParser(ispcr.__doc__)
p.add_option("-r", dest="rclip", default=1, type="int",
help="pair ID is derived from rstrip N chars [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
ispcrfile = fastafile + ".isPcr"
fw = open(ispcrfile, "w")
N = opts.rclip
    strip_name = lambda x: x[:-N] if N else x
npairs = 0
fastaiter = SeqIO.parse(fastafile, "fasta")
for a, b in grouper(2, fastaiter):
aid, bid = [strip_name(x) for x in (a.id, b.id)]
assert aid == bid, "Name mismatch {0}".format((aid, bid))
print >> fw, "\t".join((aid, str(a.seq), str(b.seq)))
npairs += 1
fw.close()
logging.debug("A total of {0} pairs written to `{1}`.".\
format(npairs, ispcrfile))
def parse_fasta(infile):
'''
    parse a fasta-formatted file and yield (header, sequence) tuples; the file
    can contain multiple records.
'''
fp = open(infile)
# keep header
fa_iter = (x[1] for x in groupby(fp, lambda row: row[0] == '>'))
for header in fa_iter:
header = header.next()
if header[0] != '>':
continue
# drop '>'
header = header.strip()[1:]
# stitch the sequence lines together and make into upper case
seq = "".join(s.strip().upper() for s in fa_iter.next())
yield header, seq
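# Usage sketch (hypothetical file name):
#   for header, seq in parse_fasta("reads.fasta"):
#       print header, len(seq)   # header has the ">" stripped; seq is upper-cased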
def iter_clean_fasta(fastafile):
import string
for header, seq in parse_fasta(fastafile):
seq = "".join(x for x in seq if x in string.letters or x == '*')
yield header, seq
def iter_canonical_fasta(fastafile):
canonical = "acgtnACGTN"
totalbad = 0
for header, seq in parse_fasta(fastafile):
badcounts = sum(1 for x in seq if x not in canonical)
seq = "".join((x if x in canonical else 'N') for x in seq)
totalbad += badcounts
yield header, seq
logging.debug("Total bad char: {0}".format(totalbad))
def fancyprint(fw, seq, width=60, chunk=10):
from jcvi.utils.iter import grouper
assert width % chunk == 0
nchunks = width / chunk
seqlen = len(seq)
maxchar = len(str(seqlen))
s = ["".join(x) for x in grouper(chunk, seq, fillvalue="")]
s = [" ".join(x) for x in grouper(nchunks, s, fillvalue="")]
for a, b in zip(range(1, len(seq), width), s):
b = b.rstrip()
a = str(a).rjust(maxchar, " ")
print >> fw, " ".join((a, b))
def clean(args):
"""
%prog clean fastafile
Remove irregular chars in FASTA seqs.
"""
p = OptionParser(clean.__doc__)
p.add_option("--fancy", default=False, action="store_true",
help="Pretty print the sequence [default: %default]")
p.add_option("--canonical", default=False, action="store_true",
help="Use only acgtnACGTN [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
fw = must_open(opts.outfile, "w")
if opts.fancy:
for header, seq in iter_clean_fasta(fastafile):
print >> fw, ">" + header
fancyprint(fw, seq)
return 0
iterator = iter_canonical_fasta if opts.canonical else iter_clean_fasta
for header, seq in iterator(fastafile):
seq = Seq(seq)
s = SeqRecord(seq, id=header, description="")
SeqIO.write([s], fw, "fasta")
def translate(args):
"""
%prog translate cdsfasta
Translate CDS to proteins. The tricky thing is that sometimes the CDS
represents a partial gene, therefore disrupting the frame of the protein.
Check all three frames to get a valid translation.
"""
from collections import defaultdict
from jcvi.utils.cbook import percentage
transl_tables = [str(x) for x in xrange(1,25)]
p = OptionParser(translate.__doc__)
p.add_option("--ids", default=False, action="store_true",
help="Create .ids file with the complete/partial/gaps "
"label [default: %default]")
p.add_option("--longest", default=False, action="store_true",
help="Find the longest ORF from each input CDS [default: %default]")
p.add_option("--table", default=1, choices=transl_tables,
help="Specify translation table to use [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
cdsfasta, = args
if opts.longest:
cdsfasta = longestorf([cdsfasta])
f = Fasta(cdsfasta, lazy=True)
outfile = opts.outfile
fw = must_open(outfile, "w")
if opts.ids:
idsfile = cdsfasta.rsplit(".", 1)[0] + ".ids"
ids = open(idsfile, "w")
else:
ids = None
five_prime_missing = three_prime_missing = 0
contain_ns = complete = cannot_translate = total = 0
for name, rec in f.iteritems_ordered():
cds = rec.seq
cdslen = len(cds)
peplen = cdslen / 3
total += 1
# Try all three frames
pep = ""
for i in xrange(3):
newcds = cds[i: i + peplen * 3]
newpep = newcds.translate(table=opts.table)
if len(newpep.split("*")[0]) > len(pep.split("*")[0]):
pep = newpep
labels = []
if "*" in pep.rstrip("*"):
logging.error("{0} cannot translate".format(name))
cannot_translate += 1
labels.append("cannot_translate")
contains_start = pep.startswith("M")
contains_stop = pep.endswith("*")
contains_ns = "X" in pep
start_ns = pep.startswith("X")
end_ns = pep.endswith("X")
if not contains_start:
five_prime_missing += 1
labels.append("five_prime_missing")
if not contains_stop:
three_prime_missing += 1
labels.append("three_prime_missing")
if contains_ns:
contain_ns += 1
labels.append("contain_ns")
if contains_start and contains_stop:
complete += 1
labels.append("complete")
if start_ns:
labels.append("start_ns")
if end_ns:
labels.append("end_ns")
if ids:
print >> ids, "\t".join((name, ",".join(labels)))
peprec = SeqRecord(pep, id=name, description=rec.description)
SeqIO.write([peprec], fw, "fasta")
fw.flush()
print >> sys.stderr, "Complete gene models: {0}".\
format(percentage(complete, total))
print >> sys.stderr, "Missing 5`-end: {0}".\
format(percentage(five_prime_missing, total))
print >> sys.stderr, "Missing 3`-end: {0}".\
format(percentage(three_prime_missing, total))
print >> sys.stderr, "Contain Ns: {0}".\
format(percentage(contain_ns, total))
if cannot_translate:
print >> sys.stderr, "Cannot translate: {0}".\
format(percentage(cannot_translate, total))
fw.close()
return cdsfasta, outfile
def filter(args):
"""
%prog filter fastafile 100
Filter the FASTA file to contain records with size >= or <= certain cutoff.
"""
p = OptionParser(filter.__doc__)
p.add_option("--less", default=False, action="store_true",
help="filter the sizes <= certain cutoff [default: >=]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, cutoff = args
try:
cutoff = int(cutoff)
except ValueError:
sys.exit(not p.print_help())
f = Fasta(fastafile, lazy=True)
fw = sys.stdout
for name, rec in f.iteritems_ordered():
if opts.less and len(rec) > cutoff:
continue
if (not opts.less) and len(rec) < cutoff:
continue
SeqIO.write([rec], fw, "fasta")
fw.flush()
def pool(args):
"""
%prog pool fastafiles
Pool a bunch of FASTA files, and add prefix to each record based on
filenames.
"""
    p = OptionParser(pool.__doc__)
    opts, args = p.parse_args(args)
    if len(args) < 1:
sys.exit(not p.print_help())
for fastafile in args:
pf = op.basename(fastafile).split(".")[0].split("_")[0]
prefixopt = "--prefix={0}_".format(pf)
format([fastafile, "stdout", prefixopt])
def ids(args):
"""
%prog ids fastafiles
Generate the FASTA headers without the '>'.
"""
p = OptionParser(ids.__doc__)
p.add_option("--until", default=None,
help="Truncate the name and description at words [default: %default]")
p.add_option("--description", default=False, action="store_true",
help="Generate a second column with description [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
until = opts.until
fw = must_open(opts.outfile, "w")
for row in must_open(args):
if row[0] == ">":
row = row[1:].rstrip()
if until:
row = row.split(until)[0]
atoms = row.split(None, 1)
if opts.description:
outrow = "\t".join(atoms)
else:
outrow = atoms[0]
print >> fw, outrow
fw.close()
def sort(args):
"""
%prog sort fastafile
Sort a list of sequences and output with sorted IDs, etc.
"""
p = OptionParser(sort.__doc__)
p.add_option("--sizes", default=False, action="store_true",
help="Sort by decreasing size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
fastafile, = args
sortedfastafile = fastafile.rsplit(".", 1)[0] + ".sorted.fasta"
f = Fasta(fastafile, index=False)
fw = must_open(sortedfastafile, "w")
if opts.sizes:
# Sort by decreasing size
sortlist = sorted(f.itersizes(), key=lambda x: (-x[1], x[0]))
logging.debug("Sort by size: max: {0}, min: {1}".\
format(sortlist[0], sortlist[-1]))
sortlist = [x for x, s in sortlist]
else:
sortlist = sorted(f.iterkeys())
for key in sortlist:
rec = f[key]
SeqIO.write([rec], fw, "fasta")
logging.debug("Sorted file written to `{0}`.".format(sortedfastafile))
fw.close()
def join(args):
"""
%prog join fastafile [phasefile]
Make AGP file for a bunch of sequences, and add gaps between, and then build
the joined fastafile. This is useful by itself, but with --oo option this
can convert the .oo (BAMBUS output) into AGP and a joined fasta.
Phasefile is optional, but must contain two columns - BAC and phase (0, 1, 2, 3).
"""
from jcvi.formats.agp import OO, Phases, build
from jcvi.formats.sizes import Sizes
p = OptionParser(join.__doc__)
p.add_option("--newid", default=None,
help="New sequence ID [default: `%default`]")
p.add_option("--gapsize", default=100, type="int",
help="Number of N's in between the sequences [default: %default]")
p.add_option("--gaptype", default="contig",
help="Gap type to use in the AGP file [default: %default]")
p.add_option("--evidence", default="",
help="Linkage evidence to report in the AGP file [default: %default]")
p.add_option("--oo", help="Use .oo file generated by bambus [default: %default]")
opts, args = p.parse_args(args)
nargs = len(args)
if nargs not in (1, 2):
sys.exit(not p.print_help())
if nargs == 2:
fastafile, phasefile = args
phases = DictFile(phasefile)
phases = dict((a, Phases[int(b)]) for a, b in phases.items())
else:
fastafile, = args
phases = {}
sizes = Sizes(fastafile)
prefix = fastafile.rsplit(".", 1)[0]
agpfile = prefix + ".agp"
newid = opts.newid
oo = opts.oo
o = OO(oo, sizes.mapping)
if oo:
seen = o.contigs
# The leftover contigs not in the oo file
logging.debug("A total of {0} contigs ({1} in `{2}`)".\
format(len(sizes), len(seen), oo))
for ctg, size in sizes.iter_sizes():
if ctg in seen:
continue
o.add(ctg, ctg, size)
else:
if newid:
for ctg, size in sizes.iter_sizes():
o.add(newid, ctg, size)
else:
for scaffold_number, (ctg, size) in enumerate(sizes.iter_sizes()):
object_id = "scaffold{0:03d}".format(scaffold_number + 1)
o.add(object_id, ctg, size)
fw = open(agpfile, "w")
o.write_AGP(fw, gapsize=opts.gapsize, gaptype=opts.gaptype,
evidence=opts.evidence, phases=phases)
fw.close()
joinedfastafile = prefix + ".joined.fasta"
build([agpfile, fastafile, joinedfastafile])
def summary(args):
"""
%prog summary *.fasta
Report real bases and N's in fastafiles in a tabular report
"""
from jcvi.utils.table import write_csv
p = OptionParser(summary.__doc__)
p.add_option("--suffix", default="Mb",
help="make the base pair counts human readable [default: %default]")
p.add_option("--ids",
help="write the ids that have >= 50% N's [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
idsfile = opts.ids
header = "Seqid Real N's Total %_real".split()
if idsfile:
idsfile = open(idsfile, "w")
nids = 0
data = []
for fastafile in args:
for rec in SeqIO.parse(fastafile, "fasta"):
seqlen = len(rec)
nns = rec.seq.count('n') + rec.seq.count('N')
reals = seqlen - nns
pct = reals * 100. / seqlen
pctreal = "{0:.1f} %".format(pct)
if idsfile and pct < 50:
nids += 1
print >> idsfile, rec.id
data.append((rec.id, reals, nns, seqlen, pctreal))
ids, reals, nns, seqlen, pctreal = zip(*data)
reals = sum(reals)
nns = sum(nns)
seqlen = sum(seqlen)
pctreal = "{0:.1f} %".format(reals * 100. / seqlen)
data.append(("Total", reals, nns, seqlen, pctreal))
write_csv(header, data, sep=" ", filename=opts.outfile)
if idsfile:
logging.debug("A total of {0} ids >= 50% N's written to {1}.".\
format(nids, idsfile.name))
idsfile.close()
return reals, nns, seqlen
def format(args):
"""
%prog format infasta outfasta
Reformat FASTA file and also clean up names.
"""
p = OptionParser(format.__doc__)
p.add_option("--pairs", default=False, action="store_true",
help="Add trailing /1 and /2 for interleaved pairs [default: %default]")
p.add_option("--sequential", default=False, action="store_true",
help="Add sequential IDs [default: %default]")
p.add_option("--sequentialoffset", default=1, type="int",
help="Sequential IDs start at [default: %default]")
p.add_option("--pad0", default=6, type="int",
help="Pad a few zeros in front of sequential [default: %default]")
p.add_option("--gb", default=False, action="store_true",
help="For Genbank ID, get the accession [default: %default]")
p.add_option("--sep", default=None,
help="Split description by certain symbol [default: %default]")
p.add_option("--index", default=0, type="int",
help="Extract i-th field after split with --sep [default: %default]")
p.add_option("--noversion", default=False, action="store_true",
help="Remove the gb trailing version [default: %default]")
p.add_option("--prefix", help="Prepend prefix to sequence ID")
p.add_option("--suffix", help="Append suffix to sequence ID")
p.add_option("--template", default=False, action="store_true",
help="Extract `template=aaa dir=x library=m` to `m-aaa/x` [default: %default]")
p.add_option("--switch", help="Switch ID from two-column file [default: %default]")
p.add_option("--annotation", help="Add functional annotation from "
"two-column file ('ID <--> Annotation') [default: %default]")
p.add_option("--ids", help="Generate ID conversion table [default: %default]")
p.add_option("--upper", default=False, action="store_true",
help="Convert sequence to upper case [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
infasta, outfasta = args
gb = opts.gb
pairs = opts.pairs
prefix = opts.prefix
suffix = opts.suffix
noversion = opts.noversion
sequential = opts.sequential
sequentialoffset = opts.sequentialoffset
sep = opts.sep
idx = opts.index
mapfile = opts.switch
annotfile = opts.annotation
idsfile = opts.ids
idsfile = open(idsfile, "w") if idsfile else None
upper = opts.upper
if mapfile:
mapping = DictFile(mapfile, delimiter="\t")
if annotfile:
annotation = DictFile(annotfile, delimiter="\t")
fw = must_open(outfasta, "w")
fp = SeqIO.parse(must_open(infasta), "fasta")
for i, rec in enumerate(fp):
origid = rec.id
description = rec.description
if sep:
description = description.split(sep)[idx]
rec.id = description
if gb:
# gi|262233616|gb|GU123895.1| Coffea arabica clone BAC
atoms = rec.id.split("|")
if len(atoms) >= 3:
rec.id = atoms[3]
elif len(atoms) == 2:
rec.id = atoms[1]
if pairs:
id = "/1" if (i % 2 == 0) else "/2"
rec.id += id
if noversion:
rec.id = rec.id.rsplit(".", 1)[0]
if sequential:
rec.id = "{0:0{1}d}".format(sequentialoffset, opts.pad0)
sequentialoffset += 1
if opts.template:
template, dir, lib = [x.split("=")[-1] for x in
rec.description.split()[1:4]]
rec.id = "{0}-{1}/{2}".format(lib, template, dir)
if mapfile:
if origid in mapping:
rec.id = mapping[origid]
else:
logging.error("{0} not found in `{1}`. ID unchanged.".\
format(origid, mapfile))
if prefix:
rec.id = prefix + rec.id
if suffix:
rec.id += suffix
rec.description = ""
if annotfile:
rec.description = annotation.get(origid, "") if not mapfile \
else annotation.get(rec.id, "")
if idsfile:
print >> idsfile, "\t".join((origid, rec.id))
if upper:
rec.seq = rec.seq.upper()
SeqIO.write(rec, fw, "fasta")
if idsfile:
logging.debug("Conversion table written to `{0}`.".\
format(idsfile.name))
idsfile.close()
def print_first_difference(arec, brec, ignore_case=False, ignore_N=False,
rc=False, report_match=True):
"""
    Returns the first different nucleotide between two sequences;
    checks both the Plus and Minus strands.
"""
plus_match = _print_first_difference(arec, brec, ignore_case=ignore_case,
ignore_N=ignore_N, report_match=report_match)
if rc and not plus_match:
logging.debug("trying reverse complement of %s" % brec.id)
brec.seq = brec.seq.reverse_complement()
minus_match = _print_first_difference(arec, brec,
ignore_case=ignore_case, ignore_N=ignore_N,
report_match=report_match)
return minus_match
else:
return plus_match
def _print_first_difference(arec, brec, ignore_case=False, ignore_N=False,
report_match=True):
"""
Returns the first different nucleotide in two sequence comparisons
"""
aseq, bseq = arec.seq, brec.seq
asize, bsize = len(aseq), len(bseq)
matched = True
for i, (a, b) in enumerate(izip_longest(aseq, bseq)):
if ignore_case and None not in (a, b):
a, b = a.upper(), b.upper()
if ignore_N and ('N' in (a, b) or 'X' in (a, b)):
continue
if a != b:
matched = False
break
if i + 1 == asize and matched:
if report_match:
print green("Two sequences match")
match = True
else:
print red("Two sequences do not match")
snippet_size = 20 # show the context of the difference
print red("Sequence start to differ at position %d:" % (i + 1))
begin = max(i - snippet_size, 0)
aend = min(i + snippet_size, asize)
bend = min(i + snippet_size, bsize)
print red(aseq[begin:i] + "|" + aseq[i:aend])
print red(bseq[begin:i] + "|" + bseq[i:bend])
match = False
return match
def diff(args):
"""
%prog diff afasta bfasta
print out whether the records in two fasta files are the same
"""
from jcvi.utils.table import banner
p = OptionParser(diff.__doc__)
p.add_option("--ignore_case", default=False, action="store_true",
help="ignore case when comparing sequences [default: %default]")
p.add_option("--ignore_N", default=False, action="store_true",
help="ignore N and X's when comparing sequences [default: %default]")
p.add_option("--ignore_stop", default=False, action="store_true",
help="ignore stop codon when comparing sequences [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="also consider reverse complement [default: %default]")
p.add_option("--quiet", default=False, action="store_true",
help="don't output comparison details [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
afasta, bfasta = args
afastan = len(Fasta(afasta))
bfastan = len(Fasta(bfasta))
if afastan == bfastan:
print green("Two sets contain the same number of sequences ({0}, {1})".\
format(afastan, bfastan))
else:
print red("Two sets contain different number of sequences ({0}, {1})".\
format(afastan, bfastan))
ah = SeqIO.parse(afasta, "fasta")
bh = SeqIO.parse(bfasta, "fasta")
problem_ids = []
for arec, brec in zip(ah, bh):
if opts.ignore_stop:
arec.seq = arec.seq.rstrip("*")
brec.seq = brec.seq.rstrip("*")
asize, bsize = len(arec), len(brec)
if not opts.quiet:
print banner((arec, brec))
if asize == bsize:
print green("Two sequence size match (%d)" % asize)
else:
print red("Two sequence size do not match (%d, %d)" % (asize, bsize))
# print out the first place the two sequences diff
fd = print_first_difference(arec, brec, ignore_case=opts.ignore_case,
ignore_N=opts.ignore_N, rc=opts.rc, report_match=not opts.quiet)
if not fd:
logging.error("Two sets of sequences differ at `{0}`".format(arec.id))
problem_ids.append("\t".join(str(x) for x in (arec.id, asize, bsize,
abs(asize - bsize))))
if problem_ids:
print red("A total of {0} records mismatch.".format(len(problem_ids)))
fw = must_open("Problems.ids", "w")
print >> fw, "\n".join(problem_ids)
def hash_fasta(fastafile, ignore_case=False, ignore_N=False, ignore_stop=False):
"""
Generates MD5 hash of each element within the input multifasta file
Returns a dictionary with the sequence hashes as keys and sequence IDs as values
"""
import re
import hashlib
f = Fasta(fastafile)
logging.debug("Hashing individual elements of {0}".format(fastafile))
hash_dict = {}
for name, rec in f.iteritems_ordered():
seq = re.sub(' ', '', rec.seq.tostring())
if ignore_stop:
seq = seq.rstrip("*")
if ignore_case:
seq = seq.upper()
if ignore_N:
if not all(c.upper() in 'ATGCN' for c in seq):
seq = re.sub('X', '', seq)
else:
seq = re.sub('N', '', seq)
md5_hex = hashlib.md5(seq).hexdigest()
if md5_hex not in hash_dict: hash_dict[md5_hex] = set()
hash_dict[md5_hex].add(name)
return hash_dict
def identical(args):
"""
%prog identical *.fasta
Given multiple fasta files, find all the exactly identical records
based on the computed md5 hexdigest of each sequence.
Output is a N column file (where N = number of input fasta files).
If there are duplicates within a given fasta file, they will all be
listed out in the same row separated by a comma.
Example output:
---------------------
tta1.fsa tta2.fsa
2131 na
3420 na
3836,3847 852
148 890
584 614
623 684
1281 470
3367 na
"""
p = OptionParser(identical.__doc__)
p.add_option("--ignore_case", default=False, action="store_true",
help="ignore case when comparing sequences [default: %default]")
p.add_option("--ignore_N", default=False, action="store_true",
help="ignore N and X's when comparing sequences [default: %default]")
p.add_option("--ignore_stop", default=False, action="store_true",
help="ignore stop codon when comparing sequences [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
dict, setlist = {}, []
for fastafile in args:
dict[fastafile] = hash_fasta(fastafile, ignore_case=opts.ignore_case, ignore_N=opts.ignore_N, \
ignore_stop=opts.ignore_stop)
setlist.append(set(dict[fastafile].keys()))
hashes = set.union(*setlist)
header = [str(x) for x in args]
fw = must_open(opts.outfile, "w")
print >> fw, "\t".join(header)
for hash in hashes:
line = []
for fastafile in args:
if hash in dict[fastafile].keys():
line.append(",".join(dict[fastafile][hash]))
else:
line.append("na")
print >> fw, "\t".join(line)
fw.close()
QUALSUFFIX = ".qual"
def get_qual(fastafile, suffix=QUALSUFFIX, check=True):
"""
Check if current folder contains a qual file associated with the fastafile
"""
qualfile1 = fastafile.rsplit(".", 1)[0] + suffix
qualfile2 = fastafile + suffix
if check:
if op.exists(qualfile1):
logging.debug("qual file `{0}` found".format(qualfile1))
return qualfile1
elif op.exists(qualfile2):
logging.debug("qual file `{0}` found".format(qualfile2))
return qualfile2
else:
logging.warning("qual file not found")
return None
return qualfile1
def some(args):
"""
%prog some fastafile listfile outfastafile
generate a subset of fastafile, based on a list
"""
p = OptionParser(some.__doc__)
p.add_option("--exclude", default=False, action="store_true",
help="Output sequences not in the list file [default: %default]")
p.add_option("--uniprot", default=False, action="store_true",
help="Header is from uniprot [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(p.print_help())
fastafile, listfile, outfastafile = args
outfastahandle = must_open(outfastafile, "w")
qualfile = get_qual(fastafile)
names = set(x.strip() for x in open(listfile))
if qualfile:
outqualfile = outfastafile + ".qual"
outqualhandle = open(outqualfile, "w")
parser = iter_fasta_qual(fastafile, qualfile)
else:
parser = SeqIO.parse(fastafile, "fasta")
num_records = 0
for rec in parser:
name = rec.id
if opts.uniprot:
name = name.split("|")[-1]
if opts.exclude:
if name in names:
continue
else:
if name not in names:
continue
SeqIO.write([rec], outfastahandle, "fasta")
if qualfile:
SeqIO.write([rec], outqualhandle, "qual")
num_records += 1
logging.debug("A total of %d records written to `%s`" % \
(num_records, outfastafile))
def fastq(args):
"""
%prog fastq fastafile
Generate fastqfile by combining fastafile and fastafile.qual.
Also check --qv option to use a default qv score.
"""
from jcvi.formats.fastq import FastqLite
p = OptionParser(fastq.__doc__)
p.add_option("--qv", type="int",
help="Use generic qv value [dafault: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
fastqfile = fastafile.rsplit(".", 1)[0] + ".fastq"
fastqhandle = open(fastqfile, "w")
num_records = 0
if opts.qv is not None:
qv = chr(ord('!') + opts.qv)
logging.debug("QV char '{0}' ({1})".format(qv, opts.qv))
else:
qv = None
if qv:
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
r = FastqLite("@" + name, str(rec.seq).upper(), qv * len(rec.seq))
print >> fastqhandle, r
num_records += 1
else:
qualfile = get_qual(fastafile)
for rec in iter_fasta_qual(fastafile, qualfile):
SeqIO.write([rec], fastqhandle, "fastq")
num_records += 1
fastqhandle.close()
logging.debug("A total of %d records written to `%s`" % \
(num_records, fastqfile))
def pair(args):
"""
%prog pair fastafile
Generate .pairs.fasta and .fragments.fasta by matching records
into the pairs and the rest go to fragments.
"""
p = OptionParser(pair.__doc__)
p.add_option("-d", dest="separator", default=None,
help="separater in the name field to reduce to the same clone " +\
"[e.g. GFNQ33242/1 use /, BOT01-2453H.b1 use .]" +\
"[default: trim until last char]")
p.add_option("-m", dest="matepairs", default=False, action="store_true",
help="generate .matepairs file [often used for Celera Assembler]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
fastafile, = args
qualfile = get_qual(fastafile)
prefix = fastafile.rsplit(".", 1)[0]
pairsfile = prefix + ".pairs.fasta"
fragsfile = prefix + ".frags.fasta"
pairsfw = open(pairsfile, "w")
fragsfw = open(fragsfile, "w")
#TODO: need a class to handle coupled fasta and qual iterating and indexing
if opts.matepairs:
matepairsfile = prefix + ".matepairs"
matepairsfw = open(matepairsfile, "w")
if qualfile:
pairsqualfile = pairsfile + ".qual"
pairsqualhandle = open(pairsqualfile, "w")
fragsqualfile = fragsfile + ".qual"
fragsqualhandle = open(fragsqualfile, "w")
f = Fasta(fastafile)
if qualfile:
q = SeqIO.index(qualfile, "qual")
all_keys = list(f.iterkeys())
all_keys.sort()
sep = opts.separator
if sep:
key_fun = lambda x: x.split(sep, 1)[0]
else:
key_fun = lambda x: x[:-1]
for key, variants in groupby(all_keys, key=key_fun):
variants = list(variants)
paired = (len(variants) == 2)
if paired and opts.matepairs:
print >> matepairsfw, "\t".join(("%s/1" % key, "%s/2" % key))
fw = pairsfw if paired else fragsfw
if qualfile:
qualfw = pairsqualhandle if paired else fragsqualhandle
for i, var in enumerate(variants):
rec = f[var]
if qualfile:
recqual = q[var]
newid = "%s/%d" % (key, i + 1)
rec.id = newid
rec.description = ""
SeqIO.write([rec], fw, "fasta")
if qualfile:
recqual.id = newid
recqual.description = ""
SeqIO.write([recqual], qualfw, "qual")
logging.debug("sequences written to `%s` and `%s`" % \
(pairsfile, fragsfile))
if opts.matepairs:
logging.debug("mates written to `%s`" % matepairsfile)
def pairinplace(args):
"""
%prog pairinplace bulk.fasta
Pair up the records in bulk.fasta by comparing the names for adjacent
records. If they match, print to bulk.pairs.fasta, else print to
bulk.frags.fasta.
"""
from jcvi.utils.iter import pairwise
p = OptionParser(pairinplace.__doc__)
p.add_option("-r", dest="rclip", default=1, type="int",
help="pair ID is derived from rstrip N chars [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
base = op.basename(fastafile).split(".")[0]
frags = base + ".frags.fasta"
pairs = base + ".pairs.fasta"
if fastafile.endswith(".gz"):
frags += ".gz"
pairs += ".gz"
fragsfw = must_open(frags, "w")
pairsfw = must_open(pairs, "w")
N = opts.rclip
    strip_name = lambda x: x[:-N] if N else x
skipflag = False # controls the iterator skip
fastaiter = SeqIO.parse(fastafile, "fasta")
for a, b in pairwise(fastaiter):
aid, bid = [strip_name(x) for x in (a.id, b.id)]
if skipflag:
skipflag = False
continue
if aid == bid:
SeqIO.write([a, b], pairsfw, "fasta")
skipflag = True
else:
SeqIO.write([a], fragsfw, "fasta")
# don't forget the last one, when b is None
if not skipflag:
SeqIO.write([a], fragsfw, "fasta")
logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
def extract(args):
"""
%prog extract fasta query
extract query out of fasta file, query needs to be in the form of
"seqname", or "seqname:start-stop", or "seqname:start-stop:-"
"""
p = OptionParser(extract.__doc__)
p.add_option('--include', default=False, action="store_true",
help="search description line for match [default: %default]")
p.add_option('--exclude', default=False, action="store_true",
help="exclude description that matches [default: %default]")
p.add_option('--bed', default=None,
help="path to bed file to guide extraction by matching seqname")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) == 2:
fastafile, query = args
elif len(args) == 1 and opts.bed:
fastafile, = args
bedaccns = Bed(opts.bed).accns
else:
sys.exit(p.print_help())
if opts.bed:
fw = must_open(opts.outfile, "w")
f = Fasta(fastafile)
for accn in bedaccns:
try:
rec = f[accn]
except:
logging.error("{0} not found in {1}".format(accn, fastafile))
continue
SeqIO.write([rec], fw, "fasta")
return fw.name
atoms = query.split(":")
key = atoms[0]
assert len(atoms) <= 3, "cannot have more than two ':' in your query"
pos = ""
if len(atoms) in (2, 3):
pos = atoms[1]
strand = "+"
if len(atoms) == 3:
strand = atoms[2]
assert strand in ('+', '-'), "strand must be either '+' or '-'"
feature = dict(chr=key)
if "-" in pos:
start, stop = pos.split("-")
try:
start, stop = int(start), int(stop)
except ValueError as e:
logging.error(e)
sys.exit(p.print_help())
feature["start"] = start
feature["stop"] = stop
else:
start, stop = None, None
assert start < stop or None in (start, stop), \
"start must be < stop, you have ({0}, {1})".format(start, stop)
feature["strand"] = strand
include, exclude = opts.include, opts.exclude
# conflicting options, cannot be true at the same time
assert not (include and exclude), "--include and --exclude cannot be "\
"on at the same time"
fw = must_open(opts.outfile, "w")
if include or exclude:
f = Fasta(fastafile, lazy=True)
for k, rec in f.iterdescriptions_ordered():
if include and key not in k:
continue
if exclude and key in k:
continue
seq = Fasta.subseq(rec, start, stop, strand)
newid = rec.id
if start is not None:
newid += ":{0}-{1}:{2}".format(start, stop, strand)
rec = SeqRecord(seq, id=newid, description=k)
SeqIO.write([rec], fw, "fasta")
else:
f = Fasta(fastafile)
try:
seq = f.sequence(feature, asstring=False)
except AssertionError as e:
logging.error(e)
return
rec = SeqRecord(seq, id=query, description="")
SeqIO.write([rec], fw, "fasta")
return fw.name
def _uniq_rec(fastafile, seq=False):
"""
Returns unique records
"""
seen = set()
for rec in SeqIO.parse(fastafile, "fasta"):
name = str(rec.seq) if seq else rec.id
if name in seen:
logging.debug("ignore {0}".format(rec.id))
continue
seen.add(name)
yield rec
def uniq(args):
"""
%prog uniq fasta uniq.fasta
remove fasta records that are the same
"""
p = OptionParser(uniq.__doc__)
p.add_option("--seq", default=False, action="store_true",
help="Uniqify the sequences [default: %default]")
p.add_option("-t", "--trimname", dest="trimname",
action="store_true", default=False,
help="turn on the defline trim to first space [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help())
fastafile, uniqfastafile = args
fw = must_open(uniqfastafile, "w")
seq = opts.seq
for rec in _uniq_rec(fastafile, seq=seq):
if opts.trimname:
rec.description = ""
SeqIO.write([rec], fw, "fasta")
def random(args):
"""
%prog random fasta 100 > random100.fasta
Take number of records randomly from fasta
"""
from random import sample
p = OptionParser(random.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, N = args
N = int(N)
assert N > 0
f = Fasta(fastafile)
fw = must_open("stdout", "w")
for key in sample(f.keys(), N):
rec = f[key]
SeqIO.write([rec], fw, "fasta")
fw.close()
XQUAL = -1000 # default quality for X
NQUAL = 5 # default quality value for N
QUAL = 10 # default quality value
OKQUAL = 15
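# These defaults feed the max_sum-based trimming in trim() below: after subtracting the
# quality cutoff (QUAL), X positions (XQUAL) become hugely negative and are always trimmed,
# N positions (NQUAL) score slightly negative, and ordinary bases (OKQUAL, the assumed
# quality when no .qual file exists) score positive.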
def modify_qual(rec):
qv = rec.letter_annotations['phred_quality']
for i, (s, q) in enumerate(zip(rec.seq, qv)):
if s == 'X' or s == 'x':
qv[i] = XQUAL
        if s == 'N' or s == 'n':
qv[i] = NQUAL
return rec
def iter_fasta_qual(fastafile, qualfile, defaultqual=OKQUAL, modify=False):
"""
used by trim, emits one SeqRecord with quality values in it
"""
fastahandle = SeqIO.parse(fastafile, "fasta")
if qualfile:
qualityhandle = SeqIO.parse(qualfile, "qual")
for rec, rec_qual in zip(fastahandle, qualityhandle):
assert len(rec) == len(rec_qual)
rec.letter_annotations['phred_quality'] = \
rec_qual.letter_annotations['phred_quality']
yield rec if not modify else modify_qual(rec)
else:
logging.warning("assume qual ({0})".format(defaultqual))
for rec in fastahandle:
rec.letter_annotations['phred_quality'] = [defaultqual] * len(rec)
yield rec if not modify else modify_qual(rec)
def write_fasta_qual(rec, fastahandle, qualhandle):
if fastahandle:
SeqIO.write([rec], fastahandle, "fasta")
if qualhandle:
SeqIO.write([rec], qualhandle, "qual")
def trim(args):
"""
%prog trim fasta.screen newfasta
take the screen output from `cross_match` (against a vector db, for
example), then trim the sequences to remove X's. Will also perform quality
trim if fasta.screen.qual is found. The trimming algorithm is based on
finding the subarray that maximize the sum
"""
from jcvi.algorithms.maxsum import max_sum
p = OptionParser(trim.__doc__)
p.add_option("-c", dest="min_length", type="int", default=64,
help="minimum sequence length after trimming")
p.add_option("-s", dest="score", default=QUAL,
help="quality trimming cutoff [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help())
fastafile, newfastafile = args
qualfile = get_qual(fastafile)
newqualfile = get_qual(newfastafile, check=False)
logging.debug("Trim bad sequence from fasta file `%s` to `%s`" % \
(fastafile, newfastafile))
fw = must_open(newfastafile, "w")
fw_qual = open(newqualfile, "w")
dropped = trimmed = 0
for rec in iter_fasta_qual(fastafile, qualfile, modify=True):
qv = [x - opts.score for x in \
rec.letter_annotations["phred_quality"]]
msum, trim_start, trim_end = max_sum(qv)
score = trim_end - trim_start + 1
if score < opts.min_length:
dropped += 1
continue
if score < len(rec):
trimmed += 1
rec = rec[trim_start:trim_end + 1]
write_fasta_qual(rec, fw, fw_qual)
print >>sys.stderr, "A total of %d sequences modified." % trimmed
print >>sys.stderr, "A total of %d sequences dropped (length < %d)." % \
(dropped, opts.min_length)
fw.close()
fw_qual.close()
def sequin(args):
"""
%prog sequin inputfasta
    Generate a gapped fasta format with known gap sizes embedded, suitable for
Sequin submission.
A gapped sequence represents a newer method for describing non-contiguous
sequences, but only requires a single sequence identifier. A gap is
represented by a line that starts with >? and is immediately followed by
either a length (for gaps of known length) or "unk100" for gaps of unknown
length. For example, ">?200". The next sequence segment continues on the
next line, with no separate definition line or identifier. The difference
between a gapped sequence and a segmented sequence is that the gapped
sequence uses a single identifier and can specify known length gaps.
Gapped sequences are preferred over segmented sequences. A sample gapped
sequence file is shown here:
>m_gagei [organism=Mansonia gagei] Mansonia gagei NADH dehydrogenase ...
ATGGAGCATACATATCAATATTCATGGATCATACCGTTTGTGCCACTTCCAATTCCTATTTTAATAGGAA
TTGGACTCCTACTTTTTCCGACGGCAACAAAAAATCTTCGTCGTATGTGGGCTCTTCCCAATATTTTATT
>?200
GGTATAATAACAGTATTATTAGGGGCTACTTTAGCTCTTGC
TCAAAAAGATATTAAGAGGGGTTTAGCCTATTCTACAATGTCCCAACTGGGTTATATGATGTTAGCTCTA
>?unk100
TCAATAAAACTATGGGGTAAAGAAGAACAAAAAATAATTAACAGAAATTTTCGTTTATCTCCTTTATTAA
TATTAACGATGAATAATAATGAGAAGCCATATAGAATTGGTGATAATGTAAAAAAAGGGGCTCTTATTAC
"""
p = OptionParser(sequin.__doc__)
p.add_option("--mingap", dest="mingap", default=100, type="int",
help="The minimum size of a gap to split [default: %default]")
p.add_option("--unk", default=100, type="int",
help="The size for unknown gaps [default: %default]")
p.add_option("--newid", default=None,
help="Use this identifier instead [default: %default]")
p.add_option("--chromosome", default=None,
help="Add [chromosome= ] to FASTA header [default: %default]")
p.add_option("--clone", default=None,
help="Add [clone= ] to FASTA header [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputfasta, = args
unk = opts.unk
outputfasta = inputfasta.rsplit(".", 1)[0] + ".split"
rec = SeqIO.parse(must_open(inputfasta), "fasta").next()
seq = ""
unknowns, knowns = 0, 0
for gap, gap_group in groupby(rec.seq, lambda x: x.upper() == 'N'):
subseq = "".join(gap_group)
if gap:
gap_length = len(subseq)
if gap_length == unk:
subseq = "\n>?unk{0}\n".format(unk)
unknowns += 1
elif gap_length >= opts.mingap:
subseq = "\n>?{0}\n".format(gap_length)
knowns += 1
seq += subseq
fw = must_open(outputfasta, "w")
id = opts.newid or rec.id
fastaheader = ">{0}".format(id)
if opts.chromosome:
fastaheader += " [chromosome={0}]".format(opts.chromosome)
if opts.clone:
fastaheader += " [clone={0}]".format(opts.clone)
print >> fw, fastaheader
print >> fw, seq
fw.close()
logging.debug("Sequin FASTA written to `{0}` (gaps: {1} unknowns, {2} knowns).".\
format(outputfasta, unknowns, knowns))
return outputfasta, unknowns + knowns
def tidy(args):
"""
%prog tidy fastafile
Normalize gap sizes (default 100 N's) and remove small components (less than
100 nucleotides).
"""
p = OptionParser(tidy.__doc__)
p.add_option("--justtrim", default=False, action="store_true",
help="Just trim end Ns, disable other options [default: %default]")
p.add_option("--gapsize", dest="gapsize", default=100, type="int",
help="Set all gaps to the same size [default: %default]")
p.add_option("--minlen", dest="minlen", default=100, type="int",
help="Minimum component size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
gapsize = opts.gapsize
minlen = opts.minlen
tidyfastafile = fastafile.rsplit(".", 1)[0] + ".tidy.fasta"
fw = must_open(tidyfastafile, "w")
normalized_gap = "N" * gapsize
for rec in SeqIO.parse(fastafile, "fasta"):
newseq = ""
dangle_gaps = 0
for gap, seq in groupby(rec.seq, lambda x: x.upper() == 'N'):
if opts.justtrim:
newseq = str(rec.seq)
break
seq = "".join(seq)
seqlen = len(seq)
msg = None
if gap:
nsize = max(gapsize - dangle_gaps, 0)
if seqlen < 10:
if nsize > seqlen:
nsize = seqlen
dangle_gaps += seqlen
else:
if seqlen != gapsize:
msg = "Normalize gap size ({0}) to {1}" \
.format(seqlen, nsize)
dangle_gaps = gapsize
newseq += nsize * 'N'
else:
if seqlen < minlen:
msg = "Discard component ({0})".format(seqlen)
else:
newseq += seq
# Discarding components might cause flank gaps to merge
# should be handled in dangle_gaps, which is only reset when
# seeing an actual sequence
dangle_gaps = 0
if msg:
msg = rec.id + ": " + msg
logging.info(msg)
newseq = newseq.strip('nN')
ngaps = newseq.count(normalized_gap)
rec.seq = Seq(newseq)
SeqIO.write([rec], fw, "fasta")
def gaps(args):
"""
%prog gaps fastafile
Print out a list of gaps in BED format (.gaps.bed).
"""
p = OptionParser(gaps.__doc__)
p.add_option("--mingap", default=100, type="int",
help="The minimum size of a gap to split [default: %default]")
p.add_option("--agp", default=False, action="store_true",
help="Generate AGP file to show components [default: %default]")
p.add_option("--split", default=False, action="store_true",
help="Generate .split.fasta [default: %default]")
p.add_option("--log", default=False, action="store_true",
help="Generate gap positions to .gaps.log [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputfasta, = args
mingap = opts.mingap
prefix = inputfasta.rsplit(".", 1)[0]
bedfile = prefix + ".gaps.bed"
fwbed = open(bedfile, "w")
logging.debug("Write gap locations to `{0}`.".format(bedfile))
if opts.log:
logfile = prefix + ".gaps.log"
fwlog = must_open(logfile, "w")
logging.debug("Write gap locations to `{0}`.".format(logfile))
gapnum = 0
for rec in SeqIO.parse(inputfasta, "fasta"):
allgaps = []
start = 0
object = rec.id
for gap, seq in groupby(rec.seq.upper(), lambda x: x == 'N'):
seq = "".join(seq)
current_length = len(seq)
object_beg = start + 1
object_end = start + current_length
if gap and current_length >= opts.mingap:
allgaps.append((current_length, start))
gapnum += 1
gapname = "gap.{0:05d}".format(gapnum)
print >> fwbed, "\t".join(str(x) for x in (object,
object_beg - 1, object_end, gapname))
start += current_length
if opts.log:
if allgaps:
lengths, starts = zip(*allgaps)
gap_description = ",".join(str(x) for x in lengths)
starts = ",".join(str(x) for x in starts)
else:
gap_description = starts = "no gaps"
print >> fwlog, "\t".join((rec.id, str(len(allgaps)),
gap_description, starts))
fwbed.close()
if opts.agp or opts.split:
from jcvi.formats.sizes import agp
from jcvi.formats.agp import mask
agpfile = prefix + ".gaps.agp"
sizesagpfile = agp([inputfasta])
maskopts = [sizesagpfile, bedfile]
if opts.split:
maskopts += ["--split"]
maskedagpfile = mask(maskopts)
shutil.move(maskedagpfile, agpfile)
os.remove(sizesagpfile)
logging.debug("AGP file written to `{0}`.".format(agpfile))
if opts.split:
from jcvi.formats.agp import build
splitfile = prefix + ".split.fasta"
build([agpfile, inputfasta, splitfile])
if __name__ == '__main__':
main()
Updated formats.fasta.identical() to now print out sequences unique to
all input files
"""
Wrapper for biopython Fasta, add option to parse sequence headers
"""
import sys
import os
import os.path as op
import shutil
import logging
import string
from optparse import OptionParser
from itertools import groupby, izip_longest
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from jcvi.formats.base import BaseFile, DictFile, must_open
from jcvi.formats.bed import Bed
from jcvi.apps.base import ActionDispatcher, debug, set_outfile
from jcvi.apps.console import red, green
debug()
class Fasta (BaseFile, dict):
def __init__(self, filename, index=False, key_function=None, lazy=False):
super(Fasta, self).__init__(filename)
self.key_function = key_function
if lazy: # do not incur the overhead
return
if index:
self.index = SeqIO.index(filename, "fasta",
key_function=key_function)
else:
# SeqIO.to_dict expects a different key_function that operates on
# the SeqRecord instead of the raw string
_key_function = (lambda rec: key_function(rec.description)) if \
key_function else None
self.index = SeqIO.to_dict(SeqIO.parse(must_open(filename), "fasta"),
key_function=_key_function)
def _key_function(self, key):
return self.key_function(key) if self.key_function else key
def __len__(self):
return len(self.index)
def __contains__(self, key):
key = self._key_function(key)
return key in self.index
def __getitem__(self, key):
key = self._key_function(key)
rec = self.index[key]
return rec
def keys(self):
return self.index.keys()
def iterkeys(self):
for k in self.index.iterkeys():
yield k
def iteritems(self):
for k in self.iterkeys():
yield k, self[k]
def itersizes(self):
for k in self.iterkeys():
yield k, len(self[k])
def iteritems_ordered(self):
for rec in SeqIO.parse(must_open(self.filename), "fasta"):
yield rec.name, rec
def iterdescriptions_ordered(self):
for k, rec in self.iteritems_ordered():
yield rec.description, rec
def iterkeys_ordered(self):
for k, rec in self.iteritems_ordered():
yield k
def itersizes_ordered(self):
for k, rec in self.iteritems_ordered():
yield k, len(rec)
@property
def totalsize(self):
return sum(size for k, size in self.itersizes())
@classmethod
def subseq(cls, fasta, start=None, stop=None, strand=None):
"""
Take a Bio.SeqRecord and slice "start:stop" from it, with proper
index and error handling
"""
start = start - 1 if start is not None else 0
stop = stop if stop is not None else len(fasta)
assert start >= 0, "start (%d) must be > 0" % (start + 1)
assert stop <= len(fasta), \
("stop (%d) must be <= " + \
"length of `%s` (%d)") % (stop, fasta.id, len(fasta))
seq = fasta.seq[start:stop]
if strand in (-1, '-1', '-'):
seq = seq.reverse_complement()
return seq
def sequence(self, f, asstring=True):
"""
Emulate brentp's pyfasta/fasta.py sequence() method:
take a feature and use the start/stop or exon_keys to return
the sequence from the associated fasta file:
f: a feature
asstring: if true, return the sequence as a string
: if false, return as a biopython Seq
>>> f = Fasta('tests/data/three_chrs.fasta')
>>> f.sequence({'start':1, 'stop':2, 'strand':1, 'chr': 'chr1'})
'AC'
>>> f.sequence({'start':1, 'stop':2, 'strand': -1, 'chr': 'chr1'})
'GT'
"""
assert 'chr' in f, "`chr` field required"
name = f['chr']
assert name in self, "feature: %s not in `%s`" % \
(f, self.filename)
fasta = self[f['chr']]
seq = Fasta.subseq(fasta,
f.get('start'), f.get('stop'), f.get('strand'))
if asstring:
return str(seq)
return seq
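# A short, hedged illustration of how Fasta.sequence()/subseq() handle
# coordinates: start/stop are 1-based and inclusive, and a '-' (or -1) strand
# returns the reverse complement. It reuses the test file already referenced in
# the doctest above; nothing else here is part of the original module.
def _fasta_sequence_example():
    f = Fasta('tests/data/three_chrs.fasta')
    plus = f.sequence({'chr': 'chr1', 'start': 1, 'stop': 2, 'strand': 1})
    minus = f.sequence({'chr': 'chr1', 'start': 1, 'stop': 2, 'strand': '-'})
    # per the doctest, plus == 'AC' and minus == 'GT' (its reverse complement)
    return plus, minus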
"""
Class derived from https://gist.github.com/933737
Original code written by David Winter (https://github.com/dwinter)
Code written to answer this challenge at Biostar:
http://biostar.stackexchange.com/questions/5902/
(Code includes improvements from Brad Chapman)
"""
class ORFFinder:
"""Find the longest ORF in a given sequence
"seq" is a string, if "start" is not provided any codon can be the start of
an ORF. If multiple ORFs have the longest length, the first one encountered
is reported
"""
def __init__(self, seq, start=[], stop=["TAG", "TAA", "TGA"]):
self.seq = seq.tostring().upper()
self.start = start
self.stop = stop
# strand, frame, start, end, length; start is stored 0-based (shifted to 1-based for display)
self.result = ["+", 0, 0, 0, 0]
self.longest = 0
self.size = len(seq)
def __str__(self):
# Format similar to getorf
strand, frame, start, end, length = self.result
start += 1 # 1-based coordinates
if strand == '-':
start, end = end, start
return "[{0} - {1}]".format(start, end)
@property
def info(self):
strand, frame, start, end, length = self.result
return "\t".join(str(x) for x in (strand, frame, start, end))
def codons(self, frame):
""" A generator that yields DNA in one codon blocks
"frame" counts for 0. This function yields a tuple (triplet, index) with
index relative to the original DNA sequence
"""
start = frame
while start + 3 <= self.size:
yield self.sequence[start : start + 3], start
start += 3
def scan_sequence(self, frame, direction):
""" Search in one reading frame """
orf_start = None
for c, index in self.codons(frame):
if (c not in self.stop and (c in self.start or not self.start)
and orf_start is None):
orf_start = index
elif c in self.stop and orf_start is not None:
self._update_longest(orf_start, index + 3, direction, frame)
orf_start = None
if orf_start is not None:
self._update_longest(orf_start, index + 3, direction, frame)
def _update_longest(self, orf_start, index, direction, frame):
orf_end = index
L = orf_end - orf_start
if L > self.longest:
self.longest = L
self.result = [direction, frame, orf_start, orf_end, L]
def get_longest_orf(self):
dirs = ("+", "-")
for direction in dirs:
self.sequence = self.seq
if direction == "-":
self.sequence = rc(self.sequence)
for frame in xrange(3):
self.scan_sequence(frame, direction)
strand, frame, start, end, length = self.result
size = self.size
if strand == '-':
start, end = size - end, size - start
self.result[2 : 4] = start, end
assert start < end, self.result
orf = self.seq[start : end]
if strand == '-':
orf = rc(orf)
assert len(orf) % 3 == 0
return orf
def rc(s):
_complement = string.maketrans('ATCGatcgNnXx', 'TAGCtagcNnXx')
cs = s.translate(_complement)
return cs[::-1]
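# A hedged usage sketch for ORFFinder, mirroring how longestorf() drives it
# further down. It assumes the legacy Biopython Seq API (Seq.tostring()) that
# ORFFinder.__init__ already relies on; the toy sequence is illustrative only.
def _orffinder_example():
    from Bio.Seq import Seq
    finder = ORFFinder(Seq("CCATGAAATTTGGGTAACC"))
    orf = finder.get_longest_orf()   # scans both strands, frames 0-2
    # finder.result holds [strand, frame, start, end, length]
    return orf, finder.result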
def main():
actions = (
('extract', 'given fasta file and seq id, retrieve the sequence ' + \
'in fasta format'),
('longestorf', 'find longest orf for CDS fasta'),
('translate', 'translate CDS to proteins'),
('summary', "report the real number of bases and N's in fastafiles"),
('uniq', 'remove records that are the same'),
('ids', 'generate a list of headers'),
('format', 'trim accession id to the first space or switch id ' + \
'based on 2-column mapping file'),
('pool', 'pool a bunch of fastafiles together and add prefix'),
('random', 'randomly take some records'),
('diff', 'check if two fasta records contain same information'),
('identical', 'given 2 fasta files, find all exactly identical records'),
('trim', 'given a cross_match screened fasta, trim the sequence'),
('sort', 'sort the records by IDs, sizes, etc.'),
('filter', 'filter the records by size'),
('pair', 'sort paired reads to .pairs, rest to .fragments'),
('pairinplace', 'starting from fragment.fasta, find if ' +\
"adjacent records can form pairs"),
('fastq', 'combine fasta and qual to create fastq file'),
('tidy', 'normalize gap sizes and remove small components in fasta'),
('sequin', 'generate a gapped fasta file for sequin submission'),
('gaps', 'print out a list of gap sizes within sequences'),
('join', 'concatenate a list of seqs and add gaps in between'),
('some', 'include or exclude a list of records (also performs on ' + \
'.qual file if available)'),
('clean', 'remove irregular chars in FASTA seqs'),
('ispcr', 'reformat paired primers into isPcr query format'),
('fromtab', 'convert 2-column sequence file to FASTA format'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
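# Hedged note on how these actions are used: every action takes a plain list of
# command-line style arguments, so each can be invoked either through the
# dispatcher above (e.g. `python fasta.py filter genome.fasta 1000`) or called
# directly from Python, the way translate() calls longestorf([cdsfasta]) further
# down. The file names in these examples are hypothetical.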
def fromtab(args):
"""
%prog fromtab tabfile fastafile
Convert 2-column sequence file to FASTA format. One usage for this is to
generate an `adapters.fasta` for TRIMMOMATIC.
"""
p = OptionParser(fromtab.__doc__)
p.add_option("--sep",
help="Separator in the tabfile [default: %default]")
p.add_option("--replace",
help="Replace spaces in name to char [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tabfile, fastafile = args
sep = opts.sep
replace = opts.replace
fp = must_open(tabfile)
fw = must_open(fastafile, "w")
nseq = 0
for row in fp:
row = row.strip()
if not row or row[0] == '#':
continue
name, seq = row.rsplit(sep, 1)
if replace:
name = name.replace(" ", replace)
print >> fw, ">{0}\n{1}".format(name, seq)
nseq += 1
fw.close()
logging.debug("A total of {0} sequences written to `{1}`.".\
format(nseq, fastafile))
def longestorf(args):
"""
%prog longestorf fastafile
Find longest ORF for each sequence in fastafile.
"""
from jcvi.utils.cbook import percentage
p = OptionParser(longestorf.__doc__)
p.add_option("--ids", action="store_true",
help="Generate table with ORF info [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
pf = fastafile.rsplit(".", 1)[0]
orffile = pf + ".orf.fasta"
idsfile = None
if opts.ids:
idsfile = pf + ".orf.ids"
fwids = open(idsfile, "w")
f = Fasta(fastafile, lazy=True)
fw = must_open(orffile, "w")
before, after = 0, 0
for name, rec in f.iteritems_ordered():
cds = rec.seq
before += len(cds)
# Try all six frames
orf = ORFFinder(cds)
lorf = orf.get_longest_orf()
newcds = Seq(lorf)
after += len(newcds)
newrec = SeqRecord(newcds, id=name, description=rec.description)
SeqIO.write([newrec], fw, "fasta")
if idsfile:
print >> fwids, "\t".join((name, orf.info))
fw.close()
if idsfile:
fwids.close()
logging.debug("Longest ORFs written to `{0}` ({1}).".\
format(orffile, percentage(after, before)))
return orffile
def ispcr(args):
"""
%prog ispcr fastafile
Reformat paired primers into isPcr query format, which is three column
format: name, forward, reverse
"""
from jcvi.utils.iter import grouper
p = OptionParser(ispcr.__doc__)
p.add_option("-r", dest="rclip", default=1, type="int",
help="pair ID is derived from rstrip N chars [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
ispcrfile = fastafile + ".isPcr"
fw = open(ispcrfile, "w")
N = opts.rclip
strip_name = lambda x: x[:-N] if N else x
npairs = 0
fastaiter = SeqIO.parse(fastafile, "fasta")
for a, b in grouper(2, fastaiter):
aid, bid = [strip_name(x) for x in (a.id, b.id)]
assert aid == bid, "Name mismatch {0}".format((aid, bid))
print >> fw, "\t".join((aid, str(a.seq), str(b.seq)))
npairs += 1
fw.close()
logging.debug("A total of {0} pairs written to `{1}`.".\
format(npairs, ispcrfile))
def parse_fasta(infile):
'''
parse a fasta-formatted file and yield (header, sequence) tuples;
the file can contain multiple records.
'''
fp = open(infile)
# keep header
fa_iter = (x[1] for x in groupby(fp, lambda row: row[0] == '>'))
for header in fa_iter:
header = header.next()
if header[0] != '>':
continue
# drop '>'
header = header.strip()[1:]
# stitch the sequence lines together and make into upper case
seq = "".join(s.strip().upper() for s in fa_iter.next())
yield header, seq
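# An illustrative sketch of how parse_fasta() is typically consumed; the file
# name and its contents below are made up purely for demonstration.
def _parse_fasta_example():
    path = "example.fasta"   # hypothetical path
    with open(path, "w") as fh:
        fh.write(">seq1 some description\nacgt\nACGT\n>seq2\nNNNN\n")
    for header, seq in parse_fasta(path):
        # header keeps everything after '>', seq is upper-cased and joined
        print("%s\t%s" % (header, seq))   # e.g. "seq1 some description\tACGTACGT"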
def iter_clean_fasta(fastafile):
import string
for header, seq in parse_fasta(fastafile):
seq = "".join(x for x in seq if x in string.letters or x == '*')
yield header, seq
def iter_canonical_fasta(fastafile):
canonical = "acgtnACGTN"
totalbad = 0
for header, seq in parse_fasta(fastafile):
badcounts = sum(1 for x in seq if x not in canonical)
seq = "".join((x if x in canonical else 'N') for x in seq)
totalbad += badcounts
yield header, seq
logging.debug("Total bad char: {0}".format(totalbad))
def fancyprint(fw, seq, width=60, chunk=10):
from jcvi.utils.iter import grouper
assert width % chunk == 0
nchunks = width / chunk
seqlen = len(seq)
maxchar = len(str(seqlen))
s = ["".join(x) for x in grouper(chunk, seq, fillvalue="")]
s = [" ".join(x) for x in grouper(nchunks, s, fillvalue="")]
for a, b in zip(range(1, len(seq), width), s):
b = b.rstrip()
a = str(a).rjust(maxchar, " ")
print >> fw, " ".join((a, b))
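# A small, hedged example of the layout fancyprint() produces under the Python 2
# print semantics this module uses: a right-aligned position in the left margin,
# then `width` bases per line split into blocks of `chunk` characters.
def _fancyprint_example():
    import sys
    fancyprint(sys.stdout, "ACGT" * 20, width=40, chunk=10)
    # prints:
    #  1 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
    # 41 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT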
def clean(args):
"""
%prog clean fastafile
Remove irregular chars in FASTA seqs.
"""
p = OptionParser(clean.__doc__)
p.add_option("--fancy", default=False, action="store_true",
help="Pretty print the sequence [default: %default]")
p.add_option("--canonical", default=False, action="store_true",
help="Use only acgtnACGTN [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
fw = must_open(opts.outfile, "w")
if opts.fancy:
for header, seq in iter_clean_fasta(fastafile):
print >> fw, ">" + header
fancyprint(fw, seq)
return 0
iterator = iter_canonical_fasta if opts.canonical else iter_clean_fasta
for header, seq in iterator(fastafile):
seq = Seq(seq)
s = SeqRecord(seq, id=header, description="")
SeqIO.write([s], fw, "fasta")
def translate(args):
"""
%prog translate cdsfasta
Translate CDS to proteins. The tricky thing is that sometimes the CDS
represents a partial gene, therefore disrupting the frame of the protein.
Check all three frames to get a valid translation.
"""
from collections import defaultdict
from jcvi.utils.cbook import percentage
transl_tables = [str(x) for x in xrange(1,25)]
p = OptionParser(translate.__doc__)
p.add_option("--ids", default=False, action="store_true",
help="Create .ids file with the complete/partial/gaps "
"label [default: %default]")
p.add_option("--longest", default=False, action="store_true",
help="Find the longest ORF from each input CDS [default: %default]")
p.add_option("--table", default=1, choices=transl_tables,
help="Specify translation table to use [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
cdsfasta, = args
if opts.longest:
cdsfasta = longestorf([cdsfasta])
f = Fasta(cdsfasta, lazy=True)
outfile = opts.outfile
fw = must_open(outfile, "w")
if opts.ids:
idsfile = cdsfasta.rsplit(".", 1)[0] + ".ids"
ids = open(idsfile, "w")
else:
ids = None
five_prime_missing = three_prime_missing = 0
contain_ns = complete = cannot_translate = total = 0
for name, rec in f.iteritems_ordered():
cds = rec.seq
cdslen = len(cds)
peplen = cdslen / 3
total += 1
# Try all three frames
pep = ""
for i in xrange(3):
newcds = cds[i: i + peplen * 3]
newpep = newcds.translate(table=opts.table)
if len(newpep.split("*")[0]) > len(pep.split("*")[0]):
pep = newpep
labels = []
if "*" in pep.rstrip("*"):
logging.error("{0} cannot translate".format(name))
cannot_translate += 1
labels.append("cannot_translate")
contains_start = pep.startswith("M")
contains_stop = pep.endswith("*")
contains_ns = "X" in pep
start_ns = pep.startswith("X")
end_ns = pep.endswith("X")
if not contains_start:
five_prime_missing += 1
labels.append("five_prime_missing")
if not contains_stop:
three_prime_missing += 1
labels.append("three_prime_missing")
if contains_ns:
contain_ns += 1
labels.append("contain_ns")
if contains_start and contains_stop:
complete += 1
labels.append("complete")
if start_ns:
labels.append("start_ns")
if end_ns:
labels.append("end_ns")
if ids:
print >> ids, "\t".join((name, ",".join(labels)))
peprec = SeqRecord(pep, id=name, description=rec.description)
SeqIO.write([peprec], fw, "fasta")
fw.flush()
print >> sys.stderr, "Complete gene models: {0}".\
format(percentage(complete, total))
print >> sys.stderr, "Missing 5`-end: {0}".\
format(percentage(five_prime_missing, total))
print >> sys.stderr, "Missing 3`-end: {0}".\
format(percentage(three_prime_missing, total))
print >> sys.stderr, "Contain Ns: {0}".\
format(percentage(contain_ns, total))
if cannot_translate:
print >> sys.stderr, "Cannot translate: {0}".\
format(percentage(cannot_translate, total))
fw.close()
return cdsfasta, outfile
def filter(args):
"""
%prog filter fastafile 100
Filter the FASTA file to contain records with size >= or <= certain cutoff.
"""
p = OptionParser(filter.__doc__)
p.add_option("--less", default=False, action="store_true",
help="filter the sizes <= certain cutoff [default: >=]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, cutoff = args
try:
cutoff = int(cutoff)
except ValueError:
sys.exit(not p.print_help())
f = Fasta(fastafile, lazy=True)
fw = sys.stdout
for name, rec in f.iteritems_ordered():
if opts.less and len(rec) > cutoff:
continue
if (not opts.less) and len(rec) < cutoff:
continue
SeqIO.write([rec], fw, "fasta")
fw.flush()
def pool(args):
"""
%prog pool fastafiles
Pool a bunch of FASTA files, and add prefix to each record based on
filenames.
"""
p = OptionParser(pool.__doc__)
if len(args) < 1:
sys.exit(not p.print_help())
for fastafile in args:
pf = op.basename(fastafile).split(".")[0].split("_")[0]
prefixopt = "--prefix={0}_".format(pf)
format([fastafile, "stdout", prefixopt])
def ids(args):
"""
%prog ids fastafiles
Generate the FASTA headers without the '>'.
"""
p = OptionParser(ids.__doc__)
p.add_option("--until", default=None,
help="Truncate the name and description at words [default: %default]")
p.add_option("--description", default=False, action="store_true",
help="Generate a second column with description [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
until = opts.until
fw = must_open(opts.outfile, "w")
for row in must_open(args):
if row[0] == ">":
row = row[1:].rstrip()
if until:
row = row.split(until)[0]
atoms = row.split(None, 1)
if opts.description:
outrow = "\t".join(atoms)
else:
outrow = atoms[0]
print >> fw, outrow
fw.close()
def sort(args):
"""
%prog sort fastafile
Sort a list of sequences and output with sorted IDs, etc.
"""
p = OptionParser(sort.__doc__)
p.add_option("--sizes", default=False, action="store_true",
help="Sort by decreasing size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
fastafile, = args
sortedfastafile = fastafile.rsplit(".", 1)[0] + ".sorted.fasta"
f = Fasta(fastafile, index=False)
fw = must_open(sortedfastafile, "w")
if opts.sizes:
# Sort by decreasing size
sortlist = sorted(f.itersizes(), key=lambda x: (-x[1], x[0]))
logging.debug("Sort by size: max: {0}, min: {1}".\
format(sortlist[0], sortlist[-1]))
sortlist = [x for x, s in sortlist]
else:
sortlist = sorted(f.iterkeys())
for key in sortlist:
rec = f[key]
SeqIO.write([rec], fw, "fasta")
logging.debug("Sorted file written to `{0}`.".format(sortedfastafile))
fw.close()
def join(args):
"""
%prog join fastafile [phasefile]
Make AGP file for a bunch of sequences, and add gaps between, and then build
the joined fastafile. This is useful by itself, but with --oo option this
can convert the .oo (BAMBUS output) into AGP and a joined fasta.
Phasefile is optional, but must contain two columns - BAC and phase (0, 1, 2, 3).
"""
from jcvi.formats.agp import OO, Phases, build
from jcvi.formats.sizes import Sizes
p = OptionParser(join.__doc__)
p.add_option("--newid", default=None,
help="New sequence ID [default: `%default`]")
p.add_option("--gapsize", default=100, type="int",
help="Number of N's in between the sequences [default: %default]")
p.add_option("--gaptype", default="contig",
help="Gap type to use in the AGP file [default: %default]")
p.add_option("--evidence", default="",
help="Linkage evidence to report in the AGP file [default: %default]")
p.add_option("--oo", help="Use .oo file generated by bambus [default: %default]")
opts, args = p.parse_args(args)
nargs = len(args)
if nargs not in (1, 2):
sys.exit(not p.print_help())
if nargs == 2:
fastafile, phasefile = args
phases = DictFile(phasefile)
phases = dict((a, Phases[int(b)]) for a, b in phases.items())
else:
fastafile, = args
phases = {}
sizes = Sizes(fastafile)
prefix = fastafile.rsplit(".", 1)[0]
agpfile = prefix + ".agp"
newid = opts.newid
oo = opts.oo
o = OO(oo, sizes.mapping)
if oo:
seen = o.contigs
# The leftover contigs not in the oo file
logging.debug("A total of {0} contigs ({1} in `{2}`)".\
format(len(sizes), len(seen), oo))
for ctg, size in sizes.iter_sizes():
if ctg in seen:
continue
o.add(ctg, ctg, size)
else:
if newid:
for ctg, size in sizes.iter_sizes():
o.add(newid, ctg, size)
else:
for scaffold_number, (ctg, size) in enumerate(sizes.iter_sizes()):
object_id = "scaffold{0:03d}".format(scaffold_number + 1)
o.add(object_id, ctg, size)
fw = open(agpfile, "w")
o.write_AGP(fw, gapsize=opts.gapsize, gaptype=opts.gaptype,
evidence=opts.evidence, phases=phases)
fw.close()
joinedfastafile = prefix + ".joined.fasta"
build([agpfile, fastafile, joinedfastafile])
def summary(args):
"""
%prog summary *.fasta
Report real bases and N's in fastafiles in a tabular report
"""
from jcvi.utils.table import write_csv
p = OptionParser(summary.__doc__)
p.add_option("--suffix", default="Mb",
help="make the base pair counts human readable [default: %default]")
p.add_option("--ids",
help="write the ids that have >= 50% N's [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
idsfile = opts.ids
header = "Seqid Real N's Total %_real".split()
if idsfile:
idsfile = open(idsfile, "w")
nids = 0
data = []
for fastafile in args:
for rec in SeqIO.parse(fastafile, "fasta"):
seqlen = len(rec)
nns = rec.seq.count('n') + rec.seq.count('N')
reals = seqlen - nns
pct = reals * 100. / seqlen
pctreal = "{0:.1f} %".format(pct)
if idsfile and pct < 50:
nids += 1
print >> idsfile, rec.id
data.append((rec.id, reals, nns, seqlen, pctreal))
ids, reals, nns, seqlen, pctreal = zip(*data)
reals = sum(reals)
nns = sum(nns)
seqlen = sum(seqlen)
pctreal = "{0:.1f} %".format(reals * 100. / seqlen)
data.append(("Total", reals, nns, seqlen, pctreal))
write_csv(header, data, sep=" ", filename=opts.outfile)
if idsfile:
logging.debug("A total of {0} ids >= 50% N's written to {1}.".\
format(nids, idsfile.name))
idsfile.close()
return reals, nns, seqlen
def format(args):
"""
%prog format infasta outfasta
Reformat FASTA file and also clean up names.
"""
p = OptionParser(format.__doc__)
p.add_option("--pairs", default=False, action="store_true",
help="Add trailing /1 and /2 for interleaved pairs [default: %default]")
p.add_option("--sequential", default=False, action="store_true",
help="Add sequential IDs [default: %default]")
p.add_option("--sequentialoffset", default=1, type="int",
help="Sequential IDs start at [default: %default]")
p.add_option("--pad0", default=6, type="int",
help="Pad a few zeros in front of sequential [default: %default]")
p.add_option("--gb", default=False, action="store_true",
help="For Genbank ID, get the accession [default: %default]")
p.add_option("--sep", default=None,
help="Split description by certain symbol [default: %default]")
p.add_option("--index", default=0, type="int",
help="Extract i-th field after split with --sep [default: %default]")
p.add_option("--noversion", default=False, action="store_true",
help="Remove the gb trailing version [default: %default]")
p.add_option("--prefix", help="Prepend prefix to sequence ID")
p.add_option("--suffix", help="Append suffix to sequence ID")
p.add_option("--template", default=False, action="store_true",
help="Extract `template=aaa dir=x library=m` to `m-aaa/x` [default: %default]")
p.add_option("--switch", help="Switch ID from two-column file [default: %default]")
p.add_option("--annotation", help="Add functional annotation from "
"two-column file ('ID <--> Annotation') [default: %default]")
p.add_option("--ids", help="Generate ID conversion table [default: %default]")
p.add_option("--upper", default=False, action="store_true",
help="Convert sequence to upper case [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
infasta, outfasta = args
gb = opts.gb
pairs = opts.pairs
prefix = opts.prefix
suffix = opts.suffix
noversion = opts.noversion
sequential = opts.sequential
sequentialoffset = opts.sequentialoffset
sep = opts.sep
idx = opts.index
mapfile = opts.switch
annotfile = opts.annotation
idsfile = opts.ids
idsfile = open(idsfile, "w") if idsfile else None
upper = opts.upper
if mapfile:
mapping = DictFile(mapfile, delimiter="\t")
if annotfile:
annotation = DictFile(annotfile, delimiter="\t")
fw = must_open(outfasta, "w")
fp = SeqIO.parse(must_open(infasta), "fasta")
for i, rec in enumerate(fp):
origid = rec.id
description = rec.description
if sep:
description = description.split(sep)[idx]
rec.id = description
if gb:
# gi|262233616|gb|GU123895.1| Coffea arabica clone BAC
atoms = rec.id.split("|")
if len(atoms) >= 3:
rec.id = atoms[3]
elif len(atoms) == 2:
rec.id = atoms[1]
if pairs:
id = "/1" if (i % 2 == 0) else "/2"
rec.id += id
if noversion:
rec.id = rec.id.rsplit(".", 1)[0]
if sequential:
rec.id = "{0:0{1}d}".format(sequentialoffset, opts.pad0)
sequentialoffset += 1
if opts.template:
template, dir, lib = [x.split("=")[-1] for x in
rec.description.split()[1:4]]
rec.id = "{0}-{1}/{2}".format(lib, template, dir)
if mapfile:
if origid in mapping:
rec.id = mapping[origid]
else:
logging.error("{0} not found in `{1}`. ID unchanged.".\
format(origid, mapfile))
if prefix:
rec.id = prefix + rec.id
if suffix:
rec.id += suffix
rec.description = ""
if annotfile:
rec.description = annotation.get(origid, "") if not mapfile \
else annotation.get(rec.id, "")
if idsfile:
print >> idsfile, "\t".join((origid, rec.id))
if upper:
rec.seq = rec.seq.upper()
SeqIO.write(rec, fw, "fasta")
if idsfile:
logging.debug("Conversion table written to `{0}`.".\
format(idsfile.name))
idsfile.close()
def print_first_difference(arec, brec, ignore_case=False, ignore_N=False,
rc=False, report_match=True):
"""
Returns the first different nucleotide in two sequence comparisons
runs both Plus and Minus strand
"""
plus_match = _print_first_difference(arec, brec, ignore_case=ignore_case,
ignore_N=ignore_N, report_match=report_match)
if rc and not plus_match:
logging.debug("trying reverse complement of %s" % brec.id)
brec.seq = brec.seq.reverse_complement()
minus_match = _print_first_difference(arec, brec,
ignore_case=ignore_case, ignore_N=ignore_N,
report_match=report_match)
return minus_match
else:
return plus_match
def _print_first_difference(arec, brec, ignore_case=False, ignore_N=False,
report_match=True):
"""
Returns the first different nucleotide in two sequence comparisons
"""
aseq, bseq = arec.seq, brec.seq
asize, bsize = len(aseq), len(bseq)
matched = True
for i, (a, b) in enumerate(izip_longest(aseq, bseq)):
if ignore_case and None not in (a, b):
a, b = a.upper(), b.upper()
if ignore_N and ('N' in (a, b) or 'X' in (a, b)):
continue
if a != b:
matched = False
break
if i + 1 == asize and matched:
if report_match:
print green("Two sequences match")
match = True
else:
print red("Two sequences do not match")
snippet_size = 20 # show the context of the difference
print red("Sequences start to differ at position %d:" % (i + 1))
begin = max(i - snippet_size, 0)
aend = min(i + snippet_size, asize)
bend = min(i + snippet_size, bsize)
print red(aseq[begin:i] + "|" + aseq[i:aend])
print red(bseq[begin:i] + "|" + bseq[i:bend])
match = False
return match
def diff(args):
"""
%prog diff afasta bfasta
print out whether the records in two fasta files are the same
"""
from jcvi.utils.table import banner
p = OptionParser(diff.__doc__)
p.add_option("--ignore_case", default=False, action="store_true",
help="ignore case when comparing sequences [default: %default]")
p.add_option("--ignore_N", default=False, action="store_true",
help="ignore N and X's when comparing sequences [default: %default]")
p.add_option("--ignore_stop", default=False, action="store_true",
help="ignore stop codon when comparing sequences [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="also consider reverse complement [default: %default]")
p.add_option("--quiet", default=False, action="store_true",
help="don't output comparison details [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
afasta, bfasta = args
afastan = len(Fasta(afasta))
bfastan = len(Fasta(bfasta))
if afastan == bfastan:
print green("Two sets contain the same number of sequences ({0}, {1})".\
format(afastan, bfastan))
else:
print red("Two sets contain different number of sequences ({0}, {1})".\
format(afastan, bfastan))
ah = SeqIO.parse(afasta, "fasta")
bh = SeqIO.parse(bfasta, "fasta")
problem_ids = []
for arec, brec in zip(ah, bh):
if opts.ignore_stop:
arec.seq = arec.seq.rstrip("*")
brec.seq = brec.seq.rstrip("*")
asize, bsize = len(arec), len(brec)
if not opts.quiet:
print banner((arec, brec))
if asize == bsize:
print green("Two sequence sizes match (%d)" % asize)
else:
print red("Two sequence sizes do not match (%d, %d)" % (asize, bsize))
# print out the first place the two sequences diff
fd = print_first_difference(arec, brec, ignore_case=opts.ignore_case,
ignore_N=opts.ignore_N, rc=opts.rc, report_match=not opts.quiet)
if not fd:
logging.error("Two sets of sequences differ at `{0}`".format(arec.id))
problem_ids.append("\t".join(str(x) for x in (arec.id, asize, bsize,
abs(asize - bsize))))
if problem_ids:
print red("A total of {0} records mismatch.".format(len(problem_ids)))
fw = must_open("Problems.ids", "w")
print >> fw, "\n".join(problem_ids)
def hash_fasta(fastafile, ignore_case=False, ignore_N=False, ignore_stop=False):
"""
Generates MD5 hash of each element within the input multifasta file
Returns a dictionary with the sequence hashes as keys and sequence IDs as values
"""
import re
import hashlib
f = Fasta(fastafile)
logging.debug("Hashing individual elements of {0}".format(fastafile))
hash_dict = {}
for name, rec in f.iteritems_ordered():
seq = re.sub(' ', '', rec.seq.tostring())
orig_seq = seq
if ignore_stop:
seq = seq.rstrip("*")
if ignore_case:
seq = seq.upper()
if ignore_N:
if not all(c.upper() in 'ATGCN' for c in seq):
seq = re.sub('X', '', seq)
else:
seq = re.sub('N', '', seq)
md5_hex = hashlib.md5(seq).hexdigest()
if md5_hex not in hash_dict.keys():
hash_dict[md5_hex] = {}
hash_dict[md5_hex]['names'] = set()
hash_dict[md5_hex]['names'].add(name)
hash_dict[md5_hex]['seq'] = orig_seq
return hash_dict
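# A minimal sketch of the hashing idea behind hash_fasta()/identical():
# identical sequence strings collapse onto the same md5 hexdigest, so the
# digest can act as a shared key across several FASTA files. Plain Python 2
# strings stand in for SeqRecords here; under Python 3 the strings would need
# to be encoded before hashing.
def _md5_bucket_example():
    import hashlib
    seqs = {"t1": "ACGTACGT", "t2": "ACGTACGT", "t3": "ACGTTTTT"}
    buckets = {}
    for name, seq in seqs.items():
        buckets.setdefault(hashlib.md5(seq).hexdigest(), set()).add(name)
    # t1 and t2 share a bucket; t3 sits alone
    return buckets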
def identical(args):
"""
%prog identical *.fasta
Given multiple fasta files, find all the exactly identical records
based on the computed md5 hexdigest of each sequence.
Output is an N + 1 column file (where N = number of input fasta files).
If there are duplicates within a given fasta file, they will all be
listed out in the same row separated by a comma.
Example output:
---------------------------
tta1.fsa tta2.fsa
t0 2131 na
t1 3420 na
t2 3836,3847 852
t3 148 890
t4 584 614
t5 623 684
t6 1281 470
t7 3367 na
"""
p = OptionParser(identical.__doc__)
p.add_option("--ignore_case", default=False, action="store_true",
help="ignore case when comparing sequences [default: %default]")
p.add_option("--ignore_N", default=False, action="store_true",
help="ignore N and X's when comparing sequences [default: %default]")
p.add_option("--ignore_stop", default=False, action="store_true",
help="ignore stop codon when comparing sequences [default: %default]")
p.add_option("--output_uniq", default=False, action="store_true",
help="output uniq sequences in FASTA format" + \
" [default: %default]")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
d, setlist, uniq = {}, [], {}
for fastafile in args:
pf = fastafile.rsplit(".", 1)[0]
d[pf] = hash_fasta(fastafile, ignore_case=opts.ignore_case, ignore_N=opts.ignore_N, \
ignore_stop=opts.ignore_stop)
setlist.append(set(d[pf].keys()))
hashes = set.union(*setlist)
fw = must_open(opts.outfile, "w")
if opts.output_uniq:
uniqfile = "_".join(d.keys()) + ".uniq.fasta"
uniqfw = must_open(uniqfile, "w")
header = "\t".join(str(x) for x in (args))
print >> fw, "\t".join(str(x) for x in ("", header))
for idx, md5_hex in enumerate(hashes):
if opts.output_uniq and md5_hex not in uniq.keys():
uniq[md5_hex] = {}
uniq[md5_hex]['count'] = 0
line = []
line.append("t{0}".format(idx))
for fastafile in d.keys():
if md5_hex in d[fastafile].keys():
line.append(",".join(d[fastafile][md5_hex]['names']))
if opts.output_uniq:
uniq[md5_hex]['count'] += len(d[fastafile][md5_hex]['names'])
uniq[md5_hex]['seq'] = d[fastafile][md5_hex]['seq']
else:
line.append("na")
print >> fw, "\t".join(line)
if opts.output_uniq:
seqid = "\t".join(str(x) for x in ("t{0}".format(idx), uniq[md5_hex]['count']))
rec = SeqRecord(Seq(uniq[md5_hex]['seq']), id=seqid, description="")
SeqIO.write([rec], uniqfw, "fasta")
fw.close()
if opts.output_uniq:
logging.debug("Uniq sequences written to `{0}`".format(uniqfile))
uniqfw.close()
QUALSUFFIX = ".qual"
def get_qual(fastafile, suffix=QUALSUFFIX, check=True):
"""
Check if current folder contains a qual file associated with the fastafile
"""
qualfile1 = fastafile.rsplit(".", 1)[0] + suffix
qualfile2 = fastafile + suffix
if check:
if op.exists(qualfile1):
logging.debug("qual file `{0}` found".format(qualfile1))
return qualfile1
elif op.exists(qualfile2):
logging.debug("qual file `{0}` found".format(qualfile2))
return qualfile2
else:
logging.warning("qual file not found")
return None
return qualfile1
def some(args):
"""
%prog some fastafile listfile outfastafile
generate a subset of fastafile, based on a list
"""
p = OptionParser(some.__doc__)
p.add_option("--exclude", default=False, action="store_true",
help="Output sequences not in the list file [default: %default]")
p.add_option("--uniprot", default=False, action="store_true",
help="Header is from uniprot [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(p.print_help())
fastafile, listfile, outfastafile = args
outfastahandle = must_open(outfastafile, "w")
qualfile = get_qual(fastafile)
names = set(x.strip() for x in open(listfile))
if qualfile:
outqualfile = outfastafile + ".qual"
outqualhandle = open(outqualfile, "w")
parser = iter_fasta_qual(fastafile, qualfile)
else:
parser = SeqIO.parse(fastafile, "fasta")
num_records = 0
for rec in parser:
name = rec.id
if opts.uniprot:
name = name.split("|")[-1]
if opts.exclude:
if name in names:
continue
else:
if name not in names:
continue
SeqIO.write([rec], outfastahandle, "fasta")
if qualfile:
SeqIO.write([rec], outqualhandle, "qual")
num_records += 1
logging.debug("A total of %d records written to `%s`" % \
(num_records, outfastafile))
def fastq(args):
"""
%prog fastq fastafile
Generate fastqfile by combining fastafile and fastafile.qual.
Also check --qv option to use a default qv score.
"""
from jcvi.formats.fastq import FastqLite
p = OptionParser(fastq.__doc__)
p.add_option("--qv", type="int",
help="Use generic qv value [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
fastqfile = fastafile.rsplit(".", 1)[0] + ".fastq"
fastqhandle = open(fastqfile, "w")
num_records = 0
if opts.qv is not None:
qv = chr(ord('!') + opts.qv)
logging.debug("QV char '{0}' ({1})".format(qv, opts.qv))
else:
qv = None
if qv:
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
r = FastqLite("@" + name, str(rec.seq).upper(), qv * len(rec.seq))
print >> fastqhandle, r
num_records += 1
else:
qualfile = get_qual(fastafile)
for rec in iter_fasta_qual(fastafile, qualfile):
SeqIO.write([rec], fastqhandle, "fastq")
num_records += 1
fastqhandle.close()
logging.debug("A total of %d records written to `%s`" % \
(num_records, fastqfile))
def pair(args):
"""
%prog pair fastafile
Generate .pairs.fasta and .frags.fasta by matching records
into pairs; the rest go to fragments.
"""
p = OptionParser(pair.__doc__)
p.add_option("-d", dest="separator", default=None,
help="separator in the name field to reduce to the same clone " +\
"[e.g. GFNQ33242/1 use /, BOT01-2453H.b1 use .]" +\
"[default: trim until last char]")
p.add_option("-m", dest="matepairs", default=False, action="store_true",
help="generate .matepairs file [often used for Celera Assembler]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
fastafile, = args
qualfile = get_qual(fastafile)
prefix = fastafile.rsplit(".", 1)[0]
pairsfile = prefix + ".pairs.fasta"
fragsfile = prefix + ".frags.fasta"
pairsfw = open(pairsfile, "w")
fragsfw = open(fragsfile, "w")
#TODO: need a class to handle coupled fasta and qual iterating and indexing
if opts.matepairs:
matepairsfile = prefix + ".matepairs"
matepairsfw = open(matepairsfile, "w")
if qualfile:
pairsqualfile = pairsfile + ".qual"
pairsqualhandle = open(pairsqualfile, "w")
fragsqualfile = fragsfile + ".qual"
fragsqualhandle = open(fragsqualfile, "w")
f = Fasta(fastafile)
if qualfile:
q = SeqIO.index(qualfile, "qual")
all_keys = list(f.iterkeys())
all_keys.sort()
sep = opts.separator
if sep:
key_fun = lambda x: x.split(sep, 1)[0]
else:
key_fun = lambda x: x[:-1]
for key, variants in groupby(all_keys, key=key_fun):
variants = list(variants)
paired = (len(variants) == 2)
if paired and opts.matepairs:
print >> matepairsfw, "\t".join(("%s/1" % key, "%s/2" % key))
fw = pairsfw if paired else fragsfw
if qualfile:
qualfw = pairsqualhandle if paired else fragsqualhandle
for i, var in enumerate(variants):
rec = f[var]
if qualfile:
recqual = q[var]
newid = "%s/%d" % (key, i + 1)
rec.id = newid
rec.description = ""
SeqIO.write([rec], fw, "fasta")
if qualfile:
recqual.id = newid
recqual.description = ""
SeqIO.write([recqual], qualfw, "qual")
logging.debug("sequences written to `%s` and `%s`" % \
(pairsfile, fragsfile))
if opts.matepairs:
logging.debug("mates written to `%s`" % matepairsfile)
def pairinplace(args):
"""
%prog pairinplace bulk.fasta
Pair up the records in bulk.fasta by comparing the names for adjacent
records. If they match, print to bulk.pairs.fasta, else print to
bulk.frags.fasta.
"""
from jcvi.utils.iter import pairwise
p = OptionParser(pairinplace.__doc__)
p.add_option("-r", dest="rclip", default=1, type="int",
help="pair ID is derived from rstrip N chars [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
base = op.basename(fastafile).split(".")[0]
frags = base + ".frags.fasta"
pairs = base + ".pairs.fasta"
if fastafile.endswith(".gz"):
frags += ".gz"
pairs += ".gz"
fragsfw = must_open(frags, "w")
pairsfw = must_open(pairs, "w")
N = opts.rclip
strip_name = lambda x: x[:-N] if N else x
skipflag = False # controls the iterator skip
fastaiter = SeqIO.parse(fastafile, "fasta")
for a, b in pairwise(fastaiter):
aid, bid = [strip_name(x) for x in (a.id, b.id)]
if skipflag:
skipflag = False
continue
if aid == bid:
SeqIO.write([a, b], pairsfw, "fasta")
skipflag = True
else:
SeqIO.write([a], fragsfw, "fasta")
# don't forget the last one, when b is None
if not skipflag:
SeqIO.write([a], fragsfw, "fasta")
logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
def extract(args):
"""
%prog extract fasta query
extract query out of fasta file, query needs to be in the form of
"seqname", or "seqname:start-stop", or "seqname:start-stop:-"
"""
p = OptionParser(extract.__doc__)
p.add_option('--include', default=False, action="store_true",
help="search description line for match [default: %default]")
p.add_option('--exclude', default=False, action="store_true",
help="exclude description that matches [default: %default]")
p.add_option('--bed', default=None,
help="path to bed file to guide extraction by matching seqname")
set_outfile(p)
opts, args = p.parse_args(args)
if len(args) == 2:
fastafile, query = args
elif len(args) == 1 and opts.bed:
fastafile, = args
bedaccns = Bed(opts.bed).accns
else:
sys.exit(p.print_help())
if opts.bed:
fw = must_open(opts.outfile, "w")
f = Fasta(fastafile)
for accn in bedaccns:
try:
rec = f[accn]
except:
logging.error("{0} not found in {1}".format(accn, fastafile))
continue
SeqIO.write([rec], fw, "fasta")
return fw.name
atoms = query.split(":")
key = atoms[0]
assert len(atoms) <= 3, "cannot have more than two ':' in your query"
pos = ""
if len(atoms) in (2, 3):
pos = atoms[1]
strand = "+"
if len(atoms) == 3:
strand = atoms[2]
assert strand in ('+', '-'), "strand must be either '+' or '-'"
feature = dict(chr=key)
if "-" in pos:
start, stop = pos.split("-")
try:
start, stop = int(start), int(stop)
except ValueError as e:
logging.error(e)
sys.exit(p.print_help())
feature["start"] = start
feature["stop"] = stop
else:
start, stop = None, None
assert start < stop or None in (start, stop), \
"start must be < stop, you have ({0}, {1})".format(start, stop)
feature["strand"] = strand
include, exclude = opts.include, opts.exclude
# conflicting options, cannot be true at the same time
assert not (include and exclude), "--include and --exclude cannot be "\
"on at the same time"
fw = must_open(opts.outfile, "w")
if include or exclude:
f = Fasta(fastafile, lazy=True)
for k, rec in f.iterdescriptions_ordered():
if include and key not in k:
continue
if exclude and key in k:
continue
seq = Fasta.subseq(rec, start, stop, strand)
newid = rec.id
if start is not None:
newid += ":{0}-{1}:{2}".format(start, stop, strand)
rec = SeqRecord(seq, id=newid, description=k)
SeqIO.write([rec], fw, "fasta")
else:
f = Fasta(fastafile)
try:
seq = f.sequence(feature, asstring=False)
except AssertionError as e:
logging.error(e)
return
rec = SeqRecord(seq, id=query, description="")
SeqIO.write([rec], fw, "fasta")
return fw.name
def _uniq_rec(fastafile, seq=False):
"""
Returns unique records
"""
seen = set()
for rec in SeqIO.parse(fastafile, "fasta"):
name = str(rec.seq) if seq else rec.id
if name in seen:
logging.debug("ignore {0}".format(rec.id))
continue
seen.add(name)
yield rec
def uniq(args):
"""
%prog uniq fasta uniq.fasta
remove fasta records that are the same
"""
p = OptionParser(uniq.__doc__)
p.add_option("--seq", default=False, action="store_true",
help="Uniqify the sequences [default: %default]")
p.add_option("-t", "--trimname", dest="trimname",
action="store_true", default=False,
help="turn on the defline trim to first space [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help())
fastafile, uniqfastafile = args
fw = must_open(uniqfastafile, "w")
seq = opts.seq
for rec in _uniq_rec(fastafile, seq=seq):
if opts.trimname:
rec.description = ""
SeqIO.write([rec], fw, "fasta")
def random(args):
"""
%prog random fasta 100 > random100.fasta
Take number of records randomly from fasta
"""
from random import sample
p = OptionParser(random.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, N = args
N = int(N)
assert N > 0
f = Fasta(fastafile)
fw = must_open("stdout", "w")
for key in sample(f.keys(), N):
rec = f[key]
SeqIO.write([rec], fw, "fasta")
fw.close()
XQUAL = -1000 # default quality for X
NQUAL = 5 # default quality value for N
QUAL = 10 # default quality value
OKQUAL = 15
def modify_qual(rec):
qv = rec.letter_annotations['phred_quality']
for i, (s, q) in enumerate(zip(rec.seq, qv)):
if s == 'X' or s == 'x':
qv[i] = XQUAL
if s == 'N' or s == 'n':
qv[i] = NQUAL
return rec
def iter_fasta_qual(fastafile, qualfile, defaultqual=OKQUAL, modify=False):
"""
used by trim, emits one SeqRecord with quality values in it
"""
fastahandle = SeqIO.parse(fastafile, "fasta")
if qualfile:
qualityhandle = SeqIO.parse(qualfile, "qual")
for rec, rec_qual in zip(fastahandle, qualityhandle):
assert len(rec) == len(rec_qual)
rec.letter_annotations['phred_quality'] = \
rec_qual.letter_annotations['phred_quality']
yield rec if not modify else modify_qual(rec)
else:
logging.warning("assume qual ({0})".format(defaultqual))
for rec in fastahandle:
rec.letter_annotations['phred_quality'] = [defaultqual] * len(rec)
yield rec if not modify else modify_qual(rec)
def write_fasta_qual(rec, fastahandle, qualhandle):
if fastahandle:
SeqIO.write([rec], fastahandle, "fasta")
if qualhandle:
SeqIO.write([rec], qualhandle, "qual")
def trim(args):
"""
%prog trim fasta.screen newfasta
take the screen output from `cross_match` (against a vector db, for
example), then trim the sequences to remove X's. Will also perform quality
trim if fasta.screen.qual is found. The trimming algorithm is based on
finding the contiguous subarray that maximizes the sum of cutoff-adjusted quality values.
"""
from jcvi.algorithms.maxsum import max_sum
p = OptionParser(trim.__doc__)
p.add_option("-c", dest="min_length", type="int", default=64,
help="minimum sequence length after trimming")
p.add_option("-s", dest="score", default=QUAL,
help="quality trimming cutoff [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help())
fastafile, newfastafile = args
qualfile = get_qual(fastafile)
newqualfile = get_qual(newfastafile, check=False)
logging.debug("Trim bad sequence from fasta file `%s` to `%s`" % \
(fastafile, newfastafile))
fw = must_open(newfastafile, "w")
fw_qual = open(newqualfile, "w")
dropped = trimmed = 0
for rec in iter_fasta_qual(fastafile, qualfile, modify=True):
qv = [x - opts.score for x in \
rec.letter_annotations["phred_quality"]]
msum, trim_start, trim_end = max_sum(qv)
score = trim_end - trim_start + 1
if score < opts.min_length:
dropped += 1
continue
if score < len(rec):
trimmed += 1
rec = rec[trim_start:trim_end + 1]
write_fasta_qual(rec, fw, fw_qual)
print >>sys.stderr, "A total of %d sequences modified." % trimmed
print >>sys.stderr, "A total of %d sequences dropped (length < %d)." % \
(dropped, opts.min_length)
fw.close()
fw_qual.close()
def sequin(args):
"""
%prog sequin inputfasta
Generate a gapped FASTA file with known gap sizes embedded, suitable for
Sequin submission.
A gapped sequence represents a newer method for describing non-contiguous
sequences, but only requires a single sequence identifier. A gap is
represented by a line that starts with >? and is immediately followed by
either a length (for gaps of known length) or "unk100" for gaps of unknown
length. For example, ">?200". The next sequence segment continues on the
next line, with no separate definition line or identifier. The difference
between a gapped sequence and a segmented sequence is that the gapped
sequence uses a single identifier and can specify known length gaps.
Gapped sequences are preferred over segmented sequences. A sample gapped
sequence file is shown here:
>m_gagei [organism=Mansonia gagei] Mansonia gagei NADH dehydrogenase ...
ATGGAGCATACATATCAATATTCATGGATCATACCGTTTGTGCCACTTCCAATTCCTATTTTAATAGGAA
TTGGACTCCTACTTTTTCCGACGGCAACAAAAAATCTTCGTCGTATGTGGGCTCTTCCCAATATTTTATT
>?200
GGTATAATAACAGTATTATTAGGGGCTACTTTAGCTCTTGC
TCAAAAAGATATTAAGAGGGGTTTAGCCTATTCTACAATGTCCCAACTGGGTTATATGATGTTAGCTCTA
>?unk100
TCAATAAAACTATGGGGTAAAGAAGAACAAAAAATAATTAACAGAAATTTTCGTTTATCTCCTTTATTAA
TATTAACGATGAATAATAATGAGAAGCCATATAGAATTGGTGATAATGTAAAAAAAGGGGCTCTTATTAC
"""
p = OptionParser(sequin.__doc__)
p.add_option("--mingap", dest="mingap", default=100, type="int",
help="The minimum size of a gap to split [default: %default]")
p.add_option("--unk", default=100, type="int",
help="The size for unknown gaps [default: %default]")
p.add_option("--newid", default=None,
help="Use this identifier instead [default: %default]")
p.add_option("--chromosome", default=None,
help="Add [chromosome= ] to FASTA header [default: %default]")
p.add_option("--clone", default=None,
help="Add [clone= ] to FASTA header [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputfasta, = args
unk = opts.unk
outputfasta = inputfasta.rsplit(".", 1)[0] + ".split"
rec = SeqIO.parse(must_open(inputfasta), "fasta").next()
seq = ""
unknowns, knowns = 0, 0
for gap, gap_group in groupby(rec.seq, lambda x: x.upper() == 'N'):
subseq = "".join(gap_group)
if gap:
gap_length = len(subseq)
if gap_length == unk:
subseq = "\n>?unk{0}\n".format(unk)
unknowns += 1
elif gap_length >= opts.mingap:
subseq = "\n>?{0}\n".format(gap_length)
knowns += 1
seq += subseq
fw = must_open(outputfasta, "w")
id = opts.newid or rec.id
fastaheader = ">{0}".format(id)
if opts.chromosome:
fastaheader += " [chromosome={0}]".format(opts.chromosome)
if opts.clone:
fastaheader += " [clone={0}]".format(opts.clone)
print >> fw, fastaheader
print >> fw, seq
fw.close()
logging.debug("Sequin FASTA written to `{0}` (gaps: {1} unknowns, {2} knowns).".\
format(outputfasta, unknowns, knowns))
return outputfasta, unknowns + knowns
def tidy(args):
"""
%prog tidy fastafile
Normalize gap sizes (default 100 N's) and remove small components (less than
100 nucleotides).
"""
p = OptionParser(tidy.__doc__)
p.add_option("--justtrim", default=False, action="store_true",
help="Just trim end Ns, disable other options [default: %default]")
p.add_option("--gapsize", dest="gapsize", default=100, type="int",
help="Set all gaps to the same size [default: %default]")
p.add_option("--minlen", dest="minlen", default=100, type="int",
help="Minimum component size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
gapsize = opts.gapsize
minlen = opts.minlen
tidyfastafile = fastafile.rsplit(".", 1)[0] + ".tidy.fasta"
fw = must_open(tidyfastafile, "w")
normalized_gap = "N" * gapsize
for rec in SeqIO.parse(fastafile, "fasta"):
newseq = ""
dangle_gaps = 0
for gap, seq in groupby(rec.seq, lambda x: x.upper() == 'N'):
if opts.justtrim:
newseq = str(rec.seq)
break
seq = "".join(seq)
seqlen = len(seq)
msg = None
if gap:
nsize = max(gapsize - dangle_gaps, 0)
if seqlen < 10:
if nsize > seqlen:
nsize = seqlen
dangle_gaps += seqlen
else:
if seqlen != gapsize:
msg = "Normalize gap size ({0}) to {1}" \
.format(seqlen, nsize)
dangle_gaps = gapsize
newseq += nsize * 'N'
else:
if seqlen < minlen:
msg = "Discard component ({0})".format(seqlen)
else:
newseq += seq
# Discarding components might cause flank gaps to merge
# should be handled in dangle_gaps, which is only reset when
# seeing an actual sequence
dangle_gaps = 0
if msg:
msg = rec.id + ": " + msg
logging.info(msg)
newseq = newseq.strip('nN')
ngaps = newseq.count(normalized_gap)
rec.seq = Seq(newseq)
SeqIO.write([rec], fw, "fasta")
def gaps(args):
"""
%prog gaps fastafile
Print out a list of gaps in BED format (.gaps.bed).
"""
p = OptionParser(gaps.__doc__)
p.add_option("--mingap", default=100, type="int",
help="The minimum size of a gap to split [default: %default]")
p.add_option("--agp", default=False, action="store_true",
help="Generate AGP file to show components [default: %default]")
p.add_option("--split", default=False, action="store_true",
help="Generate .split.fasta [default: %default]")
p.add_option("--log", default=False, action="store_true",
help="Generate gap positions to .gaps.log [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
inputfasta, = args
mingap = opts.mingap
prefix = inputfasta.rsplit(".", 1)[0]
bedfile = prefix + ".gaps.bed"
fwbed = open(bedfile, "w")
logging.debug("Write gap locations to `{0}`.".format(bedfile))
if opts.log:
logfile = prefix + ".gaps.log"
fwlog = must_open(logfile, "w")
logging.debug("Write gap locations to `{0}`.".format(logfile))
gapnum = 0
for rec in SeqIO.parse(inputfasta, "fasta"):
allgaps = []
start = 0
object = rec.id
for gap, seq in groupby(rec.seq.upper(), lambda x: x == 'N'):
seq = "".join(seq)
current_length = len(seq)
object_beg = start + 1
object_end = start + current_length
if gap and current_length >= opts.mingap:
allgaps.append((current_length, start))
gapnum += 1
gapname = "gap.{0:05d}".format(gapnum)
print >> fwbed, "\t".join(str(x) for x in (object,
object_beg - 1, object_end, gapname))
start += current_length
if opts.log:
if allgaps:
lengths, starts = zip(*allgaps)
gap_description = ",".join(str(x) for x in lengths)
starts = ",".join(str(x) for x in starts)
else:
gap_description = starts = "no gaps"
print >> fwlog, "\t".join((rec.id, str(len(allgaps)),
gap_description, starts))
fwbed.close()
if opts.agp or opts.split:
from jcvi.formats.sizes import agp
from jcvi.formats.agp import mask
agpfile = prefix + ".gaps.agp"
sizesagpfile = agp([inputfasta])
maskopts = [sizesagpfile, bedfile]
if opts.split:
maskopts += ["--split"]
maskedagpfile = mask(maskopts)
shutil.move(maskedagpfile, agpfile)
os.remove(sizesagpfile)
logging.debug("AGP file written to `{0}`.".format(agpfile))
if opts.split:
from jcvi.formats.agp import build
splitfile = prefix + ".split.fasta"
build([agpfile, inputfasta, splitfile])
if __name__ == '__main__':
main()
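The gap handling above leans on itertools.groupby with an is-N key to walk a sequence as alternating runs of bases and Ns. A minimal standalone sketch of the same idea, with a made-up input sequence and independent of jcvi:
# Standalone sketch of the groupby-based gap scan used above; the sequence and
# mingap value are made up for illustration.
from itertools import groupby

def find_gaps(seq, mingap=1):
    # Return (0-based start, length) for every run of Ns of at least mingap.
    start = 0
    gaps = []
    for is_gap, run in groupby(seq.upper(), lambda x: x == 'N'):
        run_len = len(list(run))
        if is_gap and run_len >= mingap:
            gaps.append((start, run_len))
        start += run_len
    return gaps

print(find_gaps("ACGTNNNNNACGTNNACGT", mingap=2))  # [(4, 5), (13, 2)]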
|
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
class LimitedMultipleChoiceField(forms.MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.maximum_choices = kwargs.pop("maximum_choices", None)
self.default_error_messages.update({
'maximum_choices': _('You may select at most %(maximum)d choices (%(selected)d selected)')
})
super(LimitedMultipleChoiceField, self).__init__(*args, **kwargs)
def validate(self, value):
super(LimitedMultipleChoiceField, self).validate(value)
selected_count = len(value)
if selected_count > self.maximum_choices:
raise ValidationError(
self.error_messages['maximum_choices'],
code='maximum_choices',
params={'maximum': self.maximum_choices, 'selected': selected_count},
)
only validate maximum choices if set
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
class LimitedMultipleChoiceField(forms.MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.maximum_choices = kwargs.pop("maximum_choices")
self.default_error_messages.update({
'maximum_choices': _('You may select at most %(maximum)d choices (%(selected)d selected)')
})
super(LimitedMultipleChoiceField, self).__init__(*args, **kwargs)
def validate(self, value):
super(LimitedMultipleChoiceField, self).validate(value)
selected_count = len(value)
if self.maximum_choices and selected_count > self.maximum_choices:
raise ValidationError(
self.error_messages['maximum_choices'],
code='maximum_choices',
params={'maximum': self.maximum_choices, 'selected': selected_count},
)
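A minimal usage sketch for the field above, assuming it is importable and run inside a configured Django project; TopicForm and the TOPICS choices are hypothetical:
# Hypothetical usage of LimitedMultipleChoiceField; requires Django settings
# to be configured and the field class defined above to be importable.
from django import forms

TOPICS = [("py", "Python"), ("go", "Go"), ("rs", "Rust")]

class TopicForm(forms.Form):
    topics = LimitedMultipleChoiceField(choices=TOPICS, maximum_choices=2)

form = TopicForm(data={"topics": ["py", "go", "rs"]})
assert not form.is_valid()           # three selections exceed maximum_choices=2
print(form.errors["topics"][0])      # "You may select at most 2 choices (3 selected)"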
|
from django.core import management
from django.utils import unittest
from django.contrib.contenttypes.models import ContentType
from django.test.client import Client as BaseClient, FakePayload, \
RequestFactory
from django.core.urlresolvers import reverse
from post.models import Post
from foundry.models import Member, Listing
class Client(BaseClient):
"""Bug in django/test/client.py omits wsgi.input"""
def _base_environ(self, **request):
result = super(Client, self)._base_environ(**request)
result['HTTP_USER_AGENT'] = 'Django Unittest'
result['HTTP_REFERER'] = 'dummy'
result['wsgi.input'] = FakePayload('')
return result
class TestCase(unittest.TestCase):
def setUp(self):
self.request = RequestFactory()
self.client = Client()
# Post-syncdb steps
management.call_command('migrate', interactive=False)
management.call_command('load_photosizes', interactive=False)
# Editor
self.editor, dc = Member.objects.get_or_create(
username='editor',
email='editor@test.com'
)
# Posts
for i in range(1, 5):
post, dc = Post.objects.get_or_create(
title='Post %s' % i, content='<b>aaa</b>',
owner=self.editor, state='published',
)
post.sites = [1]
post.save()
setattr(self, 'post%s' % i, post)
# Listings
content_type = ContentType.objects.get(app_label='post', model='post')
listing, dc = Listing.objects.get_or_create(
title='Posts vertical thumbnail',
slug='posts-vertical-thumbnail',
count=0, items_per_page=0, style='VerticalThumbnail',
)
listing.content_type = [content_type]
listing.sites = [1]
listing.save()
setattr(self, listing.slug, listing)
setattr(self, '_initialized', 1)
def test_listing(self):
listing = getattr(self, 'posts-vertical-thumbnail')
self.failUnless(self.post1.modelbase_obj in listing.queryset().all())
def test_pages(self):
# Login, password reset
for name in ('login', 'password_reset'):
response = self.client.get(reverse(name))
self.assertEqual(response.status_code, 200)
self.failIf(response.content.find('foundry-form') == -1)
# Posts vertical thumbnail listing
response = self.client.get('/listing/posts-vertical-thumbnail/')
self.assertEqual(response.status_code, 200)
self.failIf(response.content.find('foundry-listing-vertical-thumbnail') == -1)
self.failIf(response.content.find('/post/post-1') == -1)
Demonstrate the failure in a test
from django.core import management
from django.utils import unittest
from django.contrib.contenttypes.models import ContentType
from django.test.client import Client as BaseClient, FakePayload, \
RequestFactory
from django.core.urlresolvers import reverse
from post.models import Post
from foundry.models import Member, Listing
class Client(BaseClient):
"""Bug in django/test/client.py omits wsgi.input"""
def _base_environ(self, **request):
result = super(Client, self)._base_environ(**request)
result['HTTP_USER_AGENT'] = 'Django Unittest'
result['HTTP_REFERER'] = 'dummy'
result['wsgi.input'] = FakePayload('')
return result
class TestCase(unittest.TestCase):
def setUp(self):
self.request = RequestFactory()
self.client = Client()
# Post-syncdb steps
management.call_command('migrate', interactive=False)
management.call_command('load_photosizes', interactive=False)
# Editor
self.editor, dc = Member.objects.get_or_create(
username='editor',
email='editor@test.com'
)
# Published posts
for i in range(1, 5):
post, dc = Post.objects.get_or_create(
title='Post %s' % i, content='<b>aaa</b>',
owner=self.editor, state='published',
)
post.sites = [1]
post.save()
setattr(self, 'post%s' % i, post)
# Unpublished posts
for i in range(5,7):
post, dc = Post.objects.get_or_create(
title='Post %s' % i, content='<b>aaa</b>',
owner=self.editor, state='unpublished',
)
post.sites = [1]
post.save()
setattr(self, 'post%s' % i, post)
# Listings
content_type = ContentType.objects.get(app_label='post', model='post')
listing_pvt, dc = Listing.objects.get_or_create(
title='Posts vertical thumbnail',
slug='posts-vertical-thumbnail',
count=0, items_per_page=0, style='VerticalThumbnail',
)
listing_pvt.content_type = [content_type]
listing_pvt.sites = [1]
listing_pvt.save()
setattr(self, listing_pvt.slug, listing_pvt)
listing_upc, dc = Listing.objects.get_or_create(
title='Unpublished content',
slug='unpublished-content',
count=0, items_per_page=0, style='VerticalThumbnail',
)
listing_upc.content = [self.post5]
listing_upc.sites = [1]
listing_upc.save()
setattr(self, listing_upc.slug, listing_upc)
setattr(self, '_initialized', 1)
def test_listing_pvt(self):
listing = getattr(self, 'posts-vertical-thumbnail')
self.failUnless(self.post1.modelbase_obj in listing.queryset().all())
def test_listing_upc(self):
# Unpublished content must not be present in listing queryset
listing = getattr(self, 'unpublished-content')
self.failIf(self.post5.modelbase_obj in listing.queryset().all())
def test_pages(self):
# Login, password reset
for name in ('login', 'password_reset'):
response = self.client.get(reverse(name))
self.assertEqual(response.status_code, 200)
self.failIf(response.content.find('foundry-form') == -1)
# Posts vertical thumbnail listing
response = self.client.get('/listing/posts-vertical-thumbnail/')
self.assertEqual(response.status_code, 200)
self.failIf(response.content.find('foundry-listing-vertical-thumbnail') == -1)
self.failIf(response.content.find('/post/post-1') == -1)
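The tests above use the long-deprecated failUnless/failIf aliases; a standalone sketch of the equivalent membership checks with the current unittest names (the data here is made up):
# Standalone sketch: assertIn/assertNotIn express the same checks as the
# failUnless(x in seq) / failIf(x in seq) calls above.
import unittest

class MembershipAsserts(unittest.TestCase):
    def test_equivalents(self):
        published = ['post-1', 'post-2']
        self.assertIn('post-1', published)      # replaces failUnless(... in ...)
        self.assertNotIn('post-5', published)   # replaces failIf(... in ...)

if __name__ == '__main__':
    unittest.main()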
|
from collections import deque
import copy
from cereal import car
from common.conversions import Conversions as CV
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from selfdrive.car.hyundai.values import DBC, STEER_THRESHOLD, FEATURES, HDA2_CAR, EV_CAR, HYBRID_CAR, Buttons
from selfdrive.car.interfaces import CarStateBase
PREV_BUTTON_SAMPLES = 4
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]["pt"])
self.cruise_buttons = deque([Buttons.NONE] * PREV_BUTTON_SAMPLES, maxlen=PREV_BUTTON_SAMPLES)
self.main_buttons = deque([Buttons.NONE] * PREV_BUTTON_SAMPLES, maxlen=PREV_BUTTON_SAMPLES)
if CP.carFingerprint in HDA2_CAR:
self.shifter_values = can_define.dv["ACCELERATOR"]["GEAR"]
elif self.CP.carFingerprint in FEATURES["use_cluster_gears"]:
self.shifter_values = can_define.dv["CLU15"]["CF_Clu_Gear"]
elif self.CP.carFingerprint in FEATURES["use_tcu_gears"]:
self.shifter_values = can_define.dv["TCU12"]["CUR_GR"]
else: # preferred and elect gear methods use same definition
self.shifter_values = can_define.dv["LVR12"]["CF_Lvr_Gear"]
self.brake_error = False
self.park_brake = False
self.buttons_counter = 0
def update(self, cp, cp_cam):
if self.CP.carFingerprint in HDA2_CAR:
return self.update_hda2(cp, cp_cam)
ret = car.CarState.new_message()
ret.doorOpen = any([cp.vl["CGW1"]["CF_Gway_DrvDrSw"], cp.vl["CGW1"]["CF_Gway_AstDrSw"],
cp.vl["CGW2"]["CF_Gway_RLDrSw"], cp.vl["CGW2"]["CF_Gway_RRDrSw"]])
ret.seatbeltUnlatched = cp.vl["CGW1"]["CF_Gway_DrvSeatBeltSw"] == 0
ret.wheelSpeeds = self.get_wheel_speeds(
cp.vl["WHL_SPD11"]["WHL_SPD_FL"],
cp.vl["WHL_SPD11"]["WHL_SPD_FR"],
cp.vl["WHL_SPD11"]["WHL_SPD_RL"],
cp.vl["WHL_SPD11"]["WHL_SPD_RR"],
)
ret.vEgoRaw = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr) / 4.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = ret.vEgoRaw < 0.1
ret.steeringAngleDeg = cp.vl["SAS11"]["SAS_Angle"]
ret.steeringRateDeg = cp.vl["SAS11"]["SAS_Speed"]
ret.yawRate = cp.vl["ESP12"]["YAW_RATE"]
ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_lamp(
50, cp.vl["CGW1"]["CF_Gway_TurnSigLh"], cp.vl["CGW1"]["CF_Gway_TurnSigRh"])
ret.steeringTorque = cp.vl["MDPS12"]["CR_Mdps_StrColTq"]
ret.steeringTorqueEps = cp.vl["MDPS12"]["CR_Mdps_OutTq"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
ret.steerFaultTemporary = cp.vl["MDPS12"]["CF_Mdps_ToiUnavail"] != 0 or cp.vl["MDPS12"]["CF_Mdps_ToiFlt"] != 0
# cruise state
if self.CP.openpilotLongitudinalControl:
# These are not used for engage/disengage since openpilot keeps track of state using the buttons
ret.cruiseState.available = cp.vl["TCS13"]["ACCEnable"] == 0
ret.cruiseState.enabled = cp.vl["TCS13"]["ACC_REQ"] == 1
ret.cruiseState.standstill = False
else:
ret.cruiseState.available = cp.vl["SCC11"]["MainMode_ACC"] == 1
ret.cruiseState.enabled = cp.vl["SCC12"]["ACCMode"] != 0
ret.cruiseState.standstill = cp.vl["SCC11"]["SCCInfoDisplay"] == 4.
speed_conv = CV.MPH_TO_MS if cp.vl["CLU11"]["CF_Clu_SPEED_UNIT"] else CV.KPH_TO_MS
ret.cruiseState.speed = cp.vl["SCC11"]["VSetDis"] * speed_conv
# TODO: Find brake pressure
ret.brake = 0
ret.brakePressed = cp.vl["TCS13"]["DriverBraking"] != 0
ret.brakeHoldActive = cp.vl["TCS15"]["AVH_LAMP"] == 2 # 0 OFF, 1 ERROR, 2 ACTIVE, 3 READY
ret.parkingBrake = cp.vl["TCS13"]["PBRAKE_ACT"] == 1
if self.CP.carFingerprint in (HYBRID_CAR | EV_CAR):
if self.CP.carFingerprint in HYBRID_CAR:
ret.gas = cp.vl["E_EMS11"]["CR_Vcu_AccPedDep_Pos"] / 254.
else:
ret.gas = cp.vl["E_EMS11"]["Accel_Pedal_Pos"] / 254.
ret.gasPressed = ret.gas > 0
else:
ret.gas = cp.vl["EMS12"]["PV_AV_CAN"] / 100.
ret.gasPressed = bool(cp.vl["EMS16"]["CF_Ems_AclAct"])
# Gear Selection via Cluster - For those Kia/Hyundai which are not fully discovered, we can use the Cluster Indicator for Gear Selection,
# as this seems to be standard over all cars, but is not the preferred method.
if self.CP.carFingerprint in FEATURES["use_cluster_gears"]:
gear = cp.vl["CLU15"]["CF_Clu_Gear"]
elif self.CP.carFingerprint in FEATURES["use_tcu_gears"]:
gear = cp.vl["TCU12"]["CUR_GR"]
elif self.CP.carFingerprint in FEATURES["use_elect_gears"]:
gear = cp.vl["ELECT_GEAR"]["Elect_Gear_Shifter"]
else:
gear = cp.vl["LVR12"]["CF_Lvr_Gear"]
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(gear))
if not self.CP.openpilotLongitudinalControl:
if self.CP.carFingerprint in FEATURES["use_fca"]:
ret.stockAeb = cp.vl["FCA11"]["FCA_CmdAct"] != 0
ret.stockFcw = cp.vl["FCA11"]["CF_VSM_Warn"] == 2
else:
ret.stockAeb = cp.vl["SCC12"]["AEB_CmdAct"] != 0
ret.stockFcw = cp.vl["SCC12"]["CF_VSM_Warn"] == 2
if self.CP.enableBsm:
ret.leftBlindspot = cp.vl["LCA11"]["CF_Lca_IndLeft"] != 0
ret.rightBlindspot = cp.vl["LCA11"]["CF_Lca_IndRight"] != 0
# save the entire LKAS11 and CLU11
self.lkas11 = copy.copy(cp_cam.vl["LKAS11"])
self.clu11 = copy.copy(cp.vl["CLU11"])
self.steer_state = cp.vl["MDPS12"]["CF_Mdps_ToiActive"] # 0 NOT ACTIVE, 1 ACTIVE
self.brake_error = cp.vl["TCS13"]["ACCEnable"] != 0 # 0 ACC CONTROL ENABLED, 1-3 ACC CONTROL DISABLED
self.prev_cruise_buttons = self.cruise_buttons[-1]
self.cruise_buttons.extend(cp.vl_all["CLU11"]["CF_Clu_CruiseSwState"])
self.main_buttons.extend(cp.vl_all["CLU11"]["CF_Clu_CruiseSwMain"])
return ret
def update_hda2(self, cp, cp_cam):
ret = car.CarState.new_message()
ret.gas = cp.vl["ACCELERATOR"]["ACCELERATOR_PEDAL"] / 255.
ret.gasPressed = ret.gas > 1e-3
ret.brakePressed = cp.vl["BRAKE"]["BRAKE_PRESSED"] == 1
ret.doorOpen = cp.vl["DOORS_SEATBELTS"]["DRIVER_DOOR_OPEN"] == 1
ret.seatbeltUnlatched = cp.vl["DOORS_SEATBELTS"]["DRIVER_SEATBELT_LATCHED"] == 0
gear = cp.vl["ACCELERATOR"]["GEAR"]
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(gear))
# TODO: figure out positions
ret.wheelSpeeds = self.get_wheel_speeds(
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_1"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_2"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_3"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_4"],
)
ret.vEgoRaw = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr) / 4.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = ret.vEgoRaw < 0.1
ret.steeringRateDeg = cp.vl["STEERING_SENSORS"]["STEERING_RATE"]
ret.steeringAngleDeg = cp.vl["STEERING_SENSORS"]["STEERING_ANGLE"] * -1
ret.steeringTorque = cp.vl["MDPS"]["STEERING_COL_TORQUE"]
ret.steeringTorqueEps = cp.vl["MDPS"]["STEERING_OUT_TORQUE"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_lamp(50, cp.vl["BLINKERS"]["LEFT_LAMP"],
cp.vl["BLINKERS"]["RIGHT_LAMP"])
ret.cruiseState.available = True
ret.cruiseState.enabled = cp.vl["SCC1"]["CRUISE_ACTIVE"] == 1
ret.cruiseState.standstill = cp.vl["CRUISE_INFO"]["CRUISE_STANDSTILL"] == 1
speed_factor = CV.MPH_TO_MS if cp.vl["CLUSTER_INFO"]["DISTANCE_UNIT"] == 1 else CV.KPH_TO_MS
ret.cruiseState.speed = cp.vl["CRUISE_INFO"]["SET_SPEED"] * speed_factor
self.buttons_counter = cp.vl["CRUISE_BUTTONS"]["_COUNTER"]
return ret
@staticmethod
def get_can_parser(CP):
if CP.carFingerprint in HDA2_CAR:
return CarState.get_can_parser_hda2(CP)
signals = [
# sig_name, sig_address
("WHL_SPD_FL", "WHL_SPD11"),
("WHL_SPD_FR", "WHL_SPD11"),
("WHL_SPD_RL", "WHL_SPD11"),
("WHL_SPD_RR", "WHL_SPD11"),
("YAW_RATE", "ESP12"),
("CF_Gway_DrvSeatBeltInd", "CGW4"),
("CF_Gway_DrvSeatBeltSw", "CGW1"),
("CF_Gway_DrvDrSw", "CGW1"), # Driver Door
("CF_Gway_AstDrSw", "CGW1"), # Passenger door
("CF_Gway_RLDrSw", "CGW2"), # Rear reft door
("CF_Gway_RRDrSw", "CGW2"), # Rear right door
("CF_Gway_TurnSigLh", "CGW1"),
("CF_Gway_TurnSigRh", "CGW1"),
("CF_Gway_ParkBrakeSw", "CGW1"),
("CYL_PRES", "ESP12"),
("CF_Clu_CruiseSwState", "CLU11"),
("CF_Clu_CruiseSwMain", "CLU11"),
("CF_Clu_SldMainSW", "CLU11"),
("CF_Clu_ParityBit1", "CLU11"),
("CF_Clu_VanzDecimal" , "CLU11"),
("CF_Clu_Vanz", "CLU11"),
("CF_Clu_SPEED_UNIT", "CLU11"),
("CF_Clu_DetentOut", "CLU11"),
("CF_Clu_RheostatLevel", "CLU11"),
("CF_Clu_CluInfo", "CLU11"),
("CF_Clu_AmpInfo", "CLU11"),
("CF_Clu_AliveCnt1", "CLU11"),
("ACCEnable", "TCS13"),
("ACC_REQ", "TCS13"),
("DriverBraking", "TCS13"),
("StandStill", "TCS13"),
("PBRAKE_ACT", "TCS13"),
("ESC_Off_Step", "TCS15"),
("AVH_LAMP", "TCS15"),
("CR_Mdps_StrColTq", "MDPS12"),
("CF_Mdps_ToiActive", "MDPS12"),
("CF_Mdps_ToiUnavail", "MDPS12"),
("CF_Mdps_ToiFlt", "MDPS12"),
("CR_Mdps_OutTq", "MDPS12"),
("SAS_Angle", "SAS11"),
("SAS_Speed", "SAS11"),
]
checks = [
# address, frequency
("MDPS12", 50),
("TCS13", 50),
("TCS15", 10),
("CLU11", 50),
("ESP12", 100),
("CGW1", 10),
("CGW2", 5),
("CGW4", 5),
("WHL_SPD11", 50),
("SAS11", 100),
]
if not CP.openpilotLongitudinalControl:
signals += [
("MainMode_ACC", "SCC11"),
("VSetDis", "SCC11"),
("SCCInfoDisplay", "SCC11"),
("ACC_ObjDist", "SCC11"),
("ACCMode", "SCC12"),
]
checks += [
("SCC11", 50),
("SCC12", 50),
]
if CP.carFingerprint in FEATURES["use_fca"]:
signals += [
("FCA_CmdAct", "FCA11"),
("CF_VSM_Warn", "FCA11"),
]
checks.append(("FCA11", 50))
else:
signals += [
("AEB_CmdAct", "SCC12"),
("CF_VSM_Warn", "SCC12"),
]
if CP.enableBsm:
signals += [
("CF_Lca_IndLeft", "LCA11"),
("CF_Lca_IndRight", "LCA11"),
]
checks.append(("LCA11", 50))
if CP.carFingerprint in (HYBRID_CAR | EV_CAR):
if CP.carFingerprint in HYBRID_CAR:
signals.append(("CR_Vcu_AccPedDep_Pos", "E_EMS11"))
else:
signals.append(("Accel_Pedal_Pos", "E_EMS11"))
checks.append(("E_EMS11", 50))
else:
signals += [
("PV_AV_CAN", "EMS12"),
("CF_Ems_AclAct", "EMS16"),
]
checks += [
("EMS12", 100),
("EMS16", 100),
]
if CP.carFingerprint in FEATURES["use_cluster_gears"]:
signals.append(("CF_Clu_Gear", "CLU15"))
checks.append(("CLU15", 5))
elif CP.carFingerprint in FEATURES["use_tcu_gears"]:
signals.append(("CUR_GR", "TCU12"))
checks.append(("TCU12", 100))
elif CP.carFingerprint in FEATURES["use_elect_gears"]:
signals.append(("Elect_Gear_Shifter", "ELECT_GEAR"))
checks.append(("ELECT_GEAR", 20))
else:
signals.append(("CF_Lvr_Gear", "LVR12"))
checks.append(("LVR12", 100))
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 0)
@staticmethod
def get_cam_can_parser(CP):
if CP.carFingerprint in HDA2_CAR:
return None
signals = [
# sig_name, sig_address
("CF_Lkas_LdwsActivemode", "LKAS11"),
("CF_Lkas_LdwsSysState", "LKAS11"),
("CF_Lkas_SysWarning", "LKAS11"),
("CF_Lkas_LdwsLHWarning", "LKAS11"),
("CF_Lkas_LdwsRHWarning", "LKAS11"),
("CF_Lkas_HbaLamp", "LKAS11"),
("CF_Lkas_FcwBasReq", "LKAS11"),
("CF_Lkas_HbaSysState", "LKAS11"),
("CF_Lkas_FcwOpt", "LKAS11"),
("CF_Lkas_HbaOpt", "LKAS11"),
("CF_Lkas_FcwSysState", "LKAS11"),
("CF_Lkas_FcwCollisionWarning", "LKAS11"),
("CF_Lkas_FusionState", "LKAS11"),
("CF_Lkas_FcwOpt_USM", "LKAS11"),
("CF_Lkas_LdwsOpt_USM", "LKAS11"),
]
checks = [
("LKAS11", 100)
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 2)
@staticmethod
def get_can_parser_hda2(CP):
signals = [
("WHEEL_SPEED_1", "WHEEL_SPEEDS"),
("WHEEL_SPEED_2", "WHEEL_SPEEDS"),
("WHEEL_SPEED_3", "WHEEL_SPEEDS"),
("WHEEL_SPEED_4", "WHEEL_SPEEDS"),
("ACCELERATOR_PEDAL", "ACCELERATOR"),
("GEAR", "ACCELERATOR"),
("BRAKE_PRESSED", "BRAKE"),
("STEERING_RATE", "STEERING_SENSORS"),
("STEERING_ANGLE", "STEERING_SENSORS"),
("STEERING_COL_TORQUE", "MDPS"),
("STEERING_OUT_TORQUE", "MDPS"),
("CRUISE_ACTIVE", "SCC1"),
("SET_SPEED", "CRUISE_INFO"),
("CRUISE_STANDSTILL", "CRUISE_INFO"),
("_COUNTER", "CRUISE_BUTTONS"),
("DISTANCE_UNIT", "CLUSTER_INFO"),
("LEFT_LAMP", "BLINKERS"),
("RIGHT_LAMP", "BLINKERS"),
("DRIVER_DOOR_OPEN", "DOORS_SEATBELTS"),
("DRIVER_SEATBELT_LATCHED", "DOORS_SEATBELTS"),
]
checks = [
("WHEEL_SPEEDS", 100),
("ACCELERATOR", 100),
("BRAKE", 100),
("STEERING_SENSORS", 100),
("MDPS", 100),
("SCC1", 50),
("CRUISE_INFO", 50),
("CRUISE_BUTTONS", 50),
("CLUSTER_INFO", 4),
("BLINKERS", 4),
("DOORS_SEATBELTS", 4),
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 5)
Hyundai: small car state cleanup (#24643)
* fix typo
* Update selfdrive/car/hyundai/carstate.py
Co-authored-by: Adeeb Shihadeh <560a1160cdca1405d17f7eea1cfe3906403d06ef@gmail.com>
from collections import deque
import copy
from cereal import car
from common.conversions import Conversions as CV
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from selfdrive.car.hyundai.values import DBC, STEER_THRESHOLD, FEATURES, HDA2_CAR, EV_CAR, HYBRID_CAR, Buttons
from selfdrive.car.interfaces import CarStateBase
PREV_BUTTON_SAMPLES = 4
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]["pt"])
self.cruise_buttons = deque([Buttons.NONE] * PREV_BUTTON_SAMPLES, maxlen=PREV_BUTTON_SAMPLES)
self.main_buttons = deque([Buttons.NONE] * PREV_BUTTON_SAMPLES, maxlen=PREV_BUTTON_SAMPLES)
if CP.carFingerprint in HDA2_CAR:
self.shifter_values = can_define.dv["ACCELERATOR"]["GEAR"]
elif self.CP.carFingerprint in FEATURES["use_cluster_gears"]:
self.shifter_values = can_define.dv["CLU15"]["CF_Clu_Gear"]
elif self.CP.carFingerprint in FEATURES["use_tcu_gears"]:
self.shifter_values = can_define.dv["TCU12"]["CUR_GR"]
else: # preferred and elect gear methods use same definition
self.shifter_values = can_define.dv["LVR12"]["CF_Lvr_Gear"]
self.brake_error = False
self.park_brake = False
self.buttons_counter = 0
def update(self, cp, cp_cam):
if self.CP.carFingerprint in HDA2_CAR:
return self.update_hda2(cp, cp_cam)
ret = car.CarState.new_message()
ret.doorOpen = any([cp.vl["CGW1"]["CF_Gway_DrvDrSw"], cp.vl["CGW1"]["CF_Gway_AstDrSw"],
cp.vl["CGW2"]["CF_Gway_RLDrSw"], cp.vl["CGW2"]["CF_Gway_RRDrSw"]])
ret.seatbeltUnlatched = cp.vl["CGW1"]["CF_Gway_DrvSeatBeltSw"] == 0
ret.wheelSpeeds = self.get_wheel_speeds(
cp.vl["WHL_SPD11"]["WHL_SPD_FL"],
cp.vl["WHL_SPD11"]["WHL_SPD_FR"],
cp.vl["WHL_SPD11"]["WHL_SPD_RL"],
cp.vl["WHL_SPD11"]["WHL_SPD_RR"],
)
ret.vEgoRaw = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr) / 4.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = ret.vEgoRaw < 0.1
ret.steeringAngleDeg = cp.vl["SAS11"]["SAS_Angle"]
ret.steeringRateDeg = cp.vl["SAS11"]["SAS_Speed"]
ret.yawRate = cp.vl["ESP12"]["YAW_RATE"]
ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_lamp(
50, cp.vl["CGW1"]["CF_Gway_TurnSigLh"], cp.vl["CGW1"]["CF_Gway_TurnSigRh"])
ret.steeringTorque = cp.vl["MDPS12"]["CR_Mdps_StrColTq"]
ret.steeringTorqueEps = cp.vl["MDPS12"]["CR_Mdps_OutTq"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
ret.steerFaultTemporary = cp.vl["MDPS12"]["CF_Mdps_ToiUnavail"] != 0 or cp.vl["MDPS12"]["CF_Mdps_ToiFlt"] != 0
# cruise state
if self.CP.openpilotLongitudinalControl:
# These are not used for engage/disengage since openpilot keeps track of state using the buttons
ret.cruiseState.available = cp.vl["TCS13"]["ACCEnable"] == 0
ret.cruiseState.enabled = cp.vl["TCS13"]["ACC_REQ"] == 1
ret.cruiseState.standstill = False
else:
ret.cruiseState.available = cp.vl["SCC11"]["MainMode_ACC"] == 1
ret.cruiseState.enabled = cp.vl["SCC12"]["ACCMode"] != 0
ret.cruiseState.standstill = cp.vl["SCC11"]["SCCInfoDisplay"] == 4.
speed_conv = CV.MPH_TO_MS if cp.vl["CLU11"]["CF_Clu_SPEED_UNIT"] else CV.KPH_TO_MS
ret.cruiseState.speed = cp.vl["SCC11"]["VSetDis"] * speed_conv
# TODO: Find brake pressure
ret.brake = 0
ret.brakePressed = cp.vl["TCS13"]["DriverBraking"] != 0
ret.brakeHoldActive = cp.vl["TCS15"]["AVH_LAMP"] == 2 # 0 OFF, 1 ERROR, 2 ACTIVE, 3 READY
ret.parkingBrake = cp.vl["TCS13"]["PBRAKE_ACT"] == 1
if self.CP.carFingerprint in (HYBRID_CAR | EV_CAR):
if self.CP.carFingerprint in HYBRID_CAR:
ret.gas = cp.vl["E_EMS11"]["CR_Vcu_AccPedDep_Pos"] / 254.
else:
ret.gas = cp.vl["E_EMS11"]["Accel_Pedal_Pos"] / 254.
ret.gasPressed = ret.gas > 0
else:
ret.gas = cp.vl["EMS12"]["PV_AV_CAN"] / 100.
ret.gasPressed = bool(cp.vl["EMS16"]["CF_Ems_AclAct"])
# Gear Selection via Cluster - For those Kia/Hyundai which are not fully discovered, we can use the Cluster Indicator for Gear Selection,
# as this seems to be standard over all cars, but is not the preferred method.
if self.CP.carFingerprint in FEATURES["use_cluster_gears"]:
gear = cp.vl["CLU15"]["CF_Clu_Gear"]
elif self.CP.carFingerprint in FEATURES["use_tcu_gears"]:
gear = cp.vl["TCU12"]["CUR_GR"]
elif self.CP.carFingerprint in FEATURES["use_elect_gears"]:
gear = cp.vl["ELECT_GEAR"]["Elect_Gear_Shifter"]
else:
gear = cp.vl["LVR12"]["CF_Lvr_Gear"]
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(gear))
if not self.CP.openpilotLongitudinalControl:
if self.CP.carFingerprint in FEATURES["use_fca"]:
ret.stockAeb = cp.vl["FCA11"]["FCA_CmdAct"] != 0
ret.stockFcw = cp.vl["FCA11"]["CF_VSM_Warn"] == 2
else:
ret.stockAeb = cp.vl["SCC12"]["AEB_CmdAct"] != 0
ret.stockFcw = cp.vl["SCC12"]["CF_VSM_Warn"] == 2
if self.CP.enableBsm:
ret.leftBlindspot = cp.vl["LCA11"]["CF_Lca_IndLeft"] != 0
ret.rightBlindspot = cp.vl["LCA11"]["CF_Lca_IndRight"] != 0
# save the entire LKAS11 and CLU11
self.lkas11 = copy.copy(cp_cam.vl["LKAS11"])
self.clu11 = copy.copy(cp.vl["CLU11"])
self.steer_state = cp.vl["MDPS12"]["CF_Mdps_ToiActive"] # 0 NOT ACTIVE, 1 ACTIVE
self.brake_error = cp.vl["TCS13"]["ACCEnable"] != 0 # 0 ACC CONTROL ENABLED, 1-3 ACC CONTROL DISABLED
self.prev_cruise_buttons = self.cruise_buttons[-1]
self.cruise_buttons.extend(cp.vl_all["CLU11"]["CF_Clu_CruiseSwState"])
self.main_buttons.extend(cp.vl_all["CLU11"]["CF_Clu_CruiseSwMain"])
return ret
def update_hda2(self, cp, cp_cam):
ret = car.CarState.new_message()
ret.gas = cp.vl["ACCELERATOR"]["ACCELERATOR_PEDAL"] / 255.
ret.gasPressed = ret.gas > 1e-3
ret.brakePressed = cp.vl["BRAKE"]["BRAKE_PRESSED"] == 1
ret.doorOpen = cp.vl["DOORS_SEATBELTS"]["DRIVER_DOOR_OPEN"] == 1
ret.seatbeltUnlatched = cp.vl["DOORS_SEATBELTS"]["DRIVER_SEATBELT_LATCHED"] == 0
gear = cp.vl["ACCELERATOR"]["GEAR"]
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(gear))
# TODO: figure out positions
ret.wheelSpeeds = self.get_wheel_speeds(
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_1"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_2"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_3"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_4"],
)
ret.vEgoRaw = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr) / 4.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = ret.vEgoRaw < 0.1
ret.steeringRateDeg = cp.vl["STEERING_SENSORS"]["STEERING_RATE"]
ret.steeringAngleDeg = cp.vl["STEERING_SENSORS"]["STEERING_ANGLE"] * -1
ret.steeringTorque = cp.vl["MDPS"]["STEERING_COL_TORQUE"]
ret.steeringTorqueEps = cp.vl["MDPS"]["STEERING_OUT_TORQUE"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_lamp(50, cp.vl["BLINKERS"]["LEFT_LAMP"],
cp.vl["BLINKERS"]["RIGHT_LAMP"])
ret.cruiseState.available = True
ret.cruiseState.enabled = cp.vl["SCC1"]["CRUISE_ACTIVE"] == 1
ret.cruiseState.standstill = cp.vl["CRUISE_INFO"]["CRUISE_STANDSTILL"] == 1
speed_factor = CV.MPH_TO_MS if cp.vl["CLUSTER_INFO"]["DISTANCE_UNIT"] == 1 else CV.KPH_TO_MS
ret.cruiseState.speed = cp.vl["CRUISE_INFO"]["SET_SPEED"] * speed_factor
self.buttons_counter = cp.vl["CRUISE_BUTTONS"]["_COUNTER"]
return ret
@staticmethod
def get_can_parser(CP):
if CP.carFingerprint in HDA2_CAR:
return CarState.get_can_parser_hda2(CP)
signals = [
# signal_name, signal_address
("WHL_SPD_FL", "WHL_SPD11"),
("WHL_SPD_FR", "WHL_SPD11"),
("WHL_SPD_RL", "WHL_SPD11"),
("WHL_SPD_RR", "WHL_SPD11"),
("YAW_RATE", "ESP12"),
("CF_Gway_DrvSeatBeltInd", "CGW4"),
("CF_Gway_DrvSeatBeltSw", "CGW1"),
("CF_Gway_DrvDrSw", "CGW1"), # Driver Door
("CF_Gway_AstDrSw", "CGW1"), # Passenger Door
("CF_Gway_RLDrSw", "CGW2"), # Rear left Door
("CF_Gway_RRDrSw", "CGW2"), # Rear right Door
("CF_Gway_TurnSigLh", "CGW1"),
("CF_Gway_TurnSigRh", "CGW1"),
("CF_Gway_ParkBrakeSw", "CGW1"),
("CYL_PRES", "ESP12"),
("CF_Clu_CruiseSwState", "CLU11"),
("CF_Clu_CruiseSwMain", "CLU11"),
("CF_Clu_SldMainSW", "CLU11"),
("CF_Clu_ParityBit1", "CLU11"),
("CF_Clu_VanzDecimal" , "CLU11"),
("CF_Clu_Vanz", "CLU11"),
("CF_Clu_SPEED_UNIT", "CLU11"),
("CF_Clu_DetentOut", "CLU11"),
("CF_Clu_RheostatLevel", "CLU11"),
("CF_Clu_CluInfo", "CLU11"),
("CF_Clu_AmpInfo", "CLU11"),
("CF_Clu_AliveCnt1", "CLU11"),
("ACCEnable", "TCS13"),
("ACC_REQ", "TCS13"),
("DriverBraking", "TCS13"),
("StandStill", "TCS13"),
("PBRAKE_ACT", "TCS13"),
("ESC_Off_Step", "TCS15"),
("AVH_LAMP", "TCS15"),
("CR_Mdps_StrColTq", "MDPS12"),
("CF_Mdps_ToiActive", "MDPS12"),
("CF_Mdps_ToiUnavail", "MDPS12"),
("CF_Mdps_ToiFlt", "MDPS12"),
("CR_Mdps_OutTq", "MDPS12"),
("SAS_Angle", "SAS11"),
("SAS_Speed", "SAS11"),
]
checks = [
# address, frequency
("MDPS12", 50),
("TCS13", 50),
("TCS15", 10),
("CLU11", 50),
("ESP12", 100),
("CGW1", 10),
("CGW2", 5),
("CGW4", 5),
("WHL_SPD11", 50),
("SAS11", 100),
]
if not CP.openpilotLongitudinalControl:
signals += [
("MainMode_ACC", "SCC11"),
("VSetDis", "SCC11"),
("SCCInfoDisplay", "SCC11"),
("ACC_ObjDist", "SCC11"),
("ACCMode", "SCC12"),
]
checks += [
("SCC11", 50),
("SCC12", 50),
]
if CP.carFingerprint in FEATURES["use_fca"]:
signals += [
("FCA_CmdAct", "FCA11"),
("CF_VSM_Warn", "FCA11"),
]
checks.append(("FCA11", 50))
else:
signals += [
("AEB_CmdAct", "SCC12"),
("CF_VSM_Warn", "SCC12"),
]
if CP.enableBsm:
signals += [
("CF_Lca_IndLeft", "LCA11"),
("CF_Lca_IndRight", "LCA11"),
]
checks.append(("LCA11", 50))
if CP.carFingerprint in (HYBRID_CAR | EV_CAR):
if CP.carFingerprint in HYBRID_CAR:
signals.append(("CR_Vcu_AccPedDep_Pos", "E_EMS11"))
else:
signals.append(("Accel_Pedal_Pos", "E_EMS11"))
checks.append(("E_EMS11", 50))
else:
signals += [
("PV_AV_CAN", "EMS12"),
("CF_Ems_AclAct", "EMS16"),
]
checks += [
("EMS12", 100),
("EMS16", 100),
]
if CP.carFingerprint in FEATURES["use_cluster_gears"]:
signals.append(("CF_Clu_Gear", "CLU15"))
checks.append(("CLU15", 5))
elif CP.carFingerprint in FEATURES["use_tcu_gears"]:
signals.append(("CUR_GR", "TCU12"))
checks.append(("TCU12", 100))
elif CP.carFingerprint in FEATURES["use_elect_gears"]:
signals.append(("Elect_Gear_Shifter", "ELECT_GEAR"))
checks.append(("ELECT_GEAR", 20))
else:
signals.append(("CF_Lvr_Gear", "LVR12"))
checks.append(("LVR12", 100))
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 0)
@staticmethod
def get_cam_can_parser(CP):
if CP.carFingerprint in HDA2_CAR:
return None
signals = [
# signal_name, signal_address
("CF_Lkas_LdwsActivemode", "LKAS11"),
("CF_Lkas_LdwsSysState", "LKAS11"),
("CF_Lkas_SysWarning", "LKAS11"),
("CF_Lkas_LdwsLHWarning", "LKAS11"),
("CF_Lkas_LdwsRHWarning", "LKAS11"),
("CF_Lkas_HbaLamp", "LKAS11"),
("CF_Lkas_FcwBasReq", "LKAS11"),
("CF_Lkas_HbaSysState", "LKAS11"),
("CF_Lkas_FcwOpt", "LKAS11"),
("CF_Lkas_HbaOpt", "LKAS11"),
("CF_Lkas_FcwSysState", "LKAS11"),
("CF_Lkas_FcwCollisionWarning", "LKAS11"),
("CF_Lkas_FusionState", "LKAS11"),
("CF_Lkas_FcwOpt_USM", "LKAS11"),
("CF_Lkas_LdwsOpt_USM", "LKAS11"),
]
checks = [
("LKAS11", 100)
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 2)
@staticmethod
def get_can_parser_hda2(CP):
signals = [
("WHEEL_SPEED_1", "WHEEL_SPEEDS"),
("WHEEL_SPEED_2", "WHEEL_SPEEDS"),
("WHEEL_SPEED_3", "WHEEL_SPEEDS"),
("WHEEL_SPEED_4", "WHEEL_SPEEDS"),
("ACCELERATOR_PEDAL", "ACCELERATOR"),
("GEAR", "ACCELERATOR"),
("BRAKE_PRESSED", "BRAKE"),
("STEERING_RATE", "STEERING_SENSORS"),
("STEERING_ANGLE", "STEERING_SENSORS"),
("STEERING_COL_TORQUE", "MDPS"),
("STEERING_OUT_TORQUE", "MDPS"),
("CRUISE_ACTIVE", "SCC1"),
("SET_SPEED", "CRUISE_INFO"),
("CRUISE_STANDSTILL", "CRUISE_INFO"),
("_COUNTER", "CRUISE_BUTTONS"),
("DISTANCE_UNIT", "CLUSTER_INFO"),
("LEFT_LAMP", "BLINKERS"),
("RIGHT_LAMP", "BLINKERS"),
("DRIVER_DOOR_OPEN", "DOORS_SEATBELTS"),
("DRIVER_SEATBELT_LATCHED", "DOORS_SEATBELTS"),
]
checks = [
("WHEEL_SPEEDS", 100),
("ACCELERATOR", 100),
("BRAKE", 100),
("STEERING_SENSORS", 100),
("MDPS", 100),
("SCC1", 50),
("CRUISE_INFO", 50),
("CRUISE_BUTTONS", 50),
("CLUSTER_INFO", 4),
("BLINKERS", 4),
("DOORS_SEATBELTS", 4),
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 5)
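The CarState above keeps the last PREV_BUTTON_SAMPLES cruise-button values in a bounded deque and records the previous value before appending new samples, so downstream code can detect a press edge. A standalone sketch of that pattern with made-up button codes:
# Sketch of the bounded button history: deque(maxlen=...) keeps only the most
# recent samples; the previous value is read before new samples are appended.
# Button codes below are stand-ins, not the real Buttons enum.
from collections import deque

PREV_BUTTON_SAMPLES = 4
NONE, SET_DECEL = 0, 2

cruise_buttons = deque([NONE] * PREV_BUTTON_SAMPLES, maxlen=PREV_BUTTON_SAMPLES)

for frame_samples in ([NONE], [NONE, SET_DECEL], [SET_DECEL], [NONE]):
    prev_cruise_buttons = cruise_buttons[-1]   # value before this frame
    cruise_buttons.extend(frame_samples)       # oldest samples fall off the left
    press_edge = prev_cruise_buttons == NONE and cruise_buttons[-1] != NONE
    print(list(cruise_buttons), "press edge" if press_edge else "")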
|
#!/usr/bin/env python
# This daemon runs on the CA side to look for requests in
# the database that are waiting for the CA to test whether
# challenges have been met, and to perform this test.
import redis, time, sys, signal
import policy
from redis_lock import redis_lock
from sni_challenge.verify import verify_challenge
r = redis.Redis()
ps = r.pubsub()
debug = "debug" in sys.argv
clean_shutdown = False
from daemon_common import signal_handler, short, random, random_raw
def signal_handler(a, b):
global clean_shutdown
clean_shutdown = True
r.publish("exit", "clean-exit")
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def testchallenge(session):
if r.hget(session, "live") != "True":
# This session has died due to some other reason, like an
# illegal request or timeout, since it entered testchallenge
# state. Consequently, we're not allowed to advance its
# state any further, and it should be removed from the
# pending-requests queue and not pushed into any other queue.
# We don't have to remove it from pending-testchallenge
# because the caller has already done so.
if debug: print "removing expired session", short(session)
r.lrem("pending-requests", session)
return
if r.hget(session, "state") != "testchallenge":
return
if int(r.hincrby(session, "times-tested", 1)) > 3:
# This session has already been unsuccessfully tested three
# times. Clearly, something has gone wrong or the client is
# just trying to annoy us. Do not allow it to be tested again.
r.hset(session, "live", False)
r.lrem("pending-requests", session)
return
all_satisfied = True
for i, name in enumerate(r.lrange("%s:names" % session, 0, -1)):
challenge = "%s:%d" % (session, i)
if debug: print "testing challenge", short(challenge)
challtime = int(r.hget(challenge, "challtime"))
challtype = int(r.hget(challenge, "type"))
name = r.hget(challenge, "name")
satisfied = r.hget(challenge, "satisfied") == "True"
failed = r.hget(challenge, "failed") == "True"
# TODO: check whether this challenge is too old
if not satisfied and not failed:
# if debug: print "challenge", short(challenge), "being tested"
if challtype == 0: # DomainValidateSNI
if debug: print "\tbeginning dvsni test to %s" % name
dvsni_nonce = r.hget(challenge, "dvsni:nonce")
dvsni_r = r.hget(challenge, "dvsni:r")
dvsni_ext = r.hget(challenge, "dvsni:ext")
direct_result, direct_reason = verify_challenge(name, dvsni_r, dvsni_nonce, False)
proxy_result, proxy_reason = verify_challenge(name, dvsni_r, dvsni_nonce, True)
if debug:
print "\t...direct probe: %s (%s)" % (direct_result, direct_reason)
print "\tTor proxy probe: %s (%s)" % (proxy_result, proxy_reason)
if direct_result and proxy_result:
r.hset(challenge, "satisfied", True)
else:
all_satisfied = False
# TODO: distinguish permanent and temporary failures
# can cause a permanent failure under some conditions, causing
# the session to become dead. TODO: need to articulate what
# those conditions are
else:
# Don't know how to handle this challenge type
all_satisfied = False
elif not satisfied:
if debug: print "\tchallenge was not attempted"
all_satisfied = False
if all_satisfied:
# Challenges all succeeded, so we should prepare to issue
# the requested cert or request a payment if applicable.
# TODO: double-check that there were > 0 challenges,
# so that we don't somehow mistakenly issue a cert in
# response to an empty list of challenges (even though
# the daemon that put this session on the queue should
# also have implicitly guaranteed this).
if policy.payment_required(session):
if debug: print "\t** All challenges satisfied; request %s NEEDS PAYMENT" % short(session)
# Try to get a unique abbreviated ID (10 hex digits)
for i in xrange(20):
abbreviation = random()[:10]
if r.hget("shorturl-%s" % abbreviation) is None:
break
else:
# Mysteriously unable to get a unique abbreviated session ID!
r.hset(session, "live", "False")
return
r.set("shorturl-%s" % abbreviation, session)
r.expire("shorturl-%s" % abbreviation, 3600)
r.hset(session, "shorturl", abbreviation)
r.hset(session, "state", "payment")
# According to current practice, there is no pending-payment
# queue because sessions can get out of payment state
# instantaneously as soon as the payment system sends a "payments"
# pubsub message to the payments daemon.
else:
if debug: print "\t** All challenges satisfied; request %s GRANTED" % short(session)
r.hset(session, "state", "issue")
r.lpush("pending-issue", session)
else:
# Some challenges were not verified. In the current
# design of this daemon, the client must contact
# us again to request that the session be placed back
# in pending-testchallenge!
pass
while True:
(where, what) = r.brpop(["exit", "pending-testchallenge"])
if where == "exit":
r.lpush("exit", "exit")
break
elif where == "pending-testchallenge":
with redis_lock(r, "lock-" + what):
testchallenge(what)
if clean_shutdown:
print "daemon exiting cleanly"
break
this is a simple key, not a hash
#!/usr/bin/env python
# This daemon runs on the CA side to look for requests in
# the database that are waiting for the CA to test whether
# challenges have been met, and to perform this test.
import redis, time, sys, signal
import policy
from redis_lock import redis_lock
from sni_challenge.verify import verify_challenge
r = redis.Redis()
ps = r.pubsub()
debug = "debug" in sys.argv
clean_shutdown = False
from daemon_common import signal_handler, short, random, random_raw
def signal_handler(a, b):
global clean_shutdown
clean_shutdown = True
r.publish("exit", "clean-exit")
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def testchallenge(session):
if r.hget(session, "live") != "True":
# This session has died due to some other reason, like an
# illegal request or timeout, since it entered testchallenge
# state. Consequently, we're not allowed to advance its
# state any further, and it should be removed from the
# pending-requests queue and not pushed into any other queue.
# We don't have to remove it from pending-testchallenge
# because the caller has already done so.
if debug: print "removing expired session", short(session)
r.lrem("pending-requests", session)
return
if r.hget(session, "state") != "testchallenge":
return
if int(r.hincrby(session, "times-tested", 1)) > 3:
# This session has already been unsuccessfully tested three
# times. Clearly, something has gone wrong or the client is
# just trying to annoy us. Do not allow it to be tested again.
r.hset(session, "live", False)
r.lrem("pending-requests", session)
return
all_satisfied = True
for i, name in enumerate(r.lrange("%s:names" % session, 0, -1)):
challenge = "%s:%d" % (session, i)
if debug: print "testing challenge", short(challenge)
challtime = int(r.hget(challenge, "challtime"))
challtype = int(r.hget(challenge, "type"))
name = r.hget(challenge, "name")
satisfied = r.hget(challenge, "satisfied") == "True"
failed = r.hget(challenge, "failed") == "True"
# TODO: check whether this challenge is too old
if not satisfied and not failed:
# if debug: print "challenge", short(challenge), "being tested"
if challtype == 0: # DomainValidateSNI
if debug: print "\tbeginning dvsni test to %s" % name
dvsni_nonce = r.hget(challenge, "dvsni:nonce")
dvsni_r = r.hget(challenge, "dvsni:r")
dvsni_ext = r.hget(challenge, "dvsni:ext")
direct_result, direct_reason = verify_challenge(name, dvsni_r, dvsni_nonce, False)
proxy_result, proxy_reason = verify_challenge(name, dvsni_r, dvsni_nonce, True)
if debug:
print "\t...direct probe: %s (%s)" % (direct_result, direct_reason)
print "\tTor proxy probe: %s (%s)" % (proxy_result, proxy_reason)
if direct_result and proxy_result:
r.hset(challenge, "satisfied", True)
else:
all_satisfied = False
# TODO: distinguish permanent and temporary failures
# can cause a permanent failure under some conditions, causing
# the session to become dead. TODO: need to articulate what
# those conditions are
else:
# Don't know how to handle this challenge type
all_satisfied = False
elif not satisfied:
if debug: print "\tchallenge was not attempted"
all_satisfied = False
if all_satisfied:
# Challenges all succeeded, so we should prepare to issue
# the requested cert or request a payment if applicable.
# TODO: double-check that there were > 0 challenges,
# so that we don't somehow mistakenly issue a cert in
# response to an empty list of challenges (even though
# the daemon that put this session on the queue should
# also have implicitly guaranteed this).
if policy.payment_required(session):
if debug: print "\t** All challenges satisfied; request %s NEEDS PAYMENT" % short(session)
# Try to get a unique abbreviated ID (10 hex digits)
for i in xrange(20):
abbreviation = random()[:10]
if r.get("shorturl-%s" % abbreviation) is None:
break
else:
# Mysteriously unable to get a unique abbreviated session ID!
r.hset(session, "live", "False")
return
r.set("shorturl-%s" % abbreviation, session)
r.expire("shorturl-%s" % abbreviation, 3600)
r.hset(session, "shorturl", abbreviation)
r.hset(session, "state", "payment")
# According to current practice, there is no pending-payment
# queue because sessions can get out of payment state
# instantaneously as soon as the payment system sends a "payments"
# pubsub message to the payments daemon.
else:
if debug: print "\t** All challenges satisfied; request %s GRANTED" % short(session)
r.hset(session, "state", "issue")
r.lpush("pending-issue", session)
else:
# Some challenges were not verified. In the current
# design of this daemon, the client must contact
# us again to request that the session be placed back
# in pending-testchallenge!
pass
while True:
(where, what) = r.brpop(["exit", "pending-testchallenge"])
if where == "exit":
r.lpush("exit", "exit")
break
elif where == "pending-testchallenge":
with redis_lock(r, "lock-" + what):
testchallenge(what)
if clean_shutdown:
print "daemon exiting cleanly"
break
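The fix above ("this is a simple key, not a hash") turns on the difference between Redis string keys and hashes: the short-URL abbreviation is written with set(), so it must be read back with get(), while session fields live in hashes addressed by hget(). A minimal sketch, assuming a local Redis instance and made-up key names:
# String key vs hash field in redis-py; assumes a Redis server on localhost.
import redis

r = redis.Redis()

r.set("shorturl-abc123", "session-42")       # plain string key
print(r.get("shorturl-abc123"))              # b'session-42'

r.hset("session-42", "state", "payment")     # field inside a hash
print(r.hget("session-42", "state"))         # b'payment'
# hget("shorturl-abc123", ...) would fail: the key holds a string, not a hash.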
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CommonVariables:
azure_path = 'main/azure'
utils_path_name = 'Utils'
extension_name = 'VMBackupForLinuxExtension'
extension_version = "1.0.9103.1"
extension_type = extension_name
extension_media_link = 'https://sopattna.blob.core.windows.net/extensions/' + extension_name + '-' + str(extension_version) + '.zip'
extension_label = 'Windows Azure VMBackup Extension for Linux IaaS'
extension_description = extension_label
object_str = 'objectStr'
logs_blob_uri = 'logsBlobUri'
status_blob_uri = 'statusBlobUri'
commandStartTimeUTCTicks = "commandStartTimeUTCTicks"
task_id = 'taskId'
command_to_execute = 'commandToExecute'
iaas_vmbackup_command = 'snapshot'
iaas_install_command = 'install'
locale = 'locale'
vmType = 'vmType'
VmTypeV1 = 'microsoft.classiccompute/virtualmachines'
VmTypeV2 = 'microsoft.compute/virtualmachines'
status_transitioning = 'transitioning'
status_warning = 'warning'
status_success = 'success'
status_error = 'error'
"""
error code definitions
"""
success_appconsistent = 0
success = 1
ExtensionTempTerminalState = 4
error_parameter = 11
error_12 = 12
error_wrong_time = 13
error_same_taskid = 14
error_http_failure = 15
error_upload_status_blob = 16
error = 2
FailedRetryableSnapshotFailedNoNetwork=76
"""
Pre-Post Plugin error code definitions
"""
PrePost_PluginStatus_Success = 0
PrePost_ScriptStatus_Success = 0
PrePost_ScriptStatus_Error = 1
PrePost_ScriptStatus_Warning = 2
FailedPrepostPreScriptFailed = 1100
FailedPrepostPostScriptFailed = 1101
FailedPrepostPreScriptNotFound = 1102
FailedPrepostPostScriptNotFound = 1103
FailedPrepostPluginhostConfigParsing = 1104
FailedPrepostPluginConfigParsing = 1105
FailedPrepostPreScriptPermissionError = 1106
FailedPrepostPostScriptPermissionError = 1107
FailedPrepostPreScriptTimeout = 1108
FailedPrepostPostScriptTimeout = 1109
FailedPrepostPluginhostPreTimeout = 1110
FailedPrepostPluginhostPostTimeout = 1111
FailedPrepostCheckSumMismatch = 1112
FailedPrepostPluginhostConfigNotFound = 1113
FailedPrepostPluginhostConfigPermissionError = 1114
FailedPrepostPluginhostConfigOwnershipError = 1115
FailedPrepostPluginConfigNotFound = 1116
FailedPrepostPluginConfigPermissionError = 1117
FailedPrepostPluginConfigOwnershipError = 1118
@staticmethod
def isTerminalStatus(status):
return (status==CommonVariables.status_success or status==CommonVariables.status_error)
class DeviceItem(object):
def __init__(self):
#NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL
self.name = None
self.type = None
self.file_system = None
self.mount_point = None
self.label = None
self.uuid = None
self.model = None
self.size = None
def __str__(self):
return "name:" + str(self.name) + " type:" + str(self.type) + " fstype:" + str(self.file_system) + " mountpoint:" + str(self.mount_point) + " label:" + str(self.label) + " model:" + str(self.model)
Changing error code numbers
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CommonVariables:
azure_path = 'main/azure'
utils_path_name = 'Utils'
extension_name = 'VMBackupForLinuxExtension'
extension_version = "1.0.9103.1"
extension_type = extension_name
extension_media_link = 'https://sopattna.blob.core.windows.net/extensions/' + extension_name + '-' + str(extension_version) + '.zip'
extension_label = 'Windows Azure VMBackup Extension for Linux IaaS'
extension_description = extension_label
object_str = 'objectStr'
logs_blob_uri = 'logsBlobUri'
status_blob_uri = 'statusBlobUri'
commandStartTimeUTCTicks = "commandStartTimeUTCTicks"
task_id = 'taskId'
command_to_execute = 'commandToExecute'
iaas_vmbackup_command = 'snapshot'
iaas_install_command = 'install'
locale = 'locale'
vmType = 'vmType'
VmTypeV1 = 'microsoft.classiccompute/virtualmachines'
VmTypeV2 = 'microsoft.compute/virtualmachines'
status_transitioning = 'transitioning'
status_warning = 'warning'
status_success = 'success'
status_error = 'error'
"""
error code definitions
"""
success_appconsistent = 0
success = 1
ExtensionTempTerminalState = 4
error_parameter = 11
error_12 = 12
error_wrong_time = 13
error_same_taskid = 14
error_http_failure = 15
error_upload_status_blob = 16
error = 2
FailedRetryableSnapshotFailedNoNetwork=76
"""
Pre-Post Plugin error code definitions
"""
PrePost_PluginStatus_Success = 0
PrePost_ScriptStatus_Success = 0
PrePost_ScriptStatus_Error = 1
PrePost_ScriptStatus_Warning = 2
FailedPrepostPreScriptFailed = 300
FailedPrepostPostScriptFailed = 301
FailedPrepostPreScriptNotFound = 302
FailedPrepostPostScriptNotFound = 303
FailedPrepostPluginhostConfigParsing = 304
FailedPrepostPluginConfigParsing = 305
FailedPrepostPreScriptPermissionError = 306
FailedPrepostPostScriptPermissionError = 307
FailedPrepostPreScriptTimeout = 308
FailedPrepostPostScriptTimeout = 309
FailedPrepostPluginhostPreTimeout = 310
FailedPrepostPluginhostPostTimeout = 311
FailedPrepostCheckSumMismatch = 312
FailedPrepostPluginhostConfigNotFound = 313
FailedPrepostPluginhostConfigPermissionError = 314
FailedPrepostPluginhostConfigOwnershipError = 315
FailedPrepostPluginConfigNotFound = 316
FailedPrepostPluginConfigPermissionError = 317
FailedPrepostPluginConfigOwnershipError = 318
@staticmethod
def isTerminalStatus(status):
return (status==CommonVariables.status_success or status==CommonVariables.status_error)
class DeviceItem(object):
def __init__(self):
#NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL
self.name = None
self.type = None
self.file_system = None
self.mount_point = None
self.label = None
self.uuid = None
self.model = None
self.size = None
def __str__(self):
return "name:" + str(self.name) + " type:" + str(self.type) + " fstype:" + str(self.file_system) + " mountpoint:" + str(self.mount_point) + " label:" + str(self.label) + " model:" + str(self.model)
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import array
import base64
import os
import os.path
import re
import string
import subprocess
import sys
import imp
import shlex
import traceback
import httplib
import xml.parsers.expat
from mounts import Mounts
from mounts import Mount
from fsfreezer import FsFreezer
from common import CommonVariables
from parameterparser import ParameterParser
from Utils import HandlerUtil
from urlparse import urlparse
from snapshotter import Snapshotter
from backuplogger import Backuplogger
from machineidentity import MachineIdentity
#Main function is the only entry point to this extension handler
def main():
global backup_logger
global hutil
HandlerUtil.LoggerInit('/var/log/waagent.log','/dev/stdout')
HandlerUtil.waagent.Log("%s started to handle." % (CommonVariables.extension_name))
hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)
backup_logger = Backuplogger(hutil)
for a in sys.argv[1:]:
if re.match("^([-/]*)(disable)", a):
disable()
elif re.match("^([-/]*)(uninstall)", a):
uninstall()
elif re.match("^([-/]*)(install)", a):
install()
elif re.match("^([-/]*)(enable)", a):
enable()
elif re.match("^([-/]*)(update)", a):
update()
def install():
hutil.do_parse_context('Install')
hutil.do_exit(0, 'Install','success','0', 'Install Succeeded')
def enable():
freezer = FsFreezer(backup_logger)
unfreeze_result = None
snapshot_result = None
freeze_result = None
global_error_result = None
para_parser = None
run_result = 1
error_msg = ''
run_status = None
# precheck
freeze_called = False
try:
hutil.do_parse_context('Enable')
# handle the restoring scenario.
mi = MachineIdentity()
stored_identity = mi.stored_identity()
hutil.log(" stored identity is " + stored_identity)
if(stored_identity is None):
mi.save_identity()
hutil.exit_if_enabled()
else:
current_identity = mi.current_identity()
hutil.log(" current identity " + current_identity)
if(current_identity != stored_identity):
current_seq_no = hutil._get_current_seq_no(hutil._context._config_dir)
backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
#remove other .config files. or the waagent would report the 3
#status...
for subdir, dirs, files in os.walk(hutil._context._config_dir):
for file in files:
try:
cur_seq_no = int(os.path.basename(file).split('.')[0])
if(cur_seq_no != current_seq_no):
os.remove(join(config_folder,file))
except ValueError:
continue
hutil.set_inused_config_seq(current_seq_no)
mi.save_identity()
else:
hutil.exit_if_enabled()
# we need to freeze the file system first
backup_logger.log('starting to enable', True)
"""
protectedSettings is the privateConfig passed from Powershell.
"""
protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
para_parser = ParameterParser(protected_settings, public_settings)
commandToExecute = para_parser.commandToExecute
#validate all the required parameter here
if(commandToExecute.lower() == CommonVariables.iaas_install_command):
backup_logger.log("install succeed.",True)
run_status = 'success'
error_msg = 'Install Succeeded'
run_result = 0
backup_logger.log(error_msg)
elif(commandToExecute.lower() == CommonVariables.iaas_vmbackup_command):
if(para_parser.backup_metadata is None or para_parser.public_config_obj is None or para_parser.private_config_obj is None):
run_result = 11
run_status = 'error'
error_msg = 'required field empty or not correct'
backup_logger.log(error_msg, False, 'Error')
else:
backup_logger.log('commandToExecute is ' + commandToExecute, True)
"""
make sure no logging happens while the file system is frozen.
"""
backup_logger.log("doing freeze now...", True)
freeze_called = True
freeze_result = freezer.freezeall()
backup_logger.log("freeze result " + str(freeze_result))
# check whether we freeze succeed first?
if(freeze_result is not None and len(freeze_result.errors) > 0 ):
run_result = 2
run_status = 'error'
error_msg = 'Enable failed with error' + str(freeze_result.errors)
backup_logger.log(error_msg, False, 'Warning')
else:
backup_logger.log("doing snapshot now...")
snap_shotter = Snapshotter(backup_logger)
snapshot_result = snap_shotter.snapshotall(para_parser)
backup_logger.log("snapshotall ends...")
if(snapshot_result is not None and len(snapshot_result.errors) > 0):
error_msg = "snapshot result: " + str(snapshot_result.errors)
run_result = 2
run_status = 'error'
backup_logger.log(error_msg, False, 'Error')
else:
run_result = 1
run_status = 'success'
error_msg = 'Enable Succeeded'
backup_logger.log(error_msg)
else:
run_status = 'error'
run_result = 11
error_msg = 'command is not correct'
backup_logger.log(error_msg, False, 'Error')
except Exception as e:
errMsg = "Failed to enable the extension with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
backup_logger.log(errMsg, False, 'Error')
print(errMsg)
global_error_result = e
finally:
backup_logger.log("doing unfreeze now...")
if(freeze_called):
unfreeze_result = freezer.unfreezeall()
backup_logger.log("unfreeze result " + str(unfreeze_result))
error_msg += ('Enable Succeeded with error: ' + str(unfreeze_result.errors))
if(unfreeze_result is not None and len(unfreeze_result.errors) > 0):
backup_logger.log(error_msg, False, 'Warning')
backup_logger.log("unfreeze ends...")
if(para_parser is not None):
backup_logger.commit(para_parser.logsBlobUri)
"""
we do the final report here to avoid the complex logic of handling logging while the file system is frozen.
"""
if(global_error_result is not None):
if(hasattr(global_error_result,'errno') and global_error_result.errno==2):
run_result = 12
elif(para_parser is None):
run_result = 11
else:
run_result = 2
run_status = 'error'
error_msg += ('Enable failed.' + str(global_error_result))
hutil.do_exit(0, 'Enable', run_status, str(run_result), error_msg)
def uninstall():
hutil.do_parse_context('Uninstall')
hutil.do_exit(0,'Uninstall','success','0', 'Uninstall succeeded')
def disable():
hutil.do_parse_context('Disable')
hutil.do_exit(0,'Disable','success','0', 'Disable Succeeded')
def update():
hutil.do_parse_context('Update')
hutil.do_exit(0,'Update','success','0', 'Update Succeeded')
if __name__ == '__main__' :
main()
import join.
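The note above refers to the stale-config cleanup in enable(): when the machine identity changes, every numbered .config file except the current sequence number is removed, which needs os.path's join. A standalone sketch of that cleanup with a hypothetical directory layout; the corrected handler (adding "from os.path import join") follows below.
# Standalone sketch of the stale-.config pruning; config_dir and the current
# sequence number are hypothetical, and paths are joined per walked directory.
import os
from os.path import join

def prune_stale_configs(config_dir, current_seq_no):
    for subdir, dirs, files in os.walk(config_dir):
        for name in files:
            try:
                seq_no = int(os.path.basename(name).split('.')[0])
            except ValueError:
                continue                        # not a <seq>.config file
            if seq_no != current_seq_no:
                os.remove(join(subdir, name))   # drop configs from other runs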
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import array
import base64
import os
import os.path
import re
import string
import subprocess
import sys
import imp
import shlex
import traceback
import httplib
import xml.parsers.expat
from os.path import join
from mounts import Mounts
from mounts import Mount
from fsfreezer import FsFreezer
from common import CommonVariables
from parameterparser import ParameterParser
from Utils import HandlerUtil
from urlparse import urlparse
from snapshotter import Snapshotter
from backuplogger import Backuplogger
from machineidentity import MachineIdentity
#Main function is the only entry point to this extension handler
def main():
global backup_logger
global hutil
HandlerUtil.LoggerInit('/var/log/waagent.log','/dev/stdout')
HandlerUtil.waagent.Log("%s started to handle." % (CommonVariables.extension_name))
hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)
backup_logger = Backuplogger(hutil)
for a in sys.argv[1:]:
if re.match("^([-/]*)(disable)", a):
disable()
elif re.match("^([-/]*)(uninstall)", a):
uninstall()
elif re.match("^([-/]*)(install)", a):
install()
elif re.match("^([-/]*)(enable)", a):
enable()
elif re.match("^([-/]*)(update)", a):
update()
def install():
hutil.do_parse_context('Install')
hutil.do_exit(0, 'Install','success','0', 'Install Succeeded')
def enable():
freezer = FsFreezer(backup_logger)
unfreeze_result = None
snapshot_result = None
freeze_result = None
global_error_result = None
para_parser = None
run_result = 1
error_msg = ''
run_status = None
# precheck
freeze_called = False
try:
hutil.do_parse_context('Enable')
# handle the restoring scenario.
mi = MachineIdentity()
stored_identity = mi.stored_identity()
hutil.log(" stored identity is " + stored_identity)
if(stored_identity is None):
mi.save_identity()
hutil.exit_if_enabled()
else:
current_identity = mi.current_identity()
hutil.log(" current identity " + current_identity)
if(current_identity != stored_identity):
current_seq_no = hutil._get_current_seq_no(hutil._context._config_dir)
backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
#remove other .config files, or the waagent would report the wrong
#status...
for subdir, dirs, files in os.walk(hutil._context._config_dir):
for file in files:
try:
cur_seq_no = int(os.path.basename(file).split('.')[0])
if(cur_seq_no != current_seq_no):
os.remove(join(subdir, file))
except ValueError:
continue
hutil.set_inused_config_seq(current_seq_no)
mi.save_identity()
else:
hutil.exit_if_enabled()
# we need to freeze the file system first
backup_logger.log('starting to enable', True)
"""
protectedSettings is the privateConfig passed from Powershell.
"""
protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
para_parser = ParameterParser(protected_settings, public_settings)
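# Illustrative sketch only (assumed layout, not taken from the extension docs):
# the handler settings read above are expected to look roughly like
#   {"runtimeSettings": [{"handlerSettings": {
#       "publicSettings":    { ... },   # exposed via para_parser.public_config_obj
#       "protectedSettings": { ... }    # the privateConfig passed from PowerShell
#   }}]}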
commandToExecute = para_parser.commandToExecute
#validate all the required parameter here
if(commandToExecute.lower() == CommonVariables.iaas_install_command):
backup_logger.log("install succeed.",True)
run_status = 'success'
error_msg = 'Install Succeeded'
run_result = 0
backup_logger.log(error_msg)
elif(commandToExecute.lower() == CommonVariables.iaas_vmbackup_command):
if(para_parser.backup_metadata is None or para_parser.public_config_obj is None or para_parser.private_config_obj is None):
run_result = 11
run_status = 'error'
error_msg = 'required field empty or not correct'
backup_logger.log(error_msg, False, 'Error')
else:
backup_logger.log('commandToExecute is ' + commandToExecute, True)
"""
make sure no logging happens while the file system is frozen.
"""
backup_logger.log("doing freeze now...", True)
freeze_called = True
freeze_result = freezer.freezeall()
backup_logger.log("freeze result " + str(freeze_result))
# check whether the freeze succeeded first
if(freeze_result is not None and len(freeze_result.errors) > 0 ):
run_result = 2
run_status = 'error'
error_msg = 'Enable failed with error: ' + str(freeze_result.errors)
backup_logger.log(error_msg, False, 'Warning')
else:
backup_logger.log("doing snapshot now...")
snap_shotter = Snapshotter(backup_logger)
snapshot_result = snap_shotter.snapshotall(para_parser)
backup_logger.log("snapshotall ends...")
if(snapshot_result is not None and len(snapshot_result.errors) > 0):
error_msg = "snapshot result: " + str(snapshot_result.errors)
run_result = 2
run_status = 'error'
backup_logger.log(error_msg, False, 'Error')
else:
run_result = 1
run_status = 'success'
error_msg = 'Enable Succeeded'
backup_logger.log(error_msg)
else:
run_status = 'error'
run_result = 11
error_msg = 'command is not correct'
backup_logger.log(error_msg, False, 'Error')
except Exception as e:
errMsg = "Failed to enable the extension with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
backup_logger.log(errMsg, False, 'Error')
print(errMsg)
global_error_result = e
finally:
backup_logger.log("doing unfreeze now...")
if(freeze_called):
unfreeze_result = freezer.unfreezeall()
backup_logger.log("unfreeze result " + str(unfreeze_result))
if(unfreeze_result is not None and len(unfreeze_result.errors) > 0):
error_msg += ('Enable Succeeded with error: ' + str(unfreeze_result.errors))
backup_logger.log(error_msg, False, 'Warning')
backup_logger.log("unfreeze ends...")
if(para_parser is not None):
backup_logger.commit(para_parser.logsBlobUri)
"""
we do the final report here to avoid the complex logic of handling logging while the file system is frozen.
"""
if(global_error_result is not None):
if(hasattr(global_error_result,'errno') and global_error_result.errno==2):
run_result = 12
elif(para_parser is None):
run_result = 11
else:
run_result = 2
run_status = 'error'
error_msg += ('Enable failed. ' + str(global_error_result))
hutil.do_exit(0, 'Enable', run_status, str(run_result), error_msg)
def uninstall():
hutil.do_parse_context('Uninstall')
hutil.do_exit(0,'Uninstall','success','0', 'Uninstall succeeded')
def disable():
hutil.do_parse_context('Disable')
hutil.do_exit(0,'Disable','success','0', 'Disable Succeeded')
def update():
hutil.do_parse_context('Update')
hutil.do_exit(0,'Update','success','0', 'Update Succeeded')
if __name__ == '__main__' :
main()
|
import random
import items, world
__author__ = 'Phillip Johnson'
class Player:
inventory = [items.Gold(15), items.Rock()]
hp = 100
location_x, location_y = world.starting_position
victory = False
def is_alive(self):
return self.hp > 0
def do_action(self, action, **kwargs):
action_method = getattr(self, action.method.__name__)
if action_method:
action_method(**kwargs)
def print_inventory(self):
for item in self.inventory:
print(item, '\n')
def move(self, dx, dy):
self.location_x += dx
self.location_y += dy
print(world.tile_exists(self.location_x, self.location_y).intro_text())
def move_north(self):
self.move(dx=0, dy=-1)
def move_south(self):
self.move(dx=0, dy=1)
def move_east(self):
self.move(dx=1, dy=0)
def move_west(self):
self.move(dx=-1, dy=0)
def attack(self, enemy):
best_weapon = None
max_dmg = 0
for i in self.inventory:
if isinstance(i, items.Weapon):
if i.damage > max_dmg:
max_dmg = i.damage
best_weapon = i
print("You use {} against {}!".format(best_weapon.name, enemy.name))
enemy.hp -= best_weapon.damage
if not enemy.is_alive():
print("You killed {}!".format(enemy.name))
else:
print("{} HP is {}.".format(enemy.name, enemy.hp))
def flee(self, tile):
"""Moves the player randomly to an adjacent tile"""
available_moves = tile.adjacent_moves()
r = random.randint(0, len(available_moves) - 1)
self.do_action(available_moves[r])
Making the Player class variables instance variables
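A minimal sketch (not part of the tutorial code) of why this change matters: a mutable class attribute such as the inventory list is shared by every instance, while an attribute assigned in __init__ is created fresh for each instance. The class names below are made up purely for illustration.
# Illustrative only: class attributes are shared across instances,
# instance attributes are not.
class SharedInventoryPlayer:
    inventory = []              # class attribute: one list shared by all players

class OwnInventoryPlayer:
    def __init__(self):
        self.inventory = []     # instance attribute: a new list per player

a, b = SharedInventoryPlayer(), SharedInventoryPlayer()
a.inventory.append('Gold')
print(b.inventory)              # ['Gold'] -- b sees the item a picked up

c, d = OwnInventoryPlayer(), OwnInventoryPlayer()
c.inventory.append('Gold')
print(d.inventory)              # [] -- d's inventory is independent
The revised Player class below moves inventory, hp, location and victory into __init__ for exactly this reason.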
import random
import items, world
__author__ = 'Phillip Johnson'
class Player():
def __init__(self):
self.inventory = [items.Gold(15), items.Rock()]
self.hp = 100
self.location_x, self.location_y = world.starting_position
self.victory = False
def is_alive(self):
return self.hp > 0
def do_action(self, action, **kwargs):
action_method = getattr(self, action.method.__name__)
if action_method:
action_method(**kwargs)
def print_inventory(self):
for item in self.inventory:
print(item, '\n')
def move(self, dx, dy):
self.location_x += dx
self.location_y += dy
print(world.tile_exists(self.location_x, self.location_y).intro_text())
def move_north(self):
self.move(dx=0, dy=-1)
def move_south(self):
self.move(dx=0, dy=1)
def move_east(self):
self.move(dx=1, dy=0)
def move_west(self):
self.move(dx=-1, dy=0)
def attack(self, enemy):
best_weapon = None
max_dmg = 0
for i in self.inventory:
if isinstance(i, items.Weapon):
if i.damage > max_dmg:
max_dmg = i.damage
best_weapon = i
print("You use {} against {}!".format(best_weapon.name, enemy.name))
enemy.hp -= best_weapon.damage
if not enemy.is_alive():
print("You killed {}!".format(enemy.name))
else:
print("{} HP is {}.".format(enemy.name, enemy.hp))
def flee(self, tile):
"""Moves the player randomly to an adjacent tile"""
available_moves = tile.adjacent_moves()
r = random.randint(0, len(available_moves) - 1)
self.do_action(available_moves[r])
|