code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
class Measurement:
    """A single sensor reading: sensor name, timestamp, value and error code."""

    def __init__(self, sensorname, timestamp, measureVal, errorCode):
        # The sensor name must be exactly a str; other fields are stored as given.
        assert type(sensorname) is str
        self._sensorname = sensorname
        self._timestamp = timestamp
        self._measureVal = measureVal
        self._errorCode = errorCode

    def getSensorname(self):
        """Return the name of the sensor that produced this reading."""
        return self._sensorname

    def getTimestamp(self):
        """Return the timestamp of this reading."""
        return self._timestamp

    def getMeasureVal(self):
        """Return the measured value."""
        return self._measureVal

    def getErrorCode(self):
        """Return the error code reported with this reading."""
        return self._errorCode

    def __str__(self):
        # Semicolon-separated serialization: name;timestamp;value;errorcode
        fields = (self._sensorname, str(self._timestamp),
                  str(self._measureVal), str(self._errorCode))
        return ";".join(fields)
class Sensor:
    """A named sensor with an id, holding a list of Measurement objects."""

    def __init__(self, id, name):
        self._id = id
        self._name = name
        # collected Measurement instances, in insertion order
        self._measures = []

    def getName(self):
        """Return the sensor name as a string."""
        return str(self._name)

    def getId(self):
        """Return the sensor id."""
        return self._id

    def getMeasurements(self):
        """Return the (mutable) list of stored measurements."""
        return self._measures

    def addMeasurement(self, measure):
        """Append one Measurement to this sensor."""
        self._measures.append(measure)

    def __str__(self):
        return self._name

    @staticmethod
    def hasId(sensor, id):
        """Return True if *sensor* is not None and carries the given id."""
        return sensor is not None and sensor.getId() == id
import datetime
from libthermalraspi.database.Sensor import Sensor
from libthermalraspi.database.MeasurementDAO import MeasurementDAO
import sqlite3
class SensorDAO(Sensor):
    """Database access object for Sensor: reads sensors and their
    measurements from an sqlite database (tables: sensors, measurements)."""

    # Sentinel meaning "use datetime.now() at CALL time". The previous
    # default, toTimestamp=datetime.datetime.now(), was evaluated only once
    # at import time (early-binding default bug), so long-running processes
    # silently stopped seeing new measurements. Passing None still means
    # "no upper bound".
    _CALL_TIME_NOW = object()

    def __init__(self, id, name):
        Sensor.__init__(self, id, name)

    def readMyMeasurements(self, connection, fromTimestamp=None,
                           toTimestamp=_CALL_TIME_NOW, topCount=None):
        """Read all measurements for this sensor matching the filter from
        the database, cache them on this object and return them.

        Returns the (possibly empty) list of measurements."""
        sensors = SensorDAO.readMeasurements(connection, fromTimestamp,
                                             toTimestamp, topCount,
                                             [self._id])
        if len(sensors) > 0:
            self._measures = sensors[0].getMeasurements()
        else:
            self._measures = []
        return self._measures

    @staticmethod
    def readMeasurements(connection, fromTimestamp=None,
                         toTimestamp=_CALL_TIME_NOW, topCount=None,
                         sensorIDs=None):
        """Read all measurements matching the filter from the database and
        return them as a list of Sensor objects carrying their measurements.

        Parameters:
        fromTimestamp -- datetime, lower bound for the entry timestamp
        toTimestamp   -- datetime, upper bound; defaults to "now" resolved
                         at call time; None disables the upper bound
        topCount      -- int, maximum number of measurements per sensor
        sensorIDs     -- [int], restrict to these sensor ids ([] -> [])
        """
        # BUGFIX: resolve the "now" default per call, not at import time.
        if toTimestamp is SensorDAO._CALL_TIME_NOW:
            toTimestamp = datetime.datetime.now()
        if sensorIDs is not None and len(sensorIDs) == 0:
            return []
        # ---- build WHERE clause --------------------------------------
        filterValues = {}
        filterQuery = []
        whereExpression = ""
        if fromTimestamp is not None:
            filterValues["fromTimestamp"] = fromTimestamp
            filterQuery.append("m.timestamp >= :fromTimestamp")
        if toTimestamp is not None:
            filterValues["toTimestamp"] = toTimestamp
            filterQuery.append("m.timestamp <= :toTimestamp")
        if topCount is not None:
            filterValues["topCount"] = topCount
            # correlated subquery: keep only the newest :topCount rows per
            # sensor, reusing the timestamp filters without the "m." alias
            tmpFilterForSubquery = ""
            if len(filterQuery) > 0:
                tmpFilterForSubquery = "AND " + " AND ".join(
                    [f.replace("m.", "") for f in filterQuery])
            filterQuery.append("m.timestamp IN ( "
                               "SELECT timestamp FROM measurements "
                               "WHERE sensorid = m.sensorid " +
                               tmpFilterForSubquery +
                               " ORDER BY timestamp DESC LIMIT :topCount)")
        if sensorIDs is not None:
            # NOTE(review): ids are interpolated, not bound as parameters;
            # they are expected to be ints originating from this database
            # (see readAllSensors), not untrusted input.
            filterQuery.append("s.id IN (" +
                               ",".join([str(id) for id in sensorIDs]) + ")")
        if len(filterQuery) > 0:
            whereExpression = "WHERE " + " AND ".join(filterQuery)
        # --------------------------------------------------------------
        cursor = connection.cursor()
        sql = ("SELECT s.id as sensorid, s.name, m.timestamp, m.value, m.errorcode "
               "FROM sensors s "
               "LEFT JOIN measurements m ON s.id = m.sensorid " +
               whereExpression +
               " ORDER BY s.name, s.id, m.timestamp DESC")
        cursor.execute(sql, filterValues)
        sensors = []
        cSensor = None
        for row in cursor:
            # rows arrive grouped per sensor: start a new Sensor whenever
            # the current row belongs to a different one
            if not Sensor.hasId(cSensor, row[0]):
                cSensor = Sensor(row[0], row[1])
                sensors.append(cSensor)
            # the LEFT JOIN yields NULL measurement columns for sensors
            # without any matching measurement -- skip those
            if row[2] is not None:
                cSensor.addMeasurement(
                    MeasurementDAO(cSensor.getName(), row[2], row[3], row[4]))
        return sensors

    @staticmethod
    def readAllSensors(connection):
        """Return all sensors stored in the database, ordered by name,
        as SensorDAO objects (without their measurements)."""
        cursor = connection.cursor()
        cursor.execute("SELECT id, name FROM sensors ORDER BY name")
        return [SensorDAO(row[0], row[1]) for row in cursor]

    @staticmethod
    def getSensorId(connection, sensorname):
        """Return the id for *sensorname*, inserting a new sensors row
        (and committing) if the name is not known yet."""
        queryValues = {"name": sensorname}
        cursor = connection.cursor()
        cursor.execute("SELECT id FROM sensors WHERE name=:name", queryValues)
        row = cursor.fetchone()
        if row is None:
            cursor.execute("INSERT INTO sensors (name) VALUES (:name)",
                           queryValues)
            cursor.execute("SELECT last_insert_rowid()")
            row = cursor.fetchone()
            connection.commit()
        return row[0]
from libthermalraspi.database.Measurement import Measurement
import libthermalraspi.database
import sqlite3
class MeasurementDAO(Measurement):
    """Database access object for Measurement: persists readings into the
    measurements table of an sqlite database."""

    def __init__(self, sensorname, timestamp, measureVal, errorCode):
        Measurement.__init__(self, sensorname, timestamp, measureVal, errorCode)

    def insert(self, connection):
        """Store this measurement in the database (commits)."""
        MeasurementDAO.insertMeasurement(connection, self)

    @staticmethod
    def insertMeasurement(connection, measurement):
        """Insert *measurement* into the measurements table and commit.

        The sensors row is created on demand via SensorDAO.getSensorId."""
        cursor = connection.cursor()
        # Use the public accessors instead of reaching into the
        # measurement's private attributes (consistency with Measurement).
        insertValues = {
            "sensorid": libthermalraspi.database.SensorDAO.SensorDAO.getSensorId(
                connection, measurement.getSensorname()),
            "timestamp": measurement.getTimestamp(),
            "value": measurement.getMeasureVal(),
            "errorcode": measurement.getErrorCode()}
        cursor.execute("INSERT INTO measurements "
                       "(sensorid,timestamp,value,errorcode) "
                       " VALUES (:sensorid,:timestamp,:value,:errorcode)",
                       insertValues)
        connection.commit()
import abc
class DataStore(object):
    """Abstract interface of a measurement data store."""

    # Python-2 style metaclass declaration. NOTE(review): under Python 3
    # this attribute is ignored, so abstractness would not be enforced
    # there (it would need class DataStore(metaclass=abc.ABCMeta)).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_samples(self, fromDatetime, toDatetime):
        """Read the measurements of a time range from the data store.

        Parameters:
        fromDatetime -- timestamp, start of the measurement range
        toDatetime -- timestamp, end of the measurement range
        """
        return

    @abc.abstractmethod
    def add_sample(self, timestamp, sensorname, temperatur, status):
        """Write one measurement of a given sensor into the data store.

        Parameters:
        timestamp -- timestamp, time of the measurement
        sensorname -- string, name of the measuring sensor
        temperatur -- float, measured temperature value
        status -- int, status of the sensor
        """
        return
from libthermalraspi.database.MeasurementDAO import MeasurementDAO
from libthermalraspi.database.SensorDAO import SensorDAO
from libthermalraspi.database.DataStore import DataStore
class DataStoreSQL(DataStore):
    """DataStore implementation backed by an sqlite database connection."""

    def __init__(self, db):
        self._db = db

    def get_samples(self, fromTimestamp=None, toTimestamp=None):
        """Read the measurements of a time range from the database.

        Parameters:
        fromTimestamp -- timestamp, start of the measurement range
        toTimestamp -- timestamp, end of the measurement range
        """
        sensors = SensorDAO.readMeasurements(self._db,
                                             fromTimestamp=fromTimestamp,
                                             toTimestamp=toTimestamp)
        # flatten the per-sensor measurement lists into one flat list
        return [m for s in sensors for m in s.getMeasurements()]

    def add_sample(self, timestamp, sensorname, temperatur, status):
        """Write one measurement of a given sensor into the database.

        Parameters:
        timestamp -- timestamp, time of the measurement
        sensorname -- string, name of the measuring sensor
        temperatur -- float, measured temperature value
        status -- int, status of the sensor
        """
        sample = MeasurementDAO(sensorname, timestamp, temperatur, status)
        sample.insert(self._db)

    def get_Database(self):
        """Return the underlying database connection."""
        return self._db
#!/usr/bin/python
# Sample-collector entry point: reads the sensor configuration, opens a
# data store (in-memory dummy or sqlite file) and runs the parallel
# sample collector on a fixed interval.
from libthermalraspi.database.DataStoreInMemory import DataStoreInMemory
from libthermalraspi.services.SensorConfigReader import SensorConfigReader
from libthermalraspi.services.parallelSampleCollector import ParallelSampleCollector
from libthermalraspi.programlooper import ProgramLooper
from libthermalraspi.database.DataStoreSQL import DataStoreSQL
from optparse import OptionParser
import sqlite3

# command-line options
parser = OptionParser()
parser.add_option("-f", "--dbfile",
                  action="store", type="string", dest="DBFILE",
                  default="/tmp/some.db",
                  help="used sqlite file name (default=/tmp/some.db)")
parser.add_option("-s", "--sensors",
                  action="store", type="string", dest="SENSORS",
                  default="/tmp/sensors.cfg",
                  help="sensor config file path (default=/tmp/sensors.cfg)")
parser.add_option("-i", "--interval",
                  action="store", type="float", dest="INTERVAL",
                  default=3,
                  help="used interval in seconds(default=3)")
parser.add_option("-d", "--dummy",
                  action="store_true", dest="DUMMY",
                  default=False,
                  help="dummy usage (default=False)")
(options, args) = parser.parse_args()
print("Config: dummy=%s, dbfile=%s, interval=%s, sensorconfig=%s" %
      (options.DUMMY, options.DBFILE, options.INTERVAL, options.SENSORS))

# choose the backing store: in-memory dummy with test data, or sqlite file
if options.DUMMY:
    store = DataStoreInMemory()
    store.initSomeTestData()
else:
    db = sqlite3.connect(options.DBFILE)
    store = DataStoreSQL(db)

sensors = SensorConfigReader(options.SENSORS).read()
collector = ParallelSampleCollector(store=store, sensorList=sensors)
# this is the ominous looping construct that needs to be done:
looper = ProgramLooper(options.INTERVAL)
collector.run(looper)
#!/usr/bin/env python
# Demo (Python 2): read one temperature from an AD7414 I2C thermometer.
from libthermalraspi.sensors.ad7414 import AD7414Thermometer

# Create the I2CDevice with bus no. and device address
ad7414 = AD7414Thermometer(1, 0x49)
print "AD7414 I2C-implementation:"
print("Temperature: %s" % ad7414.get_temperature())
#! /usr/bin/python
# Minimal blocking TCP server (Python 2): answers "get_temperature"
# requests with the current reading of a (simulated) thermometer until
# the client sends QUIT or closes the connection. Handles one client at
# a time.
import sys, socket, time, datetime
from libthermalraspi.sensors.simulation import CyclicThermometer
from libthermalraspi.sensors.stds75 import Stds75

# simulated thermometer cycling through fixed values; swap in the real
# STDS75 driver below for hardware use
th = CyclicThermometer([1, 2.5, 3])
# th = Stds75(1, 0x4e)

serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((socket.gethostname(), 7000))
serversocket.listen(5)
print("Waiting for connections")
while 1:
    # blocking accept: serve clients sequentially, forever
    (clientsocket, address) = serversocket.accept()
    print("Received connection")
    data = ""
    while "QUIT" not in data:
        data = clientsocket.recv(1024)
        if len(data) == 0:
            # peer closed the connection
            break
        if "get_temperature" in data:
            clientsocket.sendall(str(th.get_temperature()) + "\n")
    clientsocket.close()
    print("Connection closed")
#! /usr/bin/python
# Threaded TCP thermometer server (Python 2): one thread per client; the
# sensor driver is constructed from an expression read from a config file.
import socket, sys, threading
# import drivers
import libthermalraspi.sensors.stds75
import libthermalraspi.sensors.lm73device
import libthermalraspi.sensors.simulation


class SocketServer():
    # Accepts connections forever and spawns one ClientConnection thread
    # per client.
    def __init__(self, host, port, thermometer):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((host, port))
        self.sock.listen(1)
        self.thermometer = thermometer

    def start(self):
        print("Temperature Server started...")
        while True:
            conn, _ = self.sock.accept()
            ClientConnection(conn, self.thermometer).start()
        self.sock.close()  # TODO: define some break condition

class ClientConnection(threading.Thread):
    # Handles one client: simple text protocol with the commands
    # HELP, GET_TEMP, EMPTY and BYE (BYE ends the session).
    def __init__(self, conn, thermometer):
        threading.Thread.__init__(self)
        self.conn = conn
        self.thermometer = thermometer

    def run(self):
        client = self.conn.getpeername()[0] + ":" + str(self.conn.getpeername()[1])
        print(client + " connected")
        self.conn.sendall("Welcome to the Raspberry Pi thermometer server!\nType HELP for a list of available commands.\n")
        data = ""  # init data
        while "BYE" not in data:
            data = self.conn.recv(1024)
            if len(data) == 0:
                # peer closed the connection
                break
            print(client + ": " + data)
            if "HELP" in data:
                self.conn.sendall("====== COMMANDS ======\nHELP\nGET_TEMP\nBYE")
            elif "GET_TEMP" in data:
                print("Temperature data requested by " + client + ". Sending...")
                self.conn.sendall(str(self.thermometer.get_temperature()))  # send actual data
            elif "EMPTY" in data:
                self.conn.sendall(" ")
            elif "BYE" in data:
                pass
            else:
                self.conn.sendall("Command not found")
        self.conn.close()
        print(client + " closed connection")

# NOTE(review): these run at import time, before the __main__ guard, so
# merely importing this module requires three argv entries.
HOST = sys.argv[1]
PORT = int(sys.argv[2])
#get driver from config file
# demo-config-file: STDS75(0, 0x4e)
# SECURITY: eval() executes arbitrary code from the config file -- only
# use trusted config files. (file() is Python-2 only.)
DRIVER = eval(file(sys.argv[3]).read(), {'STDS75': libthermalraspi.sensors.stds75.Stds75,
                                         'LM73': libthermalraspi.sensors.lm73device.LM73Device,
                                         'Cyclic': libthermalraspi.sensors.simulation.CyclicThermometer})

if __name__ == '__main__':
    server = SocketServer(HOST, PORT, DRIVER)
    server.start()
#!/usr/bin/python
# Demo: read one temperature through the ITM-G2 thermometer proxy.
from libthermalraspi.sensors.thermo_proxy_itmG2 import ThermoProxy_ItmG2

t = ThermoProxy_ItmG2()
print("Temperature: %s" % t.get_temperature())
#!/usr/bin/env python
# Demo (Python 2): poll an STDS75 thermometer once a second, forever.
from libthermalraspi.sensors.itm11_g1_stds75 import Stds75
import time

stds75 = Stds75(0, 0x4e)
print "read/write implementation:"
while(True):
    print("Temperature: %f" % stds75.get_temperature())
    time.sleep(1)
#!/usr/bin/env python
# Demo: average three simulated thermometers through a CompositeSensor.
from libthermalraspi.sensors.composite_g2_1 import CompositeSensor
from libthermalraspi.sensors.simulation import CyclicThermometer

t = CompositeSensor()
t.append_sensor(CyclicThermometer([5]))
t.append_sensor(CyclicThermometer([10]))
t.append_sensor(CyclicThermometer([45]))
print("AVG temperature: %s" % t.get_temperature())
#!/usr/bin/python
# Test runner: execute the easy_suite unit-test suite.
from libthermalraspi.unittests import easy_suite
import unittest

if __name__ == '__main__':
    unittest.TextTestRunner().run(easy_suite.suite)
    pass
#!/usr/bin/python
# Demo: read one temperature from a TC74 I2C thermometer.
from libthermalraspi.sensors.tc74 import TC74Thermometer

t = TC74Thermometer (1, 0x4d)
print("Temperature: %s" % t.get_temperature())
#!/usr/bin/python
# Demo (Python 2): iterate the ProgramLooper with a 2-second interval and
# print each loop value.
from libthermalraspi.programlooper import ProgramLooper

looper = ProgramLooper(2)
for i in looper:
    print i
#!/usr/bin/env python
# Demo: read temperature and humidity from a HYT221 sensor using both the
# raw read/write driver and the smbus-based driver.
from libthermalraspi.sensors.hyt221 import Hyt221
from libthermalraspi.sensors.hyt221_smbus import Hyt221SMBus

hyt1 = Hyt221(1, 0x28)
print("read/write implementation:")
print("Temperature: %s" % hyt1.get_temperature())
print("Humidity: %s" % hyt1.get_humidity())

# BUGFIX: this second probe is labelled "smbus implementation" but used to
# instantiate Hyt221 again, leaving the imported Hyt221SMBus unused; use
# the smbus driver as intended.
hyt2 = Hyt221SMBus(1, 0x28)
print("smbus implementation:")
print("Temperature: %s" % hyt2.get_temperature())
print("Humidity: %s" % hyt2.get_humidity())
#! /usr/bin/python
# Minimal blocking TCP server (Python 2): answers "get_temperature"
# requests with the current reading of a (simulated) thermometer until
# the client sends QUIT or closes the connection. Handles one client at
# a time.
import sys, socket, time, datetime
from libthermalraspi.sensors.simulation import CyclicThermometer
from libthermalraspi.sensors.stds75 import Stds75

# simulated thermometer cycling through fixed values; swap in the real
# STDS75 driver below for hardware use
th = CyclicThermometer([1, 2.5, 3])
# th = Stds75(1, 0x4e)

serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((socket.gethostname(), 7000))
serversocket.listen(5)
print("Waiting for connections")
while 1:
    # blocking accept: serve clients sequentially, forever
    (clientsocket, address) = serversocket.accept()
    print("Received connection")
    data = ""
    while "QUIT" not in data:
        data = clientsocket.recv(1024)
        if len(data) == 0:
            # peer closed the connection
            break
        if "get_temperature" in data:
            clientsocket.sendall(str(th.get_temperature()) + "\n")
    clientsocket.close()
    print("Connection closed")
#!/usr/bin/python
# Demo: read one temperature through the ITM-G2 thermometer proxy.
from libthermalraspi.sensors.thermo_proxy_itmG2 import ThermoProxy_ItmG2

t = ThermoProxy_ItmG2()
print("Temperature: %s" % t.get_temperature())
#!/usr/bin/python
# Test runner: execute the easy_suite unit-test suite.
from libthermalraspi.unittests import easy_suite
import unittest

if __name__ == '__main__':
    unittest.TextTestRunner().run(easy_suite.suite)
    pass
#!/usr/bin/env python
# Demo (Python 2): read one temperature from an AD7414 I2C thermometer.
from libthermalraspi.sensors.ad7414 import AD7414Thermometer

# Create the I2CDevice with bus no. and device address
ad7414 = AD7414Thermometer(1, 0x49)
print "AD7414 I2C-implementation:"
print("Temperature: %s" % ad7414.get_temperature())
#!/usr/bin/env python
# Demo: read temperature and humidity from a HYT221 sensor using both the
# raw read/write driver and the smbus-based driver.
from libthermalraspi.sensors.hyt221 import Hyt221
from libthermalraspi.sensors.hyt221_smbus import Hyt221SMBus

hyt1 = Hyt221(1, 0x28)
print("read/write implementation:")
print("Temperature: %s" % hyt1.get_temperature())
print("Humidity: %s" % hyt1.get_humidity())

# BUGFIX: this second probe is labelled "smbus implementation" but used to
# instantiate Hyt221 again, leaving the imported Hyt221SMBus unused; use
# the smbus driver as intended.
hyt2 = Hyt221SMBus(1, 0x28)
print("smbus implementation:")
print("Temperature: %s" % hyt2.get_temperature())
print("Humidity: %s" % hyt2.get_humidity())
#!/usr/bin/python
# Demo (Python 2): iterate the ProgramLooper with a 2-second interval and
# print each loop value.
from libthermalraspi.programlooper import ProgramLooper

looper = ProgramLooper(2)
for i in looper:
    print i
#!/usr/bin/python
# Demo: read one temperature from a TC74 I2C thermometer.
from libthermalraspi.sensors.tc74 import TC74Thermometer

t = TC74Thermometer (1, 0x4d)
print("Temperature: %s" % t.get_temperature())
#!/usr/bin/python
# Test runner: execute the composite-thermometer unit-test suite.
from libthermalraspi.unittests import test_compositeThermometer_SWD11G2_3
import unittest

if __name__ == '__main__':
    unittest.TextTestRunner().run(test_compositeThermometer_SWD11G2_3.suite)
    pass
#!/usr/bin/python
# Temperature-server entry point: serves stored measurements over TCP from
# an in-memory dummy store or an sqlite database file.
from optparse import OptionParser
from libthermalraspi.database.DataStoreInMemory import DataStoreInMemory
# BUGFIX: DataStoreSQL is used below but was never imported, so running
# without --dummy crashed with a NameError.
from libthermalraspi.database.DataStoreSQL import DataStoreSQL
from libthermalraspi.network.tempserver import TempServer
import sqlite3
import logging

# instantiate TempServer as see fit, and run it
parser = OptionParser()
parser.add_option("-f", "--dbfile",
                  action="store", type="string", dest="DBFILE", default="/tmp/some.db", help="used sqlite file name (default=/tmp/some.db)")
parser.add_option("-l", "--logfile",
                  action="store", type="string", dest="LOGFILE", default="", help="used logfile (default=/tmp/tempserver.log)")
parser.add_option("-p", "--port",
                  action="store", type="int", dest="PORT", default=2345, help="used port (default=2345)")
parser.add_option("-d", "--dummy",
                  action="store_true", dest="DUMMY", default=False, help="dummy usage, no dbfile required (default=False)")
parser.add_option("-H", "--host",
                  action="store", dest="HOST", default="localhost", help="hostname (default=localhost)")
(options, args) = parser.parse_args()
print("Config: dummy=%s, dbfile=%s, port=%s" % (options.DUMMY, options.DBFILE, options.PORT))

# choose the backing store: in-memory dummy with test data, or sqlite file
if options.DUMMY:
    store = DataStoreInMemory()
    store.initSomeTestData()
else:
    db = sqlite3.connect(options.DBFILE)
    store = DataStoreSQL(db)

# an empty logfile option means "log to the default destination"
if options.LOGFILE == "":
    options.LOGFILE = None
#addSignalHandler()
#TODO Signalhandler
tempServer = TempServer(host=options.HOST, port=options.PORT, datastore=store, logfile=options.LOGFILE, loglevel=logging.INFO)
tempServer.start()
#! /usr/bin/python
# Threaded TCP thermometer server (Python 2): one thread per client; the
# sensor driver is constructed from an expression read from a config file.
import socket, sys, threading
# import drivers
import libthermalraspi.sensors.stds75
import libthermalraspi.sensors.lm73device
import libthermalraspi.sensors.simulation


class SocketServer():
    # Accepts connections forever and spawns one ClientConnection thread
    # per client.
    def __init__(self, host, port, thermometer):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((host, port))
        self.sock.listen(1)
        self.thermometer = thermometer

    def start(self):
        print("Temperature Server started...")
        while True:
            conn, _ = self.sock.accept()
            ClientConnection(conn, self.thermometer).start()
        self.sock.close()  # TODO: define some break condition

class ClientConnection(threading.Thread):
    # Handles one client: simple text protocol with the commands
    # HELP, GET_TEMP, EMPTY and BYE (BYE ends the session).
    def __init__(self, conn, thermometer):
        threading.Thread.__init__(self)
        self.conn = conn
        self.thermometer = thermometer

    def run(self):
        client = self.conn.getpeername()[0] + ":" + str(self.conn.getpeername()[1])
        print(client + " connected")
        self.conn.sendall("Welcome to the Raspberry Pi thermometer server!\nType HELP for a list of available commands.\n")
        data = ""  # init data
        while "BYE" not in data:
            data = self.conn.recv(1024)
            if len(data) == 0:
                # peer closed the connection
                break
            print(client + ": " + data)
            if "HELP" in data:
                self.conn.sendall("====== COMMANDS ======\nHELP\nGET_TEMP\nBYE")
            elif "GET_TEMP" in data:
                print("Temperature data requested by " + client + ". Sending...")
                self.conn.sendall(str(self.thermometer.get_temperature()))  # send actual data
            elif "EMPTY" in data:
                self.conn.sendall(" ")
            elif "BYE" in data:
                pass
            else:
                self.conn.sendall("Command not found")
        self.conn.close()
        print(client + " closed connection")

# NOTE(review): these run at import time, before the __main__ guard, so
# merely importing this module requires three argv entries.
HOST = sys.argv[1]
PORT = int(sys.argv[2])
#get driver from config file
# demo-config-file: STDS75(0, 0x4e)
# SECURITY: eval() executes arbitrary code from the config file -- only
# use trusted config files. (file() is Python-2 only.)
DRIVER = eval(file(sys.argv[3]).read(), {'STDS75': libthermalraspi.sensors.stds75.Stds75,
                                         'LM73': libthermalraspi.sensors.lm73device.LM73Device,
                                         'Cyclic': libthermalraspi.sensors.simulation.CyclicThermometer})

if __name__ == '__main__':
    server = SocketServer(HOST, PORT, DRIVER)
    server.start()
#!/usr/bin/env python
# Demo: average three simulated thermometers through a CompositeSensor.
from libthermalraspi.sensors.composite_g2_1 import CompositeSensor
from libthermalraspi.sensors.simulation import CyclicThermometer

t = CompositeSensor()
t.append_sensor(CyclicThermometer([5]))
t.append_sensor(CyclicThermometer([10]))
t.append_sensor(CyclicThermometer([45]))
print("AVG temperature: %s" % t.get_temperature())
#!/usr/bin/python
# Sample-collector entry point: reads the sensor configuration, opens a
# data store (in-memory dummy or sqlite file) and runs the parallel
# sample collector on a fixed interval.
from libthermalraspi.database.DataStoreInMemory import DataStoreInMemory
from libthermalraspi.services.SensorConfigReader import SensorConfigReader
from libthermalraspi.services.parallelSampleCollector import ParallelSampleCollector
from libthermalraspi.programlooper import ProgramLooper
from libthermalraspi.database.DataStoreSQL import DataStoreSQL
from optparse import OptionParser
import sqlite3

# command-line options
parser = OptionParser()
parser.add_option("-f", "--dbfile",
                  action="store", type="string", dest="DBFILE",
                  default="/tmp/some.db",
                  help="used sqlite file name (default=/tmp/some.db)")
parser.add_option("-s", "--sensors",
                  action="store", type="string", dest="SENSORS",
                  default="/tmp/sensors.cfg",
                  help="sensor config file path (default=/tmp/sensors.cfg)")
parser.add_option("-i", "--interval",
                  action="store", type="float", dest="INTERVAL",
                  default=3,
                  help="used interval in seconds(default=3)")
parser.add_option("-d", "--dummy",
                  action="store_true", dest="DUMMY",
                  default=False,
                  help="dummy usage (default=False)")
(options, args) = parser.parse_args()
print("Config: dummy=%s, dbfile=%s, interval=%s, sensorconfig=%s" %
      (options.DUMMY, options.DBFILE, options.INTERVAL, options.SENSORS))

# choose the backing store: in-memory dummy with test data, or sqlite file
if options.DUMMY:
    store = DataStoreInMemory()
    store.initSomeTestData()
else:
    db = sqlite3.connect(options.DBFILE)
    store = DataStoreSQL(db)

sensors = SensorConfigReader(options.SENSORS).read()
collector = ParallelSampleCollector(store=store, sensorList=sensors)
# this is the ominous looping construct that needs to be done:
looper = ProgramLooper(options.INTERVAL)
collector.run(looper)
#! /usr/bin/python
# Interactive TCP client (Python 2) for the thermometer socket server:
# connects, forwards upper-cased user commands and prints the replies
# until the server closes the connection.
import sys, socket
from sensors.simulation import Thermometer


class ThermoProxy():
    """Connects with a server"""

    def __init__(self, host="127.0.0.1", port=1024):
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self._sock.connect((host, port))
        except socket.error:
            # BUGFIX: was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit; only connection errors mean
            # the host is unreachable.
            sys.exit("Host unreachable")

    def send_msg(self, msg):
        """Send *msg* to the server; empty input maps to the EMPTY command."""
        if msg == "":
            msg = "EMPTY"
        self._sock.send(msg)

    def receive_msg(self):
        """Receive and return up to 1024 bytes from the server."""
        return self._sock.recv(1024)


if __name__ == '__main__':
    default = "127.0.0.1:1024"
    host_str = raw_input("<HOST>:<PORT> [%s] " % (default))
    # use default address if not specified
    if host_str == '':
        host_str = default
    host_str_array = host_str.split(":")
    # use specified host address
    HOST = host_str_array[0]
    if len(host_str_array) > 1:
        # use specified port
        PORT = int(host_str_array[1])
    else:
        # use default port
        PORT = int(default.split(":")[1])
    tp = ThermoProxy(HOST, PORT)
    print(tp.receive_msg())
    while True:
        # prefix shown as long as we are connected to the server
        prefix = "client@%s# " % (tp._sock.getpeername()[0])
        # let the client enter a command and forward it upper-cased
        cmd = raw_input(prefix)
        tp.send_msg(cmd.upper())
        rcv = tp.receive_msg()
        # end program if connection closed by server
        if not rcv:
            print("Connection closed by server.")
            break
        # print the received message of the server
        print(rcv)
    print("--- END OF PROGRAM ---")
#!/usr/bin/env python
# Demo (Python 2): poll an STDS75 thermometer once a second, forever.
from libthermalraspi.sensors.itm11_g1_stds75 import Stds75
import time

stds75 = Stds75(0, 0x4e)
print "read/write implementation:"
while(True):
    print("Temperature: %f" % stds75.get_temperature())
    time.sleep(1)
#!/usr/bin/python
# Test runner: execute the composite-thermometer unit-test suite.
from libthermalraspi.unittests import test_compositeThermometer_SWD11G2_3
import unittest

if __name__ == '__main__':
    unittest.TextTestRunner().run(test_compositeThermometer_SWD11G2_3.suite)
    pass
#!/usr/bin/python
# Temperature-server entry point: serves stored measurements over TCP from
# an in-memory dummy store or an sqlite database file.
from optparse import OptionParser
from libthermalraspi.database.DataStoreInMemory import DataStoreInMemory
# BUGFIX: DataStoreSQL is used below but was never imported, so running
# without --dummy crashed with a NameError.
from libthermalraspi.database.DataStoreSQL import DataStoreSQL
from libthermalraspi.network.tempserver import TempServer
import sqlite3
import logging

# instantiate TempServer as see fit, and run it
parser = OptionParser()
parser.add_option("-f", "--dbfile",
                  action="store", type="string", dest="DBFILE", default="/tmp/some.db", help="used sqlite file name (default=/tmp/some.db)")
parser.add_option("-l", "--logfile",
                  action="store", type="string", dest="LOGFILE", default="", help="used logfile (default=/tmp/tempserver.log)")
parser.add_option("-p", "--port",
                  action="store", type="int", dest="PORT", default=2345, help="used port (default=2345)")
parser.add_option("-d", "--dummy",
                  action="store_true", dest="DUMMY", default=False, help="dummy usage, no dbfile required (default=False)")
parser.add_option("-H", "--host",
                  action="store", dest="HOST", default="localhost", help="hostname (default=localhost)")
(options, args) = parser.parse_args()
print("Config: dummy=%s, dbfile=%s, port=%s" % (options.DUMMY, options.DBFILE, options.PORT))

# choose the backing store: in-memory dummy with test data, or sqlite file
if options.DUMMY:
    store = DataStoreInMemory()
    store.initSomeTestData()
else:
    db = sqlite3.connect(options.DBFILE)
    store = DataStoreSQL(db)

# an empty logfile option means "log to the default destination"
if options.LOGFILE == "":
    options.LOGFILE = None
#addSignalHandler()
#TODO Signalhandler
tempServer = TempServer(host=options.HOST, port=options.PORT, datastore=store, logfile=options.LOGFILE, loglevel=logging.INFO)
tempServer.start()
#! /usr/bin/python
# Interactive TCP client (Python 2) for the thermometer socket server:
# connects, forwards upper-cased user commands and prints the replies
# until the server closes the connection.
import sys, socket
from sensors.simulation import Thermometer


class ThermoProxy():
    """Connects with a server"""

    def __init__(self, host="127.0.0.1", port=1024):
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self._sock.connect((host, port))
        except socket.error:
            # BUGFIX: was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit; only connection errors mean
            # the host is unreachable.
            sys.exit("Host unreachable")

    def send_msg(self, msg):
        """Send *msg* to the server; empty input maps to the EMPTY command."""
        if msg == "":
            msg = "EMPTY"
        self._sock.send(msg)

    def receive_msg(self):
        """Receive and return up to 1024 bytes from the server."""
        return self._sock.recv(1024)


if __name__ == '__main__':
    default = "127.0.0.1:1024"
    host_str = raw_input("<HOST>:<PORT> [%s] " % (default))
    # use default address if not specified
    if host_str == '':
        host_str = default
    host_str_array = host_str.split(":")
    # use specified host address
    HOST = host_str_array[0]
    if len(host_str_array) > 1:
        # use specified port
        PORT = int(host_str_array[1])
    else:
        # use default port
        PORT = int(default.split(":")[1])
    tp = ThermoProxy(HOST, PORT)
    print(tp.receive_msg())
    while True:
        # prefix shown as long as we are connected to the server
        prefix = "client@%s# " % (tp._sock.getpeername()[0])
        # let the client enter a command and forward it upper-cased
        cmd = raw_input(prefix)
        tp.send_msg(cmd.upper())
        rcv = tp.receive_msg()
        # end program if connection closed by server
        if not rcv:
            print("Connection closed by server.")
            break
        # print the received message of the server
        print(rcv)
    print("--- END OF PROGRAM ---")
#!/usr/bin/python
# Producer/consumer reference demo (Python 2) using Queue.Queue:
# thread-safe and non-polling, in contrast to the "bad practice" variants.
from Queue import Queue
import threading
import time
import os

# --------------------------------------------------------------------
# FROM THIS ONE YOU CAN COPY. THREAD SAFE, NON-POLLING!
# --------------------------------------------------------------------
# items are produced by one thread, and consumed by another.
# items are big, and stored in a database, indexed by their ID. (using
# a dictionary as a symbolic database.)
database = {}
item_no = 0

# the IDs (simple integer values) are enqueued by the producer, and
# dequeued by a consumer who then looks up the corresponding item in
# the database. the queue has a maximum size to throttle the producer
# in case consuming is delayed.
queue_size = 10
queue = Queue(10)

# the lock protects only the database. unlike the other, polling,
# versions, the queue protects itself.
lock = threading.Lock()


class Producer(threading.Thread):
    # Creates items under the lock, then announces their IDs on the
    # queue (outside the lock; the queue does its own locking).
    def run(self):
        global item_no
        while True:
            with lock:
                database[item_no] = 'some big chunk to process in another thread'
                item_no += 1
            # announce item. blocks if queue is full.
            print 'producing %d ...' % (item_no-1)
            queue.put(item_no-1)


class Consumer(threading.Thread):
    # Waits (blocking, no polling) for an ID on the queue, then removes
    # and processes the corresponding item from the database.
    def run(self):
        while True:
            # get next item. blocks if queue is empty.
            next_item_no = queue.get()
            with lock:
                # get and remove item to process
                item = database[next_item_no]
                del database[next_item_no]
            print("processing '%s'" % item)


if __name__ == '__main__':
    print os.getpid()
    # start two consumers and two producers, then wait on them forever
    consumers = []
    for i in xrange(2):
        consumer = Consumer()
        consumer.start()
        consumers.append(consumer)
    producers = []
    for i in xrange(2):
        producer = Producer()
        producer.start()
        producers.append(producer)
    for consumer in consumers:
        consumer.join()
    for producer in producers:
        producer.join()
#!/usr/bin/python
# Deliberate anti-pattern demo #1 (Python 2): manual acquire/release
# locking that is not exception-safe, plus a polling consumer. Kept
# as-is on purpose -- see the WARNING below.
import threading
import time
import os

# --------------------------------------------------------------------
# WARNING: THIS PROGRAM IS A DEMONSTRATION OF SOME OF THE REALLY BAD
# (BUT, SADLY, POPULAR) PRACTICES OF INTER-THREAD COMMUNICATION. DON'T
# EVER COPY FROM IT!!!
# WEAKNESSES:
# * locking is in place, but not exception-safe. lock remains held if
#   the critical section is terminated by an exception
# * the consumer polls for data. there sure is a better way.
# * the producer produces no matter what. if the consumer does not
#   consume fast enough, the program will run out of memory.
# --------------------------------------------------------------------
# items are produced by one thread, and consumed by another.
# items are big, and stored in a database, indexed by their ID. (using
# a dictionary as a symbolic database.)
database = {}

# the IDs (simple integer values) are enqueued by the producer, and
# dequeued by a consumer who then looks up the corresponding item in
# the database.
queue = []

lock = threading.Lock()
producer_interval = 1
consumer_interval = 1


class Producer(threading.Thread):
    def run(self):
        item_no = 0
        while True:
            # begin critical section
            lock.acquire()
            queue.append(item_no)
            database[item_no] = 'some big chunk to process in another thread'
            item_no += 1
            lock.release()
            # end critical section
            time.sleep(producer_interval)


class Consumer(threading.Thread):
    def run(self):
        while True:
            # begin critical section
            lock.acquire()
            if len(queue) == 0:
                # end critical section (nothing to do)
                lock.release()
                time.sleep(consumer_interval)
                continue
            # dequeue and get next item ID
            item_no = queue[0]
            del queue[0]
            # get and remove item to process
            item = database[item_no]
            del database[item_no]
            # print("processing '%s'" % item)
            # end critical section
            lock.release()


if __name__ == '__main__':
    print os.getpid()
    consumer = Consumer()
    producer = Producer()
    consumer.start()
    producer.start()
    consumer.join()
    producer.join()
#!/usr/bin/python
# Deliberate anti-pattern demo #2 (Python 2): exception-safe "with lock"
# locking, but the consumer still polls. Kept as-is on purpose -- see
# the WARNING below.
import threading
import time
import os

# --------------------------------------------------------------------
# WARNING: THIS PROGRAM IS A DEMONSTRATION OF SOME OF THE REALLY BAD
# (BUT, SADLY, POPULAR) PRACTICES OF INTER-THREAD COMMUNICATION. DON'T
# EVER COPY FROM IT!!!
# WEAKNESSES:
# * the consumer polls for data. there sure is a better way.
# * the producer produces no matter what. if the consumer does not
#   consume fast enough, the program will run out of memory.
# --------------------------------------------------------------------
# items are produced by one thread, and consumed by another.
# items are big, and stored in a database, indexed by their ID. (using
# a dictionary as a symbolic database.)
database = {}

# the IDs (simple integer values) are enqueued by the producer, and
# dequeued by a consumer who then looks up the corresponding item in
# the database.
queue = []

lock = threading.Lock()
producer_interval = 1
consumer_interval = 1


class Producer(threading.Thread):
    def run(self):
        item_no = 0
        while True:
            with lock:
                queue.append(item_no)
                database[item_no] = 'some big chunk to process in another thread'
                item_no += 1
            time.sleep(producer_interval)


class Consumer(threading.Thread):
    def run(self):
        # sleep OUTSIDE the lock: remember the decision while holding the
        # lock, act on it in the next iteration
        do_sleep = False
        while True:
            if do_sleep:
                print 'sleeping'
                time.sleep(consumer_interval)
                do_sleep = False
            with lock:
                if len(queue) == 0:
                    do_sleep = True
                    continue
                # dequeue and get next item ID
                item_no = queue[0]
                del queue[0]
                # get and remove item to process
                item = database[item_no]
                del database[item_no]
                # print("processing '%s'" % item)


if __name__ == '__main__':
    print os.getpid()
    consumer = Consumer()
    producer = Producer()
    consumer.start()
    producer.start()
    consumer.join()
    producer.join()
| Python |
#!/usr/bin/python
import threading
import time
import os
# --------------------------------------------------------------------
# WARNING: THIS PROGRAM IS A DEMONSTRATION OF SOME OF THE REALLY BAD
# (BUT, SADLY, POPULAR) PRACTICES OF INTER-THREAD COMMUNICATION. DON'T
# EVER COPY FROM IT!!!
# WEAKNESSES:
# * two threads hammer on two related but unprotected datastructures
# * the consumer polls for data. there sure is a better way.
# * the producer produces no matter what. if the consumer does not
# consume fast enough, the program will run out of memory.
# --------------------------------------------------------------------
# items are produced by one thread, and consumed by another.
# items are big, and stored in a database, indexed by their ID. (using
# a dictionary as a symbolic database.)
database = {}
# the IDs (simple integer values) are enqueued by the producer, and
# dequeued by a consumer who then looks up the corresponding item in
# the database.
queue = []
# NOTE: no lock at all in this version -- the two threads race on
# queue/database (the stated weakness of this demo).
producer_interval = 1
consumer_interval = 1
class Producer(threading.Thread):
    def run(self):
        item_no = 0
        while True:
            # Unsynchronized writes to shared state (deliberately broken).
            queue.append(item_no)
            database[item_no] = 'some big chunk to process in another thread'
            item_no += 1
            time.sleep(producer_interval)
class Consumer(threading.Thread):
    def run(self):
        while True:
            # Busy polling: check, sleep, retry (deliberately bad).
            if len(queue) == 0:
                time.sleep(consumer_interval)
                continue
            # dequeue and get next item ID
            item_no = queue[0]
            del queue[0]
            # get and remove item to process
            item = database[item_no]
            del database[item_no]
            # print("processing '%s'" % item)
if __name__ == '__main__':
    # Print the PID so the demo can be watched/killed from a shell.
    print os.getpid()
    consumer = Consumer()
    producer = Producer()
    consumer.start()
    producer.start()
    # run() never returns, so these join() calls block until the
    # process is killed externally.
    consumer.join()
    producer.join()
| Python |
#!/usr/bin/python
import threading
import time
import os
# --------------------------------------------------------------------
# WARNING: THIS PROGRAM IS A DEMONSTRATION OF SOME OF THE REALLY BAD
# (BUT, SADLY, POPULAR) PRACTICES OF INTER-THREAD COMMUNICATION. DON'T
# EVER COPY FROM IT!!!
# WEAKNESSES:
# * locking is in place, but not exception-safe. lock remains held if
# the critical section is terminated by an exception
# * the consumer polls for data. there sure is a better way.
# * the producer produces no matter what. if the consumer does not
# consume fast enough, the program will run out of memory.
# --------------------------------------------------------------------
# items are produced by one thread, and consumed by another.
# items are big, and stored in a database, indexed by their ID. (using
# a dictionary as a symbolic database.)
database = {}
# the IDs (simple integer values) are enqueued by the producer, and
# dequeued by a consumer who then looks up the corresponding item in
# the database.
queue = []
# one lock guards BOTH shared structures (queue and database)
lock = threading.Lock()
# production period / polling period, in seconds
producer_interval = 1
consumer_interval = 1
class Producer(threading.Thread):
    # NOTE: manual acquire/release is not exception-safe (the stated
    # weakness of this demo) -- an exception inside the critical section
    # would leave the lock held forever.
    def run(self):
        item_no = 0
        while True:
            # begin critical section
            lock.acquire()
            queue.append(item_no)
            database[item_no] = 'some big chunk to process in another thread'
            item_no += 1
            lock.release()
            # end critical section
            time.sleep(producer_interval)
class Consumer(threading.Thread):
    # NOTE: same non-exception-safe locking pattern as the producer.
    def run(self):
        while True:
            # begin critical section
            lock.acquire()
            if len(queue) == 0:
                # end critical section (nothing to do)
                lock.release()
                time.sleep(consumer_interval)
                continue
            # dequeue and get next item ID
            item_no = queue[0]
            del queue[0]
            # get and remove item to process
            item = database[item_no]
            del database[item_no]
            # print("processing '%s'" % item)
            # end critical section
            lock.release()
if __name__ == '__main__':
    # Print the PID so the demo can be watched/killed from a shell.
    print os.getpid()
    consumer = Consumer()
    producer = Producer()
    consumer.start()
    producer.start()
    # run() never returns, so these join() calls block until the
    # process is killed externally.
    consumer.join()
    producer.join()
| Python |
#!/usr/bin/python
import threading
import time
import os
# --------------------------------------------------------------------
# WARNING: THIS PROGRAM IS A DEMONSTRATION OF SOME OF THE REALLY BAD
# (BUT, SADLY, POPULAR) PRACTICES OF INTER-THREAD COMMUNICATION. DON'T
# EVER COPY FROM IT!!!
# WEAKNESSES:
# * two threads hammer on two related but unprotected datastructures
# * the consumer polls for data. there sure is a better way.
# * the producer produces no matter what. if the consumer does not
# consume fast enough, the program will run out of memory.
# --------------------------------------------------------------------
# items are produced by one thread, and consumed by another.
# items are big, and stored in a database, indexed by their ID. (using
# a dictionary as a symbolic database.)
database = {}
# the IDs (simple integer values) are enqueued by the producer, and
# dequeued by a consumer who then looks up the corresponding item in
# the database.
queue = []
# NOTE: no lock at all in this version -- the two threads race on
# queue/database (the stated weakness of this demo).
producer_interval = 1
consumer_interval = 1
class Producer(threading.Thread):
    def run(self):
        item_no = 0
        while True:
            # Unsynchronized writes to shared state (deliberately broken).
            queue.append(item_no)
            database[item_no] = 'some big chunk to process in another thread'
            item_no += 1
            time.sleep(producer_interval)
class Consumer(threading.Thread):
    def run(self):
        while True:
            # Busy polling: check, sleep, retry (deliberately bad).
            if len(queue) == 0:
                time.sleep(consumer_interval)
                continue
            # dequeue and get next item ID
            item_no = queue[0]
            del queue[0]
            # get and remove item to process
            item = database[item_no]
            del database[item_no]
            # print("processing '%s'" % item)
if __name__ == '__main__':
    # Print the PID so the demo can be watched/killed from a shell.
    print os.getpid()
    consumer = Consumer()
    producer = Producer()
    consumer.start()
    producer.start()
    # run() never returns, so these join() calls block until the
    # process is killed externally.
    consumer.join()
    producer.join()
| Python |
#!/usr/bin/python
from Queue import Queue
import threading
import time
import os
# --------------------------------------------------------------------
# FROM THIS ONE YOU CAN COPY. THREAD SAFE, NON-POLLING!
# --------------------------------------------------------------------
# items are produced by one thread, and consumed by another.
# items are big, and stored in a database, indexed by their ID. (using
# a dictionary as a symbolic database.)
database = {}
item_no = 0
# the IDs (simple integer values) are enqueued by the producer, and
# dequeued by a consumer who then looks up the corresponding item in
# the database. the queue has a maximum size to throttle the producer
# in case consuming is delayed.
queue_size = 10
# CONSISTENCY FIX: derive the queue's bound from queue_size instead of
# repeating the literal 10 (the two values could silently drift apart).
queue = Queue(queue_size)
# the lock protects only the database. unlike the other, polling,
# versions, the queue protects itself.
lock = threading.Lock()
class Producer(threading.Thread):
    def run(self):
        # item_no is shared between several producers, hence global and
        # only touched under the lock.
        global item_no
        while True:
            with lock:
                database[item_no] = 'some big chunk to process in another thread'
                item_no += 1
                # announce item. blocks if queue is full.
                # NOTE(review): announcing under the lock keeps item_no-1
                # consistent with the database write when several
                # producers run -- confirm intended lock scope.
                print 'producing %d ...' % (item_no-1)
                queue.put(item_no-1)
class Consumer(threading.Thread):
    def run(self):
        while True:
            # get next item. blocks if queue is empty (no polling).
            next_item_no = queue.get()
            with lock:
                # get and remove item to process
                item = database[next_item_no]
                del database[next_item_no]
            print("processing '%s'" % item)
if __name__ == '__main__':
    # Print the PID so the demo can be watched/killed from a shell.
    print os.getpid()
    # two consumers and two producers all share the same bounded queue
    consumers = []
    for i in xrange(2):
        consumer = Consumer()
        consumer.start()
        consumers.append(consumer)
    producers = []
    for i in xrange(2):
        producer = Producer()
        producer.start()
        producers.append(producer)
    # run() never returns, so these join() calls block until the
    # process is killed externally.
    for consumer in consumers:
        consumer.join()
    for producer in producers:
        producer.join()
| Python |
#!/usr/bin/python
import threading
import time
import os
# --------------------------------------------------------------------
# WARNING: THIS PROGRAM IS A DEMONSTRATION OF SOME OF THE REALLY BAD
# (BUT, SADLY, POPULAR) PRACTICES OF INTER-THREAD COMMUNICATION. DON'T
# EVER COPY FROM IT!!!
# WEAKNESSES:
# * the consumer polls for data. there sure is a better way.
# * the producer produces no matter what. if the consumer does not
# consume fast enough, the program will run out of memory.
# --------------------------------------------------------------------
# items are produced by one thread, and consumed by another.
# items are big, and stored in a database, indexed by their ID. (using
# a dictionary as a symbolic database.)
database = {}
# the IDs (simple integer values) are enqueued by the producer, and
# dequeued by a consumer who then looks up the corresponding item in
# the database.
queue = []
# one lock guards BOTH shared structures (queue and database)
lock = threading.Lock()
# production period / polling period, in seconds
producer_interval = 1
consumer_interval = 1
class Producer(threading.Thread):
    # Endlessly generates items; never throttled (one of the stated
    # weaknesses of this demo).
    def run(self):
        item_no = 0
        while True:
            # The 'with' statement releases the lock even on exception.
            with lock:
                queue.append(item_no)
                database[item_no] = 'some big chunk to process in another thread'
                item_no += 1
            time.sleep(producer_interval)
class Consumer(threading.Thread):
    def run(self):
        do_sleep = False
        while True:
            # Sleep OUTSIDE the critical section so the producer can make
            # progress while we wait.
            if do_sleep:
                print 'sleeping'
                time.sleep(consumer_interval)
                do_sleep = False
            with lock:
                if len(queue) == 0:
                    # 'continue' leaves the with-block first, so the lock
                    # is released before the sleep above happens.
                    do_sleep = True
                    continue
                # dequeue and get next item ID
                item_no = queue[0]
                del queue[0]
                # get and remove item to process
                item = database[item_no]
                del database[item_no]
                # print("processing '%s'" % item)
if __name__ == '__main__':
    # Print the PID so the demo can be watched/killed from a shell.
    print os.getpid()
    consumer = Consumer()
    producer = Producer()
    consumer.start()
    producer.start()
    # run() never returns, so these join() calls block until the
    # process is killed externally.
    consumer.join()
    producer.join()
| Python |
#!/usr/bin/env python
import imageshack
import sys
if __name__ == "__main__":
    # Exactly two arguments required: developer key and a file path or URL.
    if len(sys.argv)!=3:
        print "Usage upload.py <devkey> <filename/url>"
        sys.exit(1)
    u = imageshack.Uploader(sys.argv[1])
    try:
        # A leading http:// selects the URL-transload path; anything else
        # is treated as a local file path.
        if sys.argv[2].startswith("http://"):
            print u.uploadURL(sys.argv[2])
        else:
            print u.uploadFile(sys.argv[2])
    except imageshack.ServerException, e:
        # server-side upload failure: report and exit normally
        print str(e)
| Python |
#!/usr/bin/env python
import yfrog
import sys
if __name__ == "__main__":
    # Need at least: username, password and a file path or URL.
    if len(sys.argv)<4:
        print "Usage upload.py <username> <password> <filename/url> [<text>] [<source>]"
        sys.exit(1)
    u = yfrog.Uploader()
    # optional twitter message (argv[4]) and 'posted from' source (argv[5])
    if len(sys.argv)>=5:
        msg = sys.argv[4]
    else:
        msg = None
    if len(sys.argv)>=6:
        src = sys.argv[5]
    else:
        src = 'yfrog'
    try:
        # A leading http:// selects the URL-transload path; anything else
        # is treated as a local file path.
        if sys.argv[3].startswith("http://"):
            print u.uploadURL(sys.argv[3],sys.argv[1],sys.argv[2],message=msg,source=src)
        else:
            print u.uploadFile(sys.argv[3],sys.argv[1],sys.argv[2],message=msg,source=src)
    except yfrog.ServerException, e:
        # server-side upload failure: report and exit normally
        print str(e)
| Python |
#!/usr/bin/env python
import os
import sys
import getopt
import imageshack
def usage():
    # Print command-line help (mirrors the options handled in getopts()).
    print "Usage apitest.py --file filename --key key [--id ID] [--cookie COOKIE] [--tags TAGS] [--visibility PUBLIC] [--username USERNAME] [--password PASSWORD] [--blocksize=BLOCKSIZE] [--numblocks=NUMBLOCKS]"
def getopts(argv):
    """Parse command-line options into a plain dict.

    Args:
        argv: full argument vector (argv[0], the program name, is skipped).
    Returns:
        dict with keys file, key, cookie, id, tags, username, password,
        public, blocksize, numblocks; options not given keep their defaults.
    Exits:
        status 2 after printing usage on an unknown/malformed option.
    """
    data = { 'file': None,
             'key': None,
             'cookie': None,
             'id': None,
             'tags': None,
             'username': None,
             'password': None,
             'public': None,
             # defaults added so callers may read these keys even when the
             # options were not supplied on the command line
             'blocksize': None,
             'numblocks': None }
    try:
        # BUGFIX: 'c:' was missing from the short-option string, so the
        # documented '-c' cookie flag always raised GetoptError.
        # BUGFIX: parse the argv parameter instead of ignoring it in
        # favour of sys.argv.
        opts, args = getopt.getopt(argv[1:], "f:k:i:c:t:v:u:p:b:n:", ["file=","key=","id=","cookie=","tags=","visibility=","username=","password=","blocksize=","numblocks="])
    except getopt.GetoptError as err:
        sys.stdout.write(str(err) + "\n")
        usage()
        sys.exit(2)
    for o, a in opts:
        if o in ("-f", "--file"):
            data['file'] = a
        # BUGFIX: this used to be a separate 'if', breaking the elif chain.
        elif o in ("-k", "--key"):
            data['key'] = a
        elif o in ("-i", "--id"):
            data['id'] = a
        elif o in ("-c", "--cookie"):
            data['cookie'] = a
        elif o in ("-t", "--tags"):
            data['tags'] = a
        elif o in ("-v", "--visibility"):
            # case-insensitive boolean: anything other than TRUE is False
            data['public'] = True if a.upper() == 'TRUE' else False
        elif o in ("-u", "--username"):
            data['username'] = a
        elif o in ("-p", "--password"):
            data['password'] = a
        elif o in ("-b", "--blocksize"):
            data['blocksize'] = int(a)
        elif o in ("-n", "--numblocks"):
            data['numblocks'] = int(a)
    return data
def main():
    # Parse options; abort unless both a developer key and an existing
    # local input file were supplied.
    data = getopts(sys.argv)
    if not data['key']:
        print 'ERROR: No developer key specified'
        sys.exit(1)
    if not data['file'] or not os.path.isfile(data['file']):
        print 'ERROR: No file specified or not existing file'
        sys.exit(1)
    uploader = imageshack.ChunkedUploader(data['key'], data['cookie'],
                                          data['username'], data['password'])
    try:
        res = uploader.upload_file(data['file'], data['tags'], data['public'])
    except Exception as e:
        print 'ERROR: File could not be uploaded:'
        print e
        sys.exit(1)
    # NOTE(review): res is assumed to be a sequence whose third element is
    # the uploaded file's URL -- confirm against ChunkedUploader.upload_file.
    print res[2]
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
'''
Client API library to upload images and videos to yfrog.com
Using Yfrog public API, as described here:
http://yfrog.com/api.php
'''
import urllib2_file
import urllib2
import socket
from mimetypes import guess_type
from xml.dom.minidom import parseString
from os.path import exists
API_URL = 'http://yfrog.com/api/%s'
HTTP_UPLOAD_TIMEOUT = 300
class UploadException(Exception):
    ''' Exceptions of this class are raised for various upload based errors
    detected on the client side (bad input, undecodable server response). '''
    pass
class ServerException(Exception):
    ''' Raised when the yfrog server itself reports an upload failure. '''
    def __init__(self, code, message):
        # keep the numeric error code and the human-readable text apart
        # so callers can branch on the code
        self.code = code
        self.message = message
    def __str__(self):
        template = "ServerException:%d:%s"
        return template % (self.code, self.message)
class Uploader:
    ''' Class to upload images and video to yfrog.com '''
    def __init__(self, timeout=HTTP_UPLOAD_TIMEOUT):
        '''Creates uploader object.
        Args:
            timeout: timeout in seconds for upload operation (optional)
        '''
        self.timeout = timeout
    def uploadURL(self,
                  url,
                  twitter_username,
                  twitter_password,
                  message = None,
                  tags = None,
                  public = True,
                  source = 'yfrog',
                  auth = None,
                  key = None):
        '''Uploads a file that is already hosted at the given URL (transload).
        Args:
            url: url of file to be uploaded
            twitter_username: Twitter account username
            twitter_password: Twitter account password
            message: Message to post to twitter. The URL of the image or video is automatically added. (optional)
            tags: comma-separated list of tags (optional)
            public: whether the image is public or not
            source: Twitter 'posted from' attribute
            auth: auth token (optional)
            key: Developer key. See http://code.google.com/p/imageshackapi/wiki/DeveloperKey
        Returns a dictionary with the following keys:
            url: url of uploaded image (this is URL for HTML page)
        '''
        data = {'url' : url,
                'public' : self._yesno(public),
                'username' : twitter_username,
                'password' : twitter_password,
                'source' : source
                }
        if tags:
            data['tags'] = tags
        if auth:
            data['auth'] = auth
        if key:
            data['key'] = key
        if message:
            # posting a message goes through the uploadAndPost endpoint
            data['message'] = message
            apiurl = API_URL % "uploadAndPost"
        else:
            apiurl = API_URL % "upload"
        req = urllib2.Request(apiurl, data, {})
        socket.setdefaulttimeout(self.timeout)
        u = urllib2.urlopen(req)
        xmlres = u.read()
        return self._parseResponse(xmlres)
    def uploadFile(self,
                   filename,
                   twitter_username,
                   twitter_password,
                   message = None,
                   content_type = None,
                   tags = None,
                   public = True,
                   source = 'yfrog',
                   auth = None,
                   key = None):
        '''Uploads a local file.
        Args:
            filename: media file name to be uploaded
            twitter_username: Twitter account username
            twitter_password: Twitter account password
            message: Message to post to twitter. The URL of the image or video is automatically added. (optional)
            content_type: content type of file; guessed from the name when omitted. (optional)
            tags: comma-separated list of tags (optional)
            public: whether the image is public or not
            source: Twitter 'posted from' attribute
            auth: auth token (optional)
            key: Developer key. See http://code.google.com/p/imageshackapi/wiki/DeveloperKey
        Returns a dictionary with the following keys:
            url: url of uploaded image (this is URL for HTML page)
        '''
        if not exists(filename):
            raise UploadException("File %s does not exist" % filename)
        if content_type == None:
            (content_type, encoding) = guess_type(filename, False)
            if content_type==None:
                raise UploadException("Could not guess content/type for input file %s" % filename)
        fd = open(filename,'rb')
        try:
            data = {'media' : urllib2_file.FileUpload(fd, content_type),
                    'public' : self._yesno(public),
                    'username' : twitter_username,
                    'password' : twitter_password,
                    'source' : source
                    }
            if tags:
                data['tags'] = tags
            if auth:
                data['auth'] = auth
            if key:
                data['key'] = key
            if message:
                data['message'] = message
                apiurl = API_URL % "uploadAndPost"
            else:
                # BUGFIX: this branch assigned to a misspelled variable
                # ('apirul'), leaving 'apiurl' undefined and raising
                # NameError whenever no message was given.
                apiurl = API_URL % "upload"
            req = urllib2.Request(apiurl, data, {})
            socket.setdefaulttimeout(self.timeout)
            u = urllib2.urlopen(req)
            xmlres = u.read()
            return self._parseResponse(xmlres)
        finally:
            fd.close()
    def _parseErrorResponse(self, d):
        # translate an <err code=... msg=...> element into a ServerException
        err = d.getElementsByTagName('err')
        if err==None or len(err)!=1:
            raise UploadException("Cound not decode server XML response (no err element)")
        ca = err[0].attributes.get('code')
        if ca==None:
            raise UploadException("Cound not decode server XML response (no code attriubute)")
        ma = err[0].attributes.get('msg')
        if ma==None:
            raise ServerException(int(ca.value), None)
        else:
            raise ServerException(int(ca.value),ma.value)
    def _parseOKResponse(self,d):
        # a successful response carries exactly one <mediaurl> element
        mu = d.getElementsByTagName('mediaurl')
        if mu==None or len(mu)!=1:
            raise UploadException("Cound not decode server XML response (no mediaurl element)")
        url = self._getText(mu[0].childNodes)
        return {'url':url}
    def _parseResponse(self, xmlres):
        # dispatch on the <rsp stat=...> attribute: 'ok' or 'fail'
        d = parseString(xmlres)
        try:
            rsp = d.getElementsByTagName('rsp')
            if rsp==None or len(rsp)!=1:
                raise UploadException("Cound not decode server XML response (no rsp element)")
            sa =rsp[0].attributes.get('stat')
            if sa==None:
                raise UploadException("Cound not decode server XML response (no stat attriubute)")
            if sa.value=='fail':
                return self._parseErrorResponse(d)
            elif sa.value=='ok':
                return self._parseOKResponse(d)
            else:
                raise UploadException("Cound not decode server XML response (unrecognized stat attriubute value)")
        finally:
            # release the DOM's internal cycles promptly
            d.unlink()
    def _yesno(self, x):
        # server expects literal 'yes'/'no' strings for booleans
        if x:
            return 'yes'
        else:
            return 'no'
    def _getText(self, nodelist):
        # concatenate all text children of a node
        rc = ""
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc
| Python |
''' yfrog api '''
from upload import *
| Python |
#!/usr/bin/env python
####
# Version: 0.2.0
# - UTF-8 filenames are now allowed (Eli Golovinsky)<br/>
# - File object is no more mandatory, Object only needs to have seek() read() attributes (Eli Golovinsky)<br/>
#
# Version: 0.1.0
# - upload is now done with chunks (Adam Ambrose)
#
# Version: older
# THANKS TO:
# bug fix: kosh @T aesaeion.com
# HTTPS support : Ryan Grow <ryangrow @T yahoo.com>
# Copyright (C) 2004,2005,2006 Fabien SEISEN
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# you can contact me at: <fabien@seisen.org>
# http://fabien.seisen.org/python/
#
# Also modified by Adam Ambrose (aambrose @T pacbell.net) to write data in
# chunks (hardcoded to CHUNK_SIZE for now), so the entire contents of the file
# don't need to be kept in memory.
#
"""
enable to upload files using multipart/form-data
idea from:
upload files in python:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
timeoutsocket.py: overriding Python socket API:
http://www.timo-tasi.org/python/timeoutsocket.py
http://mail.python.org/pipermail/python-announce-list/2001-December/001095.html
import urllib2_files
import urllib2
u = urllib2.urlopen('http://site.com/path' [, data])
data can be a mapping object or a sequence of two-elements tuples
(like in original urllib2.urlopen())
varname still need to be a string and
value can be string of a file object
eg:
((varname, value),
(varname2, value),
)
or
{ name: value,
name2: value2
}
"""
import os
import socket
import sys
import stat
import mimetypes
import mimetools
import httplib
import urllib
import urllib2
CHUNK_SIZE = 65536
class FileUpload:
    """Pairs a file-like object with its MIME content type.

    Only seek() and read() are required of the wrapped object; it need
    not be a real file.  Values of this type are recognized by the
    multipart-aware HTTP handlers and sent as form-data file parts.
    """
    def __init__(self, fd, content_type):
        self.fd = fd
        self.content_type = content_type
def get_content_type(filename):
    """Best-effort MIME type for *filename*; octet-stream when unknown."""
    guessed, _encoding = mimetypes.guess_type(filename)
    if guessed:
        return guessed
    return 'application/octet-stream'
# if sock is None, juste return the estimate size
def send_data(v_vars, v_files, boundary, sock=None):
    # Serialize plain variables and FileUpload values as a
    # multipart/form-data body onto `sock`.  When sock is None nothing is
    # sent and only the total body length is computed (used beforehand
    # for the Content-length header).
    l = 0  # running byte count of the body
    for (k, v) in v_vars:
        # one part per plain form variable
        buffer=''
        buffer += '--%s\r\n' % boundary
        buffer += 'Content-Disposition: form-data; name="%s"\r\n' % k
        buffer += '\r\n'
        buffer += v + '\r\n'
        if sock:
            sock.send(buffer)
        l += len(buffer)
    for (k, v) in v_files:
        fd = v.fd
        file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
        # use only the basename of the file in the part header
        name = fd.name.split('/')[-1]
        if isinstance(name, unicode):
            name = name.encode('UTF-8')
        buffer=''
        buffer += '--%s\r\n' % boundary
        buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' \
                  % (k, name)
        if v.content_type != None:
            content_type = v.content_type
        else:
            content_type = get_content_type(name)
        buffer += 'Content-Type: %s\r\n' % content_type
        buffer += 'Content-Length: %ld\r\n' % file_size
        buffer += '\r\n'
        l += len(buffer)
        if sock:
            sock.send(buffer)
            # stream the file in CHUNK_SIZE pieces so the whole file is
            # never held in memory
            if hasattr(fd, 'seek'):
                fd.seek(0)
            while True:
                chunk = fd.read(CHUNK_SIZE)
                if not chunk: break
                sock.send(chunk)
        l += file_size
    # closing multipart boundary
    buffer='\r\n'
    buffer += '--%s--\r\n' % boundary
    buffer += '\r\n'
    if sock:
        sock.send(buffer)
    l += len(buffer)
    return l
# mainly a copy of HTTPHandler from urllib2
class newHTTPHandler(urllib2.BaseHandler):
    # Drop-in replacement for urllib2.HTTPHandler that understands
    # FileUpload values in the request data and sends them as
    # multipart/form-data instead of urlencoding them.
    def http_open(self, req):
        return self.do_open(httplib.HTTP, req)
    def do_open(self, http_class, req):
        data = req.get_data()
        v_files=[]
        v_vars=[]
        # mapping object (dict)
        if req.has_data() and type(data) != str:
            if hasattr(data, 'items'):
                data = data.items()
            else:
                try:
                    if len(data) and not isinstance(data[0], tuple):
                        raise TypeError
                except TypeError:
                    ty, va, tb = sys.exc_info()
                    raise TypeError, "not a valid non-string sequence or mapping object", tb
            # split file parts from plain variables
            for (k, v) in data:
                if isinstance(v, FileUpload):
                    v_files.append((k, v))
                else:
                    v_vars.append( (k, v) )
            # no file ? convert to string
            if len(v_vars) > 0 and len(v_files) == 0:
                data = urllib.urlencode(v_vars)
                v_files=[]
                v_vars=[]
        host = req.get_host()
        if not host:
            raise urllib2.URLError('no host given')
        h = http_class(host) # will parse host:port
        if req.has_data():
            h.putrequest('POST', req.get_selector())
            if not 'Content-type' in req.headers:
                if len(v_files) > 0:
                    boundary = mimetools.choose_boundary()
                    # dry run (sock=None) to compute the total body size
                    # for the Content-length header
                    l = send_data(v_vars, v_files, boundary)
                    h.putheader('Content-Type',
                                'multipart/form-data; boundary=%s' % boundary)
                    h.putheader('Content-length', str(l))
                else:
                    h.putheader('Content-type',
                                'application/x-www-form-urlencoded')
                    if not 'Content-length' in req.headers:
                        h.putheader('Content-length', '%d' % len(data))
        else:
            h.putrequest('GET', req.get_selector())
        scheme, sel = urllib.splittype(req.get_selector())
        sel_host, sel_path = urllib.splithost(sel)
        h.putheader('Host', sel_host or host)
        # forward opener-wide and request-specific headers
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if name not in req.headers:
                h.putheader(name, value)
        for k, v in req.headers.items():
            h.putheader(k, v)
        # httplib will attempt to connect() here. be prepared
        # to convert a socket error to a URLError.
        try:
            h.endheaders()
        except socket.error, err:
            raise urllib2.URLError(err)
        if req.has_data():
            if len(v_files) >0:
                # real pass: stream the multipart body to the socket
                l = send_data(v_vars, v_files, boundary, h)
            elif len(v_vars) > 0:
                # if data is passed as dict ...
                data = urllib.urlencode(v_vars)
                h.send(data)
            else:
                # "normal" urllib2.urlopen()
                h.send(data)
        code, msg, hdrs = h.getreply()
        fp = h.getfile()
        if code == 200:
            resp = urllib.addinfourl(fp, hdrs, req.get_full_url())
            resp.code = code
            resp.msg = msg
            return resp
        else:
            # delegate non-200 responses to the opener's error machinery
            return self.parent.error('http', req, fp, code, msg, hdrs)
# Importing this module globally patches urllib2 so that every opener
# gains multipart/form-data support (the original handler is preserved
# in urllib2._old_HTTPHandler).
urllib2._old_HTTPHandler = urllib2.HTTPHandler
urllib2.HTTPHandler = newHTTPHandler
class newHTTPSHandler(newHTTPHandler):
    # same behaviour over TLS
    def https_open(self, req):
        return self.do_open(httplib.HTTPS, req)
urllib2.HTTPSHandler = newHTTPSHandler
if __name__ == '__main__':
    # Manual smoke test: upload the file given with -f to the URL given
    # with -u, using the patched handlers above.
    import getopt
    import urllib2
    import urllib2_file
    import string
    import sys
    def usage(progname):
        print """
SYNTAX: %s -u url -f file [-v]
""" % progname
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hvu:f:')
    except getopt.GetoptError, errmsg:
        print "ERROR:", errmsg
        sys.exit(1)
    v_url = ''
    v_verbose = 0
    v_file = ''
    for name, value in opts:
        if name in ('-h',):
            usage(sys.argv[0])
            sys.exit(0)
        elif name in ('-v',):
            v_verbose += 1
        elif name in ('-u',):
            v_url = value
        elif name in ('-f',):
            v_file = value
        else:
            print "invalid argument:", name
            sys.exit(2)
    # both -u and -f are mandatory
    error = 0
    if v_url == '':
        print "need -u"
        error += 1
    if v_file == '':
        print "need -f"
        error += 1
    if error > 0:
        sys.exit(3)
    fd = open(v_file, 'r')
    data = {
        'filename' : fd,
    }
    # u = urllib2.urlopen(v_url, data)
    req = urllib2.Request(v_url, data, {})
    try:
        u = urllib2.urlopen(req)
    except urllib2.HTTPError, errobj:
        print "HTTPError:", errobj.code
    else:
        buf = u.read()
        print "OK"
| Python |
#!/usr/bin/env python
'''
Client API library to upload images and videos to imageshack.us
Using "Unified upload API" as described here:
http://reg.imageshack.us/content.php?page=developerpublic
'''
import urllib2_file
import urllib2
import socket
import httplib
from mimetypes import guess_type
from xml.dom.minidom import parseString
from os.path import exists
from urlparse import urlsplit
IMAGE_API_URL = 'http://www.imageshack.us/upload_api.php'
VIDEO_API_URL = 'http://render.imageshack.us/upload_api.php'
HTTP_UPLOAD_TIMEOUT = 300
class UploadException(Exception):
    ''' Exceptions of this class are raised for various upload based errors
    detected on the client side (bad arguments, undecodable response). '''
    pass
class ServerException(Exception):
    ''' Raised when the imageshack server itself reports an upload error. '''
    def __init__(self, code, message):
        self.code = code          # server-supplied error id
        self.message = message    # server-supplied error text
    def __str__(self):
        return "ServerException:" + str(self.code) + ":" + str(self.message)
class Uploader:
    ''' Class to upload images and video to imageshack.
    '''
    def __init__(self, dev_key, cookie=None, username=None, password=None, timeout=HTTP_UPLOAD_TIMEOUT):
        '''Creates uploader object.
        Args:
            dev_key: developer key (mandatory)
            cookie: imageshack user cookie (optional)
            username,password: imageshack user account credentials (optional)
            timeout: timeout in seconds for upload operation (optional)
        '''
        self.cookie = cookie
        self.username = username
        self.password = password
        self.dev_key = dev_key
        self.timeout = timeout
    def uploadFile(self,
                   filename,
                   optsize = None,
                   remove_bar = True,
                   tags = None,
                   public = None,
                   content_type = None,
                   frame_filename = None):
        ''' upload image or video file
        Args:
            filename: file name of image or video file to upload
            optsize: optional resizing parameter as a (width, height) tuple
            remove_bar: remove information bar on thumbnail
            content_type: content type of file. (optional)
            tags: comma-separated list of tags (optional)
            public: whether image is public or not. None means "user default" (optional)
            frame_filename: for video files optional video frame which will be shown in player while movie is loading. Must be in JPEG format.
        Returns:
            returns XML document with information on uploaded image.
        '''
        return self._upload(filename, None,
                            optsize, remove_bar,
                            tags, public,
                            content_type, frame_filename)
    def uploadURL(self,
                  url,
                  optsize = None,
                  remove_bar = True,
                  tags = None,
                  public = None,
                  frame_filename = None):
        ''' upload image or video referenced by URL (transload)
        Args:
            url: URL pointing to image or video file to upload
            optsize: optional resizing parameter as a (width, height) tuple
            remove_bar: remove information bar on thumbnail
            tags: comma-separated list of tags (optional)
            public: whether image is public or not. None means "user default" (optional)
            frame_filename: for video files optional video frame which will be shown in player while movie is loading. Must be in JPEG format.
        Returns:
            returns XML document with information on uploaded image.
        '''
        return self._upload(None, url,
                            optsize, remove_bar,
                            tags, public,
                            None, frame_filename)
    def _upload(self,
                filename,
                url,
                optsize = None,
                remove_bar = True,
                tags = None,
                public = True,
                content_type = None,
                frame_filename = None):
        # --- validate argument combinations before touching the network ---
        if not filename and not url:
            raise UploadException("No source specified")
        if (self.username and not self.password) or (self.password and not self.username):
            raise UploadException("Must specify both usernane and password")
        if self.username and self.cookie:
            raise UploadException("Must specify either usernane/password or cookie but not both")
        if frame_filename and not exists(frame_filename):
            raise UploadException("File %s does not exist" % frame_filename)
        if filename:
            if not exists(filename):
                raise UploadException("File %s does not exist" % filename)
            if content_type == None:
                (content_type, encoding) = guess_type(filename, False)
        else:
            content_type = self._getURLContentType(url)
        if content_type==None:
            raise UploadException("Could not guess content/type for input file %s" % filename)
        # route to the image or video endpoint based on the content type
        if content_type.lower().startswith("image/"):
            api_url = IMAGE_API_URL
            is_video=False
        elif content_type.lower().startswith("video/"):
            api_url = VIDEO_API_URL
            is_video=True
        else:
            raise UploadException("Unsupported content type %s" % content_type)
        # some sanity checks
        if is_video:
            if optsize:
                raise UploadException("Resizing is not supported for video files")
        else:
            if frame_filename:
                raise UploadException("Could not specify frame for image files")
        if filename:
            fd = open(filename,'rb')
        else:
            fd = None
        try:
            data = {'key' : self.dev_key,
                    'rembar' : self._yesno(remove_bar)
                    }
            if fd:
                data['fileupload']=urllib2_file.FileUpload(fd,content_type)
            else:
                data['url']=url
            if frame_filename!=None:
                tfd = open(frame_filename,'rb')
            else:
                tfd = None
            try:
                if tfd!=None:
                    data['frmupload'] = urllib2_file.FileUpload(tfd,"image/jpeg")
                # Some optional parameters
                # NOTE(review): public=False is indistinguishable from None
                # here (both omit the field, keeping the account default);
                # confirm whether an explicit public=no should be sent.
                if public:
                    data['public'] = self._yesno(public)
                if optsize:
                    data['optimage'] = '1'
                    data['optsize'] = "%dx%d" % optsize
                if self.cookie:
                    data['cookie'] = self.cookie
                if self.username:
                    data['a_username'] = self.username
                if self.password:
                    # BUGFIX: this used to send self.username as the
                    # password, so authenticated uploads always failed.
                    data['a_password'] = self.password
                if tags:
                    data['tags'] = tags
                req = urllib2.Request(api_url, data, {})
                # BUGFIX: honour the timeout given to __init__ instead of
                # always using the module-level default.
                socket.setdefaulttimeout(self.timeout)
                resp = urllib2.urlopen(req)
                xmlres = resp.read()
                return self._parseResponse(xmlres)
            finally:
                if tfd!=None:
                    tfd.close()
        finally:
            if fd:
                fd.close()
    def _yesno(self, x):
        # server expects literal 'yes'/'no' strings for booleans
        if x:
            return 'yes'
        else:
            return 'no'
    def _parseErrorResponse(self, err):
        # translate an <error id=...> element into a ServerException
        ia = err.attributes.get('id')
        if ia==None:
            raise UploadException("Cound not decode server error XML response (no id attriubute)")
        raise ServerException(ia.value, self._getText(err.childNodes))
    def _parseResponse(self, xmlres):
        # a valid response has one <links> element; an <error> child
        # inside it signals failure, otherwise the raw XML is returned
        d = parseString(xmlres)
        try:
            links = d.getElementsByTagName('links')
            if links==None or len(links)!=1:
                raise UploadException("Cound not decode server XML response (no links element)")
            error = links[0].getElementsByTagName('error')
            if error!=None and len(error)>0:
                return self._parseErrorResponse(error[0])
            else:
                return xmlres
        finally:
            # release the DOM's internal cycles promptly
            d.unlink()
    def _getText(self, nodelist):
        # concatenate all text children of a node
        rc = ""
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc
    def _getURLContentType(self, url):
        # issue a HEAD request so we can route by Content-Type without
        # downloading the remote file
        parsed_url = urlsplit(url)
        if parsed_url==None or parsed_url.hostname==None or len(parsed_url.hostname)==0:
            raise UploadException("Invalid URL %s" % url)
        c = httplib.HTTPConnection(parsed_url.hostname)
        c.request('HEAD', url)
        r = c.getresponse()
        if r.status!=200:
            raise UploadException("Error %d fetching URL %s" % (r.status, url))
        return r.getheader("Content-Type")
| Python |
#!/usr/bin/env python
'''
Client API library for chuncked video uploading to imageshack.us
Using "Streaming upload API" as described here:
http://code.google.com/p/imageshackapi/wiki/StreamingAPI
'''
import os
import urllib
import httplib
import urllib2
from urlparse import urlparse
from os.path import exists
from urlparse import urlsplit
from mimetypes import guess_type
from xml.dom.minidom import parse
from xml.dom.minidom import parseString
BLOCK_SIZE=1024
SERVER='render.imageshack.us'
PATH='/renderapi'
ENDPOINT='http://'+SERVER+PATH
class UploadException(Exception):
    ''' Exceptions of this class are raised for various upload based errors
    detected on the client side (e.g. the server could not be reached). '''
    pass
class ServerException(Exception):
    ''' Raised when the imageshack server reports an upload error or sends
    a response we cannot interpret. '''
    def __init__(self, code, message):
        # server error id and human-readable text, kept separately so
        # callers can branch on the code
        self.code = code
        self.message = message
    def __str__(self):
        return ":".join(["ServerException", str(self.code), str(self.message)])
class Uploader:
''' Class to upload images and video to imageshack.
'''
    def __init__(self, dev_key, cookie=None, username=None, password=None):
        '''Creates uploader object.
        Args:
            dev_key: developer key (mandatory)
            cookie: imageshack user cookie (optional)
            username,password: imageshack user account credentials (optional)
        '''
        self.cookie = cookie
        self.username = username
        self.password = password
        self.dev_key = dev_key
def start(self, filename, tags = [], public = None):
'''Request file upload URL from server
tags: list of tags
public: visibility
'''
data = {'filename' : filename}
data['key'] = self.dev_key
if self.cookie is not None:
data['cookie'] = self.cookie
if tags:
data['tags'] = ','.join(tags)
if public in (True, False):
data['public'] = "yes" if public else "no"
if self.username is not None:
data['a_username'] = self.username
if self.password is not None:
data['a_password'] = self.password
print data
try:
req = urllib2.urlopen(ENDPOINT+'/start', urllib.urlencode(data))
xml = req.read()
except:
raise UploadException('Could not connect to server')
try:
dom = parseString(xml)
url = dom.documentElement.getAttribute('putURL')
getlenurl = dom.documentElement.getAttribute('getlengthURL')
except:
raise ServerException('Wrong server response')
dom.unlink()
req.close()
return (url, getlenurl)
def get_length(self, url):
'''Get uploaded file name
Args:
url: getlengthURL of start output
returns int byte count
'''
try: size = urllib.urlopen(url).read()
except: raise UploadException('Could not connect to server')
try: size = int(size)
except: raise ServerException('Wrong server response')
return size
def upload_file(self, filename, tags = [], public = True, end = -1):
'''Upload file to ImageShack using streaming API
Args:
tags: list of tags
public: visibility (True, False or None)
end: last byte number that will be uploaded.
If end is -1, file will be uploaded to the end.
'''
url = self.start(filename, tags, public)[0]
return self.upload_range(filename, url, 0, -1)
def resume_upload(self, filename, url, getlenurl, end = -1):
'''Resumes file upload
Args:
url: putURL from start output
getlenurl: getlenURL from start output
end: last byte number to upload (-1 for all file)
'''
size = self.get_length(getlenurl)
return self.upload_range(filename, url, size, end)
def upload_range(self, filename, url, begin = 0, end = -1):
'''Upload file to server
Args:
url: upload url (get one using start method)
begin: first byte number
end: last byte number to upload (-1 for all file)
'''
purl = urlparse(url)
current_byte = begin
filelen = os.path.getsize(filename)
if end == -1: end = filelen
if end > filelen: end = filelen
try:
conn = httplib.HTTPConnection(purl.netloc)
conn.connect()
conn.putrequest('PUT', purl.path)
range_str="bytes %d-%d/%d" % (begin, end, filelen)
conn.putheader('Content-range', range_str)
conn.putheader('Content-type', 'application/octet-stream')
conn.putheader('Content-length', (end - begin)))
conn.endheaders()
except:
raise UploadException('Could not connect to server')
try: fileobj = open(filename, 'rb')
except: raise UploadException('Could not open file')
try: fileobj.seek(begin)
except: raise UploadException('Could not seek file')
while current_byte < end:
try:
data = fileobj.read(BLOCK_SIZE)
print 'sending %d bytes' % len(data)
except: raise UploadException('File I/O error')
try: conn.send(data)
except: raise UploadException('Could not send data')
current_byte += len(data)
print 'sent data'
fileobj.close()
try:
print 'waiting for response'
resp = conn.getresponse()
print 'reading response'
res = resp.read()
except:
raise UploadException('Could not get server response')
return (resp.status, resp.reason, res)
| Python |
''' imageshack api '''
from upload import *
from chuncked_upload import Uploader as ChunkedUploader | Python |
#!/usr/bin/env python
####
# Version: 0.2.0
# - UTF-8 filenames are now allowed (Eli Golovinsky)<br/>
# - File object is no more mandatory, Object only needs to have seek() read() attributes (Eli Golovinsky)<br/>
#
# Version: 0.1.0
# - upload is now done with chunks (Adam Ambrose)
#
# Version: older
# THANKS TO:
# bug fix: kosh @T aesaeion.com
# HTTPS support : Ryan Grow <ryangrow @T yahoo.com>
# Copyright (C) 2004,2005,2006 Fabien SEISEN
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# you can contact me at: <fabien@seisen.org>
# http://fabien.seisen.org/python/
#
# Also modified by Adam Ambrose (aambrose @T pacbell.net) to write data in
# chunks (hardcoded to CHUNK_SIZE for now), so the entire contents of the file
# don't need to be kept in memory.
#
"""
enable to upload files using multipart/form-data
idea from:
upload files in python:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
timeoutsocket.py: overriding Python socket API:
http://www.timo-tasi.org/python/timeoutsocket.py
http://mail.python.org/pipermail/python-announce-list/2001-December/001095.html
import urllib2_files
import urllib2
u = urllib2.urlopen('http://site.com/path' [, data])
data can be a mapping object or a sequence of two-elements tuples
(like in original urllib2.urlopen())
varname still need to be a string and
value can be string of a file object
eg:
((varname, value),
(varname2, value),
)
or
{ name: value,
name2: value2
}
"""
import os
import socket
import sys
import stat
import mimetypes
import mimetools
import httplib
import urllib
import urllib2
CHUNK_SIZE = 65536
class FileUpload:
    """Pairs a file-like object with an explicit MIME content type."""
    def __init__(self, fd, content_type):
        self.fd = fd
        self.content_type = content_type
def get_content_type(filename):
    """Guess the MIME type from *filename*; default to octet-stream."""
    guessed = mimetypes.guess_type(filename)[0]
    return guessed if guessed else 'application/octet-stream'
# if sock is None, just return the estimated size
def send_data(v_vars, v_files, boundary, sock=None):
    """Write a multipart/form-data body for *v_vars* and *v_files*.

    When sock is None nothing is written; the total body length in bytes
    is still computed and returned, so callers can set Content-length
    before performing the real send.
    """
    l = 0
    for (k, v) in v_vars:
        # Plain form field part.
        buffer=''
        buffer += '--%s\r\n' % boundary
        buffer += 'Content-Disposition: form-data; name="%s"\r\n' % k
        buffer += '\r\n'
        buffer += v + '\r\n'
        if sock:
            sock.send(buffer)
        l += len(buffer)
    for (k, v) in v_files:
        # File part: per-part headers first, then the raw file contents.
        fd = v.fd
        file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
        name = fd.name.split('/')[-1]
        if isinstance(name, unicode):
            name = name.encode('UTF-8')
        buffer=''
        buffer += '--%s\r\n' % boundary
        buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' \
                  % (k, name)
        if v.content_type != None:
            content_type = v.content_type
        else:
            content_type = get_content_type(name)
        buffer += 'Content-Type: %s\r\n' % content_type
        buffer += 'Content-Length: %ld\r\n' % file_size
        buffer += '\r\n'
        l += len(buffer)
        if sock:
            sock.send(buffer)
            if hasattr(fd, 'seek'):
                fd.seek(0)
            # Stream the file in CHUNK_SIZE pieces to bound memory use.
            while True:
                chunk = fd.read(CHUNK_SIZE)
                if not chunk: break
                sock.send(chunk)
        l += file_size
    # Closing multipart boundary.
    buffer='\r\n'
    buffer += '--%s--\r\n' % boundary
    buffer += '\r\n'
    if sock:
        sock.send(buffer)
    l += len(buffer)
    return l
# mainly a copy of HTTPHandler from urllib2
class newHTTPHandler(urllib2.BaseHandler):
    """urllib2 HTTP handler that adds multipart/form-data file uploads.

    POST data may be a mapping or a sequence of (name, value) tuples;
    values that are FileUpload instances are sent as file parts, all
    other values as ordinary form fields.
    """
    def http_open(self, req):
        return self.do_open(httplib.HTTP, req)
    def do_open(self, http_class, req):
        """Open *req*, encoding its data as multipart when files are present."""
        data = req.get_data()
        v_files=[]
        v_vars=[]
        # mapping object (dict)
        if req.has_data() and type(data) != str:
            if hasattr(data, 'items'):
                data = data.items()
            else:
                try:
                    if len(data) and not isinstance(data[0], tuple):
                        raise TypeError
                except TypeError:
                    ty, va, tb = sys.exc_info()
                    raise TypeError, "not a valid non-string sequence or mapping object", tb
            # Split plain form fields from FileUpload parts.
            for (k, v) in data:
                if isinstance(v, FileUpload):
                    v_files.append((k, v))
                else:
                    v_vars.append( (k, v) )
        # no file ? convert to string
        if len(v_vars) > 0 and len(v_files) == 0:
            data = urllib.urlencode(v_vars)
            v_files=[]
            v_vars=[]
        host = req.get_host()
        if not host:
            raise urllib2.URLError('no host given')
        h = http_class(host) # will parse host:port
        if req.has_data():
            h.putrequest('POST', req.get_selector())
            if not 'Content-type' in req.headers:
                if len(v_files) > 0:
                    # Dry-run send_data (sock=None) just to compute the length.
                    boundary = mimetools.choose_boundary()
                    l = send_data(v_vars, v_files, boundary)
                    h.putheader('Content-Type',
                                'multipart/form-data; boundary=%s' % boundary)
                    h.putheader('Content-length', str(l))
                else:
                    h.putheader('Content-type',
                                'application/x-www-form-urlencoded')
                    if not 'Content-length' in req.headers:
                        h.putheader('Content-length', '%d' % len(data))
        else:
            h.putrequest('GET', req.get_selector())
        # NOTE(review): if a caller pre-sets Content-type on a request that
        # carries FileUpload values, `boundary` below is never assigned --
        # looks like a latent NameError; confirm callers never do that.
        scheme, sel = urllib.splittype(req.get_selector())
        sel_host, sel_path = urllib.splithost(sel)
        h.putheader('Host', sel_host or host)
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if name not in req.headers:
                h.putheader(name, value)
        for k, v in req.headers.items():
            h.putheader(k, v)
        # httplib will attempt to connect() here. be prepared
        # to convert a socket error to a URLError.
        try:
            h.endheaders()
        except socket.error, err:
            raise urllib2.URLError(err)
        if req.has_data():
            if len(v_files) >0:
                # Real pass: stream the fields and file contents to the socket.
                l = send_data(v_vars, v_files, boundary, h)
            elif len(v_vars) > 0:
                # if data is passed as dict ...
                data = urllib.urlencode(v_vars)
                h.send(data)
            else:
                # "normal" urllib2.urlopen()
                h.send(data)
        code, msg, hdrs = h.getreply()
        fp = h.getfile()
        if code == 200:
            resp = urllib.addinfourl(fp, hdrs, req.get_full_url())
            resp.code = code
            resp.msg = msg
            return resp
        else:
            return self.parent.error('http', req, fp, code, msg, hdrs)
# Monkey-patch urllib2 so every urlopen() gains file-upload support;
# the original handler stays reachable as _old_HTTPHandler.
urllib2._old_HTTPHandler = urllib2.HTTPHandler
urllib2.HTTPHandler = newHTTPHandler
class newHTTPSHandler(newHTTPHandler):
    """HTTPS variant of newHTTPHandler; same multipart logic over httplib.HTTPS."""
    def https_open(self, req):
        return self.do_open(httplib.HTTPS, req)
# Install the HTTPS handler as well.
urllib2.HTTPSHandler = newHTTPSHandler
if __name__ == '__main__':
    # Manual test driver: POST a file to a URL given on the command line.
    import getopt
    import urllib2
    import urllib2_file
    import string
    import sys
    def usage(progname):
        # Print command-line help.
        print """
SYNTAX: %s -u url -f file [-v]
""" % progname
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hvu:f:')
    except getopt.GetoptError, errmsg:
        print "ERROR:", errmsg
        sys.exit(1)
    v_url = ''
    v_verbose = 0
    v_file = ''
    # Parse flags: -h help, -v verbose, -u URL, -f file to upload.
    for name, value in opts:
        if name in ('-h',):
            usage(sys.argv[0])
            sys.exit(0)
        elif name in ('-v',):
            v_verbose += 1
        elif name in ('-u',):
            v_url = value
        elif name in ('-f',):
            v_file = value
        else:
            print "invalid argument:", name
            sys.exit(2)
    # Both -u and -f are mandatory.
    error = 0
    if v_url == '':
        print "need -u"
        error += 1
    if v_file == '':
        print "need -f"
        error += 1
    if error > 0:
        sys.exit(3)
    fd = open(v_file, 'r')
    # Passing the file object as a dict value triggers the multipart path
    # installed above.
    data = {
        'filename' : fd,
    }
    # u = urllib2.urlopen(v_url, data)
    req = urllib2.Request(v_url, data, {})
    try:
        u = urllib2.urlopen(req)
    except urllib2.HTTPError, errobj:
        print "HTTPError:", errobj.code
    else:
        buf = u.read()
        print "OK"
| Python |
#!/usr/bin/env python
"""
This file is executed from ../setup.py only.
Calculate cumulative version from Revision strings.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
from __future__ import print_function
import os,fileinput,re
files = []
# Collect every .py and .c source file under the listed directories.
for d in ['lib']:
    for f in os.listdir(d):
        if f.endswith(('.py', '.c')):
            fn = os.path.join(d, f)
            if os.path.exists(fn):
                files.append(fn)
            else:
                # Bug fix: message typo ("does not exists").
                print('File "%s" does not exist. Skipping.' % (fn))
revision_version = 0
# Sum the minor numbers of every "$Revision: x.y $" CVS tag, one match per file.
for l in fileinput.input(files):
    m = re.match(r'.*?\$Re[v]ision:\s*\d+[.](?P<rev>\d+)\s*\$', l)
    if m:
        # Bug fix: int() instead of eval() -- the group is a digit string,
        # so eval() was both overkill and needlessly risky.
        revision_version = revision_version + int(m.group('rev'))
        fileinput.nextfile()
| Python |
#!/usr/bin/env python
# Demo: write a unit cube as VTK PolyData with point and cell attributes,
# in both ASCII and binary form, then read the ASCII file back as a check.
import sys
sys.path = ['..']+sys.path
if sys.version[:3]=='1.5':
    from lib152 import *
else:
    from lib import *
#from pyvtk import *
# Geometry: the 8 corners of a unit cube and its 6 quadrilateral faces.
structure = PolyData(points=[[0,0,0],[1,0,0],[1,1,0],[0,1,0],
                             [0,0,1],[1,0,1],[1,1,1],[0,1,1]],
                     polygons=[[0,1,2,3],[4,5,6,7],[0,1,5,4],
                               [2,3,7,6],[0,4,7,3],[1,2,6,5]])
# One scalar per point, coloured through a custom RGBA lookup table.
pointdata = PointData(\
    Scalars([0,1,2,3,4,5,6,7],
            name='sample_scalars',
            lookup_table='my_table'),
    LookupTable([[0,0,0,1],[1,0,0,1],[0,1,0,1],[1,1,0,1],
                 [0,0,1,1],[1,0,1,1],[0,1,1,1],[1,1,1,1]],
                name='my_table'))
# Per-face scalars, face normals, and an arbitrary FIELD data block.
celldata = CellData(\
    Scalars([0,1,2,3,4,5],
            name='cell_scalars'),
    Normals([[0,0,-1],[0,0,1],[0,-1,0],
             [0,1,0],[-1,0,0],[1,0,0]],
            name='cell_normals'),
    Field('FieldData',
          cellIds=[[0],[1],[2],[3],[4],[5]],
          faceAttributes=[[0,1],[1,2],[2,3],[3,4],[4,5],[5,6]]))
vtk = VtkData(structure,pointdata,celldata)
# Save in both encodings, then parse the ASCII file back.
vtk.tofile('example1','ascii')
vtk.tofile('example1b','binary')
vtk2 = VtkData('example1')
| Python |
#!/usr/bin/env python
# Demo: build an unstructured grid that mixes many VTK cell types
# (hexahedra, tetrahedra, polygon, triangle strip, quads, triangles,
# a line and a vertex) with per-point vectors and scalars.
import sys
sys.path = ['..']+sys.path
if sys.version[:3]=='1.5':
    from lib152 import *
else:
    from lib import *
#from pyvtk import *
# 27 points forming two stacked slabs plus a column of extra layers.
points = [[0,0,0],[1,0,0],[2,0,0],[0,1,0],[1,1,0],[2,1,0],
          [0,0,1],[1,0,1],[2,0,1],[0,1,1],[1,1,1],[2,1,1],
          [0,1,2],[1,1,2],[2,1,2],[0,1,3],[1,1,3],[2,1,3],
          [0,1,4],[1,1,4],[2,1,4],[0,1,5],[1,1,5],[2,1,5],
          [0,1,6],[1,1,6],[2,1,6]
          ]
# One vector per point.
vectors = [[1,0,0],[1,1,0],[0,2,0],[1,0,0],[1,1,0],[0,2,0],
           [1,0,0],[1,1,0],[0,2,0],[1,0,0],[1,1,0],[0,2,0],
           [0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],
           [0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],
           [0,0,1],[0,0,1],[0,0,1]
           ]
# Each keyword lists cells of that VTK type by point indices.
vtk = VtkData(\
    UnstructuredGrid(points,
                     hexahedron=[[0,1,4,3,6,7,10,9],
                                 [1,2,5,4,7,8,11,10]],
                     tetra=[[6,10,9,12],
                            [5,11,10,14]],
                     polygon=[15,16,17,14,13,12],
                     triangle_strip=[18,15,19,16,20,17],
                     quad=[22,23,20,19],
                     triangle=[[21,22,18],
                               [22,19,18]],
                     line=[26,25],
                     vertex=[24]
                     ),
    PointData(Vectors(vectors),Scalars(range(27))),
    'Unstructured Grid Example'
    )
# Write ASCII and binary variants, then re-read the ASCII file.
vtk.tofile('example3')
vtk.tofile('example3b','binary')
VtkData('example3')
| Python |
#!/usr/bin/env python
# Demo: the same scalar field written on four dataset types:
# structured points, structured grid, rectilinear grid, unstructured grid.
import sys
sys.path = ['..']+sys.path
if sys.version[:3]=='1.5':
    from lib152 import *
else:
    from lib import *
#from pyvtk import *
# Hand-written scalar values on a 3x4x6 structured-points lattice.
vtk = VtkData(StructuredPoints([3,4,6]),
              PointData(Scalars([0,0,0,0,0,0,0,0,0,0,0,0,
                                 0,5,10,15,20,25,25,20,15,10,5,0,
                                 0,10,20,30,40,50,50,40,30,20,10,0,
                                 0,10,20,30,40,50,50,40,30,20,10,0,
                                 0,5,10,15,20,25,25,20,15,10,5,0,
                                 0,0,0,0,0,0,0,0,0,0,0,0
                                 ])))
vtk.tofile('example2')
vtk.tofile('example2b','binary')
# Re-read only the geometry, then attach a computed scalar field.
vtk = VtkData('example2',only_structure = 1)
def f(x,y,z):
    # Scalar field sampled at every grid point.
    return x*y*z
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_sp')
# Same field on an explicit structured grid.
pp = [(i,j,k) for k in range(6) for j in range(4) for i in range(3)]
vtk = VtkData(StructuredGrid([3,4,6],pp))
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_sg')
# Same field on a rectilinear grid.
vtk = VtkData(RectilinearGrid(range(3),range(4),range(6)))
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_rg')
# Same field on an unstructured grid built from voxel cells.
voxels = []
points = []
n = 0
for k in range(6):
    for j in range(4):
        for i in range(3):
            points.append((i,j,k))
            if not (k==5 or j==3 or i==2):
                # 8 corner indices of the voxel whose lowest corner is point n.
                voxels.append([n,n+1,n+3,n+3+1,n+3*4,n+3*4+1,n+3*4+3,n+3*4+3+1])
            n += 1
vtk = VtkData(UnstructuredGrid(points,voxel=voxels))
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_usg')
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSetAttr
import string
class Field(DataSetAttr.DataSetAttr):
    """Holds VTK Field.

    Keyword arguments become the named arrays of the field; each value is
    normalized via get_n_seq_seq and shorter arrays are padded with the
    default value so every array ends up with the same number of rows.
    """
    def __init__(self,*args,**kws):
        # Optional first positional argument is the field name.
        if len(args): name = args[0]
        else: name = None
        if len(args)>1:
            self.warning('Ignoring all arguments except the first')
        self.name = self._get_name(name)
        data = {}
        mx = 0
        for k,v in kws.items():
            data[k] = self.get_n_seq_seq(v,self.default_value)
            # mx tracks the longest array seen so far (recomputed each pass).
            mx = max(map(len,data.values()))
        #mx = max([len(l) for l in data.values()])
        for k,v in data.items():
            if len(v)<mx:
                # Pad shorter arrays so every array has mx rows.
                self.warning('Filling array %s (size=%s) with default value (%s) to obtain size=%s'%(`k`,len(v),self.default_value,mx))
                while len(v)<mx:
                    v.append([self.default_value]*len(v[0]))
        self.data = data
    def to_string(self,format='ascii'):
        # FIELD <name> <number of arrays>, then a header + data block per array.
        ret = ['FIELD %s %s'%(self.name,len(self.data))]
        for k,v in self.data.items():
            t = self.get_datatype(v)
            ret = ret + ['%s %s %s %s'%(k,len(v[0]),len(v),t),
                         self.seq_to_string(v,format,t)]
        return string.join(ret,'\n')
    def get_size(self):
        # Number of rows in the (equal-length) arrays.
        return len(self.data.values()[0])
if __name__ == "__main__":
    print Field(a=[[2,23],3,3],c=[2,3,4,5]).to_string()
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSetAttr
import string
class Tensors(DataSetAttr.DataSetAttr):
"""Holds VTK Tensors.
"""
def __init__(self,tensors,name=None):
self.name = self._get_name(name)
self.tensors = self.get_3_3_tuple_list(tensors,(self.default_value,)*3)
def to_string(self,format='ascii'):
t = self.get_datatype(self.tensors)
ret = ['TENSORS %s %s'%(self.name,t)]
ret.append(self.seq_to_string(self.tensors,format,t))
return string.join(ret,'\n')
def get_size(self):
return len(self.tensors)
if __name__ == "__main__":
print Tensors([[[3,3]],[4,3.],[[240]],3,2,3]).to_string('ascii')
print Tensors(3).to_string('ascii')
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSet
import string
class UnstructuredGrid(DataSet.DataSet):
    """VTK unstructured grid: explicit points plus explicitly listed cells.

    Cells are passed as keyword lists of point indices, e.g.
    UnstructuredGrid(points, tetra=[[0,1,2,3]], line=[[0,1]]).
    """
    # VTK numeric cell-type codes, keyed by keyword name.
    _vtk_cell_types_map = {'vertex':1,'poly_vertex':2,'line':3,'poly_line':4,
                           'triangle':5,'triangle_strip':6,'polygon':7,'pixel':8,
                           'quad':9,'tetra':10,'voxel':11,'hexahedron':12,
                           'wedge':13,'pyramid':14}
    # Required number of points per cell; -1 means variable length.
    _vtk_cell_nums_map = {'vertex':1,'poly_vertex':-1,'line':2,'poly_line':-1,
                          'triangle':3,'triangle_strip':-1,'polygon':-1,'pixel':4,
                          'quad':4,'tetra':4,'voxel':8,'hexahedron':8,
                          'wedge':6,'pyramid':5}
    def __init__(self,points,vertex=[],poly_vertex=[],line=[],poly_line=[],
                 triangle=[],triangle_strip=[],polygon=[],pixel=[],
                 quad=[],tetra=[],voxel=[],hexahedron=[],wedge=[],pyramid=[]):
        # NOTE(review): mutable default arguments; appears safe because each
        # value is re-normalized via get_seq_seq below -- confirm get_seq_seq
        # copies rather than aliases its input.
        self.points = self.get_3_tuple_list(points,(0,0,0))
        sz = len(self.points)
        for k in self._vtk_cell_types_map.keys():
            # Dynamically set self.<cell> = normalized sequence-of-sequences.
            exec 'self.%s = self.get_seq_seq(%s,[])'%(k,k)
            if k=='vertex':
                # Vertices are stored one index per cell: flatten each group.
                r = []
                for v in self.vertex:
                    r = r + map(lambda a:[a],v)
                self.vertex = r
            if self._check_int_seq(getattr(self,k),sz):
                raise ValueError,'In cell %s: must be (seq of seq|seq) integers less than %s'%(k,sz)
        for k,n in self._vtk_cell_nums_map.items():
            if n==-1: continue  # variable-size cell types are not length-checked
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            for v in kv:
                if len(v)!=n:
                    raise ValueError,'Cell %s requires exactly %s points but got %s: %s'%(`k`,n,len(v),v)
    def to_string(self,format='ascii'):
        # Serialize as DATASET UNSTRUCTURED_GRID: POINTS, CELLS, CELL_TYPES.
        t = self.get_datatype(self.points)
        ret = ['DATASET UNSTRUCTURED_GRID',
               'POINTS %s %s'%(self.get_size(),t)
               ]
        ret.append(self.seq_to_string(self.points,format,t))
        tps = []
        r = ''
        sz = 0
        for k in self._vtk_cell_types_map.keys():
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            # Each cell is written as <npoints> <p0> <p1> ...
            r = r + self.seq_to_string(map(lambda v:[len(v)]+list(v),kv),format,'int')
            #r = r + self.seq_to_string([[len(v)]+list(v) for v in kv],format,'int')
            for v in kv:
                tps.append(self._vtk_cell_types_map[k])
                sz = sz + len(v)+1
        ret = ret + ['CELLS %s %s'%(len(tps),sz),
                     r,
                     'CELL_TYPES %s'%(len(tps)),
                     self.seq_to_string(tps,format,'int')]
        return string.join(ret,'\n')
    def get_cell_size(self):
        # Total number of cells across all cell types.
        sz = 0
        for k in self._vtk_cell_types_map.keys():
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            sz = sz + len(kv)
        return sz
if __name__ == "__main__":
    print UnstructuredGrid([[1,2],[2,4],3,5],
                           line = [[2,3],[1,2],[2,3]],
                           vertex=2)
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
__version__ = "$Id: LookupTable.py,v 1.1 2001/05/20 12:51:29 pearu Exp $"
import common
import DataSetAttr
import string
class LookupTable(DataSetAttr.DataSetAttr):
"""Holds VTK LookupTable.
"""
def __init__(self,table,name=None):
self.name = self._get_name(name)
self.table = self.get_n_seq_seq(table,[0,0,0,0])
if len(self.table[0])!=4:
raise ValueError,'expected sequence of 4-sequences but got %s'%(len(self.table[0]))
def to_string(self,format='ascii'):
ret = ['LOOKUP_TABLE %s %s'%(self.name,len(self.table))]
seq = self.table
if format=='binary':
if not common.is_int255(seq):
seq = self.float01_to_int255(seq)
ret.append(self.seq_to_string(seq,format,'unsigned char'))
else:
if not common.is_float01(seq):
seq = self.int255_to_float01(seq)
ret.append(self.seq_to_string(seq,format,'float'))
return string.join(ret,'\n')
def get_size(self):
return len(self.table)
if __name__ == "__main__":
print LookupTable([[3,3],[4,3],240,3,2]).to_string()
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
__version__ = "$Id: Data.py,v 1.1 2001/05/20 12:51:29 pearu Exp $"
import string
import common
class Data(common.Common):
    """Base class for POINT_DATA / CELL_DATA sections.

    Accepts any number of DataSetAttr instances (Scalars, Vectors, ...);
    their lengths must agree, as fixed by the first valid attribute.
    """
    # Overridden by subclasses with 'POINT_DATA' or 'CELL_DATA'.
    data_type = None
    def __init__(self,*args):
        if self.__class__.__name__ not in ['PointData','CellData']:
            raise TypeError,'use PointData or CellData instead of Data'
        if not args:
            raise TypeError,self.__class__.__name__+'() takes at least 1 argument: Scalars|ColorScalars|LookupTable|Vectors|Normals|TextureCoordinates|Tensors|Field'
        args = list(args)
        length = None
        for a in args:
            if not common.is_datasetattr(a):
                self.skipping('expected DataSetAttr argument but got %s'%(type(a)))
                continue
            if length is None:
                # First valid attribute defines the required data length.
                length = a.get_size()
            elif length != a.get_size():
                self.skipping('attribute data %s must be of length %s (as defined by first DataSetAttr) but got %s'%(`a.__class__.__name__`,length,a.get_size()))
                continue
        # NOTE(review): "skipped" items only trigger a warning -- they are
        # still kept in self.data below; verify this is intended.
        self.length = length
        self.data = args
    def get_size(self):
        return self.length
    def to_string(self,format='ascii'):
        # Emit the section header followed by each attribute's serialization.
        if self.data_type is None:
            raise TypeError,'use PointData or CellData instead of Data'
        ret = ['%s %s'%(self.data_type,self.length)]
        for a in self.data:
            ret.append(a.to_string(format))
        #ret += [a.to_string(format) for a in self.data]
        return string.join(ret,'\n')
class PointData(Data):
    # Attributes attached to dataset points.
    data_type = 'POINT_DATA'
class CellData(Data):
    # Attributes attached to dataset cells.
    data_type = 'CELL_DATA'
if __name__ == "__main__":
    import Scalars
    print PointData(Scalars.Scalars([2,3]))
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSetAttr
import string
class TextureCoordinates(DataSetAttr.DataSetAttr):
"""Holds VTK Texture Coordinates.
"""
def __init__(self,scalars,name=None):
self.name = self._get_name(name)
self.coords = self.get_n_seq_seq(scalars,self.default_value)
if not 1<=len(self.coords[0])<=3:
raise ValueError,'texture coordinates dimension must be 1, 2, or 3 but got %s'%(len(self.coords[0]))
def to_string(self,format='ascii'):
t = self.get_datatype(self.coords)
ret = ['TEXTURE_COORDINATES %s %s %s'%(self.name,len(self.coords[0]),t)]
ret.append(self.seq_to_string(self.coords,format,t))
return string.join(ret,'\n')
def get_size(self):
return len(self.coords)
if __name__ == "__main__":
print TextureCoordinates([[3,3],[4,3],240,3,2]).to_string()
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSetAttr
import string
class Vectors(DataSetAttr.DataSetAttr):
"""Holds VTK Vectors.
"""
def __init__(self,vectors,name=None):
self.name = self._get_name(name)
self.vectors = self.get_3_tuple_list(vectors,(self.default_value,)*3)
def to_string(self,format='ascii'):
t = self.get_datatype(self.vectors)
ret = ['VECTORS %s %s'%(self.name,t),
self.seq_to_string(self.vectors,format,t)]
return string.join(ret,'\n')
def get_size(self):
return len(self.vectors)
if __name__ == "__main__":
print Vectors([[3,3],[4,3.],240,3,2]).to_string()
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
__version__ = "$Id: DataSetAttr.py,v 1.1 2001/05/20 12:51:29 pearu Exp $"
import common
import string
class DataSetAttr(common.Common):
    """Abstract class for VTK data."""
    # Class-level per-subclass instance counters, used for default names.
    counters = {}
    # Value used to pad short sequences in subclasses.
    default_value = 0
    def _get_default_name(self):
        # Generate names like Scalars0, Scalars1, ... per subclass.
        n = self.__class__.__name__
        try:
            self.counters[n] = self.counters[n] + 1
        except KeyError:
            self.counters[n] = 0
        return self.__class__.__name__+str(self.counters[n])
    def _get_name(self,name):
        """Return a sanitized attribute name, generating one when None."""
        if name is None:
            name = self._get_default_name()
            self.warning('Using name=%s'%(`name`))
            return name
        if common.is_string(name):
            # VTK names may not contain whitespace.
            name = string.replace(string.strip(name),' ','_')
            #name = name.strip().replace(' ','_')
            if name:
                return name
        raise ValueError,'name=%s must be non-empty string'%(`name`)
    def _get_lookup_table(self,name):
        """Return a sanitized lookup-table name, 'default' when None."""
        if name is None:
            name = 'default'
            self.warning('Using lookup_table=%s'%(`name`))
            return name
        if common.is_string(name):
            name = string.replace(string.strip(name),' ','_')
            #name = name.strip().replace(' ','_')
            if name:
                return name
        raise ValueError,'lookup_table=%s must be nonempty string'%(`name`)
if __name__ == "__main__":
    pass
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSetAttr
import math
import string
class Normals(DataSetAttr.DataSetAttr):
"""Holds VTK Normals.
"""
def __init__(self,normals,name=None):
self.name = self._get_name(name)
seq = []
for v in self.get_3_tuple_list(normals,(self.default_value,)*3):
n = math.sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2])
if n==0:
self.warning('Cannot normalize zero vector to 1-length')
seq.append(v)
elif n==1:
seq.append(v)
else:
seq.append(tuple(map(lambda c,n=n:c/n,v)))
#seq.append(tuple([c/n for c in v]))
self.normals = seq
def to_string(self,format='ascii'):
t = self.get_datatype(self.normals)
ret = ['NORMALS %s %s'%(self.name,t)]
ret.append(self.seq_to_string(self.normals,format,t))
return string.join(ret,'\n')
def get_size(self):
return len(self.normals)
if __name__ == "__main__":
print Normals([[3,3],[4,3.],240,3,2]).to_string()
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
__version__ = "$Id: DataSet.py,v 1.1 2001/05/20 12:51:29 pearu Exp $"
import common
class DataSet(common.Common):
    """Abstract class.
    It describes the geometry and topology of VTK dataset.
    """
    def get_size(self):
        # Number of points: explicit list if present, else product of dims.
        if hasattr(self,'points'):
            return len(self.points)
        return reduce(lambda x,y:x*y,self.dimensions,1)
    def get_cell_size(self):
        # Subclasses that carry cells override this.
        return 0
    def _check_dimensions(self):
        """Return 1 (error) unless dimensions are positive ints matching points."""
        for i in range(3):
            d = self.dimensions[i]
            if not common.is_int(d):
                self.error('dimensions[%s] must be int but got %s'%(i,type(d)))
                return 1
            if d<=0:
                self.error('dimensions[%s] must be positive int but got %s'%(i,d))
                return 1
        if hasattr(self,'points'):
            d = reduce(lambda x,y:x*y,self.dimensions,1)
            if len(self.points)!=d:
                self.error('mismatch of points length (%s) and dimensions size (%s)'%(len(self.points),d))
                return 1
        return 0
    def _check_origin(self):
        """Return 1 (error) unless origin is a 3-sequence of numbers."""
        for i in range(3):
            d = self.origin[i]
            if not common.is_number(d):
                self.error('origin[%s] must be number but got %s'%(i,type(d)))
                return 1
        return 0
    def _check_spacing(self):
        """Return 1 (error) unless spacing is a 3-sequence of positive numbers."""
        for i in range(3):
            d = self.spacing[i]
            if not common.is_number(d):
                self.error('spacing[%s] must be number but got %s'%(i,type(d)))
                return 1
            if d<=0:
                self.error('spacing[%s] must be positive number but got %s'%(i,d))
                return 1
        return 0
    def _check_int_seq(self,obj,mx_int):
        """Return 1 if obj (recursively) contains a non-int or a value >= mx_int."""
        if common.is_sequence(obj):
            for o in obj:
                if self._check_int_seq(o,mx_int):
                    return 1
        elif not common.is_int(obj) or obj>=mx_int:
            return 1
        return 0
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSet
import string
class StructuredGrid(DataSet.DataSet):
"""The topology of a dataset is described by
dimensions - 3-sequence of positive integers
points - sequence of 3-sequences|3x-sequence
"""
def __init__(self,dimensions,points):
self.dimensions = self.get_3_tuple(dimensions,(1,1,1))
self.points = self.get_3_tuple_list(points,(0,0,0))
if self._check_dimensions():
raise ValueError,'dimensions must be 3-tuple of ints >=1 and matching with the size of points'
def to_string(self, format='ascii'):
t = self.get_datatype(self.points)
ret = ['DATASET STRUCTURED_GRID',
'DIMENSIONS %s %s %s'%self.dimensions,
'POINTS %s %s'%(self.get_size(),t)
]
ret.append(self.seq_to_string(self.points,format,t))
return string.join(ret,'\n')
if __name__ == "__main__":
print StructuredGrid((1,2),[1,2,2,4,4,5.4])
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
__version__ = "$Id: Scalars.py,v 1.1 2001/05/20 12:51:29 pearu Exp $"
import DataSetAttr
import string
class Scalars(DataSetAttr.DataSetAttr):
"""Holds VTK scalars.
"""
def __init__(self,scalars,name=None,lookup_table=None):
self.name = self._get_name(name)
self.lookup_table = self._get_lookup_table(lookup_table)
self.scalars = self.get_seq(scalars,[])
def to_string(self,format='ascii'):
t = self.get_datatype(self.scalars)
ret = ['SCALARS %s %s %s'%(self.name,t,1),
'LOOKUP_TABLE %s'%(self.lookup_table)]
ret.append(self.seq_to_string(self.scalars,format,t))
return string.join(ret,'\n')
def get_size(self):
return len(self.scalars)
if __name__ == "__main__":
print Scalars([3,4,240]).to_string('binary')
| Python |
#!/usr/bin/env python
"""
PyVTK provides tools for manipulating VTK files in Python.
VtkData - create VTK files from Python objects.
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
__author__ = "Pearu Peterson <pearu@cens.ioc.ee>"
__license__ = "LGPL (see http://www.fsf.org)"
from __version__ import __version__
__all__ = ['StructuredPoints','StructuredGrid','UnstructuredGrid','RectilinearGrid',
'RectilinearGrid','PolyData',
'Scalars','ColorScalars','LookupTable','Vectors','Normals',
'TextureCoordinates','Tensors','Field',
'PointData','CellData',
'VtkData']
import common
import string
from StructuredPoints import StructuredPoints
from StructuredGrid import StructuredGrid
from UnstructuredGrid import UnstructuredGrid
from RectilinearGrid import RectilinearGrid
from PolyData import PolyData
from Scalars import Scalars
from ColorScalars import ColorScalars
from LookupTable import LookupTable
from Vectors import Vectors
from Normals import Normals
from TextureCoordinates import TextureCoordinates
from Tensors import Tensors
from Field import Field
from Data import PointData,CellData
class VtkData(common.Common):
    """
    VtkData
    =======
    Represents VTK file that has four relevant parts:
      header   - string up to length 256
      format   - string: ascii | binary
      DataSet  - StructuredPoints | StructuredGrid | UnstructuredGrid
                 | RectilinearGrid | PolyData
      Data     - PointData | CellData
    Usage:
    ------
      v = VtkData(<DataSet instance> [,<header string>,<Data instances>,..])
      v.tofile(filename, format = 'ascii') - save VTK data to file.
    DataSet
    =======
      StructuredPoints(<3-sequence of dimensions>
                       [,<3-sequence of origin> [, <3-sequence of spacing>]])
      StructuredGrid(<3-sequence of dimensions>,
                     <sequence of 3-sequences of points>)
      UnstructuredGrid(<sequence of 3-sequences of points>
                       [,<cell> = <sequence of (sequences of) integers>])
        cell - vertex | poly_vertex | line | poly_line | triangle
               | triangle_strip | polygon | pixel | quad | tetra
               | voxel | hexahedron | wedge | pyramid
      RectilinearGrid([x = <sequence of x-coordinates>],
                      [y = <sequence of y-coordinates>],
                      [z = <sequence of z-coordinates>])
      PolyData(<sequence of 3-sequences of points>,
               [vertices = <sequence of (sequences of) integers>],
               [lines = <sequence of (sequences of) integers>],
               [polygons = <sequence of (sequences of) integers>],
               [triangle_strips = <sequence of (sequences of) integers>])
    Data
    ====
      PointData | CellData ([<DataSetAttr instances>]) - construct Data instance
    DataSetAttr
    ===========
      DataSetAttr - Scalars | ColorScalars | LookupTable | Vectors
                    | Normals | TextureCoordinates | Tensors | Field
      Scalars(<sequence of scalars> [,name[, lookup_table]])
      ColorScalars(<sequence of scalar sequences> [,name])
      LookupTable(<sequence of 4-sequences> [,name])
      Vectors(<sequence of 3-sequences> [,name])
      Normals(<sequence of 3-sequences> [,name])
      TextureCoordinates(<sequence of (1,2, or 3)-sequences> [,name])
      Tensors(<sequence of (3x3)-sequences> [,name])
      Field([name,] [arrayname_1 = sequence of n_1-sequences, ...
                     arrayname_m = sequence of n_m-sequences,])
        where len(array_1) == .. == len(array_m) must hold.
    """
    # Filled in by __init__; class-level defaults mark "not yet supplied".
    header = None
    point_data = None
    cell_data = None
    def __init__(self,structure,*args):
        if not common.is_dataset(structure):
            raise TypeError,'argument structure must be StructuredPoints|StructuredGrid|UnstructuredGrid|RectilinearGrid|PolyData but got %s'%(type(structure))
        self.structure = structure
        # Remaining positional arguments may be, in any order: a header
        # string, a PointData instance and/or a CellData instance.
        for a in args:
            if common.is_string(a):
                if len(a)>255:
                    # NOTE(review): message typo 'striping' (should be
                    # 'stripping') — runtime string, left unchanged here.
                    self.skipping('striping header string to length 256')
                self.header = a[:256]
            elif common.is_pointdata(a):
                self.point_data = a
            elif common.is_celldata(a):
                self.cell_data = a
            else:
                # NOTE(review): message typo 'unexpexted' — runtime string,
                # left unchanged here.
                self.skipping('unexpexted argument %s'%(type(a)))
        if self.header is None:
            self.header = 'Really cool data'
            self.warning('Using header=%s'%(`self.header`))
        if self.point_data is None and self.cell_data is None:
            self.warning('No data defined')
        # Sanity checks: data sizes must match the dataset geometry.
        if self.point_data is not None:
            s = self.structure.get_size()
            s1 = self.point_data.get_size()
            if s1 != s:
                raise ValueError,'DataSet (size=%s) and PointData (size=%s) have different sizes'%(s,s1)
        if self.cell_data is not None:
            s = self.structure.get_cell_size()
            s1 = self.cell_data.get_size()
            if s1 != s:
                raise ValueError,'DataSet (cell_size=%s) and CellData (size=%s) have different sizes'%(s,s1)
    def tofile(self, filename, format = 'ascii'):
        """Save VTK data to file; '.vtk' is appended when missing."""
        if not common.is_string(filename):
            raise TypeError,'argument filename must be string but got %s'%(type(filename))
        if format not in ['ascii','binary']:
            raise TypeError,'argument format must be ascii | binary'
        filename = string.strip(filename)
        if not filename:
            raise ValueError,'filename must be non-empty string'
        if filename[-4:]!='.vtk':
            filename = filename + '.vtk'
        # Binary mode so binary-format payloads are not newline-mangled.
        f = open(filename,'wb')
        f.write('# vtk DataFile Version 2.0\n')
        f.write(self.header+'\n')
        f.write(string.upper(format)+'\n')
        f.write(self.structure.to_string(format)+'\n')
        if self.cell_data:
            f.write(self.cell_data.to_string(format)+'\n')
        if self.point_data:
            f.write(self.point_data.to_string(format))
        f.close()
if __name__ == "__main__":
    # Minimal end-to-end example: writes test.vtk in the current directory.
    vtk = VtkData(StructuredPoints((3,1,1)),
                  'This is title',
                  PointData(Scalars([3,4,5]))
                  )
    vtk.tofile('test')
| Python |
__version__ = "0.4.85"
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSet
import string
class PolyData(DataSet.DataSet):
    """The topology of a dataset is described by
    points
    vertices
    lines
    polygons
    triangle_strips
    """
    def __init__(self,points,
                 vertices=[],lines=[],polygons=[],triangle_strips=[]):
        # Points are normalised to a list of 3-tuples (missing components
        # filled with 0); connectivity lists become sequences of sequences.
        self.points = self.get_3_tuple_list(points,(0,0,0))
        self.vertices = self.get_seq_seq(vertices,[])
        self.lines = self.get_seq_seq(lines,[])
        self.polygons = self.get_seq_seq(polygons,[])
        self.triangle_strips = self.get_seq_seq(triangle_strips,[])
        sz = len(self.points)
        # Every connectivity index must refer to an existing point.
        for k in ['vertices','lines','polygons','triangle_strips']:
            if self._check_int_seq(getattr(self,k),sz):
                raise ValueError,'%s must be (seq of seq|seq) integers less than %s'%(k,sz)
    def to_string(self, format='ascii'):
        """Serialize as a VTK POLYDATA dataset section."""
        t = self.get_datatype(self.points)
        ret = ['DATASET POLYDATA',
               'POINTS %s %s'%(self.get_size(),t)]
        ret.append(self.seq_to_string(self.points,format,t))
        for k in ['vertices','lines','polygons','triangle_strips']:
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            # Section size = total index count plus one length entry per cell.
            sz = self._get_nof_objs(kv)+len(kv)
            ret = ret + ['%s %s %s'%(string.upper(k),len(kv),sz),
                         self.seq_to_string(map(lambda v:[len(v)]+list(v),kv),format,'int')]
            #ret = ret + ['%s %s %s'%(k.upper(),len(kv),sz),
            #             self.seq_to_string([[len(v)]+list(v) for v in kv],format,'int')]
        return string.join(ret,'\n')
    def get_cell_size(self):
        """Total number of cells over all connectivity lists."""
        sz = 0
        for k in ['vertices','lines','polygons','triangle_strips']:
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            sz = sz + len(kv)
        return sz
if __name__ == "__main__":
    # Demo with mixed scalar/sequence points (Python 2 print statement).
    print PolyData([[1,2],[2,4],4,5.4],[[1],[0]],[],[1,2,3])
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import types
import sys
import struct
import string
def is_sequence(obj):
    """Return 1 when *obj* supports len() (behaves like a sequence), else 0."""
    try:
        len(obj)
    except TypeError:
        return 0
    return 1
def is_sequence2(obj):
    """Return true when *obj* is a non-empty sequence whose first item is
    itself a sequence."""
    if not is_sequence(obj):
        return 0
    return len(obj) and is_sequence(obj[0])
def is_sequence3(obj):
    """Return true when *obj* is a non-empty sequence whose first item is a
    sequence of sequences (i.e. obj nests three levels deep)."""
    if not is_sequence(obj):
        return 0
    return len(obj) and is_sequence2(obj[0])
def is_number(obj):
    """Check if obj is number."""
    # Python 2 type objects (types.IntType/FloatType do not exist on
    # Python 3); longs and complex numbers are deliberately excluded.
    return type(obj) in [types.IntType,types.FloatType]
def is_int(obj):
    """Check if obj is integer."""
    # Exact type check (Python 2 types module); subclasses are rejected.
    return type(obj) is types.IntType
def is_string(obj):
    """Check if obj is string."""
    # Python 2 str only; unicode objects would not match this check.
    return type(obj) is types.StringType
def is_int255(obj):
    """Recursively check that every number in *obj* lies in the byte
    range 0..255 (an empty sequence passes)."""
    if not is_sequence(obj):
        return 0 <= obj < 256
    for item in obj:
        if not is_int255(item):
            return 0
    return 1
def is_float01(obj):
    """Recursively check that every number in *obj* lies in the closed
    interval [0, 1] (an empty sequence passes)."""
    if not is_sequence(obj):
        return 0 <= obj <= 1
    for item in obj:
        if not is_float01(item):
            return 0
    return 1
def is_datasetattr(obj):
    """Check if obj is a DataSetAttr instance."""
    # types.InstanceType is Python 2 old-style classes; DataSetAttr is
    # imported at the bottom of this module to avoid circular imports.
    return type(obj) is types.InstanceType and isinstance(obj,DataSetAttr.DataSetAttr)
def is_dataset(obj):
    """Check if obj is a DataSet instance (see module-bottom imports)."""
    return type(obj) is types.InstanceType and isinstance(obj,DataSet.DataSet)
def is_pointdata(obj):
    """Check if obj is a Data.PointData instance."""
    return type(obj) is types.InstanceType and isinstance(obj,Data.PointData)
def is_celldata(obj):
    """Check if obj is a Data.CellData instance."""
    return type(obj) is types.InstanceType and isinstance(obj,Data.CellData)
class Common:
    """Abstract class. Defines output, checker, and getter functions."""
    # struct.pack format character for each VTK datatype name (binary output).
    struct_fmt_map = {'char':'c',
                      'long':'l','double':'d',
                      'int':'i','float':'f',
                      'unsigned char':'B'}
    # Datatype names reported by get_datatype() for plain Python numbers.
    default_int = 'int'
    default_float = 'float'
    def _get_trace(self,m):
        """Write message m to stderr, prefixed with the chain of calling
        method names walked back up to the nearest __init__ frame."""
        try:
            frame = sys._getframe().f_back
        except AttributeError: # Python 2.0 does not have sys._getframe
            frame = None
        n = ''
        while frame:
            i = frame.f_code.co_name
            n = '%s.%s'%(i,n)
            if i=='__init__':
                break
            frame = frame.f_back
        sys.stderr.write('%s.%s:\n\t%s\n'%(self.__class__.__name__,n[:-1],m))
    def warning(self,m=''):
        """Report a warning message (all reporters share _get_trace)."""
        self._get_trace(m)
    def skipping(self,m=''):
        """Report that some input was skipped."""
        self._get_trace(m)
    def error(self,m=''):
        """Report an error message (does not raise)."""
        self._get_trace(m)
    def __str__(self):
        # Subclasses are expected to provide to_string().
        return self.to_string()
    def get_datatype(self,obj):
        """Return 'int' or 'float' for obj, recursing into sequences;
        'float' wins as soon as any float component is found."""
        t = type(obj)
        if t is types.IntType: return self.default_int
        if t is types.FloatType: return self.default_float
        if not (is_sequence(obj) and len(obj)):
            raise ValueError,'expected int|float|non-empty sequence but got %s'%t
        for o in obj:
            r = self.get_datatype(o)
            if r==self.default_float:
                break
        return r
    def get_seq(self,obj,default=None):
        """Return sequence."""
        if is_sequence(obj):
            return obj
        if is_number(obj): return [obj]
        if obj is None and default is not None:
            self.warning('using default value (%s)'%(default))
            return self.get_seq(default)
        raise ValueError,'expected sequence|number but got %s'%(type(obj))
    def get_seq_seq(self,obj,default=None):
        """Return sequence of sequences."""
        # Python 2 map() returns a list here.
        if is_sequence2(obj):
            return map(lambda o,s=self,d=default:s.get_seq(o,d),obj)
        else:
            return [self.get_seq(obj,default)]
    def get_n_seq_seq(self,obj,default):
        """Return a list of equal-length rows; shorter rows are padded from
        default (either a single fill value or a template row)."""
        seq = self.get_seq_seq(obj,default)
        if is_sequence(default):
            n = len(default)
        else:
            n = max(map(len,seq))
            default = [default]*n
        ret = []
        flag = 0
        for v in seq:
            if len(v)!=n:
                ret.append(list(v)+default[len(v):])
                flag = 1
            else:
                ret.append(list(v))
        if flag:
            self.warning('Some items were filled with default value (%s) to obtain size=%s'%(default[0],n))
        return ret
    def get_3_tuple(self,obj,default=None):
        """Return 3-tuple from
        number -> (obj,default[1],default[2])
        0-sequence|None -> default
        1-sequence -> (obj[0],default[1],default[2])
        2-sequence -> (obj[0],obj[1],default[2])
        (3 or more)-sequence -> (obj[0],obj[1],obj[2])
        """
        # Despite the docstring, a None default is rejected here.
        if not (default is not None \
                and type(default) is types.TupleType \
                and len(default)==3):
            raise ValueError,'argument default must be 3-tuple|None but got %s'%(default)
        if is_sequence(obj):
            n = len(obj)
            if n>3:
                self.warning('expected 3-sequence but got %s-%s'%(n,type(obj)))
            if n>=3:
                return tuple(obj)
            self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
            if default is not None:
                if n==0:
                    return default
                elif n==1:
                    return (obj[0],default[1],default[2])
                elif n==2:
                    return (obj[0],obj[1],default[2])
        elif is_number(obj) and default is not None:
            self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
            return (obj,default[1],default[2])
        elif obj is None and default is not None:
            self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
            return default
        # NOTE(review): given the default check above this is effectively
        # unreachable; if it were reached with a non-sequence obj it would
        # raise NameError because n is unbound.
        raise ValueError,'failed to construct 3-tuple from %s-%s'%(n,type(obj))
    def get_3_tuple_list(self,obj,default=None):
        """Return list of 3-tuples from
        sequence of a sequence,
        sequence - it is mapped to sequence of 3-sequences if possible
        number
        """
        if is_sequence2(obj):
            return map(lambda o,s=self,d=default:s.get_3_tuple(o,d),obj)
        #return [self.get_3_tuple(o,default) for o in obj]
        elif is_sequence(obj):
            # Flat sequence: chop into consecutive 3-element chunks.
            ret = []
            for i in range(0,len(obj),3):
                ret.append(self.get_3_tuple(obj[i:i+3],default))
            return ret
            #return [self.get_3_tuple(obj[i:i+3],default) for i in range(0,len(obj),3)]
        else:
            return [self.get_3_tuple(obj,default)]
    def get_3_3_tuple(self,obj,default=None):
        """Return tuple of 3-tuples
        """
        if is_sequence2(obj):
            ret = []
            for i in range(3):
                if i<len(obj):
                    ret.append(self.get_3_tuple(obj[i],default))
                else:
                    ret.append(self.get_3_tuple(default,default))
            return tuple(ret)
        if is_sequence(obj):
            # Flat sequence of up to 9 numbers -> three rows of three.
            if len(obj)>9:
                self.warning('ignoring elements obj[i], i>=9')
            r = obj[:9]
            rr = []
            for j in range(0,len(r),3):
                rr.append(self.get_3_tuple(r[j:j+3],default))
            r = rr
            #r = [self.get_3_tuple(r[j:j+3],default) for j in range(0,len(r),3)]
            if len(r)<3:
                self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
            while len(r)<3:
                r.append(self.get_3_tuple(default,default))
            return tuple(r)
        self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
        r1 = self.get_3_tuple(obj,default)
        r2 = self.get_3_tuple(default,default)
        r3 = self.get_3_tuple(default,default)
        return (r1,r2,r3)
    def get_3_3_tuple_list(self,obj,default=None):
        """Return list of 3x3-tuples.
        """
        if is_sequence3(obj):
            return map(lambda o,s=self,d=default:s.get_3_3_tuple(o,d),obj)
        #return [self.get_3_3_tuple(o,default) for o in obj]
        return [self.get_3_3_tuple(obj,default)]
    def _get_nof_objs(self,seq):
        """Total number of leaf items in a (possibly nested) sequence."""
        # reduce() is a builtin on Python 2.
        if is_sequence2(seq):
            return reduce(lambda x,y:x+y,map(self._get_nof_objs,seq),0)
        #return reduce(lambda x,y:x+y,[self._get_nof_objs(s) for s in seq],0)
        return len(seq)
    def seq_to_string(self,seq,format,datatype):
        """Serialize seq: ascii joins numbers with spaces/newlines, binary
        packs them with struct using struct_fmt_map[datatype]."""
        assert is_sequence(seq),'expected sequence but got %s'%(type(seq))
        if format == 'ascii':
            if is_sequence2(seq):
                sep = '\n'
                if is_sequence3(seq):
                    sep = '\n\n'
                return string.join(map(lambda v,s=self,f=format,d=datatype:s.seq_to_string(v,f,d),seq),sep)
            #return sep.join([self.seq_to_string(v,format,datatype) for v in seq])
            else:
                return string.join(map(str,seq),' ')
        elif format == 'binary':
            if is_sequence2(seq):
                return string.join(map(lambda v,s=self,f=format,d=datatype:s.seq_to_string(v,f,d),seq),'')
            #return ''.join([''.join(self.seq_to_string(v,format,datatype)) for v in seq])
            else:
                try:
                    fmt = self.struct_fmt_map[datatype]
                except KeyError:
                    fmt = None
                if fmt:
                    # apply() is the Python 2 spelling of
                    # struct.pack(fmt*len(seq), *seq).
                    return apply(struct.pack,tuple([fmt*len(seq)]+list(seq)))
                #return struct.pack(fmt*len(seq),*seq)
        raise NotImplementedError,'format=%s, datatype=%s'%(format,datatype)
    def float01_to_int255(self,seq):
        """Recursively map floats in [0,1] to integers in 0..255."""
        assert is_float01(seq)
        if is_sequence(seq):
            return map(self.float01_to_int255,seq)
        #return [self.float01_to_int255(l) for l in seq]
        else:
            return int(seq*255)
    def int255_to_float01(self,seq):
        """Recursively map integers in 0..255 to floats in [0,1]."""
        assert is_int255(seq)
        if is_sequence(seq):
            return map(self.int255_to_float01,seq)
        #return [self.int255_to_float01(l) for l in seq]
        else:
            return round(seq/255.0,6)
import Data
import DataSet
import DataSetAttr
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
__version__ = "$Id: ColorScalars.py,v 1.1 2001/05/20 12:51:29 pearu Exp $"
import common
import DataSetAttr
import string
class ColorScalars(DataSetAttr.DataSetAttr):
    """Holds VTK color scalars.
    """
    def __init__(self,scalars,name=None):
        self.name = self._get_name(name)
        # Rows are padded to equal length with default_value.
        self.scalars = self.get_n_seq_seq(scalars,self.default_value)
    def to_string(self,format='ascii'):
        """Serialize as COLOR_SCALARS: binary output uses bytes 0..255,
        ascii output uses floats in [0,1]; values are converted between
        the two representations as needed."""
        ret = ['COLOR_SCALARS %s %s'%(self.name,len(self.scalars[0]))]
        seq = self.scalars
        if format=='binary':
            if not common.is_int255(seq):
                seq = self.float01_to_int255(seq)
            ret.append(self.seq_to_string(seq,format,'unsigned char'))
        else:
            if not common.is_float01(seq):
                seq = self.int255_to_float01(seq)
            ret.append(self.seq_to_string(seq,format,'float'))
        # Python 2 idiom; equivalent to '\n'.join(ret).
        return string.join(ret,'\n')
    def get_size(self):
        """Number of color tuples held."""
        return len(self.scalars)
if __name__ == "__main__":
    # Demo: rows padded to a common width (Python 2 print statement).
    print ColorScalars([[3,3],[4,3],240,3,2]).to_string()
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSet
import string
class RectilinearGrid(DataSet.DataSet):
    """The topology of a dataset is described by
    x-coordinates
    y-coordinates
    z-coordinates
    """
    def __init__(self,x=None,y=None,z=None):
        # A missing axis defaults to a single 0 coordinate.
        self.x = self.get_seq(x,[0])
        self.y = self.get_seq(y,[0])
        self.z = self.get_seq(z,[0])
        self.dimensions = (len(self.x),len(self.y),len(self.z))
        if self._check_dimensions():
            raise ValueError,'dimensions must be 3-tuple of ints >=1'
    def to_string(self, format='ascii'):
        """Serialize as a VTK RECTILINEAR_GRID dataset section."""
        # Each axis may carry its own datatype (int vs float).
        tx = self.get_datatype(self.x)
        ty = self.get_datatype(self.y)
        tz = self.get_datatype(self.z)
        ret = ['DATASET RECTILINEAR_GRID',
               'DIMENSIONS %s %s %s'%self.dimensions,
               'X_COORDINATES %s %s'%(len(self.x),tx),
               self.seq_to_string(self.x,format,tx),
               'Y_COORDINATES %s %s'%(len(self.y),ty),
               self.seq_to_string(self.y,format,ty),
               'Z_COORDINATES %s %s'%(len(self.z),tz),
               self.seq_to_string(self.z,format,tz)]
        return string.join(ret,'\n')
if __name__ == "__main__":
    # Demo: only x coordinates given; y and z default to [0].
    print RectilinearGrid([1,2,2,4,4,5.4])
| Python |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001/05/20 12:51:29 $
Pearu Peterson
"""
import DataSet
import string
class StructuredPoints(DataSet.DataSet):
    """The topology of a dataset is described by
    dimensions - int|(1-3)-int sequence (>=1)
    origin - number|(1-3)-number sequence
    spacing - number|(1-3)-number sequence (>0)
    """
    def __init__(self,dimensions,origin=(0,0,0),spacing=(1,1,1)):
        self.dimensions = self.get_3_tuple(dimensions,(1,1,1))
        if self._check_dimensions():
            raise ValueError,'dimensions must be 3-tuple of ints >=1'
        # NOTE(review): short origin sequences are padded with 1, not 0,
        # although the signature default is (0,0,0) — looks like a
        # copy-paste of the spacing fill; confirm intent before changing.
        self.origin = self.get_3_tuple(origin,(1,1,1))
        if self._check_origin():
            raise ValueError,'origin must be 3-tuple of numbers'
        self.spacing = self.get_3_tuple(spacing,(1,1,1))
        if self._check_spacing():
            raise ValueError,'spacing must be 3-tuple of positive numbers'
    def to_string(self,format = 'ascii'):
        """Serialize as a VTK STRUCTURED_POINTS dataset section.
        (format is accepted for API symmetry; no point payload follows,
        so the output is the same for ascii and binary.)"""
        ret = ['DATASET STRUCTURED_POINTS',
               'DIMENSIONS %s %s %s'%self.dimensions,
               'ORIGIN %s %s %s'%self.origin,
               'SPACING %s %s %s'%self.spacing]
        return string.join(ret,'\n')
if __name__ == "__main__":
    # Demos exercising full, short, scalar and oversized dimension inputs.
    print StructuredPoints((2,3,4))
    print StructuredPoints((2,3))
    print StructuredPoints(5)
    print StructuredPoints([2,3,5,6]).get_size()
| Python |
from __future__ import print_function
from future.builtins import dict
from future.builtins import open
#!/usr/bin/env python
import os
import sys
from distutils.core import setup
version_file = os.path.join('lib','__version__.py')
# NOTE(review): 'if 1 or ...' forces regeneration of the version files on
# every run; the os.path.exists() test is effectively dead code.
if 1 or not os.path.exists(version_file):
    major_version = 0
    minor_version = 4
    # tools/get_revision.py is expected to define revision_version in this
    # namespace — TODO confirm against that script.
    exec(compile(open(os.path.join('tools','get_revision.py')).read(), os.path.join('tools','get_revision.py'), 'exec'))
    __version__='%d.%d.%d'%(major_version,minor_version,revision_version)
    # Write the computed version into both package variants.
    for l in ['lib','lib152']:
        f = open(os.path.join(l,'__version__.py'),'w')
        f.write('__version__ = "%s"\n'%(__version__))
        f.close()
# Re-read the canonical version file to define __version__ for setup().
exec(compile(open(version_file).read(), version_file, 'exec'))
# The extra metadata keywords below are only understood by Python >= 2.3.
if sys.version[:3]>='2.3':
    config = dict(\
        download_url='http://cens.ioc.ee/projects/pyvtk/rel-0.x/PyVTK-0.latest.tar.gz',
        keywords = ['VTK'],
        classifiers=[
            'Development Status :: 4 - Beta',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
            'Natural Language :: English',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Topic :: Scientific/Engineering :: Visualization',
            ],
        platforms = 'All')
else:
    config = {}
print("PyVTK Version",__version__)
# Package sources are picked per interpreter: lib for 2.x/3.x, lib152 for 1.x.
setup (name = "PyVTK",
       version = __version__,
       description = "PyVTK - tools for manipulating VTK files in Python",
       author = "Pearu Peterson",
       author_email = "pearu@cens.ioc.ee",
       maintainer = "Pearu Peterson",
       maintainer_email = "pearu@cens.ioc.ee",
       license = "LGPL",
       long_description= """\
PyVTK provides tools for manipulating VTK (Visualization Toolkit)
files in Python:
VtkData - create VTK files from Python / read VTK files to Python.""",
       url = "http://cens.ioc.ee/projects/pyvtk/",
       packages = ['pyvtk'],
       package_dir = {'pyvtk':{'3':'lib','2':'lib','1':'lib152'}[sys.version[0]]},
       **config
       )
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import range
from future.builtins import map
"""
Field
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.2 $
$Date: 2001/05/31 17:48:54 $
Pearu Peterson
"""
from . import DataSetAttr
from . import common
class Field(DataSetAttr.DataSetAttr):
    """Holds a VTK FIELD: a named collection of equal-length arrays.
    Usage:
      Field([<name string>,] arrname_1=<sequence of n_1-sequences>, ...,
            arrname_k=<sequence of n_k-sequences>)
    Attributes:
      data - dictionary mapping array name -> list of rows
      name - field name
    Public methods:
      get_size()
      to_string(format = 'ascii')
    """
    def __init__(self,*args,**kws):
        # Only the first positional argument (the field name) is honoured.
        name = args[0] if args else None
        if len(args)>1:
            self.warning('Ignoring all arguments except the first')
        self.name = self._get_name(name)
        arrays = {}
        for key, raw in kws.items():
            arrays[key] = self.get_n_seq_seq(raw,self.default_value)
        target = max([len(rows) for rows in arrays.values()]) if arrays else 0
        # Pad shorter arrays with default-valued rows so every array has
        # the same number of tuples.
        for key, rows in arrays.items():
            if len(rows) < target:
                self.warning('Filling array %s (size=%s) with default value (%s) to obtain size=%s'%(repr(key),len(rows),self.default_value,target))
                while len(rows) < target:
                    rows.append([self.default_value]*len(rows[0]))
        self.data = arrays
    def to_string(self,format='ascii'):
        """Serialize as a VTK FIELD section."""
        lines = ['FIELD %s %s'%(self.name,len(self.data))]
        for key, rows in self.data.items():
            dtype = self.get_datatype(rows)
            lines.append('%s %s %s %s'%(key,len(rows[0]),len(rows),dtype))
            lines.append(self.seq_to_string(rows,format,dtype))
        return '\n'.join(lines)
    def get_size(self):
        """Number of tuples in each array (length of the first one)."""
        arrays = list(self.data.values())
        return len(arrays[0])
def field_fromfile(f,n,sl):
    """Read a FIELD section from an ascii VTK file.

    f  - open file object positioned just after the FIELD header line
    n  - unused here; kept for signature compatibility with the other
         *_fromfile readers
    sl - the already-split FIELD header line: [dataname, numarrays]

    Returns a Field instance built from the arrays read.
    """
    dataname = sl[0]
    numarrays = eval(sl[1])
    # Renamed from 'dict' — the original shadowed the builtin.
    arrays = {}
    for i in range(numarrays):
        # Per-array header: name numComponents numTuples dataType
        l = common._getline(f).split(' ')
        assert len(l)==4,repr(l)
        name = l[0].strip()
        numcomps = eval(l[1])
        numtuples = eval(l[2])
        datatype = l[3].lower()
        assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
        # NOTE(review): eval() on file content is unsafe for untrusted
        # input; int()/float() would be safer replacements.
        arr = []
        while len(arr)<numcomps*numtuples:
            arr += list(map(eval,common._getline(f).split(' ')))
        assert len(arr)==numcomps*numtuples
        # Regroup the flat value stream into numtuples rows of numcomps.
        arr2 = []
        for j in range(0,numtuples*numcomps,numcomps):
            arr2.append(arr[j:j+numcomps])
        arrays[name] = arr2
    return Field(dataname,**arrays)
if __name__ == "__main__":
    # Demo: two arrays padded to a common number of tuples.
    print(Field(a=[[2,23],3,3],c=[2,3,4,5]).to_string())
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import zip
from future.builtins import range
from future.builtins import map
"""
UnstructuredGrid
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.7 $
$Date: 2010-03-03 13:41:41 $
Pearu Peterson
"""
from . import DataSet
from . import common
class UnstructuredGrid(DataSet.DataSet):
    """
    Usage:
      UnstructuredGrid(<sequence of 3-tuples of points>,
                       vertex = <sequence [of 1-sequences]>
                       poly_vertex = <sequence of n-sequences>,
                       line = <sequence of 2-sequences>,
                       poly_line = <sequence of n-sequences>,
                       triangle = <sequence of 3-sequences>,
                       triangle_strip = <sequence of n-sequences>,
                       polygon = <sequence of n-sequences>,
                       pixel = <sequence of 4-sequences>,
                       quad = <sequence of 4-sequences>,
                       tetra = <sequence of 4-sequences>,
                       voxel = <sequence of 8-sequences>,
                       hexahedron = <sequence of 8-sequences>,
                       wedge = <sequence of 6-sequences>,
                       pyramid = <sequence of 5-sequences>,
                       quadratic_tetra = <sequence of 10-sequences>
                       )
    Attributes:
      points
      vertex
      poly_vertex, line, poly_line, triangle, triangle_strip,
      polygon, pixel, quad, tetra, voxel, hexahedron, wedge, pyramid
    Public methods:
      get_size()
      get_cell_size()
      to_string(format = 'ascii')
      get_points()
      <DataSetAttr class>(...)
    """
    # Cell-type name -> VTK numeric cell type id.
    _vtk_cell_types_map = {'vertex':1,'poly_vertex':2,'line':3,'poly_line':4,
                           'triangle':5,'triangle_strip':6,'polygon':7,'pixel':8,
                           'quad':9,'tetra':10,'voxel':11,'hexahedron':12,
                           'wedge':13,'pyramid':14,'quadratic_tetra':24}
    # Cell-type name -> required number of points per cell (-1 = variable).
    _vtk_cell_nums_map = {'vertex':1,'poly_vertex':-1,'line':2,'poly_line':-1,
                          'triangle':3,'triangle_strip':-1,'polygon':-1,'pixel':4,
                          'quad':4,'tetra':4,'voxel':8,'hexahedron':8,
                          'wedge':6,'pyramid':5,
                          'quadratic_tetra':10}
    # Inverse of _vtk_cell_types_map; used when reading files.
    _vtk_cell_types_imap = {1:'vertex',2:'poly_vertex',3:'line',4:'poly_line',
                            5:'triangle',6:'triangle_strip',7:'polygon',
                            8:'pixel',9:'quad',10:'tetra',11:'voxel',12:'hexahedron',
                            13:'wedge',14:'pyramid',24:'quadratic_tetra'}
    def __init__(self,points,vertex=[],poly_vertex=[],line=[],poly_line=[],
                 triangle=[],triangle_strip=[],polygon=[],pixel=[],
                 quad=[],tetra=[],voxel=[],hexahedron=[],wedge=[],pyramid=[],
                 quadratic_tetra=[]):
        self.points = self.get_3_tuple_list(points,(0,0,0))
        sz = len(self.points)
        # Explicit name->argument mapping replaces the original
        # exec()-based attribute assignment, which was fragile and opaque.
        cell_args = {'vertex':vertex,'poly_vertex':poly_vertex,'line':line,
                     'poly_line':poly_line,'triangle':triangle,
                     'triangle_strip':triangle_strip,'polygon':polygon,
                     'pixel':pixel,'quad':quad,'tetra':tetra,'voxel':voxel,
                     'hexahedron':hexahedron,'wedge':wedge,'pyramid':pyramid,
                     'quadratic_tetra':quadratic_tetra}
        for k in list(self._vtk_cell_types_map.keys()):
            setattr(self, k, self.get_seq_seq(cell_args[k], []))
            if k=='vertex':
                # Vertices are normalised to a list of 1-element lists.
                r = []
                for v in self.vertex:
                    r += [[a] for a in v]
                self.vertex = r
            if self._check_int_seq(getattr(self,k),sz):
                raise ValueError('In cell %s: must be (seq of seq|seq) integers less than %s'%(k,sz))
        # Cells with a fixed arity must have exactly that many point indexes.
        for k,n in list(self._vtk_cell_nums_map.items()):
            if n==-1: continue
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            for v in kv:
                if len(v)!=n:
                    raise ValueError('Cell %s requires exactly %s points but got %s: %s'%(repr(k),n,len(v),v))
    def to_string(self,format='ascii'):
        """Serialize as a VTK UNSTRUCTURED_GRID dataset section."""
        t = self.get_datatype(self.points)
        ret = ['DATASET UNSTRUCTURED_GRID',
               'POINTS %s %s'%(self.get_size(),t),
               self.seq_to_string(self.points,format,t)]
        tps = []
        r = []
        sz = 0
        for k in list(self._vtk_cell_types_map.keys()):
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            # Each cell is written as: count, idx0, ..., idx<count-1>.
            s = self.seq_to_string([[len(v)]+list(v) for v in kv],format,'int')
            r .append(s)
            for v in kv:
                tps.append(self._vtk_cell_types_map[k])
                sz += len(v)+1
        sep = (format=='ascii' and '\n') or (format=='binary' and '')
        r = sep.join(r)
        ret += ['CELLS %s %s'%(len(tps),sz),
                r,
                'CELL_TYPES %s'%(len(tps)),
                self.seq_to_string(tps,format,'int')]
        return '\n'.join(ret)
    def get_cell_size(self):
        """Total number of cells across all cell types."""
        sz = 0
        for k in list(self._vtk_cell_types_map.keys()):
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            sz += len(kv)
        return sz
    def get_points(self):
        """Return the list of 3-tuple points."""
        return self.points
def unstructured_grid_fromfile(f,self):
    """Read an UNSTRUCTURED_GRID dataset body from ascii VTK file f.

    self - used only for its message() method (progress output).
    Returns (UnstructuredGrid instance, next line read from f).
    """
    l = common._getline(f)
    k,n,datatype = [s.strip().lower() for s in l.split()]
    if k!='points':
        raise ValueError('expected points but got %s'%(repr(k)))
    n = eval(n)
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    # NOTE(review): eval() on file content is unsafe for untrusted input.
    points = []
    self.message('\tgetting %s points'%n)
    while len(points) < 3*n:
        points += list(map(eval,common._getline(f).split()))
    assert len(points)==3*n
    l = common._getline(f).split()
    assert len(l)==3 and l[0].strip().lower() == 'cells',repr(l)
    n = eval(l[1])
    size = eval(l[2])
    lst = []
    self.message('\tgetting %s cell indexes'%size)
    while len(lst) < size:
        line = common._getline(f)
        lst += list(map(eval,line.split()))
    assert len(lst)==size
    # Split the flat CELLS stream into per-cell index lists; each cell is
    # stored as: count, idx0, ..., idx<count-1>.
    lst2 = []
    j = 0
    for i in range(n):
        lst2.append(lst[j+1:j+lst[j]+1])
        j += lst[j]+1
    l = common._getline(f).split()
    assert len(l)==2 and l[0].strip().lower() == 'cell_types' and eval(l[1])==n,repr(l)
    tps = []
    self.message('\tgetting %s cell types'%n)
    while len(tps) < n:
        tps += list(map(eval,common._getline(f).split()))
    assert len(tps)==n
    # Group cells by their VTK type name for the keyword constructor.
    # NOTE(review): local name 'dict' shadows the builtin.
    dict = {}
    for i,t in zip(lst2,tps):
        k = UnstructuredGrid._vtk_cell_types_imap[t]
        if k not in dict:
            dict[k] = []
        dict[k].append(i)
    self.message('\tdone')
    return UnstructuredGrid(points,**dict),common._getline(f)
if __name__ == "__main__":
    # Demo: mixed scalar/sequence points; vertex input is auto-wrapped.
    print(UnstructuredGrid([[1,2],[2,4],3,5],
                           line = [[2,3],[1,2],[2,3]],
                           vertex=2))
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import range
from future.builtins import map
"""
Tensors
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.4 $
$Date: 2003/10/13 18:40:49 $
Pearu Peterson
"""
from . import DataSetAttr
from . import common
class Tensors(DataSetAttr.DataSetAttr):
    """Holds VTK Tensors (one 3x3 tensor per point or cell).
    Usage:
      Tensors(<sequence of 3x3-tuples> , name = <string>)
    Attributes:
      tensors
      name
    Public methods:
      get_size()
      to_string(format = 'ascii')
    """
    def __init__(self,tensors,name=None):
        """Normalise input into a list of 3x3 tuples, padding with the
        class default value where components are missing."""
        self.name = self._get_name(name)
        fill = (self.default_value,)*3
        self.tensors = self.get_3_3_tuple_list(tensors,fill)
    def to_string(self,format='ascii'):
        """Serialize as a VTK TENSORS section."""
        dtype = self.get_datatype(self.tensors)
        header = 'TENSORS %s %s'%(self.name,dtype)
        body = self.seq_to_string(self.tensors,format,dtype)
        return header + '\n' + body
    def get_size(self):
        """Number of tensors held."""
        return len(self.tensors)
def tensors_fromfile(f,n,sl):
    """Read n 3x3 tensors from an ascii VTK TENSORS section of file f."""
    assert len(sl)==2
    dataname = sl[0].strip()
    datatype = sl[1].strip().lower()
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    # Collect the flat stream of 9*n numeric values.
    total = 9*n
    values = []
    while len(values) < total:
        values += list(map(eval,common._getline(f).split(' ')))
    assert len(values) == total
    # Regroup into n tensors, each a tuple of three 3-tuples (rows).
    tensors = []
    for start in range(0, total, 9):
        rows = (tuple(values[start:start+3]),
                tuple(values[start+3:start+6]),
                tuple(values[start+6:start+9]))
        tensors.append(rows)
    return Tensors(tensors,dataname)
if __name__ == "__main__":
    # Demos: ragged input padded to 3x3, and a single scalar expanded.
    print(Tensors([[[3,3]],[4,3.],[[240]],3,2,3]).to_string('ascii'))
    print(Tensors(3).to_string('ascii'))
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import zip
from future.builtins import range
from future.builtins import map
"""
UnstructuredGrid
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.8 $
$Date: 2011-10-03 10:16:56 $
Pearu Peterson
"""
from . import DataSet
from . import common
class UnstructuredGrid(DataSet.DataSet):
    """
    Usage:
      UnstructuredGrid(<sequence of 3-tuples of points>,
                       vertex = <sequence [of 1-sequences]>
                       poly_vertex = <sequence of n-sequences>,
                       line = <sequence of 2-sequences>,
                       poly_line = <sequence of n-sequences>,
                       triangle = <sequence of 3-sequences>,
                       triangle_strip = <sequence of n-sequences>,
                       polygon = <sequence of n-sequences>,
                       pixel = <sequence of 4-sequences>,
                       quad = <sequence of 4-sequences>,
                       tetra = <sequence of 4-sequences>,
                       voxel = <sequence of 8-sequences>,
                       hexahedron = <sequence of 8-sequences>,
                       wedge = <sequence of 6-sequences>,
                       pyramid = <sequence of 5-sequences>,
                       quadratic_tetra = <sequence of 10-sequences>
                       )
    Attributes:
      points
      vertex
      poly_vertex, line, poly_line, triangle, triangle_strip,
      polygon, pixel, quad, tetra, voxel, hexahedron, wedge, pyramid
    Public methods:
      get_size()
      get_cell_size()
      to_string(format = 'ascii')
      get_points()
      <DataSetAttr class>(...)
    """
    # Cell-type name -> VTK numeric cell type id.
    _vtk_cell_types_map = {'vertex':1,'poly_vertex':2,'line':3,'poly_line':4,
                           'triangle':5,'triangle_strip':6,'polygon':7,'pixel':8,
                           'quad':9,'tetra':10,'voxel':11,'hexahedron':12,
                           'wedge':13,'pyramid':14,'quadratic_tetra':24}
    # Cell-type name -> required number of points per cell (-1 = variable).
    _vtk_cell_nums_map = {'vertex':1,'poly_vertex':-1,'line':2,'poly_line':-1,
                          'triangle':3,'triangle_strip':-1,'polygon':-1,'pixel':4,
                          'quad':4,'tetra':4,'voxel':8,'hexahedron':8,
                          'wedge':6,'pyramid':5,
                          'quadratic_tetra':10}
    # Inverse of _vtk_cell_types_map; used when reading files.
    _vtk_cell_types_imap = {1:'vertex',2:'poly_vertex',3:'line',4:'poly_line',
                            5:'triangle',6:'triangle_strip',7:'polygon',
                            8:'pixel',9:'quad',10:'tetra',11:'voxel',12:'hexahedron',
                            13:'wedge',14:'pyramid',24:'quadratic_tetra'}
    def __init__(self,points,vertex=[],poly_vertex=[],line=[],poly_line=[],
                 triangle=[],triangle_strip=[],polygon=[],pixel=[],
                 quad=[],tetra=[],voxel=[],hexahedron=[],wedge=[],pyramid=[],
                 quadratic_tetra=[]):
        self.points = self.get_3_tuple_list(points,(0,0,0))
        sz = len(self.points)
        # Explicit name->argument mapping replaces the original
        # exec()-based attribute assignment, which was fragile and opaque.
        cell_args = {'vertex':vertex,'poly_vertex':poly_vertex,'line':line,
                     'poly_line':poly_line,'triangle':triangle,
                     'triangle_strip':triangle_strip,'polygon':polygon,
                     'pixel':pixel,'quad':quad,'tetra':tetra,'voxel':voxel,
                     'hexahedron':hexahedron,'wedge':wedge,'pyramid':pyramid,
                     'quadratic_tetra':quadratic_tetra}
        for k in list(self._vtk_cell_types_map.keys()):
            setattr(self, k, self.get_seq_seq(cell_args[k], []))
            if k=='vertex':
                # Vertices are normalised to a list of 1-element lists.
                r = []
                for v in self.vertex:
                    r += [[a] for a in v]
                self.vertex = r
            if self._check_int_seq(getattr(self,k),sz):
                raise ValueError('In cell %s: must be (seq of seq|seq) integers less than %s'%(k,sz))
        # Cells with a fixed arity must have exactly that many point indexes.
        for k,n in list(self._vtk_cell_nums_map.items()):
            if n==-1: continue
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            for v in kv:
                if len(v)!=n:
                    raise ValueError('Cell %s requires exactly %s points but got %s: %s'%(repr(k),n,len(v),v))
    def to_string(self,format='ascii'):
        """Serialize as a VTK UNSTRUCTURED_GRID dataset section."""
        t = self.get_datatype(self.points)
        ret = ['DATASET UNSTRUCTURED_GRID',
               'POINTS %s %s'%(self.get_size(),t),
               self.seq_to_string(self.points,format,t)]
        tps = []
        r = []
        sz = 0
        for k in list(self._vtk_cell_types_map.keys()):
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            # Each cell is written as: count, idx0, ..., idx<count-1>.
            s = self.seq_to_string([[len(v)]+list(v) for v in kv],format,'int')
            r .append(s)
            for v in kv:
                tps.append(self._vtk_cell_types_map[k])
                sz += len(v)+1
        sep = (format=='ascii' and '\n') or (format=='binary' and '')
        r = sep.join(r)
        ret += ['CELLS %s %s'%(len(tps),sz),
                r,
                'CELL_TYPES %s'%(len(tps)),
                self.seq_to_string(tps,format,'int')]
        return '\n'.join(ret)
    def get_cell_size(self):
        """Total number of cells across all cell types."""
        sz = 0
        for k in list(self._vtk_cell_types_map.keys()):
            kv = getattr(self,k)
            if kv==[] or kv[0]==[]: continue
            sz += len(kv)
        return sz
    def get_points(self):
        """Return the list of 3-tuple points."""
        return self.points
def unstructured_grid_fromfile(f,self):
    """Read an UNSTRUCTURED_GRID dataset body from open file *f*.

    *self* is the calling VtkData instance (used only for message()).
    Returns (UnstructuredGrid instance, next line of the file).
    NOTE: eval() of file tokens follows the module-wide parsing style
    and is unsafe on untrusted input.
    """
    l = common._getline(f)
    k,n,datatype = [s.strip().lower() for s in l.split()]
    if k!='points':
        raise ValueError('expected points but got %s'%(repr(k)))
    n = eval(n)
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    points = []
    self.message('\tgetting %s points'%n)
    while len(points) < 3*n:
        points += list(map(eval,common._getline(f).split()))
    assert len(points)==3*n
    l = common._getline(f).split()
    assert len(l)==3 and l[0].strip().lower() == 'cells',repr(l)
    n = eval(l[1])
    size = eval(l[2])
    lst = []
    self.message('\tgetting %s cell indexes'%size)
    while len(lst) < size:
        line = common._getline(f)
        lst += list(map(eval,line.split()))
    assert len(lst)==size
    # unpack the flat [count, i0, i1, ..., count, ...] encoding
    lst2 = []
    j = 0
    for i in range(n):
        lst2.append(lst[j+1:j+lst[j]+1])
        j += lst[j]+1
    l = common._getline(f).split()
    assert len(l)==2 and l[0].strip().lower() == 'cell_types' and eval(l[1])==n,repr(l)
    tps = []
    self.message('\tgetting %s cell types'%n)
    while len(tps) < n:
        tps += list(map(eval,common._getline(f).split()))
    assert len(tps)==n
    # group cell index lists by their VTK cell type name
    # (renamed from `dict`, which shadowed the builtin)
    cells = {}
    for i,t in zip(lst2,tps):
        k = UnstructuredGrid._vtk_cell_types_imap[t]
        cells.setdefault(k,[]).append(i)
    self.message('\tdone')
    return UnstructuredGrid(points,**cells),common._getline(f)
if __name__ == "__main__":
    # Quick smoke test when run as a script.
    print(UnstructuredGrid([[1,2],[2,4],3,5],
                           line = [[2,3],[1,2],[2,3]],
                           vertex=2))
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import range
from future.builtins import map
"""
LookupTable
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.2 $
$Date: 2001/05/31 17:48:54 $
Pearu Peterson
"""
__version__ = "$Id: LookupTable.py,v 1.2 2001/05/31 17:48:54 pearu Exp $"
from . import common
from . import DataSetAttr
class LookupTable(DataSetAttr.DataSetAttr):
    """Holds a VTK LOOKUP_TABLE of 4-component (RGBA) entries.

    Usage:
      LookupTable(<sequence of 4-sequences>, name = <string>)
    Attributes:
      table
      name
    Public methods:
      get_size()
      to_string(format = 'ascii')
    """
    def __init__(self, table, name=None):
        self.name = self._get_name(name)
        self.table = self.get_n_seq_seq(table, [0, 0, 0, 0])
        row_len = len(self.table[0])
        if row_len != 4:
            raise ValueError('expected sequence of 4-sequences but got %s' % (row_len))
    def to_string(self, format='ascii'):
        header = 'LOOKUP_TABLE %s %s' % (self.name, len(self.table))
        colors = self.table
        if format == 'binary':
            # binary tables are written as unsigned chars in 0..255
            if not common.is_int255(colors):
                colors = self.float01_to_int255(colors)
            body = self.seq_to_string(colors, format, 'unsigned char')
        else:
            # ascii tables are written as floats in 0..1
            if not common.is_float01(colors):
                colors = self.int255_to_float01(colors)
            body = self.seq_to_string(colors, format, 'float')
        return '\n'.join([header, body])
    def get_size(self):
        return len(self.table)
def lookup_table_fromfile(f,n,sl):
    """Read a LOOKUP_TABLE section body from open file *f*.

    *sl* holds the already-split remainder of the section header:
    [table name, number of entries].  *n* (the dataset size) is unused
    because a lookup table carries its own length.
    Returns a LookupTable instance.
    """
    tablename = sl[0]
    size = eval(sl[1])  # eval of file tokens follows the module-wide parsing style
    table = []
    while len(table)<4*size:
        # split() (not split(' ')) tolerates runs of whitespace and tabs
        table += list(map(eval,common._getline(f).split()))
    assert len(table) == 4*size
    # regroup the flat value list into 4-component (RGBA) rows
    table2 = []
    for i in range(0,len(table),4):
        table2.append(table[i:i+4])
    return LookupTable(table2,tablename)
if __name__ == "__main__":
    # Quick smoke test when run as a script.
    print(LookupTable([[3,3],[4,3],240,3,2]).to_string())
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import map
"""
PointData, CellData
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001/06/13 08:35:00 $
Pearu Peterson
"""
__version__ = "$Id: Data.py,v 1.3 2001/06/13 08:35:00 pearu Exp $"
from . import common
class Data(common.Common):
    """Abstract container for VTK attribute sections.

    Instantiate PointData or CellData, never Data itself; the subclass
    sets `data_type` to the VTK section keyword.
    """
    data_type = None
    def __init__(self, *args):
        if self.__class__.__name__ not in ['PointData','CellData']:
            raise TypeError('use PointData or CellData instead of Data')
        self.data = []
        self.length = None
        for attr in args:
            self.append(attr)
    def append(self, obj):
        """Add a DataSetAttr instance, enforcing a consistent size."""
        if not common.is_datasetattr(obj):
            self.error('expected DataSetAttr argument but got %s'%(type(obj)))
            raise TypeError
        if self.length is None:
            # the first attribute fixes the expected size for all later ones
            self.length = obj.get_size()
        if not common.is_lookuptable(obj) and self.length != obj.get_size():
            self.error('attribute data %s must be of length %s (as defined by first DataSetAttr) but got %s'%(repr(obj.__class__.__name__),self.length,obj.get_size()))
            raise ValueError
        self.data.append(obj)
    def get_size(self):
        return self.length
    def to_string(self, format='ascii'):
        if self.data_type is None:
            raise TypeError('use PointData or CellData instead of Data')
        lines = ['%s %s'%(self.data_type,self.length)]
        for attr in self.data:
            lines.append(attr.to_string(format))
        return '\n'.join(lines)
class PointData(Data):
    """
    Holds per-point attribute data, written as a POINT_DATA section.
    Usage:
      PointData(<DataSetAttr instances>)
    Attributes:
      data - list of DataSetAttr instances
    Public methods:
      get_size()
      to_string(format = 'ascii')
      append(<DataSetAttr instance>)
    """
    # VTK section keyword used by Data.to_string()
    data_type = 'POINT_DATA'
class CellData(Data):
    """
    Holds per-cell attribute data, written as a CELL_DATA section.
    Usage:
      CellData(<DataSetAttr instances>)
    Attributes:
      data - list of DataSetAttr instances
    Public methods:
      get_size()
      to_string(format = 'ascii')
      append(<DataSetAttr instance>)
    """
    # VTK section keyword used by Data.to_string()
    data_type = 'CELL_DATA'
if __name__ == "__main__":
    # Quick smoke test when run as a script.
    from . import Scalars
    print(PointData(Scalars.Scalars([2,3])))
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import range
from future.builtins import map
"""
TextureCoordinates
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001/05/31 17:48:55 $
Pearu Peterson
"""
from . import DataSetAttr
class TextureCoordinates(DataSetAttr.DataSetAttr):
    """Holds VTK Texture Coordinates.

    Usage:
      TextureCoordinates(<sequence of (1,2, or 3)-sequences>, name = <string>)
    Attributes:
      coords
      name
    Public methods:
      get_size()
      to_string(format = 'ascii')
    """
    def __init__(self, scalars, name=None):
        self.name = self._get_name(name)
        self.coords = self.get_n_seq_seq(scalars, self.default_value)
        dim = len(self.coords[0])
        if dim < 1 or dim > 3:
            raise ValueError('texture coordinates dimension must be 1, 2, or 3 but got %s' % (dim))
    def to_string(self, format='ascii'):
        t = self.get_datatype(self.coords)
        header = 'TEXTURE_COORDINATES %s %s %s' % (self.name, len(self.coords[0]), t)
        return '\n'.join([header, self.seq_to_string(self.coords, format, t)])
    def get_size(self):
        return len(self.coords)
def texture_coordinates_fromfile(f,n,sl):
    """Read a TEXTURE_COORDINATES section body from open file *f*.

    *n* is the number of items, *sl* the split section header remainder:
    [data name, dimension, datatype].
    Returns a TextureCoordinates instance.
    """
    # BUGFIX: this module never imported `common`, so the _getline()
    # calls below raised NameError at runtime; import it locally here.
    from . import common
    assert len(sl)==3
    dataname = sl[0].strip()
    dim = eval(sl[1])  # eval of file tokens follows the module-wide parsing style
    datatype = sl[2].strip().lower()
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    arr = []
    while len(arr)<dim*n:
        # split() (not split(' ')) tolerates runs of whitespace and tabs
        arr += list(map(eval,common._getline(f).split()))
    assert len(arr)==dim*n
    # regroup the flat value list into dim-sized rows
    arr2 = []
    for i in range(0,len(arr),dim):
        arr2.append(arr[i:i+dim])
    return TextureCoordinates(arr2,dataname)
if __name__ == "__main__":
    # Quick smoke test when run as a script.
    print(TextureCoordinates([[3,3],[4,3],240,3,2]).to_string())
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import map
"""
Vectors
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.2 $
$Date: 2001/05/31 17:48:55 $
Pearu Peterson
"""
from . import DataSetAttr
from . import common
class Vectors(DataSetAttr.DataSetAttr):
    """Holds VTK Vectors.

    Usage:
      Vectors(<sequence of 3-tuples>, name = <string>)
    Attributes:
      vectors
      name
    Public methods:
      get_size()
      to_string(format = 'ascii')
    """
    def __init__(self, vectors, name=None):
        self.name = self._get_name(name)
        fill = (self.default_value,) * 3
        self.vectors = self.get_3_tuple_list(vectors, fill)
    def to_string(self, format='ascii'):
        t = self.get_datatype(self.vectors)
        header = 'VECTORS %s %s' % (self.name, t)
        return '\n'.join([header, self.seq_to_string(self.vectors, format, t)])
    def get_size(self):
        return len(self.vectors)
def vectors_fromfile(f,n,sl):
    """Read a VECTORS section body (3*n values) from open file *f*.

    *sl* is the split section header remainder: [data name, datatype].
    Returns a Vectors instance.
    """
    dataname = sl[0]
    datatype = sl[1].lower()
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    vectors = []
    while len(vectors) < 3*n:
        # split() (not split(' ')) tolerates runs of whitespace and tabs
        vectors += list(map(eval,common._getline(f).split()))
    assert len(vectors) == 3*n
    return Vectors(vectors,dataname)
if __name__ == "__main__":
    # Quick smoke test when run as a script.
    print(Vectors([[3,3],[4,3.],240,3,2]).to_string())
| Python |
#!/usr/bin/env python
from __future__ import absolute_import
from future.builtins import str
"""
DataSetAttr
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.2 $
$Date: 2001/05/31 17:48:54 $
Pearu Peterson
"""
__version__ = "$Id: DataSetAttr.py,v 1.2 2001/05/31 17:48:54 pearu Exp $"
from . import common
class DataSetAttr(common.Common):
    """Abstract base class for VTK data attributes (Scalars, Vectors, ...)."""
    # per-subclass counters used to generate unique default names
    counters = {}
    default_value = 0
    def _get_default_name(self):
        """Return an auto-generated name such as 'Scalars0', 'Scalars1', ..."""
        cls_name = self.__class__.__name__
        self.counters[cls_name] = self.counters.get(cls_name, -1) + 1
        return cls_name + str(self.counters[cls_name])
    def _get_name(self, name):
        """Validate *name*, or generate a default when it is None."""
        if name is None:
            name = self._get_default_name()
            self.warning('Using name=%s' % (repr(name)))
            return name
        if common.is_string(name):
            # VTK names may not contain spaces
            name = name.strip().replace(' ', '_')
            if name:
                return name
        raise ValueError('name=%s must be non-empty string' % (repr(name)))
    def _get_lookup_table(self, name):
        """Validate a lookup-table name, defaulting to 'default'."""
        if name is None:
            name = 'default'
            self.warning('Using lookup_table=%s' % (repr(name)))
            return name
        if common.is_string(name):
            name = name.strip().replace(' ', '_')
            if name:
                return name
        raise ValueError('lookup_table=%s must be nonempty string' % (repr(name)))
if __name__ == "__main__":
    # No standalone demo for this abstract base class.
    pass
| Python |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import map
"""
Normals
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001/05/31 17:48:54 $
Pearu Peterson
"""
from . import DataSetAttr
import math
from . import common
class Normals(DataSetAttr.DataSetAttr):
    """Holds VTK Normals; input vectors are normalized to unit length.

    Usage:
      Normals(<sequence of 3-tuples>, name = <string>)
    Attributes:
      normals
      name
    Public methods:
      get_size()
      to_string(format = 'ascii')
    """
    def __init__(self, normals, name=None):
        self.name = self._get_name(name)
        fill = (self.default_value,) * 3
        normalized = []
        for vec in self.get_3_tuple_list(normals, fill):
            length = math.sqrt(vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2])
            if length == 0:
                # a zero vector cannot be normalized; keep it and warn
                self.warning('cannot normalize zero vector')
                normalized.append(vec)
            elif length == 1:
                # already a unit vector; keep as-is
                normalized.append(vec)
            else:
                normalized.append(tuple([c/length for c in vec]))
        self.normals = normalized
    def to_string(self, format='ascii'):
        t = self.get_datatype(self.normals)
        header = 'NORMALS %s %s' % (self.name, t)
        return '\n'.join([header, self.seq_to_string(self.normals, format, t)])
    def get_size(self):
        return len(self.normals)
def normals_fromfile(f,n,sl):
    """Read a NORMALS section body (3*n values) from open file *f*.

    *sl* is the split section header remainder: [data name, datatype].
    Returns a Normals instance (which re-normalizes the vectors).
    """
    dataname = sl[0]
    datatype = sl[1].lower()
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    normals = []
    while len(normals) < 3*n:
        # split() (not split(' ')) tolerates runs of whitespace and tabs
        normals += list(map(eval,common._getline(f).split()))
    assert len(normals) == 3*n
    return Normals(normals,dataname)
if __name__ == "__main__":
    # Quick smoke test when run as a script.
    print(Normals([[3,3],[4,3.],240,3,2]).to_string())
| Python |
#!/usr/bin/env python
from __future__ import absolute_import
from future.builtins import range
from functools import reduce
"""
DataSet
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001/05/31 17:48:54 $
Pearu Peterson
"""
__version__ = "$Id: DataSet.py,v 1.3 2001/05/31 17:48:54 pearu Exp $"
from . import common
class DataSet(common.Common):
    """Abstract class.
    It describes the geometry and topology of VTK dataset.

    The `_check_*` helpers follow the module's error protocol: they log
    through self.error() and return 1 on failure, 0 on success.
    """
    def get_size(self):
        # Point-based datasets report their point count; purely
        # dimensioned ones (e.g. StructuredPoints) the dimension product.
        if hasattr(self,'points'):
            return len(self.points)
        return reduce(lambda x,y:x*y,self.dimensions,1)
    def get_cell_size(self):
        # Default: no cells; subclasses override where meaningful.
        return 0
    def _check_dimensions(self):
        # Validate self.dimensions as three positive ints and, when
        # points exist, that their count equals the dimension product.
        for i in range(3):
            d = self.dimensions[i]
            if not common.is_int(d):
                self.error('dimensions[%s] must be int but got %s'%(i,type(d)))
                return 1
            if d<=0:
                self.error('dimensions[%s] must be positive int but got %s'%(i,d))
                return 1
        if hasattr(self,'points'):
            d = reduce(lambda x,y:x*y,self.dimensions,1)
            if len(self.points)!=d:
                self.error('mismatch of points length (%s) and dimensions size (%s)'%(len(self.points),d))
                return 1
        return 0
    def _check_origin(self):
        # Validate self.origin as three numbers.
        for i in range(3):
            d = self.origin[i]
            if not common.is_number(d):
                self.error('origin[%s] must be number but got %s'%(i,type(d)))
                return 1
        return 0
    def _check_spacing(self):
        # Validate self.spacing as three positive numbers.
        for i in range(3):
            d = self.spacing[i]
            if not common.is_number(d):
                self.error('spacing[%s] must be number but got %s'%(i,type(d)))
                return 1
            if d<=0:
                self.error('spacing[%s] must be positive number but got %s'%(i,d))
                return 1
        return 0
    def _check_int_seq(self,obj,mx_int):
        # Return 1 when obj (possibly nested sequences) contains anything
        # that is not an int < mx_int; 0 otherwise.
        if common.is_sequence(obj):
            for o in obj:
                if self._check_int_seq(o,mx_int):
                    return 1
        elif not common.is_int(obj) or obj>=mx_int:
            return 1
        return 0
    # Factory helpers: evaluate func at every point of the dataset and
    # wrap the results in the corresponding DataSetAttr subclass.
    def Scalars(self,func,name = None,lookup_table = None):
        from . import Scalars
        return Scalars.Scalars([func(*p) for p in self.get_points()],name,lookup_table)
    def ColorScalars(self,func,name = None):
        from . import ColorScalars
        return ColorScalars.ColorScalars([func(*p) for p in self.get_points()],name)
    def LookupTable(self,func,name = None):
        from . import LookupTable
        return LookupTable.LookupTable([func(*p) for p in self.get_points()],name)
    def Vectors(self,func,name = None):
        from . import Vectors
        return Vectors.Vectors([func(*p) for p in self.get_points()],name)
    def Normals(self,func,name = None):
        from . import Normals
        return Normals.Normals([func(*p) for p in self.get_points()],name)
    def TextureCoordinates(self,func,name = None):
        from . import TextureCoordinates
        return TextureCoordinates.TextureCoordinates([func(*p) for p in self.get_points()],name)
    def Tensors(self,func,name = None):
        from . import Tensors
        return Tensors.Tensors([func(*p) for p in self.get_points()],name)
    def Field(self,func,name = None, **kws):
        from . import Field
        return Field.Field([func(*p) for p in self.get_points()],name, **kws)
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import map
"""
StructuredGrid
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.4 $
$Date: 2007-02-22 08:43:39 $
Pearu Peterson
"""
from . import DataSet
from . import common
class StructuredGrid(DataSet.DataSet):
    """Represents a VTK STRUCTURED_GRID dataset.

    Usage:
      StructuredGrid(<3-tuple of dimensions>, <sequence of 3-tuples of points>)
    Attributes:
      dimensions
      points
    Public methods:
      get_points()
      get_size()
      get_cell_size()
      to_string(format = 'ascii')
      <DataSetAttr class>(...)
    """
    def __init__(self, dimensions, points):
        self.dimensions = self.get_3_tuple(dimensions, (1, 1, 1))
        self.points = self.get_3_tuple_list(points, (0, 0, 0))
        if self._check_dimensions():
            raise ValueError('dimensions must be 3-tuple of ints >=1 and matching with the size of points')
    def to_string(self, format='ascii'):
        t = self.get_datatype(self.points)
        lines = ['DATASET STRUCTURED_GRID']
        lines.append('DIMENSIONS %s %s %s' % self.dimensions)
        lines.append('POINTS %s %s' % (self.get_size(), t))
        lines.append(self.seq_to_string(self.points, format, t))
        return '\n'.join(lines)
    def get_points(self):
        return self.points
    def get_cell_size(self):
        # mirrors the point count (original behavior)
        return len(self.points)
def structured_grid_fromfile(f,self):
    """Read a STRUCTURED_GRID dataset body from open file *f*.

    *self* is the calling VtkData instance (used only for message()).
    Returns (StructuredGrid instance, next line of the file).
    """
    # split() (not split(' ')) tolerates runs of whitespace and tabs
    l = common._getline(f).split()
    assert l[0].strip().lower() == 'dimensions'
    dims = list(map(eval,l[1:]))
    assert len(dims)==3
    l = common._getline(f)
    k,n,datatype = [s.strip().lower() for s in l.split()]
    if k!='points':
        raise ValueError('expected points but got %s'%(repr(k)))
    n = eval(n)  # eval of file tokens follows the module-wide parsing style
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    points = []
    self.message('\tgetting %s points'%n)
    while len(points) < 3*n:
        l = common._getline(f)
        points += list(map(eval,l.split()))
    assert len(points)==3*n
    return StructuredGrid(dims,points),common._getline(f)
if __name__ == "__main__":
    # Quick smoke test when run as a script.
    print(StructuredGrid((1,2),[1,2,2,4,4,5.4]))
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import map
"""
Scalars
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001/05/31 17:48:54 $
Pearu Peterson
"""
__version__ = "$Id: Scalars.py,v 1.3 2001/05/31 17:48:54 pearu Exp $"
from . import DataSetAttr
from . import common
class Scalars(DataSetAttr.DataSetAttr):
    """Holds VTK scalars.

    Usage:
      Scalars(<sequence>, name = <string>, lookup_table = 'default')
    Attributes:
      scalars
      name
      lookup_table
    Public methods:
      get_size()
      to_string(format = 'ascii')
    """
    def __init__(self, scalars, name=None, lookup_table=None):
        self.name = self._get_name(name)
        self.lookup_table = self._get_lookup_table(lookup_table)
        self.scalars = self.get_seq(scalars, [])
    def to_string(self, format='ascii'):
        t = self.get_datatype(self.scalars)
        # always written with a single component per scalar
        lines = ['SCALARS %s %s %s' % (self.name, t, 1)]
        lines.append('LOOKUP_TABLE %s' % (self.lookup_table))
        lines.append(self.seq_to_string(self.scalars, format, t))
        return '\n'.join(lines)
    def get_size(self):
        return len(self.scalars)
def scalars_fromfile(f,n,sl):
    """Read a SCALARS section body (n values) from open file *f*.

    *sl* is the split section header remainder:
    [data name, datatype [, number of components]].
    Returns a Scalars instance.
    """
    dataname = sl[0]
    datatype = sl[1].lower()
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    if len(sl)>2:
        numcomp = eval(sl[2])
    else:
        numcomp = 1
    # NOTE(review): numcomp is parsed but not used below, so files with
    # more than one component per scalar are misread -- original behavior.
    l = common._getline(f)
    # split() (not split(' ')) tolerates runs of whitespace and tabs
    l = l.split()
    assert len(l)==2 and l[0].lower() == 'lookup_table'
    tablename = l[1]
    scalars = []
    while len(scalars) < n:
        scalars += list(map(eval,common._getline(f).split()))
    assert len(scalars)==n
    return Scalars(scalars,dataname,tablename)
if __name__ == "__main__":
    # Quick smoke test when run as a script.
    print(Scalars([3,4,240]).to_string('binary'))
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import range
from future.builtins import open
"""
PyVTK provides tools for manipulating VTK files in Python.
VtkData - create VTK files from Python / read VTK files to Python
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.12 $
$Date: 2010-03-03 13:41:41 $
Pearu Peterson
"""
__author__ = "Pearu Peterson <pearu@cens.ioc.ee>"
__license__ = "LGPL (see http://www.fsf.org)"
from .__version__ import __version__
__all__ = ['StructuredPoints','StructuredGrid','UnstructuredGrid',
'RectilinearGrid','PolyData',
'Scalars','ColorScalars','LookupTable','Vectors','Normals',
'TextureCoordinates','Tensors','Field',
'PointData','CellData',
'VtkData']
import types
import os
from . import common
from .StructuredPoints import StructuredPoints, structured_points_fromfile
from .StructuredGrid import StructuredGrid, structured_grid_fromfile
from .UnstructuredGrid import UnstructuredGrid, unstructured_grid_fromfile
from .RectilinearGrid import RectilinearGrid, rectilinear_grid_fromfile
from .PolyData import PolyData, polydata_fromfile
from .Scalars import Scalars,scalars_fromfile
from .ColorScalars import ColorScalars, color_scalars_fromfile
from .LookupTable import LookupTable, lookup_table_fromfile
from .Vectors import Vectors, vectors_fromfile
from .Normals import Normals, normals_fromfile
from .TextureCoordinates import TextureCoordinates, texture_coordinates_fromfile
from .Tensors import Tensors, tensors_fromfile
from .Field import Field, field_fromfile
from .Data import PointData,CellData
class VtkData(common.Common):
    """
    VtkData
    =======
    Represents VTK file that has four relevant parts:
      header    - string up to length 255
      format    - string: ascii | binary
      DataSet   - StructuredPoints | StructuredGrid | UnstructuredGrid
                  | RectilinearGrid | PolyData
      Data      - PointData | CellData
    Usage:
    ------
      v = VtkData(<DataSet instance> [,<header string>,<Data instances>,..])
      v = VtkData(<filename>, only_structure = 0) - read VTK data from file.
      v.tofile(filename, format = 'ascii') - save VTK data to file.
    Attributes:
      header
      structure
      point_data
      cell_data
    Public methods:
      to_string(format = 'ascii')
      tofile(filename, format = 'ascii')
    DataSet
    =======
      StructuredPoints(<3-sequence of dimensions>
                       [,<3-sequence of origin> [, <3-sequence of spacing>]])
      StructuredGrid(<3-sequence of dimensions>,
                     <sequence of 3-sequences of points>)
      UnstructuredGrid(<sequence of 3-sequences of points>
                       [,<cell> = <sequence of (sequences of) integers>])
        cell - vertex | poly_vertex | line | poly_line | triangle
               | triangle_strip | polygon | pixel | quad | tetra
               | voxel | hexahedron | wedge | pyramid
      RectilinearGrid([x = <sequence of x-coordinates>],
                      [y = <sequence of y-coordinates>],
                      [z = <sequence of z-coordinates>])
      PolyData(<sequence of 3-sequences of points>,
               [vertices = <sequence of (sequences of) integers>],
               [lines = <sequence of (sequences of) integers>],
               [polygons = <sequence of (sequences of) integers>],
               [triangle_strips = <sequence of (sequences of) integers>])
    Data
    ====
      PointData | CellData ([<DataSetAttr instances>]) - construct Data instance
    DataSetAttr
    ===========
      DataSetAttr - Scalars | ColorScalars | LookupTable | Vectors
                    | Normals | TextureCoordinates | Tensors | Field
      Scalars(<sequence of scalars> [,name[, lookup_table]])
      ColorScalars(<sequence of scalar sequences> [,name])
      LookupTable(<sequence of 4-sequences> [,name])
      Vectors(<sequence of 3-sequences> [,name])
      Normals(<sequence of 3-sequences> [,name])
      TextureCoordinates(<sequence of (1,2, or 3)-sequences> [,name])
      Tensors(<sequence of (3x3)-sequences> [,name])
      Field([name,] [arrayname_1 = sequence of n_1-sequences, ...
                     arrayname_m = sequence of n_m-sequences,])
        where len(array_1) == .. == len(array_m) must hold.
    """
    header = None
    point_data = None
    cell_data = None
    def __init__(self,*args,**kws):
        assert args,'expected at least one argument'
        # BUGFIX: the py3 port tested `type(args[0]) is bytes`, so the
        # documented VtkData(<filename>) call with a str filename never
        # reached the file-reading branch; use the module's own string
        # predicate, as the rest of this class does.
        if common.is_string(args[0]):
            if 'only_structure' in kws and kws['only_structure']:
                self.fromfile(args[0],1)
            else:
                self.fromfile(args[0])
            return
        else:
            structure = args[0]
            args = list(args)[1:]
        if not common.is_dataset(structure):
            raise TypeError('argument structure must be StructuredPoints|StructuredGrid|UnstructuredGrid|RectilinearGrid|PolyData but got %s'%(type(structure)))
        self.structure = structure
        for a in args:
            if common.is_string(a):
                # a plain string argument is the header; VTK caps it at 255 chars
                if len(a)>255:
                    self.skipping('stripping header string to a length =255')
                self.header = a[:255]
            elif common.is_pointdata(a):
                self.point_data = a
            elif common.is_celldata(a):
                self.cell_data = a
            else:
                self.skipping('unexpected argument %s'%(type(a)))
        if self.header is None:
            self.header = 'Really cool data'
            self.warning('Using header=%s'%(repr(self.header)))
        if self.point_data is None and self.cell_data is None:
            self.warning('No data defined')
        # point data must match the number of points in the structure ...
        if self.point_data is not None:
            s = self.structure.get_size()
            s1 = self.point_data.get_size()
            if s1 != s:
                raise ValueError('DataSet (size=%s) and PointData (size=%s) have different sizes'%(s,s1))
        else:
            self.point_data = PointData()
        # ... and cell data the number of cells.
        if self.cell_data is not None:
            s = self.structure.get_cell_size()
            s1 = self.cell_data.get_size()
            if s1 != s:
                raise ValueError('DataSet (cell_size=%s) and CellData (size=%s) have different sizes'%(s,s1))
        else:
            self.cell_data = CellData()
    def to_string(self, format = 'ascii'):
        """Return the complete VTK file content as a single string."""
        ret = ['# vtk DataFile Version 2.0',
               self.header,
               format.upper(),
               self.structure.to_string(format)
               ]
        if self.cell_data.data:
            ret.append(self.cell_data.to_string(format))
        if self.point_data.data:
            ret.append(self.point_data.to_string(format))
        return '\n'.join(ret)
    def tofile(self, filename, format = 'ascii'):
        """Save VTK data to file; a '.vtk' suffix is appended when missing.
        """
        if not common.is_string(filename):
            raise TypeError('argument filename must be string but got %s'%(type(filename)))
        if format not in ['ascii','binary']:
            raise TypeError('argument format must be ascii | binary')
        filename = filename.strip()
        if not filename:
            raise ValueError('filename must be non-empty string')
        if filename[-4:]!='.vtk':
            filename += '.vtk'
        # `with` guarantees the handle is closed even if to_string() raises
        with open(filename,'wb') as f:
            f.write(bytes(self.to_string(format),'UTF-8'))
    def fromfile(self,filename, only_structure = 0):
        """Read VTK data from an ascii .vtk file into this instance."""
        filename = filename.strip()
        if filename[-4:]!='.vtk':
            filename += '.vtk'
        # BUGFIX: the file used to be opened in binary mode while the
        # parser below works on str (e.g. l.strip().replace(' ','')),
        # which raises TypeError on Python 3; open in text mode, and use
        # `with` so the handle is closed even on parse errors.
        with open(filename,'r') as f:
            l = f.readline()
            fileversion = l.strip().replace(' ','').lower()
            if not fileversion == '#vtkdatafileversion2.0':
                print('File %s is not in VTK 2.0 format, got %s' % (filename, fileversion), end=' ')
                print(' but continuing anyway..')
            self.header = f.readline().rstrip()
            format = f.readline().strip().lower()
            if format not in ['ascii','binary']:
                raise ValueError('Expected ascii|binary but got %s'%(repr(format)))
            if format == 'binary':
                raise NotImplementedError('reading vtk binary format')
            l = common._getline(f).lower().split(' ')
            if l[0].strip() != 'dataset':
                raise ValueError('expected dataset but got %s'%(l[0]))
            try:
                # dispatch on the dataset kind, e.g. 'polydata' -> polydata_fromfile
                ff = eval(l[1]+'_fromfile')
            except NameError:
                raise NotImplementedError('%s_fromfile'%(l[1]))
            self.structure,l = ff(f,self)
            # at most one POINT_DATA and one CELL_DATA section may follow
            for i in range(2):
                if only_structure: break
                if not l: break
                l = [s.strip() for s in l.lower().split(' ')]
                assert len(l)==2 and l[0] in ['cell_data','point_data'], l[0]
                data = l[0]
                n = eval(l[1])
                lst = []
                while 1:
                    l = common._getline(f)
                    if not l: break
                    sl = [s.strip() for s in l.split()]
                    k = sl[0].lower()
                    if k not in ['scalars','color_scalars','lookup_table','vectors',
                                 'normals','texture_coordinates','tensors','field']:
                        break
                    try:
                        # dispatch on the attribute kind, e.g. 'scalars' -> scalars_fromfile
                        ff = eval(k+'_fromfile')
                    except NameError:
                        raise NotImplementedError('%s_fromfile'%(k))
                    lst.append(ff(f,n,sl[1:]))
                if data == 'point_data':
                    self.point_data = PointData(*lst)
                if data == 'cell_data':
                    self.cell_data = CellData(*lst)
            if self.point_data is None:
                self.point_data = PointData()
            if self.cell_data is None:
                self.cell_data = CellData()
if __name__ == "__main__":
    # Quick smoke test when run as a script: write a minimal dataset.
    vtk = VtkData(StructuredPoints((3,1,1)),
                  'This is title',
                  PointData(Scalars([3,4,5]))
                  )
    vtk.tofile('test')
| Python |
# Version of the pyvtk package.
__version__ = "0.4.85"
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import range
from future.builtins import map
"""
PolyData
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.6 $
$Date: 2002/11/19 20:40:11 $
Pearu Peterson
"""
from . import DataSet
from . import common
class PolyData(DataSet.DataSet):
    """Represents a VTK POLYDATA dataset.

    Usage:
      PolyData(<sequence of 3-tuples of points>,
               vertices = <sequence of sequences>
               lines = <sequence of sequences>,
               polygons = <sequence of sequences>
               triangle_strips = <sequence of sequences>,
               )
    Attributes:
      points
      vertices
      lines
      polygons
      triangle_strips
    Public methods:
      get_size()
      get_cell_size()
      to_string(format = 'ascii')
      get_points()
      <DataSetAttr class>(...)
    """
    def __init__(self, points,
                 vertices=[], lines=[], polygons=[], triangle_strips=[]):
        self.points = self.get_3_tuple_list(points, (0, 0, 0))
        self.vertices = self.get_seq_seq(vertices, [])
        self.lines = self.get_seq_seq(lines, [])
        self.polygons = self.get_seq_seq(polygons, [])
        self.triangle_strips = self.get_seq_seq(triangle_strips, [])
        npoints = len(self.points)
        # every cell index must refer to an existing point
        for kind in ['vertices', 'lines', 'polygons', 'triangle_strips']:
            if self._check_int_seq(getattr(self, kind), npoints):
                raise ValueError('%s must be (seq of seq|seq) integers less than %s' % (kind, npoints))
    def to_string(self, format='ascii'):
        t = self.get_datatype(self.points)
        out = ['DATASET POLYDATA',
               'POINTS %s %s' % (self.get_size(), t),
               self.seq_to_string(self.points, format, t)]
        for kind in ['vertices', 'lines', 'polygons', 'triangle_strips']:
            cells = getattr(self, kind)
            if cells == [] or cells[0] == []:
                continue
            # each cell is written as: <count> <index> <index> ...
            total = self._get_nof_objs(cells) + len(cells)
            out.append('%s %s %s' % (kind.upper(), len(cells), total))
            out.append(self.seq_to_string([[len(c)] + list(c) for c in cells], format, 'int'))
        return '\n'.join(out)
    def get_cell_size(self):
        count = 0
        for kind in ['vertices', 'lines', 'polygons', 'triangle_strips']:
            cells = getattr(self, kind)
            if cells == [] or cells[0] == []:
                continue
            count += len(cells)
        return count
    def get_points(self):
        return self.points
def polydata_fromfile(f,self):
    """Use VtkData(<filename>).

    Parse a POLYDATA section from an ASCII legacy-VTK file object *f*.
    Returns (PolyData instance, first unconsumed line or None).
    NOTE(review): values are parsed with eval() on file content, as in the
    rest of this package -- do not feed untrusted files.
    """
    points = []
    # BUG FIX: the original stored each section via exec('%s = lst2'%k).
    # Under Python 3 exec() cannot rebind function locals, so the parsed
    # vertices/lines/polygons/triangle_strips were silently discarded.
    # Collect them in a dict instead.
    data = {'vertices': [], 'lines': [], 'polygons': [], 'triangle_strips': []}
    l = common._getline(f)
    k,n,datatype = [s.strip().lower() for s in l.split(' ')]
    if k!='points':
        raise ValueError('expected points but got %s'%(repr(k)))
    n = eval(n)
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    self.message('\tgetting %s points'%n)
    while len(points) < 3*n:
        l = common._getline(f)
        points += list(map(eval,l.split(' ')))
    assert len(points)==3*n
    while 1:
        l = common._getline(f)
        if l is None:
            break
        sl = l.split(' ')
        k = sl[0].strip().lower()
        if k not in data:
            break
        assert len(sl)==3
        n,size = list(map(eval,[sl[1],sl[2]]))
        lst = []
        while len(lst) < size:
            l = common._getline(f)
            lst += list(map(eval,l.split(' ')))
        assert len(lst)==size
        # Split the flat [count, i0, i1, ..., count, j0, ...] stream into
        # one index list per cell.
        cells = []
        j = 0
        for i in range(n):
            cells.append(lst[j+1:j+lst[j]+1])
            j += lst[j]+1
        data[k] = cells
    return PolyData(points,data['vertices'],data['lines'],
                    data['polygons'],data['triangle_strips']),l
# Smoke test: exercised only when this module is run directly.
if __name__ == "__main__":
    print(PolyData([[1,2],[2,4],4,5.4],[[1],[0]],[],[1,2,3]))
| Python |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import round
from future.builtins import int
from future.builtins import range
from future.builtins import map
from functools import reduce
"""
Common functions/methods.
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.14 $
Pearu Peterson
"""
import types
import sys
import struct
def is_sequence(obj):
    """Return 1 when *obj* supports len() (i.e. is a sequence), else 0."""
    try:
        len(obj)
    except TypeError:
        return 0
    return 1
def is_sequence2(obj):
    """Return truthy when *obj* is a non-empty sequence whose first item is itself a sequence."""
    if not is_sequence(obj):
        return 0
    return len(obj) and is_sequence(obj[0])
def is_sequence3(obj):
    """Return truthy when *obj* is a non-empty sequence of sequences of sequences."""
    if not is_sequence(obj):
        return 0
    return len(obj) and is_sequence2(obj[0])
def is_number(obj):
    """Check whether *obj* is a number (int or float)."""
    return isinstance(obj, int) or isinstance(obj, float)
def is_int(obj):
    """Check if obj is an integer."""
    # FIXME: This fails when obj is (e.g.) numpy.int64
    # A workaround is to cast every integer-like foo with
    # int(foo) before presenting to the vtk writing code.
    # There might also be some Py3 int/long unification
    # issues, but the import from future.builtins at the top
    # of the file *should* take care of that (in principle).
    return isinstance(obj, int)
def is_float(obj):
    """Check whether *obj* is a float."""
    if isinstance(obj, float):
        return True
    return False
def is_string(obj):
    """Check whether *obj* is a string."""
    if isinstance(obj, str):
        return True
    return False
def is_int255(obj):
    """Recursively check that every scalar in *obj* lies in [0, 256)."""
    if not is_sequence(obj):
        return 0 <= obj < 256
    for item in obj:
        if not is_int255(item):
            return 0
    return 1
def is_float01(obj):
    """Recursively check that every scalar in *obj* lies in [0, 1]."""
    if not is_sequence(obj):
        return 0 <= obj <= 1
    for item in obj:
        if not is_float01(item):
            return 0
    return 1
def is_datasetattr(obj):
    """Check if obj is a DataSetAttr instance (import is local to avoid a cycle)."""
    from .DataSetAttr import DataSetAttr
    # return type(obj) is types.InstanceType and isinstance(obj,DataSetAttr.DataSetAttr) # Python2
    return isinstance(obj,DataSetAttr)
def is_dataset(obj):
    """Check if obj is a DataSet instance (import is local to avoid a cycle)."""
    from .DataSet import DataSet
    # return type(obj) is types.InstanceType and isinstance(obj,DataSet.DataSet) # Python2
    return isinstance(obj,DataSet)
def is_pointdata(obj):
    """Check if obj is a PointData instance (import is local to avoid a cycle)."""
    from .Data import PointData
    # return type(obj) is types.InstanceType and isinstance(obj,Data.PointData) # Python2
    return isinstance(obj,PointData)
def is_celldata(obj):
    """Check if obj is a CellData instance (import is local to avoid a cycle)."""
    from .Data import CellData
    # return type(obj) is types.InstanceType and isinstance(obj,Data.CellData) # Python2
    return isinstance(obj,CellData)
def is_lookuptable(obj):
    """Check if obj is a LookupTable instance (import is local to avoid a cycle)."""
    from .LookupTable import LookupTable
    return isinstance(obj,LookupTable)
def _getline(f):
l = ' '
while l:
l = f.readline()
if l.strip():
return l.strip()
return None
class Common:
    """Abstract class. Defines output, checker, and getter functions.

    Mixin shared by the VTK data classes: status printing, VTK datatype
    detection, and coercion helpers that normalize arbitrary user input
    into sequences / 3-tuples / 3x3-tuples.
    """
    # struct format codes used when serializing binary VTK data.
    # NOTE: the original literal listed 'char' twice ('c' then 'b'); a dict
    # literal keeps the last entry, so 'char' has always mapped to 'b'
    # (signed byte) -- the dead 'c' entry is dropped here (no behavior change).
    struct_fmt_map = {'char':'b',
                      'long':'l','double':'d',
                      'unsigned long':'L',
                      'int':'i','float':'f',
                      'unsigned char':'B',
                      'unsigned short':'H',
                      'short':'h',
                      'unsigned int':'I',
                      }
    default_int = 'int'
    default_float = 'float'
    def _get_trace(self,m):
        """Print message *m* to stderr, prefixed with the caller chain up to __init__."""
        try:
            frame = sys._getframe().f_back
        except AttributeError: # Python 2.0 does not have sys._getframe
            frame = None
        n = ''
        while frame:
            i = frame.f_code.co_name
            n = '%s.%s'%(i,n)
            if i=='__init__':
                break
            frame = frame.f_back
        print('%s.%s:\n\t%s'%(self.__class__.__name__,n[:-1],m), file=sys.stderr)
    def warning(self,m=''):
        self._get_trace(m)
    def skipping(self,m=''):
        self._get_trace(m)
    def error(self,m=''):
        self._get_trace(m)
    def message(self,m=''):
        self._get_trace(m)
    def __str__(self):
        return self.to_string()
    def get_datatype(self,obj):
        """Return the VTK datatype name ('int', 'float', ...) for *obj*.

        Handles numpy/Numeric arrays via their typecode, scalars directly,
        and sequences by recursing until a float is found.
        """
        typecode = None
        if hasattr(obj,'dtype'): # obj is numpy array
            typecode = obj.dtype.char
        elif hasattr(obj,'typecode'): # obj is Numeric array
            typecode = obj.typecode()
        if typecode is not None:
            r = {'b':'char', #'bit'??
                 'B':'unsigned_char',
                 'f':'float',
                 'd':'double',
                 'i':'int',
                 'l':'long',
                 'L':'unsigned_long',
                 '1':'char',
                 's':'short',  # Numeric
                 'h':'short',
                 'w':'unsigned_short',  # Numeric
                 'H':'unsigned_short',
                 'u':'unsigned_int',  # Numeric
                 'I':'unsigned_int',
                 }.get(typecode)
            if r is not None:
                return r
        if is_int(obj): return self.default_int
        if is_float(obj): return self.default_float
        if not is_sequence(obj):
            raise ValueError('expected int|float|non-empty sequence but got %s (typecode=%r)'\
                  %(type(obj), typecode))
        if not len(obj):
            self.warning('no data, no datatype, using int')
            r = 'int'
        for o in obj:
            r = self.get_datatype(o)
            # float wins over int: stop scanning once one is found.
            if r==self.default_float:
                break
        return r
    def get_seq(self,obj,default=None):
        """Return sequence."""
        if is_sequence(obj):
            return obj
        if is_number(obj): return [obj]
        if obj is None and default is not None:
            self.warning('using default value (%s)'%(default))
            return self.get_seq(default)
        raise ValueError('expected sequence|number but got %s'%(type(obj)))
    def get_seq_seq(self,obj,default=None):
        """Return sequence of sequences."""
        if is_sequence2(obj):
            return [self.get_seq(o,default) for o in obj]
        else:
            return [self.get_seq(obj,default)]
    def get_n_seq_seq(self,obj,default):
        """Return a sequence of equal-length sequences, padding short rows with *default*."""
        seq = self.get_seq_seq(obj,default)
        if is_sequence(default):
            n = len(default)
        else:
            # Pad to the longest row; replicate the scalar default.
            n = max(list(map(len,seq)))
            default = [default]*n
        ret = []
        flag = 0
        for v in seq:
            if len(v)!=n:
                ret.append(list(v)+default[len(v):])
                flag = 1
            else:
                ret.append(list(v))
        if flag:
            self.warning('Some items were filled with default value (%s) to obtain size=%s'%(default[0],n))
        return ret
    def get_3_tuple(self,obj,default=None):
        """Return 3-tuple from
        number -> (obj,default[1],default[2])
        0-sequence|None -> default
        1-sequence -> (obj[0],default[1],default[2])
        2-sequence -> (obj[0],obj[1],default[2])
        (3 or more)-sequence -> (obj[0],obj[1],obj[2])
        """
        if not (default is not None \
                and type(default) is tuple \
                and len(default)==3):
            raise ValueError('argument default must be 3-tuple|None but got %s'%(default))
        if is_sequence(obj):
            n = len(obj)
            if n>3:
                self.warning('expected 3-sequence but got %s-%s'%(n,type(obj)))
            if n>=3:
                # NOTE: returns the full tuple even when n>3 (warned above).
                return tuple(obj)
            self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
            if default is not None:
                if n==0:
                    return default
                elif n==1:
                    return (obj[0],default[1],default[2])
                elif n==2:
                    return (obj[0],obj[1],default[2])
        elif is_number(obj) and default is not None:
            self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
            return (obj,default[1],default[2])
        elif obj is None and default is not None:
            self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
            return default
        # BUG FIX: the original message interpolated `n`, which is unbound
        # when obj is not a sequence, turning this branch into a NameError.
        raise ValueError('failed to construct 3-tuple from %s'%(type(obj)))
    def get_3_tuple_list(self,obj,default=None):
        """Return list of 3-tuples from
        sequence of a sequence,
        sequence - it is mapped to sequence of 3-sequences if possible
        number
        """
        if is_sequence2(obj):
            return [self.get_3_tuple(o,default) for o in obj]
        elif is_sequence(obj):
            # Flat sequence: chunk into groups of three.
            return [self.get_3_tuple(obj[i:i+3],default) for i in range(0,len(obj),3)]
        else:
            return [self.get_3_tuple(obj,default)]
    def get_3_3_tuple(self,obj,default=None):
        """Return tuple of 3-tuples
        """
        if is_sequence2(obj):
            ret = []
            for i in range(3):
                if i<len(obj):
                    ret.append(self.get_3_tuple(obj[i],default))
                else:
                    ret.append(self.get_3_tuple(default,default))
            return tuple(ret)
        if is_sequence(obj):
            if len(obj)>9:
                self.warning('ignoring elements obj[i], i>=9')
            r = obj[:9]
            r = [self.get_3_tuple(r[j:j+3],default) for j in range(0,len(r),3)]
            if len(r)<3:
                self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
            while len(r)<3:
                r.append(self.get_3_tuple(default,default))
            return tuple(r)
        self.warning('filling with default value (%s) to obtain size=3'%(default[0]))
        r1 = self.get_3_tuple(obj,default)
        r2 = self.get_3_tuple(default,default)
        r3 = self.get_3_tuple(default,default)
        return (r1,r2,r3)
    def get_3_3_tuple_list(self,obj,default=None):
        """Return list of 3x3-tuples.
        """
        if is_sequence3(obj):
            return [self.get_3_3_tuple(o,default) for o in obj]
        return [self.get_3_3_tuple(obj,default)]
    def _get_nof_objs(self,seq):
        """Return the total number of scalar items in a (possibly nested) sequence."""
        if is_sequence2(seq):
            return reduce(lambda x,y:x+y,list(map(self._get_nof_objs,seq)),0)
        #return reduce(lambda x,y:x+y,[self._get_nof_objs(s) for s in seq],0)
        return len(seq)
    def seq_to_string(self,seq,format,datatype):
        """Serialize *seq* for VTK output: space/newline-joined text ('ascii')
        or big-endian packed values ('binary')."""
        assert is_sequence(seq),'expected sequence but got %s'%(type(seq))
        if format == 'ascii':
            if is_sequence2(seq):
                sep = '\n'
                if is_sequence3(seq):
                    sep = '\n\n'
                return sep.join([self.seq_to_string(v,format,datatype) for v in seq])
            else:
                return ' '.join(map(str,seq))
        elif format == 'binary':
            if is_sequence2(seq):
                # NOTE(review): struct.pack returns bytes on Python 3, so
                # joining with a str '' would raise TypeError here -- confirm
                # the binary path is exercised/working under Python 3.
                r = ''.join([self.seq_to_string(v,format,datatype) for v in seq])
                return r
            else:
                try:
                    fmt = self.struct_fmt_map[datatype]
                except KeyError:
                    try:
                        fmt = self.struct_fmt_map[datatype.replace('_',' ')]
                    except KeyError:
                        fmt = None
                if fmt:
                    # '!' = network (big-endian) byte order, as VTK requires.
                    r = struct.pack('!'+fmt*len(seq),*seq)
                    return r
        raise NotImplementedError('format=%s, datatype=%s'%(format,datatype))
    def float01_to_int255(self,seq):
        """Map values in [0,1] (possibly nested) to integers in [0,255]."""
        assert is_float01(seq)
        if is_sequence(seq):
            return list(map(self.float01_to_int255,seq))
            #return [self.float01_to_int255(l) for l in seq]
        else:
            return int(seq*255)
    def int255_to_float01(self,seq):
        """Map integers in [0,255] (possibly nested) to floats in [0,1]."""
        assert is_int255(seq)
        if is_sequence(seq):
            return list(map(self.int255_to_float01,seq))
            #return [self.int255_to_float01(l) for l in seq]
        else:
            return round(seq/255.0,6)
#from . import Data
#from . import DataSet
#from . import DataSetAttr
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import range
from future.builtins import map
"""
ColorScalars
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.2 $
$Date: 2001/05/31 17:48:54 $
Pearu Peterson
"""
__version__ = "$Id: ColorScalars.py,v 1.2 2001/05/31 17:48:54 pearu Exp $"
from . import common
from . import DataSetAttr
class ColorScalars(DataSetAttr.DataSetAttr):
    """Holds VTK color scalars.
    Usage:
      ColorScalars(<sequence of n-sequences> ,name = <string>)
    Attributes:
      scalars
      name
    Public methods:
      get_size()
      to_string(format = 'ascii')
    """
    def __init__(self,scalars,name=None):
        self.name = self._get_name(name)
        # Rows are padded to equal length with the class default value.
        self.scalars = self.get_n_seq_seq(scalars,self.default_value)
    def to_string(self,format='ascii'):
        """Render a COLOR_SCALARS section.

        Binary output must be unsigned char in [0,255]; ascii output must be
        float in [0,1] -- convert whichever representation is stored.
        """
        ret = ['COLOR_SCALARS %s %s'%(self.name,len(self.scalars[0]))]
        seq = self.scalars
        if format=='binary':
            if not common.is_int255(seq):
                seq = self.float01_to_int255(seq)
            ret.append(self.seq_to_string(seq,format,'unsigned char'))
        else:
            if not common.is_float01(seq):
                seq = self.int255_to_float01(seq)
            ret.append(self.seq_to_string(seq,format,'float'))
        return '\n'.join(ret)
    def get_size(self):
        # Number of color tuples (rows), not scalars per row.
        return len(self.scalars)
def color_scalars_fromfile(f,n,sl):
    """Parse a COLOR_SCALARS section: *n* rows of *sl[1]* values each.

    *sl* is the already-split header line ['<name>', '<nvals>'].
    NOTE(review): values are parsed with eval() on raw file content -- do not
    feed untrusted files.
    """
    assert len(sl)==2
    dataname = sl[0].strip()
    nvals = eval(sl[1])
    scalars = []
    while len(scalars)<nvals*n:
        scalars += list(map(eval,common._getline(f).split(' ')))
    assert len(scalars)==nvals*n
    # Re-chunk the flat value stream into one nvals-sized row per point.
    scalars2 = []
    for i in range(0,len(scalars),nvals):
        scalars2.append(scalars[i:i+nvals])
    return ColorScalars(scalars2,dataname)
# Smoke test: exercised only when this module is run directly.
if __name__ == "__main__":
    print(ColorScalars([[3,3],[4,3],240,3,2]).to_string())
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import map
"""
RectilinearGrid
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001/05/31 17:48:54 $
Pearu Peterson
"""
from . import DataSet
from . import common
class RectilinearGrid(DataSet.DataSet):
    """
    Usage:
      RectilinearGrid(x = <sequence>, y = <sequence>, z = <sequence>)
    Attributes:
      x
      y
      z
      dimensions
    Public methods:
      get_size()
      get_cell_size()
      to_string(format = 'ascii')
      get_points()
      <DataSetAttr class>(...)
    """
    def __init__(self,x=None,y=None,z=None):
        # A missing axis collapses to the single coordinate [0].
        self.x = self.get_seq(x,[0])
        self.y = self.get_seq(y,[0])
        self.z = self.get_seq(z,[0])
        self.dimensions = (len(self.x),len(self.y),len(self.z))
        if self._check_dimensions():
            raise ValueError('dimensions must be 3-tuple of ints >=1')
    def to_string(self, format='ascii'):
        """Render the dataset as a legacy-VTK RECTILINEAR_GRID section."""
        tx = self.get_datatype(self.x)
        ty = self.get_datatype(self.y)
        tz = self.get_datatype(self.z)
        ret = ['DATASET RECTILINEAR_GRID',
               'DIMENSIONS %s %s %s'%self.dimensions,
               'X_COORDINATES %s %s'%(len(self.x),tx),
               self.seq_to_string(self.x,format,tx),
               'Y_COORDINATES %s %s'%(len(self.y),ty),
               self.seq_to_string(self.y,format,ty),
               'Z_COORDINATES %s %s'%(len(self.z),tz),
               self.seq_to_string(self.z,format,tz)]
        return '\n'.join(ret)
    def get_points(self):
        """Return (and cache) the full point list; x varies fastest, then y, then z."""
        if hasattr(self,'points'):
            return self.points
        arr = [(x,y,z) for z in self.z for y in self.y for x in self.x]
        self.points = arr
        return arr
def rectilinear_grid_fromfile(f,self):
    """Parse a RECTILINEAR_GRID section from an ASCII legacy-VTK file *f*.

    Returns (RectilinearGrid instance, next line read from the file).
    NOTE(review): values are parsed with eval() on file content, as in the
    rest of this package -- do not feed untrusted files.
    """
    l = common._getline(f).split(' ')
    assert l[0].strip().lower() == 'dimensions'
    dims = list(map(eval,l[1:]))
    assert len(dims)==3
    # BUG FIX: the original stored each axis via exec('%s_coords = points'%c),
    # which cannot create function locals under Python 3, so the later
    # references to x_coords/y_coords/z_coords raised NameError. Use a dict.
    coords = {}
    for c in 'xyz':
        l = common._getline(f)
        k,n,datatype = [s.strip().lower() for s in l.split(' ')]
        if k!=c+'_coordinates':
            raise ValueError('expected %s_coordinates but got %s'%(c,repr(k)))
        n = eval(n)
        assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
        points = []
        while len(points) < n:
            points += list(map(eval,common._getline(f).split(' ')))
        assert len(points)==n
        coords[c] = points
    # Axis lengths must agree with the declared DIMENSIONS.
    assert list(map(len,[coords['x'],coords['y'],coords['z']])) == dims
    return RectilinearGrid(coords['x'],coords['y'],coords['z']),common._getline(f)
# Smoke test: exercised only when this module is run directly.
if __name__ == "__main__":
    print(RectilinearGrid([1,2,2,4,4,5.4]))
| Python |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import map
from future.builtins import range
"""
StructuredPoints
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.2 $
$Date: 2001/05/31 17:48:54 $
Pearu Peterson
"""
from . import DataSet
from . import common
class StructuredPoints(DataSet.DataSet):
    """
    Usage:
      StructuredPoints(<3-tuple of dimensions>, origin = <3-tuple>, spacing = <3-tuple>)
    Attributes:
      dimensions
      origin
      spacing
    Public methods:
      get_size()
      get_cell_size()
      to_string(format = 'ascii')
      get_points()
      <DataSetAttr class>(...)
    """
    def __init__(self,dimensions,origin=(0,0,0),spacing=(1,1,1)):
        self.dimensions = self.get_3_tuple(dimensions,(1,1,1))
        if self._check_dimensions():
            raise ValueError('dimensions must be 3-tuple of ints >=1')
        # NOTE(review): a short origin is padded with 1s, although the
        # argument default is (0,0,0) -- looks inconsistent; confirm intent.
        self.origin = self.get_3_tuple(origin,(1,1,1))
        if self._check_origin():
            raise ValueError('origin must be 3-tuple of numbers')
        self.spacing = self.get_3_tuple(spacing,(1,1,1))
        if self._check_spacing():
            raise ValueError('spacing must be 3-tuple of positive numbers')
    def to_string(self,format = 'ascii'):
        """Render the dataset as a legacy-VTK STRUCTURED_POINTS section."""
        ret = ['DATASET STRUCTURED_POINTS',
               'DIMENSIONS %s %s %s'%self.dimensions,
               'ORIGIN %s %s %s'%self.origin,
               'SPACING %s %s %s'%self.spacing]
        return '\n'.join(ret)
    def get_points(self):
        """Return (and cache) all grid points; x varies fastest, then y, then z."""
        if hasattr(self,'points'):
            return self.points
        arr = []
        for k in range(self.dimensions[2]):
            z = self.origin[2] + k * self.spacing[2]
            for j in range(self.dimensions[1]):
                y = self.origin[1] + j * self.spacing[1]
                for i in range(self.dimensions[0]):
                    x = self.origin[0] + i * self.spacing[0]
                    arr.append((x,y,z))
        self.points = arr
        return arr
def structured_points_fromfile(f,self):
    """Parse a STRUCTURED_POINTS section from an ASCII legacy-VTK file *f*.

    Returns (StructuredPoints instance, next line read from the file).
    NOTE(review): header numbers are parsed with eval() on raw file
    content -- do not feed untrusted files.
    """
    l = common._getline(f).split(' ')
    assert l[0].strip().lower() == 'dimensions'
    dims = list(map(eval,l[1:]))
    assert len(dims)==3
    l = common._getline(f).split(' ')
    assert l[0].strip().lower() == 'origin'
    origin = list(map(eval,l[1:]))
    assert len(origin)==3
    l = common._getline(f).split(' ')
    assert l[0].strip().lower() == 'spacing'
    spacing = list(map(eval,l[1:]))
    assert len(spacing)==3
    return StructuredPoints(dims,origin,spacing),common._getline(f)
# Smoke test: exercised only when this module is run directly.
if __name__ == "__main__":
    print(StructuredPoints((2,3,4)))
    print(StructuredPoints((2,3)))
    print(StructuredPoints(5))
    print(StructuredPoints([2,3,5,6]).get_size())
| Python |
'''
Created on Jan 22, 2012
@author: yang
'''
import unittest
from google.appengine.ext import testbed
import model.setting as setting
class Test(unittest.TestCase):
    """Unit test for model.setting.Setting using the App Engine testbed
    datastore stub (no real datastore is touched)."""
    def setUp(self):
        #create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        self.testbed.activate();
        # Enable an in-memory datastore stub for the duration of the test.
        self.testbed.init_datastore_v3_stub()
        self.setting = setting.Setting(title = "yang",
                                       tags = ["123","234"],
                                       categories =["1","2"],
                                       key_name = "setting"
                                       )
        self.setting.put()
    def tearDown(self):
        # Deactivating restores the original (non-stubbed) service bindings.
        self.testbed.deactivate()
    def testSetting(self):
        # NOTE(review): get_by_key_name is a classmethod called via the
        # instance here; it fetches a fresh copy of the stored entity.
        settings = self.setting.get_by_key_name("setting")
        self.assertEqual(1, len(setting.Setting.all().fetch(2)))
        self.assertEqual("yang",settings.title)
        self.assertEqual(["123","234"],settings.tags)
        self.assertEqual(["1","2"],settings.categories)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| Python |
# -*- coding: utf-8 -*-
import os,logging
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext.db import Model as DBModel
from google.appengine.api import memcache
from google.appengine.api import mail
from google.appengine.api import urlfetch
from google.appengine.api import datastore
from datetime import datetime
#from base import *
logging.info('module base reloaded')
rootpath=os.path.dirname(__file__)
def vcache(key="",time=3600):
    """Decorator factory: store the wrapped method's result in memcache
    under *key* for *time* seconds (skipped when g_blog.enable_memcache
    is off).

    NOTE(review): the wrapper only WRITES to memcache and never reads the
    cached value back, so every call still executes the method; presumably
    consumers read the key elsewhere -- confirm, otherwise the cache is dead
    weight here.
    """
    def _decorate(method):
        def _wrapper(*args, **kwargs):
            if not g_blog.enable_memcache:
                return method(*args, **kwargs)
            result=method(*args, **kwargs)
            memcache.set(key,result,time)
            return result
        return _wrapper
    return _decorate
class Theme:
    """Locates a blog theme on disk (either a directory or a zip archive
    under themes/) and lazily resolves template names to paths."""
    def __init__(self, name='default'):
        self.name = name
        # template-name -> path cache used by __getattr__.
        self.mapping_cache = {}
        self.dir = '/themes/%s' % name
        self.viewdir=os.path.join(rootpath, 'view')
        self.server_dir = os.path.join(rootpath, 'themes',self.name)
        # Fall back to a zipped theme when no plain directory exists.
        if os.path.exists(self.server_dir):
            self.isZip=False
        else:
            self.isZip=True
            self.server_dir =self.server_dir+".zip"
            #self.server_dir=os.path.join(self.server_dir,"templates")
        logging.debug('server_dir:%s'%self.server_dir)
    def __getattr__(self, name):
        """Resolve theme.<name> to '<theme>/templates/<name>.html' (cached).

        Only called for attributes not found normally, so mapping_cache
        itself (set in __init__) does not recurse here.
        """
        if self.mapping_cache.has_key(name):
            return self.mapping_cache[name]
        else:
            path ="/".join((self.name,'templates', name + '.html'))
            logging.debug('path:%s'%path)
##            if not os.path.exists(path):
##                path = os.path.join(rootpath, 'themes', 'default', 'templates', name + '.html')
##                if not os.path.exists(path):
##                    path = None
            self.mapping_cache[name]=path
            return path
class ThemeIterator:
    """Python 2 iterator over installed theme names: lists the theme_path
    directory on first next() and strips a trailing '.zip' from entries."""
    def __init__(self, theme_path='themes'):
        self.iterating = False
        self.theme_path = theme_path
        self.list = []
    def __iter__(self):
        return self
    def next(self):
        # Lazily (re)load the directory listing on the first call of a pass;
        # the iterator resets itself when exhausted and can be iterated again.
        if not self.iterating:
            self.iterating = True
            self.list = os.listdir(self.theme_path)
            self.cursor = 0
        if self.cursor >= len(self.list):
            self.iterating = False
            raise StopIteration
        else:
            value = self.list[self.cursor]
            self.cursor += 1
            # Zipped themes are reported by their base name.
            if value.endswith('.zip'):
                value=value[:-4]
            return value
        #return (str(value), unicode(value))
class LangIterator:
    """Python 2 iterator over available locales: scans locale/<code>/ dirs
    containing LC_MESSAGES and reads the 'language' display name file."""
    def __init__(self,path='locale'):
        self.iterating = False
        self.path = path
        self.list = []
        for value in os.listdir(self.path):
            if os.path.isdir(os.path.join(self.path,value)):
                if os.path.exists(os.path.join(self.path,value,'LC_MESSAGES')):
                    # Best effort: fall back to the code itself when the
                    # 'language' file is missing/unreadable.
                    # NOTE(review): bare except also hides unexpected errors;
                    # consider narrowing to IOError.
                    try:
                        lang=open(os.path.join(self.path,value,'language')).readline()
                        self.list.append({'code':value,'lang':lang})
                    except:
                        self.list.append( {'code':value,'lang':value})
    def __iter__(self):
        return self
    def next(self):
        # Resets when exhausted so the instance can be iterated repeatedly.
        if not self.iterating:
            self.iterating = True
            self.cursor = 0
        if self.cursor >= len(self.list):
            self.iterating = False
            raise StopIteration
        else:
            value = self.list[self.cursor]
            self.cursor += 1
            return value
    def getlang(self,language):
        """Return the entry matching *language* (by code or Django locale
        form), defaulting to en_US/English."""
        from django.utils.translation import to_locale
        for item in self.list:
            if item['code']==language or item['code']==to_locale(language):
                return item
        return {'code':'en_US','lang':'English'}
class BaseModel(db.Model):
    """db.Model subclass that tracks a dirty flag and fires per-property
    <name>_onchange(old, new) hooks when an attribute value changes."""
    def __init__(self, parent=None, key_name=None, _app=None, **kwds):
        self.__isdirty = False
        # BUG FIX: the original passed literal None for parent/key_name/_app,
        # silently discarding whatever the caller supplied. Forward the
        # actual arguments.
        DBModel.__init__(self, parent=parent, key_name=key_name, _app=_app, **kwds)
    def __setattr__(self,attrname,value):
        """
        DataStore api stores all prop values say "email" is stored in "_email" so
        we intercept the set attribute, see if it has changed, then check for an
        onchanged method for that property to call
        """
        # Private/underscore attributes bypass the change-detection logic.
        if (attrname.find('_') != 0):
            if hasattr(self,'_' + attrname):
                curval = getattr(self,'_' + attrname)
                if curval != value:
                    self.__isdirty = True
                    if hasattr(self,attrname + '_onchange'):
                        getattr(self,attrname + '_onchange')(curval,value)
        DBModel.__setattr__(self,attrname,value)
class Cache(db.Model):
    """Datastore-backed key/value cache entry."""
    cachekey = db.StringProperty(multiline=False)
    content = db.TextProperty()
class Blog(db.Model):
    """Site-wide Micolog settings entity (owner, theming, comment policy,
    sitemap options, ...) plus convenience accessors for theme, language
    and popular/recent posts."""
    owner = db.UserProperty()
    author=db.StringProperty(default='admin')
    rpcuser=db.StringProperty(default='admin')
    rpcpassword=db.StringProperty(default='')
    description = db.TextProperty()
    baseurl = db.StringProperty(multiline=False,default=None)
    urlpath = db.StringProperty(multiline=False)
    title = db.StringProperty(multiline=False,default='Micolog')
    subtitle = db.StringProperty(multiline=False,default='This is a micro blog.')
    entrycount = db.IntegerProperty(default=0)
    posts_per_page= db.IntegerProperty(default=10)
    feedurl = db.StringProperty(multiline=False,default='/feed')
    blogversion = db.StringProperty(multiline=False,default='0.30')
    theme_name = db.StringProperty(multiline=False,default='default')
    enable_memcache = db.BooleanProperty(default = False)
    link_format=db.StringProperty(multiline=False,default='%(year)s/%(month)s/%(day)s/%(postname)s.html')
    comment_notify_mail=db.BooleanProperty(default=True)
    # comment ordering (0 = oldest first; nonzero = newest first)
    comments_order=db.IntegerProperty(default=0)
    # number of comments shown per page
    comments_per_page=db.IntegerProperty(default=20)
    # comment check type: 0=none 1=arithmetic 2=captcha 3=client-side computed
    comment_check_type=db.IntegerProperty(default=1)
    # avatar style: 0 = default, 1 = identicon
    avatar_style=db.IntegerProperty(default=0)
    blognotice=db.TextProperty(default='')
    domain=db.StringProperty()
    show_excerpt=db.BooleanProperty(default=True)
    version=0.74
    timedelta=db.FloatProperty(default=8.0)# hours offset from UTC
    language=db.StringProperty(default="en-us")
    sitemap_entries=db.IntegerProperty(default=30)
    sitemap_include_category=db.BooleanProperty(default=False)
    sitemap_include_tag=db.BooleanProperty(default=False)
    sitemap_ping=db.BooleanProperty(default=False)
    default_link_format=db.StringProperty(multiline=False,default='?p=%(post_id)s')
    default_theme=Theme("default")
    allow_pingback=db.BooleanProperty(default=False)
    allow_trackback=db.BooleanProperty(default=False)
    theme=None
    langs=None
    application=None
    def __init__(self,
                 parent=None,
                 key_name=None,
                 _app=None,
                 _from_entity=False,
                 **kwds):
        # Plugins are (re)loaded for every Blog instance.
        from micolog_plugin import Plugins
        self.plugins=Plugins(self)
        db.Model.__init__(self,parent,key_name,_app,_from_entity,**kwds)
    def tigger_filter(self,name,content,*arg1,**arg2):
        # Dispatch a plugin filter hook ("tigger" is the project's spelling
        # of "trigger" throughout).
        return self.plugins.tigger_filter(name,content,blog=self,*arg1,**arg2)
    def tigger_action(self,name,*arg1,**arg2):
        return self.plugins.tigger_action(name,blog=self,*arg1,**arg2)
    def tigger_urlmap(self,url,*arg1,**arg2):
        return self.plugins.tigger_urlmap(url,blog=self,*arg1,**arg2)
    def get_ziplist(self):
        return self.plugins.get_ziplist();
    def save(self):
        self.put()
    def initialsetup(self):
        self.title = 'Your Blog Title'
        self.subtitle = 'Your Blog Subtitle'
    def get_theme(self):
        self.theme= Theme(self.theme_name);
        return self.theme
    def get_langs(self):
        self.langs=LangIterator()
        return self.langs
    def cur_language(self):
        return self.get_langs().getlang(self.language)
    def rootpath(self):
        # Returns the module-level rootpath constant (directory of this file).
        return rootpath
    @vcache("blog.hotposts")
    def hotposts(self):
        # Eight most-read published posts.
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-readtimes').fetch(8)
    @vcache("blog.recentposts")
    def recentposts(self):
        # Eight most recent published posts.
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-date').fetch(8)
    @vcache("blog.postscount")
    def postscount(self):
        return Entry.all().filter('entrytype =','post').filter("published =", True).order('-date').count()
class Category(db.Model):
    """Post category; supports nesting via parent_cat and a WordPress-
    compatible integer id (uid) for legacy entities without a datastore id."""
    uid=db.IntegerProperty()
    name=db.StringProperty(multiline=False)
    slug=db.StringProperty(multiline=False)
    parent_cat=db.SelfReferenceProperty()
    @property
    def posts(self):
        """Query of published posts filed under this category."""
        return Entry.all().filter('entrytype =','post').filter("published =", True).filter('categorie_keys =',self)
    @property
    def count(self):
        return self.posts.count()
    def put(self):
        db.Model.put(self)
        g_blog.tigger_action("save_category",self)
    def delete(self):
        # Detach this category from its entries, delete child categories
        # recursively, then remove the entity itself.
        for entry in Entry.all().filter('categorie_keys =',self):
            entry.categorie_keys.remove(self.key())
            entry.put()
        for cat in Category.all().filter('parent_cat =',self):
            cat.delete()
        db.Model.delete(self)
        g_blog.tigger_action("delete_category",self)
    def ID(self):
        """Return a stable integer id: datastore id, stored uid, or a newly
        generated unique random uid (persisted)."""
        try:
            id=self.key().id()
            if id:
                return id
        except:
            pass
        if self.uid :
            return self.uid
        else:
            # Legacy Category entities have no datastore ID; generate a
            # random unique uid for WordPress compatibility.
            from random import randint
            uid=randint(0,99999999)
            cate=Category.all().filter('uid =',uid).get()
            while cate:
                uid=randint(0,99999999)
                cate=Category.all().filter('uid =',uid).get()
            self.uid=uid
            print uid
            self.put()
            return uid
    @classmethod
    def get_from_id(cls,id):
        """Look up by datastore id first, then by the legacy uid field."""
        cate=Category.get_by_id(id)
        if cate:
            return cate
        else:
            cate=Category.all().filter('uid =',id).get()
            return cate
    @property
    def children(self):
        key=self.key()
        return [c for c in Category.all().filter('parent_cat =',self)]
    @classmethod
    def allTops(self):
        # NOTE(review): declared @classmethod but the first parameter is
        # named self; it receives the class object.
        return [c for c in Category.all() if not c.parent_cat]
class Archive(db.Model):
    """Per-month archive bucket ('September 2008') with a post counter."""
    monthyear = db.StringProperty(multiline=False)
    year = db.StringProperty(multiline=False)
    month = db.StringProperty(multiline=False)
    entrycount = db.IntegerProperty(default=0)
    date = db.DateTimeProperty(auto_now_add=True)
class Tag(db.Model):
    """Post tag keyed by the tag text, with a reference count maintained
    through add()/remove()."""
    tag = db.StringProperty(multiline=False)
    tagcount = db.IntegerProperty(default=0)
    @property
    def posts(self):
        # BUG FIX: db.Model.all() accepts no positional filter arguments --
        # the original Entry.all('entrytype =','post') raised a TypeError.
        # Apply the condition with .filter(), matching Category.posts.
        # NOTE(review): filtering 'tags =' with the Tag entity itself looks
        # suspect (Entry.tags is a list of strings) -- confirm intent.
        return Entry.all().filter('entrytype =','post').filter("published =", True).filter('tags =',self)
    @classmethod
    def add(cls,value):
        """Increment (creating on first use) the counter for tag *value*;
        returns the Tag entity, or None for an empty value."""
        if value:
            tag= Tag.get_by_key_name(value)
            if not tag:
                tag=Tag(key_name=value)
                tag.tag=value
            tag.tagcount+=1
            tag.put()
            return tag
        else:
            return None
    @classmethod
    def remove(cls,value):
        """Decrement the counter for tag *value*; delete the entity when it
        drops to zero."""
        if value:
            tag= Tag.get_by_key_name(value)
            if tag:
                if tag.tagcount>1:
                    tag.tagcount-=1
                    tag.put()
                else:
                    tag.delete()
class Link(db.Model):
    """Blogroll/sidebar link with plugin hooks on save and delete."""
    href = db.StringProperty(multiline=False,default='')
    linktype = db.StringProperty(multiline=False,default='blogroll')
    linktext = db.StringProperty(multiline=False,default='')
    linkcomment = db.StringProperty(multiline=False,default='')
    createdate=db.DateTimeProperty(auto_now=True)
    @property
    def get_icon_url(self):
        "get ico url of the wetsite"
        # Take the scheme+host part of href and append /favicon.ico.
        # NOTE(review): assumes an 'http://'-style prefix; https URLs with a
        # path would be mis-split -- confirm inputs.
        ico_path = '/favicon.ico'
        ix = self.href.find('/',len('http://') )
        return (ix>0 and self.href[:ix] or self.href ) + ico_path
    def put(self):
        db.Model.put(self)
        g_blog.tigger_action("save_link",self)
    def delete(self):
        db.Model.delete(self)
        g_blog.tigger_action("delete_link",self)
class Entry(BaseModel):
author = db.UserProperty()
author_name = db.StringProperty()
published = db.BooleanProperty(default=False)
content = db.TextProperty(default='')
readtimes = db.IntegerProperty(default=0)
title = db.StringProperty(multiline=False,default='')
date = db.DateTimeProperty(auto_now_add=True)
mod_date = db.DateTimeProperty(auto_now_add=True)
tags = db.StringListProperty()
categorie_keys=db.ListProperty(db.Key)
slug = db.StringProperty(multiline=False,default='')
link= db.StringProperty(multiline=False,default='')
monthyear = db.StringProperty(multiline=False)
entrytype = db.StringProperty(multiline=False,default='post',choices=[
'post','page'])
entry_parent=db.IntegerProperty(default=0)#When level=0 show on main menu.
menu_order=db.IntegerProperty(default=0)
commentcount = db.IntegerProperty(default=0)
trackbackcount = db.IntegerProperty(default=0)
allow_comment = db.BooleanProperty(default=True) #allow comment
#allow_pingback=db.BooleanProperty(default=False)
allow_trackback=db.BooleanProperty(default=True)
password=db.StringProperty()
#compatible with wordpress
is_wp=db.BooleanProperty(default=False)
post_id= db.IntegerProperty()
excerpt=db.StringProperty(multiline=True)
#external page
is_external_page=db.BooleanProperty(default=False)
target=db.StringProperty(default="_self")
external_page_address=db.StringProperty()
#keep in top
sticky=db.BooleanProperty(default=False)
postname=''
_relatepost=None
@property
def content_excerpt(self):
return self.get_content_excerpt(_('..more').decode('utf8'))
def get_author_user(self):
if not self.author:
self.author=g_blog.owner
return User.all().filter('email =',self.author.email()).get()
def get_content_excerpt(self,more='..more'):
if g_blog.show_excerpt:
if self.excerpt:
return self.excerpt+' <a href="/%s">%s</a>'%(self.link,more)
else:
sc=self.content.split('<!--more-->')
if len(sc)>1:
return sc[0]+u' <a href="/%s">%s</a>'%(self.link,more)
else:
return sc[0]
else:
return self.content
def slug_onchange(self,curval,newval):
if not (curval==newval):
self.setpostname(newval)
def setpostname(self,newval):
#check and fix double slug
if newval:
slugcount=Entry.all()\
.filter('entrytype',self.entrytype)\
.filter('date <',self.date)\
.filter('slug =',newval)\
.filter('published',True)\
.count()
if slugcount>0:
self.postname=newval+str(slugcount)
else:
self.postname=newval
else:
self.postname=""
@property
def fullurl(self):
return g_blog.baseurl+'/'+self.link;
@property
def categories(self):
try:
return db.get(self.categorie_keys)
except:
return []
@property
def post_status(self):
return self.published and 'publish' or 'draft'
def settags(self,values):
if not values:tags=[]
if type(values)==type([]):
tags=values
else:
tags=values.split(',')
if not self.tags:
removelist=[]
addlist=tags
else:
#search different tags
removelist=[n for n in self.tags if n not in tags]
addlist=[n for n in tags if n not in self.tags]
for v in removelist:
Tag.remove(v)
for v in addlist:
Tag.add(v)
self.tags=tags
def get_comments_by_page(self,index,psize):
return self.comments().fetch(psize,offset = (index-1) * psize)
@property
def strtags(self):
return ','.join(self.tags)
@property
def edit_url(self):
return '/admin/%s?key=%s&action=edit'%(self.entrytype,self.key())
def comments(self):
if g_blog.comments_order:
return Comment.all().filter('entry =',self).order('-date')
else:
return Comment.all().filter('entry =',self).order('date')
def purecomments(self):
if g_blog.comments_order:
return Comment.all().filter('entry =',self).filter('ctype =',0).order('-date')
else:
return Comment.all().filter('entry =',self).filter('ctype =',0).order('date')
def trackcomments(self):
if g_blog.comments_order:
return Comment.all().filter('entry =',self).filter('ctype IN',[1,2]).order('-date')
else:
return Comment.all().filter('entry =',self).filter('ctype IN',[1,2]).order('date')
def commentsTops(self):
    """Top-level comments only (those that are not replies)."""
    return [cmt for cmt in self.purecomments() if cmt.parent_key() is None]
def delete_comments(self):
    """Delete every comment attached to this entry and reset the counters."""
    for comment in Comment.all().filter('entry =',self):
        comment.delete()
    self.commentcount = 0
    self.trackbackcount = 0
def update_commentno(self):
    """Renumber this entry's comments sequentially (1-based, by date)."""
    ordered = Comment.all().filter('entry =',self).order('date')
    for seq, comment in enumerate(ordered, 1):
        comment.no = seq
        comment.store()
def update_archive(self,cnt=1):
    """Checks to see if there is a month-year entry for the
    month of current blog, if not creates it and increments count.

    cnt may be negative (-1) to decrement when a post is unpublished
    or deleted.  Only entries of type 'post' touch the archive and the
    blog-wide entry counter.
    """
    my = self.date.strftime('%B %Y') # September-2008
    sy = self.date.strftime('%Y') #2008
    sm = self.date.strftime('%m') #09
    archive = Archive.all().filter('monthyear',my).get()
    if self.entrytype == 'post':
        if not archive:
            # NOTE(review): a freshly created Archive always starts at
            # entrycount=1, ignoring cnt — assumes cnt==1 on this path.
            archive = Archive(monthyear=my,year=sy,month=sm,entrycount=1)
            self.monthyear = my
            archive.put()
        else:
            # ratchet up the count
            archive.entrycount += cnt
            archive.put()
        # keep the blog-wide counter in sync with the archive
        g_blog.entrycount+=cnt
        g_blog.put()
def save(self,is_publish=False):
    """
    Use this instead of self.put(), as we do some other work here:
    fires plugin actions, derives post_id and the permalink, keeps the
    archive counters in sync and evicts cached pages.
    @is_publish: True to publish the entry, False to store it as a draft.
    """
    g_blog.tigger_action("pre_save_post",self,is_publish)
    my = self.date.strftime('%B %Y') # September 2008
    self.monthyear = my
    old_publish=self.published
    self.mod_date=datetime.now()
    if is_publish:
        if not self.is_wp:
            # native (non-WordPress-imported) post: put first so a
            # datastore key exists, then derive post_id from it
            self.put()
            self.post_id=self.key().id()
        #fix for old version
        if not self.postname:
            self.setpostname(self.slug)
        # substitution values available to the permalink format strings
        vals={'year':self.date.year,'month':str(self.date.month).zfill(2),'day':self.date.day,
              'postname':self.postname,'post_id':self.post_id}
        if self.entrytype=='page':
            if self.slug:
                self.link=self.postname
            else:
                #use external page address as link
                if self.is_external_page:
                    self.link=self.external_page_address
                else:
                    self.link=g_blog.default_link_format%vals
        else:
            # posts: honour the configured link format when usable,
            # otherwise fall back to the default format
            if g_blog.link_format and self.postname:
                self.link=g_blog.link_format.strip()%vals
            else:
                self.link=g_blog.default_link_format%vals
    self.published=is_publish
    self.put()
    if is_publish:
        if g_blog.sitemap_ping:
            Sitemap_NotifySearch()
    # adjust the monthly archive counters on publish-state transitions
    if old_publish and not is_publish:
        self.update_archive(-1)
    if not old_publish and is_publish:
        self.update_archive(1)
    self.removecache()
    self.put()
    g_blog.tigger_action("save_post",self,is_publish)
def removecache(self):
    """Evict every memcache entry this post can appear in, then let plugins clean up."""
    for cache_key in ('/', '/' + self.link, '/sitemap', 'blog.postcount'):
        memcache.delete(cache_key)
    g_blog.tigger_action("clean_post_cache",self)
@property
def next(self):
    """The next published post after this one, as a 0/1-element list."""
    published_posts = Entry.all().filter('entrytype =','post').filter("published =", True)
    return published_posts.order('date').filter('date >',self.date).fetch(1)
@property
def prev(self):
    """The previous published post before this one, as a 0/1-element list."""
    published_posts = Entry.all().filter('entrytype =','post').filter("published =", True)
    return published_posts.order('-date').filter('date <',self.date).fetch(1)
@property
def relateposts(self):
    """Up to five other published posts sharing a tag, memoized on the instance."""
    if not self._relatepost:
        if self.tags:
            self._relatepost= Entry.gql("WHERE published=True and tags IN :1 and post_id!=:2 order by post_id desc ",self.tags,self.post_id).fetch(5)
        else:
            self._relatepost= []
    return self._relatepost
@property
def trackbackurl(self):
    """Trackback endpoint for this entry, carrying its key as the code parameter."""
    joiner = "&" if "?" in self.link else "?"
    return g_blog.baseurl+"/"+self.link+joiner+"code="+str(self.key())
def getbylink(self):
    # TODO: unimplemented stub — presumably intended to look an entry up
    # by its link; verify intent before relying on it.
    pass
def delete(self):
    """Delete the entry, its comments and its archive count, firing plugin hooks."""
    g_blog.tigger_action("pre_delete_post",self)
    if self.published:
        # a published post was counted in the monthly archive — undo that
        self.update_archive(-1)
    self.delete_comments()
    db.Model.delete(self)
    g_blog.tigger_action("delete_post",self)
class User(db.Model):
    """A blog user/author account stored in the datastore."""
    user = db.UserProperty(required = False)   # linked Google account, optional
    dispname = db.StringProperty()             # display name shown on posts
    email=db.StringProperty()
    website = db.LinkProperty()
    isadmin=db.BooleanProperty(default=False)  # may administer the blog
    isAuthor=db.BooleanProperty(default=True)  # may write posts
    #rpcpwd=db.StringProperty()
    def __unicode__(self):
        # always the display name; the nickname fallback was disabled
        #if self.dispname:
        return self.dispname
        #else:
        #    return self.user.nickname()
    def __str__(self):
        # Python 2: encode the unicode display name for byte-string contexts
        return self.__unicode__().encode('utf-8')
# Values for Comment.ctype, distinguishing ordinary visitor comments
# from trackbacks and pingbacks.
COMMENT_NORMAL=0
COMMENT_TRACKBACK=1
COMMENT_PINGBACK=2
class Comment(db.Model):
    """A visitor comment, trackback or pingback attached to an Entry.

    ``ctype`` distinguishes the kinds via the COMMENT_* constants; ``no``
    is the 1-based sequence number of the comment within its entry.
    """
    entry = db.ReferenceProperty(Entry)
    date = db.DateTimeProperty(auto_now_add=True)
    content = db.TextProperty(required=True)
    author=db.StringProperty()
    email=db.EmailProperty()
    weburl=db.URLProperty()
    status=db.IntegerProperty(default=0)
    reply_notify_mail=db.BooleanProperty(default=False)
    ip=db.StringProperty()
    ctype=db.IntegerProperty(default=COMMENT_NORMAL)
    no=db.IntegerProperty(default=0)
    comment_order=db.IntegerProperty(default=1)
    @property
    def mpindex(self):
        """1-based page number this comment falls on when paginated."""
        count=self.entry.commentcount
        no=self.no
        if g_blog.comments_order:
            # newest-first display reverses the position within the list
            no=count-no+1
        index=no / g_blog.comments_per_page
        if no % g_blog.comments_per_page or no==0:
            index+=1
        return index
    @property
    def shortcontent(self,length=20):
        """Short plain-text preview of the comment with markup stripped.

        The parameter was renamed from ``len`` to stop shadowing the
        builtin; as a property it can never be supplied by callers, so
        the 20-character default always applies.
        """
        scontent=self.content
        scontent=re.sub(r'<br\s*/>',' ',scontent)
        scontent=re.sub(r'<[^>]+>','',scontent)
        scontent=re.sub(r'(@[\S]+)-\d{2,7}',r'\1:',scontent)
        # escape angle brackets so the preview is safe to embed in HTML;
        # the previous replace('<','<')/replace('>','>') was a no-op,
        # evidently a lost &lt;/&gt; escaping pass
        return scontent[:length].replace('<','&lt;').replace('>','&gt;')
    def gravatar_url(self):
        """Return the gravatar image URL for the author's email, or a default."""
        # Set your variables here
        if g_blog.avatar_style==0:
            default = g_blog.baseurl+'/static/images/homsar.jpeg'
        else:
            default='identicon'
        if not self.email:
            return default
        size = 50
        try:
            # construct the url
            imgurl = "http://www.gravatar.com/avatar/"
            imgurl +=hashlib.md5(self.email.lower()).hexdigest()+"?"+ urllib.urlencode({
                'd':default, 's':str(size),'r':'G'})
            return imgurl
        except Exception:
            # narrowed from a bare except; e.g. a non-encodable address —
            # fall back to the default image rather than break rendering
            return default
    def save(self):
        """Persist the comment and bump the counters on its entry."""
        self.put()
        self.entry.commentcount+=1
        self.comment_order=self.entry.commentcount
        if (self.ctype == COMMENT_TRACKBACK) or (self.ctype == COMMENT_PINGBACK):
            self.entry.trackbackcount+=1
        self.entry.put()
        # invalidate the cached page for this entry
        memcache.delete("/"+self.entry.link)
        return True
    def delit(self):
        """Delete the comment, decrementing (never below 0) the entry counters."""
        self.entry.commentcount-=1
        if self.entry.commentcount<0:
            self.entry.commentcount = 0
        if (self.ctype == COMMENT_TRACKBACK) or (self.ctype == COMMENT_PINGBACK):
            self.entry.trackbackcount-=1
        if self.entry.trackbackcount<0:
            self.entry.trackbackcount = 0
        self.entry.put()
        self.delete()
    def put(self):
        """Store the comment, firing the pre/post plugin actions."""
        g_blog.tigger_action("pre_comment",self)
        db.Model.put(self)
        g_blog.tigger_action("save_comment",self)
    def delete(self):
        """Remove the comment, firing the plugin action."""
        db.Model.delete(self)
        g_blog.tigger_action("delete_comment",self)
    @property
    def children(self):
        """Direct replies to this comment (ancestor-query children only)."""
        key=self.key()
        comments=Comment.all().ancestor(self)
        return [c for c in comments if c.parent_key()==key]
    def store(self, **kwargs):
        """Low-level datastore put that bypasses the overridden put()
        (and therefore the plugin actions) — used for bulk renumbering."""
        rpc = datastore.GetRpcFromKwargs(kwargs)
        self._populate_internal_entity()
        return datastore.Put(self._entity, rpc=rpc)
class Media(db.Model):
    """A file uploaded to the blog, stored as a blob in the datastore."""
    name =db.StringProperty()                 # original file name
    mtype=db.StringProperty()                 # MIME type
    bits=db.BlobProperty()                    # raw file content
    date=db.DateTimeProperty(auto_now_add=True)
    download=db.IntegerProperty(default=0)    # download counter
    @property
    def size(self):
        """Size of the stored file in bytes."""
        return len(self.bits)
class OptionSet(db.Model):
    """Keyed name/value option storage; values are pickled into a text field.

    NOTE: values round-trip through pickle — only trusted, internally
    written data should ever be stored here.
    """
    name=db.StringProperty()
    value=db.TextProperty()
    #blobValue=db.BlobProperty()
    #isBlob=db.BooleanProperty()
    @classmethod
    def getValue(cls,name,default=None):
        """Return the unpickled value stored under *name*, or *default*."""
        try:
            opt=OptionSet.get_by_key_name(name)
            return pickle.loads(str(opt.value))
        except Exception:
            # narrowed from a bare except: a missing option (opt is None)
            # or an unreadable pickle falls back to the default instead of
            # propagating (and no longer traps KeyboardInterrupt)
            return default
    @classmethod
    def setValue(cls,name,value):
        """Pickle *value* and store it under *name*, creating the row if needed."""
        opt=OptionSet.get_or_insert(name)
        opt.name=name
        opt.value=pickle.dumps(value)
        opt.put()
    @classmethod
    def remove(cls,name):
        """Delete the option named *name* if it exists."""
        opt= OptionSet.get_by_key_name(name)
        if opt:
            opt.delete()
# Search engines to ping with the sitemap URL.  Each tuple is
# (scheme, host, path, base query dict, fragment, name of the query
# parameter that receives the sitemap URL) — see Sitemap_NotifySearch().
NOTIFICATION_SITES = [
    ('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap')
]
def Sitemap_NotifySearch():
    """ Send notification of the new Sitemap(s) to the search engines.

    Best-effort: each failure is logged and the next site is tried.
    """
    url=g_blog.baseurl+"/sitemap"
    # Cycle through notifications
    # To understand this, see the comment near the NOTIFICATION_SITES comment
    for ping in NOTIFICATION_SITES:
        # copy the base query so we never mutate the dict embedded in the
        # module-level NOTIFICATION_SITES constant
        query_map = dict(ping[3])
        query_attr = ping[5]
        query_map[query_attr] = url
        query = urllib.urlencode(query_map)
        notify = urlparse.urlunsplit((ping[0], ping[1], ping[2], query, ping[4]))
        # Send the notification
        logging.info('Notifying search engines. %s'%ping[1])
        logging.info('url: %s'%notify)
        try:
            result = urlfetch.fetch(notify)
            if result.status_code == 200:
                logging.info('Notify Result: %s' % result.content)
            if result.status_code == 404:
                logging.info('HTTP error 404: Not Found')
                logging.warning('Cannot contact: %s' % ping[1])
        except Exception:
            # narrowed from a bare except: still swallow fetch errors,
            # but no longer SystemExit/KeyboardInterrupt
            logging.error('Cannot contact: %s' % ping[1])
def InitBlogData():
    """First-run setup: create and save the default Blog entity, choose a
    language from the request headers, and seed a first post and link.
    Returns the freshly created module-global g_blog."""
    global g_blog
    OptionSet.setValue('PluginActive',[u'googleAnalytics', u'wordpress', u'sys_plugin'])
    g_blog = Blog(key_name = 'default')
    g_blog.domain=os.environ['HTTP_HOST']
    g_blog.baseurl="http://"+g_blog.domain
    g_blog.feedurl=g_blog.baseurl+"/feed"
    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
    # default language, overridden by the browser's Accept-Language header
    lang="zh-cn"
    if os.environ.has_key('HTTP_ACCEPT_LANGUAGE'):
        lang=os.environ['HTTP_ACCEPT_LANGUAGE'].split(',')[0]
    from django.utils.translation import activate,to_locale
    g_blog.language=to_locale(lang)
    from django.conf import settings
    # presumably forces Django to re-read settings — TODO confirm
    settings._target = None
    activate(g_blog.language)
    g_blog.save()
    # seed the blog with a "Hello world" post and a default blogroll link
    entry=Entry(title=_("Hello world!").decode('utf8'))
    entry.content=_('<p>Welcome to micolog. This is your first post. Edit or delete it, then start blogging!</p>').decode('utf8')
    entry.save(True)
    link=Link(href='http://xuming.net',linktext=_("Xuming's blog").decode('utf8'))
    link.put()
    return g_blog
def gblog_init():
    """Return the module-global Blog singleton, loading it from the
    datastore (or creating it on first run) when not yet bound."""
    global g_blog
    try:
        # g_blog may not be bound yet on the very first call
        if g_blog:
            return g_blog
    except NameError:
        # narrowed from a bare except: an unbound global is the only
        # condition this guard is meant to absorb
        pass
    g_blog = Blog.get_by_key_name('default')
    if not g_blog:
        g_blog=InitBlogData()
    g_blog.get_theme()
    g_blog.rootdir=os.path.dirname(__file__)
    return g_blog
# Import-time side effect: initialise the blog singleton and activate its
# language.  Deliberately best-effort — any failure (e.g. importing outside
# a request context) is swallowed so the module can still be imported.
try:
    g_blog=gblog_init()
    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
    from django.utils.translation import activate
    from django.conf import settings
    settings._target = None
    activate(g_blog.language)
except:
    pass
| Python |
# -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from .blueprints import Blueprint
def blueprint_is_module(bp):
    """Return True if *bp* is a deprecated :class:`Module` rather than a
    plain blueprint."""
    return isinstance(bp, Module)
class Module(Blueprint):
    """Deprecated module support.  Until Flask 0.6 modules were a different
    name of the concept now available as blueprints in Flask.  They are
    essentially doing the same but have some bad semantics for templates and
    static files that were fixed with blueprints.

    .. versionchanged:: 0.7
       Modules were deprecated in favor for blueprints.
    """

    def __init__(self, import_name, name=None, url_prefix=None,
                 static_path=None, subdomain=None):
        # NOTE(review): static_path is accepted for backwards compatibility
        # but is never used in this constructor.
        if name is None:
            # derive the module name from the last dotted component
            assert '.' in import_name, 'name required if package name ' \
                'does not point to a submodule'
            name = import_name.rsplit('.', 1)[1]
        Blueprint.__init__(self, name, import_name, url_prefix=url_prefix,
                           subdomain=subdomain, template_folder='templates')
        # old-style modules implicitly exposed a ./static directory
        if os.path.isdir(os.path.join(self.root_path, 'static')):
            self._static_folder = 'static'
| Python |
# -*- coding: utf-8 -*-
"""
flask.exthook
~~~~~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
This is used by `flask.ext`.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
class ExtensionImporter(object):
    """This importer redirects imports from this submodule to other locations.
    This makes it possible to transition from the old flaskext.name to the
    newer flask_name without people having a hard time.

    Python 2 era PEP 302 meta-path hook (note the three-argument ``raise``
    in :meth:`load_module`).
    """

    def __init__(self, module_choices, wrapper_module):
        # module_choices: '%s' templates tried in order, e.g.
        # ['flask_%s', 'flaskext.%s']; wrapper_module: the package whose
        # submodule imports are intercepted (e.g. 'flask.ext').
        self.module_choices = module_choices
        self.wrapper_module = wrapper_module
        self.prefix = wrapper_module + '.'
        self.prefix_cutoff = wrapper_module.count('.') + 1

    def __eq__(self, other):
        # compare by class identity (module + name) and configuration so a
        # re-imported copy of this class still compares equal in install()
        return self.__class__.__module__ == other.__class__.__module__ and \
               self.__class__.__name__ == other.__class__.__name__ and \
               self.wrapper_module == other.wrapper_module and \
               self.module_choices == other.module_choices

    def __ne__(self, other):
        return not self.__eq__(other)

    def install(self):
        # drop any equal importer already installed, then append ourselves
        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]

    def find_module(self, fullname, path=None):
        # PEP 302 finder hook: claim any import under our prefix
        if fullname.startswith(self.prefix):
            return self

    def load_module(self, fullname):
        """Import ``fullname`` by trying each module_choices template in turn."""
        if fullname in sys.modules:
            return sys.modules[fullname]
        # strip the wrapper prefix: 'flask.ext.foo' -> 'foo'
        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
        for path in self.module_choices:
            realname = path % modname
            try:
                __import__(realname)
            except ImportError:
                exc_type, exc_value, tb = sys.exc_info()
                # since we only establish the entry in sys.modules at the
                # very end this seems to be redundant, but if recursive
                # imports happen we will call into the same import a second
                # time.  On the second invocation we still don't have an
                # entry for fullname in sys.modules, but we will end up
                # with the same fake module name and that import will
                # succeed since this one already has a temporary entry in
                # the modules dict.  Since this one "succeeded" temporarily
                # that second invocation now will have created a fullname
                # entry in sys.modules which we have to kill.
                sys.modules.pop(fullname, None)
                # If it's an important traceback we reraise it, otherwise
                # we swallow it and try the next choice.  The skipped frame
                # is the one from __import__ above which we don't care about.
                if self.is_important_traceback(realname, tb):
                    raise exc_type, exc_value, tb.tb_next
                continue
            # alias the real module under the wrapper name and expose it as
            # an attribute of the wrapper package for top-level modules
            module = sys.modules[fullname] = sys.modules[realname]
            if '.' not in modname:
                setattr(sys.modules[self.wrapper_module], modname, module)
            return module
        raise ImportError('No module named %s' % fullname)

    def is_important_traceback(self, important_module, tb):
        """Walks a traceback's frames and checks if any of the frames
        originated in the given important module.  If that is the case then we
        were able to import the module itself but apparently something went
        wrong when the module was imported.  (Eg: import of an import failed).
        """
        while tb is not None:
            if self.is_important_frame(important_module, tb):
                return True
            tb = tb.tb_next
        return False

    def is_important_frame(self, important_module, tb):
        """Checks a single frame if it's important."""
        g = tb.tb_frame.f_globals
        if '__name__' not in g:
            return False
        module_name = g['__name__']
        # Python 2.7 behavior: modules are cleaned up late so the
        # name shows up properly here.  Success!
        if module_name == important_module:
            return True
        # Some Python versions clean up modules so early that the
        # module name at that point is no longer set.  Try guessing from
        # the filename then.
        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
        test_string = os.path.sep + important_module.replace('.', os.path.sep)
        return test_string + '.py' in filename or \
               test_string + os.path.sep + '__init__.py' in filename
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.