code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
import logging
import json
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from sqlalchemy import desc, func
from sqlalchemy.exc import IntegrityError
from fidoweb.lib.base import Session, BaseController, render
from fidoweb.model.map import *
from fidoweb.lib.algorithm import checkTimeScope
log = logging.getLogger(__name__)
class MediaController(BaseController):
    """Serves media-related lookup tables as JSON."""

    def getMedia139MailTypes(self):
        """Return [[id, name], ...] for every Media139MailType as JSON."""
        response.headers['Content-Type'] = 'application/x-json'
        types = []
        # Rate-limited: answer with an empty list when hit too frequently.
        if not checkTimeScope('MediaController.getMedia139Types'):
            return json.dumps(types)
        # Local import avoids loading the model until it is needed.
        from fidoweb.model.media import Media139MailType
        types = [[t.id, t.name] for t in Session.query(Media139MailType)]
        return json.dumps(types)
| Python |
import logging
import json
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from fidoweb.lib.base import Session, BaseController, render
log = logging.getLogger(__name__)
class DiscountController(BaseController):
    """Pages for the discount section of the site."""

    def index(self):
        """Render the discount landing page inside the global layout."""
        # Local imports avoid circular imports between controllers.
        from fidoweb.controllers.login import LoginController
        from fidoweb.controllers.map import MapController
        lc = LoginController()
        mc = MapController()
        c.loginUser = lc._getLoginUser()
        c.schools = mc._getSchools()
        c.title = 'Discount'
        c.header = render('global/globalheader.mako')
        c.content = render('discount/content.mako')
        # FIX: loginUser/schools were fetched a second time here with the
        # same calls and same results; the redundant lookups were removed.
        return render('global/global.mako')

    def getStoreListPage(self):
        """Render the store-list fragment."""
        return render('/discount/storelist.mako')

    def getLeftnavPage(self):
        """Render the left-navigation fragment."""
        return render('/discount/leftnav.mako')

    def getStorePage(self):
        """Render the single-store fragment."""
        return render('/discount/store.mako')
| Python |
import cgi
from paste.urlparser import PkgResourcesParser
from pylons.middleware import error_document_template
from webhelpers.html.builder import literal
from fidoweb.lib.base import BaseController
class ErrorController(BaseController):
    """Generates error documents as and when they are required.

    The ErrorDocuments middleware forwards to ErrorController when error
    related status codes are returned from the application.
    This behaviour can be altered by changing the parameters to the
    ErrorDocuments middleware in your config/middleware.py file.
    """

    def document(self):
        """Render the error document"""
        request = self._py_object.request
        resp = request.environ.get('pylons.original_response')
        # Prefer the original response body; fall back to an escaped
        # 'message' query parameter.
        message = literal(resp.body) or cgi.escape(request.GET.get('message', ''))
        code = cgi.escape(request.GET.get('code', str(resp.status_int)))
        prefix = request.environ.get('SCRIPT_NAME', '')
        return error_document_template % dict(
            prefix=prefix,
            code=code,
            message=message)

    def img(self, id):
        """Serve Pylons' stock images"""
        return self._serve_file('media/img/%s' % id)

    def style(self, id):
        """Serve Pylons' stock stylesheets"""
        return self._serve_file('media/style/%s' % id)

    def _serve_file(self, path):
        """Call Paste's FileApp (a WSGI application) to serve the file
        at the specified path
        """
        request = self._py_object.request
        request.environ['PATH_INFO'] = '/%s' % path
        parser = PkgResourcesParser('pylons', 'pylons')
        return parser(request.environ, self.start_response)
| Python |
# -*- coding: gb18030 -*-
import logging
import json
import random
import string
import datetime
import hashlib
import sys
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from fidoweb.lib.base import Session, BaseController, render
from fidoweb.lib.algorithm import checkTimeScope, sendMail
log = logging.getLogger(__name__)
class GroupController(BaseController):
    """The groups section of the site."""

    def index(self):
        """Render the groups landing page inside the global layout."""
        # Local imports avoid circular imports between controllers.
        from fidoweb.controllers.login import LoginController
        from fidoweb.controllers.map import MapController
        lc = LoginController()
        c.loginUser = lc._getLoginUser()
        # BUG FIX: BaseController defines no _getSchools(), so the old
        # `self._getSchools()` raised AttributeError.  Fetch the school
        # list from MapController as the other controllers do.
        c.schools = MapController()._getSchools()
        c.jsFiles = list()
        c.cssFiles = list()
        c.title = 'Fido - Groups'
        c.header = render('global/globalheader.mako')
        c.content = render('group/content.mako')
        return render('global/global.mako')
| Python |
import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from fidoweb.lib.base import BaseController, render, Session
from fidoweb.model.user import User, UserGroup, User_UserGroup
log = logging.getLogger(__name__)
class UserGroupController(BaseController):
    def _inGroup(self, user, userGroup):
        """Return True if `user` is a member of `userGroup` or any of its
        ancestor groups.

        Walks up the parent chain one level at a time, checking the
        membership table at each level.  Stops at the root group, or at a
        dangling parent_id (robustness fix: the old code raised
        AttributeError when the parent lookup returned None).
        """
        while userGroup is not None:
            membership = Session.query(User_UserGroup) \
                .filter_by(user_id=user.id) \
                .filter_by(userGroup_id=userGroup.id)
            if membership.count() > 0:
                return True
            if userGroup.parent_id is None:
                return False
            # May be None for a dangling reference, which ends the walk.
            userGroup = Session.query(UserGroup).filter_by(id=userGroup.parent_id).first()
        return False
| Python |
import logging
import json
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from fidoweb.lib.base import Session, BaseController, render
from fidoweb.model.homepageGalleryPic import *
log = logging.getLogger(__name__)
class HomepageController(BaseController):
    """Public landing page and its navigation/main-page fragments."""

    # Map of page keys (posted by the client) to main-page templates.
    _mainPages = {
        'map': '/homepage/mainpage_map.mako',
        'maploc': '/homepage/mainpage_maploc.mako',
        'myloc': '/homepage/mainpage_myloc.mako'
    }

    def index(self, id=''):
        """Render the welcome page; logged-in users are redirected to /login."""
        from fidoweb.controllers.login import LoginController
        from fidoweb.controllers.map import MapController
        lc = LoginController()
        mc = MapController()
        c.loginUser = lc._getLoginUser()
        if c.loginUser is not None:
            redirect('login')
            return
        c.schools = mc._getSchools()
        c.jsFiles = []
        c.cssFiles = []
        c.title = 'Fido - Welcome'
        return render('homepage/content.mako')

    def getLeftNavPage(self):
        """Render the left navigation matching the current login state."""
        from fidoweb.controllers.login import LoginController
        if LoginController()._isLoggedIn():
            return render('/homepage/leftnav_loggedin.mako')
        return render('/homepage/leftnav_loggedout.mako')

    def getMainPage(self):
        """Render the requested main-page fragment, or '' when unknown."""
        page = request.POST.get('page')
        template = self._mainPages.get(page)
        return render(template) if template is not None else ''
| Python |
import gzip
import StringIO
class GzipMiddleware(object):
    """WSGI middleware that gzip-encodes selected static responses.

    Only responses for a fixed set of static-asset extensions are
    touched, and only when the client advertises gzip support.  .jpg
    files are already compressed, so they are wrapped in a gzip
    container at compresslevel 0; the other extensions use the
    configured level.
    """

    # Extensions this middleware handles at all.
    _HANDLED = ('.gif', '.js', '.css', '.png', '.jpg')
    # Extensions that are worth actually compressing (everything but .jpg).
    _COMPRESSIBLE = ('.js', '.css', '.gif', '.png')

    def __init__(self, app, compresslevel=9):
        self.app = app
        self.compresslevel = compresslevel

    def __call__(self, environ, start_response):
        # Pass straight through when the client does not accept gzip.
        if 'gzip' not in environ.get('HTTP_ACCEPT_ENCODING', ''):
            return self.app(environ, start_response)
        path = environ['PATH_INFO']
        # IDIOM FIX: one endswith() over a tuple replaces the chain of
        # slice comparisons.
        if not path.endswith(self._HANDLED):
            return self.app(environ, start_response)
        buffer = StringIO.StringIO()
        level = self.compresslevel if path.endswith(self._COMPRESSIBLE) else 0
        output = gzip.GzipFile(mode='wb', compresslevel=level, fileobj=buffer)
        start_response_args = []

        def dummy_start_response(status, headers, exc_info=None):
            # Capture the downstream status/headers for later replay.
            start_response_args.append(status)
            start_response_args.append(headers)
            start_response_args.append(exc_info)
            return output.write

        app_iter = self.app(environ, dummy_start_response)
        for line in app_iter:
            output.write(line)
        if hasattr(app_iter, 'close'):
            app_iter.close()
        # Close flushes the gzip trailer; getvalue() does not need seek().
        output.close()
        result = buffer.getvalue()
        # Replay the captured headers, replacing Content-Length.
        headers = [(name, value)
                   for name, value in start_response_args[1]
                   if name.lower() != 'content-length']
        headers.append(('Content-Length', str(len(result))))
        headers.append(('Content-Encoding', 'gzip'))
        # Mirrors the original policy: js/css cached privately, other
        # assets publicly, both for 60 days.
        if path.endswith(('.js', '.css')):
            headers.append(('Cache-Control', 'private, max-age=5184000'))
        else:
            headers.append(('Cache-Control', 'public, max-age=5184000'))
        headers.append(('Vary', 'Accept-Encoding'))
        start_response(start_response_args[0], headers, start_response_args[2])
        buffer.close()
        return [result]
| Python |
"""Routes configuration
The more specific and detailed routes should be defined first so they
may take precedent over the more generic routes. For more information
refer to the routes manual at http://routes.groovie.org/docs/
"""
from routes import Mapper
def make_map(config):
    """Create, configure and return the routes Mapper"""
    # Controllers are discovered in the configured directory; rescan on
    # every request only while debugging.
    map = Mapper(directory=config['pylons.paths']['controllers'], always_scan=config['debug'])
    map.minimization = False
    map.explicit = False
    # The ErrorController route (handles 404/500 error pages); it should
    # likely stay at the top, ensuring it can always be resolved
    map.connect('/error/{action}', controller='error')
    map.connect('/error/{action}/{id}', controller='error')
    # CUSTOM ROUTES HERE
    # Named route 'home': the site root renders the homepage controller.
    map.connect('home', '/', controller = 'homepage', action = 'index')
    # Generic fallbacks.  Order matters: routes are matched top-down, so
    # the more specific patterns must stay above these.
    map.connect('/{controller}', action = 'index')
    map.connect('/{controller}/{action}')
    map.connect('/{controller}/{action}/{id}')
    return map
| Python |
"""Pylons middleware initialization"""
from beaker.middleware import SessionMiddleware
from paste.cascade import Cascade
from paste.registry import RegistryManager
from paste.urlparser import StaticURLParser
from paste.deploy.converters import asbool
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.wsgiapp import PylonsApp
from routes.middleware import RoutesMiddleware
from fidoweb.config.environment import load_environment
from fidoweb.config.gzipMiddleware import GzipMiddleware
def make_app(global_conf, full_stack=True, static_files=True, **app_conf) :
    """Create a Pylons WSGI application and return it
    ``global_conf``
        The inherited configuration for this application. Normally from
        the [DEFAULT] section of the Paste ini file.
    ``full_stack``
        Whether this application provides a full WSGI stack (by default,
        meaning it handles its own exceptions and errors). Disable
        full_stack when this application is "managed" by another WSGI
        middleware.
    ``static_files``
        Whether this application serves its own static files; disable
        when another web server is responsible for serving them.
    ``app_conf``
        The application's local configuration. Normally specified in
        the [app:<name>] section of the Paste ini file (where <name>
        defaults to main).
    """
    # Configure the Pylons environment
    config = load_environment(global_conf, app_conf)
    # The Pylons WSGI app
    app = PylonsApp(config=config)
    # Routing/Session Middleware.  NOTE: each assignment wraps the app in
    # another layer, so middleware added later runs earlier per request.
    app = RoutesMiddleware(app, config['routes.map'], singleton=False)
    app = SessionMiddleware(app, config)
    # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)
    if asbool(full_stack):
        # Handle Python exceptions
        app = ErrorHandler(app, global_conf, **config['pylons.errorware'])
        # Display error documents for 401, 403, 404 status codes (and
        # 500 when debug is disabled)
        if asbool(config['debug']) :
            app = StatusCodeRedirect(app)
        else :
            app = StatusCodeRedirect(app, [400, 401, 403, 404, 500])
    # Establish the Registry for this application
    app = RegistryManager(app)
    if asbool(static_files):
        # Serve static files
        static_app = StaticURLParser(config['pylons.paths']['static_files'])
        app = Cascade([static_app, app])
    # Outermost layer: gzip static assets for clients that accept it.
    app = GzipMiddleware(app, compresslevel = 6)
    app.config = config
    return app
| Python |
"""Pylons environment configuration"""
import os
from mako.lookup import TemplateLookup
from pylons.configuration import PylonsConfig
from pylons.error import handle_mako_error
from sqlalchemy import engine_from_config
import fidoweb.lib.app_globals as app_globals
import fidoweb.lib.helpers
from fidoweb.config.routing import make_map
from fidoweb.model import init_model
def load_environment(global_conf, app_conf):
    """Configure the Pylons environment via the ``pylons.config`` object"""
    config = PylonsConfig()
    # Pylons paths, all relative to the package root.
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    paths = dict(root=root,
                 controllers=os.path.join(root, 'controllers'),
                 static_files=os.path.join(root, 'public'),
                 templates=[os.path.join(root, 'templates')])
    # Initialize config with the basic options
    config.init_app(global_conf, app_conf, package='fidoweb', paths=paths)
    config['routes.map'] = make_map(config)
    config['pylons.app_globals'] = app_globals.Globals(config)
    config['pylons.h'] = fidoweb.lib.helpers
    # Setup cache object as early as possible
    import pylons
    pylons.cache._push_object(config['pylons.app_globals'].cache)
    # Create the Mako TemplateLookup, with the default auto-escaping
    config['pylons.app_globals'].mako_lookup = TemplateLookup(
        directories=paths['templates'],
        error_handler=handle_mako_error,
        module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
        input_encoding='utf-8', output_encoding='utf-8',
        default_filters=['decode.utf8'],
        imports=['from webhelpers.html import escape'])
    # Setup the SQLAlchemy database engine from the 'sqlalchemy.*' keys
    # of the ini file, then bind the model's scoped session to it.
    engine = engine_from_config(config, 'sqlalchemy.')
    init_model(engine)
    # CONFIGURATION OPTIONS HERE (note: all config options will override
    # any Pylons config options)
    return config
| Python |
import datetime
import string
class FormChecker:
    """Validators for user-submitted form fields.

    All methods are pure.  `uLen` measures display width, counting any
    non-ASCII character (e.g. CJK) as two columns.
    """

    def _isDigits(self, s):
        """True when every character of `s` is an ASCII digit 0-9."""
        return all('0' <= ch <= '9' for ch in s)

    def isValidEmail(self, addr):
        """Validate `addr` as a printable-ASCII, RFC822-ish address.

        Requires a non-empty local part without specials, an '@', and a
        domain containing at least one '.' with no empty labels.
        """
        if len(addr) == 0:
            return False
        rfc822_specials = '()<>,;:\\"[]'
        c = 0
        # Scan the local part up to the '@'.
        while c < len(addr):
            if ord(addr[c]) < 32 or ord(addr[c]) >= 127:
                return False
            if addr[c] in rfc822_specials:
                return False
            if addr[c] == '@':
                break
            c += 1
        # BUG FIX: an address without any '@' used to fall out of the
        # loop with c == len(addr) and raise IndexError on addr[c].
        if c >= len(addr) or addr[c] != '@':
            return False
        if c == 0 or addr[c - 1] == '.':
            return False
        c += 1
        domain = c
        if domain >= len(addr):
            return False
        count = 0
        # Scan the domain: count dots, reject empty labels and specials.
        while c < len(addr):
            if addr[c] == '.':
                if addr[c - 1] == '@' or addr[c - 1] == '.':
                    return False
                count += 1
            if addr[c] in rfc822_specials or addr[c] == '@':
                return False
            c += 1
        if addr[c - 1] == '.':
            return False
        return count >= 1

    def uLen(self, s):
        """Display width of `s`: non-ASCII characters count as 2 columns."""
        return sum(2 if ord(ch) > 127 else 1 for ch in s)

    def isValidFidocard(self, number):
        """A fidocard number is exactly 13 ASCII digits."""
        return len(number) == 13 and self._isDigits(number)

    def isValidMobile(self, number):
        """A mobile number is exactly 11 ASCII digits."""
        return len(number) == 11 and self._isDigits(number)

    def isValidName(self, name):
        """Names are 4-16 display columns of CJK ideographs,
        alphanumerics, or spaces."""
        width = self.uLen(name)
        if width < 4 or width > 16:
            return False
        for ch in name:
            code = ord(ch)
            if 0x4e00 <= code <= 0x9fff:  # CJK unified ideographs
                continue
            if '0' <= ch <= '9' or 'a' <= ch <= 'z' or 'A' <= ch <= 'Z' or ch == ' ':
                continue
            return False
        return True

    def isValidPassword(self, password):
        """Passwords are 6-16 columns of ASCII letters, digits, spaces
        or dots."""
        width = self.uLen(password)
        if width < 6 or width > 16:
            return False
        allowed = string.ascii_letters + string.digits + ' .'
        return all(ch in allowed for ch in password)

    def isValidText(self, text):
        """Reject control characters, double quotes, spaces, and any '<'
        that does not start a literal '<br/>' tag."""
        for i, ch in enumerate(text):
            if ord(ch) < 32 or ch == '"' or ch == ' ':
                return False
            if ch == '<' and not text.startswith('<br/>', i):
                return False
        return True

    def strToInt(self, s):
        """Parse `s` as an int; return None on malformed input."""
        try:
            return int(s)
        # FIX: a bare except here also swallowed SystemExit and
        # KeyboardInterrupt; catch only parse failures.
        except (TypeError, ValueError):
            return None

    def strToDate(self, s):
        """Parse `s` as a 'YYYY-MM-DD' datetime; return None on
        malformed input."""
        try:
            return datetime.datetime.strptime(s, "%Y-%m-%d")
        except (TypeError, ValueError):
            return None
"""Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to templates as 'h'.
"""
# Import helpers as desired, or define your own, ie:
#from webhelpers.html.tags import checkbox, password
| Python |
"""The base Controller API
Provides the BaseController class for subclassing.
"""
from pylons.controllers import WSGIController
from pylons.templating import render_mako as render
from fidoweb.model.meta import Session
class BaseController(WSGIController):
    """Base class for all fidoweb controllers.

    Guarantees that the SQLAlchemy scoped session is released after
    every request, whatever the outcome of the controller action.
    """

    def __call__(self, environ, start_response):
        """Invoke the Controller"""
        # WSGIController.__call__ dispatches to the Controller method
        # the request is routed to. This routing information is
        # available in environ['pylons.routes_dict']
        try:
            return WSGIController.__call__(self, environ, start_response)
        finally:
            # Remove the thread-local session so no state (or open
            # transaction) leaks into the next request.
            Session.remove()
| Python |
"""The application's Globals object"""
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
class Globals(object):
    """Globals acts as a container for objects available throughout the
    life of the application
    """

    def __init__(self, config):
        """One instance of Globals is created during application
        initialization and is available during requests via the
        'app_globals' variable
        """
        # Beaker cache shared by all requests, configured from the
        # 'cache.*' options of the application config.
        self.cache = CacheManager(**parse_cache_config_options(config))
| Python |
import random
import datetime
import math
import pytz
from pytz import timezone
from pylons import session
# Fidocard:
# BWT info(5 bit) + mobile(36 bit) + random(1 bit) + fixed bit(1,1 bit)
class Algorithm:
    """Obfuscates mobile numbers into fidocard numbers and back.

    A fidocard value packs, from most to least significant: a 2-bit
    prefix (binary '1x', x random), then 18 2-bit symbols of the mobile
    number, all run through a 19-symbol Burrows-Wheeler transform whose
    original-row index is stored in the low 5 bits.
    """

    def fidocardBWT(self, num):
        """BWT of `num` viewed as 19 2-bit symbols (38 bits).

        Returns the last column (19 symbols, first rotation in the most
        significant position) with the index of the original rotation
        appended in the low 5 bits.
        """
        rotations = list()
        for i in range(0, 19):
            rotations.append([num, i])
            low = num & 3
            # Rotate the 38-bit value right by one 2-bit symbol.
            num = (num >> 2) + (low << 36)
        rotations.sort()
        loc = 0
        res = 0
        for i in range(0, 19):
            res = (res << 2) + (rotations[i][0] & 3)
            if rotations[i][1] == 0:
                loc = i
        return (res << 5) + loc

    def fidocardRevBWT(self, num):
        """Inverse of fidocardBWT: recover the original 38-bit value."""
        # fidocardBWT emits sorted-row i at bit offset (18 - i) * 2,
        # hence the index flip when reading the stored row index.
        loc = 18 - (num & 31)
        num >>= 5
        eStart = [-1, -1, -1, -1]
        eNext = list()
        eTo = list()
        # Bucket the last-column symbols by value (LF-mapping setup).
        for i in range(0, 19):
            cur = (num >> (i << 1)) & 3
            eNext.append(eStart[cur])
            eTo.append(cur)
            eStart[cur] = i
        # The first column is the last column sorted; built descending
        # to match the bucket lists assembled above.
        app = eTo
        app.sort()
        app.reverse()
        bwtNext = list()
        for i in range(0, 19):
            cur = app[18 - i]
            bwtNext.append(eStart[cur])
            eStart[cur] = eNext[eStart[cur]]
        bwtNext.reverse()
        res = 0
        # Follow the rotation chain starting at the stored row index.
        for i in range(0, 19):
            res = (res << 2) + app[loc]
            loc = bwtNext[loc]
        return res

    def calcFidocardByMobile(self, mobile):
        """Encode an 11-digit mobile number string into a fidocard number."""
        m = int(mobile)
        # '1x' prefix with x random: the top symbol is never zero.
        res = 2 + random.randint(0, 1)
        for i in range(0, 18):
            res = (res << 2) + (m & 3)
            # FIX: floor division — identical to `/=` on Python 2 ints,
            # but also correct if ever run under Python 3.
            m //= 4
        return self.fidocardBWT(res)

    def calcMobileByFidocard(self, fidocard):
        """Decode a fidocard number back to the mobile number (as int)."""
        t = self.fidocardRevBWT(fidocard)
        res = 0
        # Reassemble the 18 symbols, discarding the random prefix that
        # remains in the high bits of t.
        for i in range(0, 18):
            res = (res << 2) + (t & 3)
            t >>= 2
        return res
def datetimeToStr(date):
    """Format a naive US-Pacific datetime as Shanghai local 'MM-DD HH:MM'."""
    localized = timezone('America/Los_Angeles').localize(date)
    return localized.astimezone(timezone('Asia/Shanghai')).strftime('%m-%d %H:%M')
def dateToStr(date):
    """Format a naive US-Pacific datetime as Shanghai local 'YYYY-MM-DD'."""
    localized = timezone('America/Los_Angeles').localize(date)
    return localized.astimezone(timezone('Asia/Shanghai')).strftime('%Y-%m-%d')
def checkTimeScope(pageId, interval = 1):
    """Session-based rate limiter.

    Returns True when `pageId` was last hit at least `interval` seconds
    ago (or never), False otherwise.  Every call records the current
    time, so rapid repeated calls keep pushing the window forward.
    """
    pageId = 'page.' + pageId
    now = datetime.datetime.now()
    if pageId not in session:
        session[pageId] = now
        session.save()
        return True
    lastTime = session[pageId]
    session[pageId] = now
    session.save()
    # BUG FIX: the old `.seconds < interval` test ignored the days
    # component of the timedelta, so a gap of exactly N days (plus a
    # moment) was wrongly treated as "too soon".  Compare the whole
    # timedelta instead.
    return now - lastTime >= datetime.timedelta(seconds = interval)
def sendMail(mailFrom, mailTo, subject, content):
    """Send a plain-text UTF-8 mail through the local SMTP server.

    Propagates smtplib errors to the caller; the SMTP connection is
    always closed.
    """
    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = mailFrom
    msg['To'] = mailTo
    MIME = MIMEText(content, 'plain', _charset = 'utf8')
    msg.attach(MIME)
    s = smtplib.SMTP('localhost')
    try:
        s.sendmail(mailFrom, mailTo, msg.as_string())
    finally:
        # FIX: the connection used to leak when sendmail() raised.
        s.quit()
def calcSchoolMapLocRanking(avgRating, topicCount, visitCount):
    """Ranking score in [0, 3): normalized rating plus saturating
    logarithmic bonuses for topic count and visit count."""
    ratingTerm = avgRating / 5.0
    topicTerm = 1.0 - 1.0 / (math.log(topicCount + 1) + 1.0)
    visitTerm = 1.0 - 1.0 / (math.log(visitCount / 100.0 + 1.0) + 1.0)
    return ratingTerm + topicTerm + visitTerm
| Python |
# Use setuptools when installed; otherwise bootstrap it via ez_setup.
try:
    from setuptools import setup, find_packages
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

# Package metadata and build configuration for the FidoWeb application.
setup(
    name='FidoWeb',
    version='0.1',
    description='',
    author='',
    author_email='',
    url='',
    install_requires=[
        "Pylons>=1.0",
        "SQLAlchemy>=0.5",
    ],
    setup_requires=["PasteScript>=1.6.3"],
    packages=find_packages(exclude=['ez_setup']),
    include_package_data=True,
    test_suite='nose.collector',
    # Ship compiled translation catalogs with the package.
    package_data={'fidoweb': ['i18n/*/LC_MESSAGES/*.mo']},
    #message_extractors={'fidoweb': [
    #        ('**.py', 'python', None),
    #        ('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
    #        ('public/**', 'ignore', None)]},
    zip_safe=False,
    paster_plugins=['PasteScript', 'Pylons'],
    # Paste entry points: app factory plus installer hook.
    entry_points="""
    [paste.app_factory]
    main = fidoweb.config.middleware:make_app
    [paste.app_install]
    main = pylons.util:PylonsInstaller
    """,
)
| Python |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
    from ez_setup import use_setuptools
    use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Version fetched when the caller does not specify one.
DEFAULT_VERSION = "0.6c9"
# Download base; the interpreter-version prefix selects the egg directory.
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 digests of downloadable setuptools eggs, keyed by file
# name.  Maintained via update_md5() below (which rewrites this literal
# in place, so its exact `md5_data = {` form must be preserved).
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
# hashlib replaced the md5 module in Python 2.5; support both.
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Return `data` unchanged after checking it against the known MD5
    digest for `egg_name`; exit with status 2 on a mismatch.

    Eggs without a registered digest are accepted as-is.
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg and make it importable immediately.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        # An older setuptools is already installed.
        if was_imported:
            # Too late to swap it out in-process; tell the user and abort.
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Forget the stale modules and download a fresh egg.
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Give the user a chance to abort before hitting the network.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # No setuptools at all: bootstrap easy_install from a fresh egg,
        # then delete the temporary egg afterwards.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # Ancient placeholder release; cannot be upgraded in place.
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version is too old: upgrade from a fresh download.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    # Hash each named egg file and record it under its basename.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    # Rewrite this very source file, replacing the body of the
    # `md5_data = {...}` literal in place.
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
# Command-line entry point: `--md5update <eggs...>` refreshes the digest
# table in this file; anything else installs/upgrades setuptools.
if __name__=='__main__':
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
# -*- coding: utf-8 -*-
#this will translate a web page to another language
from urllib import urlopen
from BeautifulSoup import BeautifulSoup
import simplejson
# The google translate API can be found here:
# http://code.google.com/apis/ajaxlanguage/documentation/#Examples
def translate(text, sourceLang = 'en', targetLang = 'pt'):
    """Translate `text` via the Google Translate v2 REST API.

    Returns the translated string, or '' when the request or response
    parsing fails.

    SECURITY NOTE: the API key is hard-coded below; it should be moved
    to configuration.
    """
    from urllib import quote
    try:
        # BUG FIX: the query parameters used to be interpolated raw, so
        # text containing spaces, '&' or '#' produced a broken URL.
        url = "https://www.googleapis.com/language/translate/v2?" + \
              "key=AIzaSyARZORpTevzPWt1XhsIkJUQ0PVZlK4cYTk&q=%s&source=%s&target=%s" \
              % (quote(text), quote(sourceLang), quote(targetLang))
        search_results = urlopen(url)
        json = simplejson.loads(search_results.read())
        return json['data']['translations'][0]['translatedText']
    # Best-effort by design, but no longer swallows SystemExit or
    # KeyboardInterrupt the way the old bare `except:` did.
    except Exception:
        return ''
def webPageRead(urlLink):
    """Fetch `urlLink` over HTTP and return the raw response body."""
    handle = urlopen(urlLink)
    contents = handle.read()
    handle.close()
    return contents
def fixHTML(urlLink):
    """Return the text-only lines of the page at `urlLink`.

    The document is parsed and prettified, then only lines containing
    no markup characters ('<', '>') and no '|' are kept.
    """
    markup = webPageRead(urlLink)
    soup = BeautifulSoup(markup)
    kept = []
    for raw in soup.prettify().split('\n'):
        line = raw.strip()
        if all(line.find(bad) == -1 for bad in ('>', '<', '|')):
            kept.append(line)
    return kept
#repeatedWords = {}
#textFile = open('/tmp/Shakeaspeare_Hamlet_fr.txt', 'w')
#for line in fixHTML():
# for word in line.split(' '):
# if not repeatedWords.has_key(word):
# repeatedWords[word] = translate(word)
# print repeatedWords[word],
# textFile.write(repeatedWords[word])
# textFile.write(' ')
# print
# textFile.write('\n')
#textFile.close()
| Python |
# -*- coding: utf-8 -*-
"""Unit tests for Beautiful Soup.
These tests make sure the Beautiful Soup works as it should. If you
find a bug in Beautiful Soup, the best way to express it is as a test
case like this that fails."""
import unittest
from BeautifulSoup import *
class SoupTest(unittest.TestCase):
    """Shared helpers for the Beautiful Soup test cases."""

    def assertSoupEquals(self, toParse, rep=None, c=BeautifulSoup):
        """Parse the given text and make sure its string rep is the other
        given text."""
        expected = toParse if rep is None else rep
        self.assertEqual(str(c(toParse)), expected)
class FollowThatTag(SoupTest):
    "Tests the various ways of fetching tags from a soup."
    def setUp(self):
        # Deliberately messy markup: mixed-case tags, a duplicated id,
        # an unquoted attribute value, and mismatched closing tags.
        ml = """
        <a id="x">1</a>
        <A id="a">2</a>
        <b id="b">3</a>
        <b href="foo" id="x">4</a>
        <ac width=100>4</ac>"""
        self.soup = BeautifulStoneSoup(ml)
    def testFindAllByName(self):
        # soup('a'), findAll('a'), and an explicit SoupStrainer are
        # three spellings of the same name query.
        matching = self.soup('a')
        self.assertEqual(len(matching), 2)
        self.assertEqual(matching[0].name, 'a')
        self.assertEqual(matching, self.soup.findAll('a'))
        self.assertEqual(matching, self.soup.findAll(SoupStrainer('a')))
    def testFindAllByAttribute(self):
        # Keyword form and the attrs-dict form must give the same result.
        matching = self.soup.findAll(id='x')
        self.assertEqual(len(matching), 2)
        self.assertEqual(matching[0].name, 'a')
        self.assertEqual(matching[1].name, 'b')
        matching2 = self.soup.findAll(attrs={'id' : 'x'})
        self.assertEqual(matching, matching2)
        strainer = SoupStrainer(attrs={'id' : 'x'})
        self.assertEqual(matching, self.soup.findAll(strainer))
        # attr=None matches tags that LACK the attribute; attr=True
        # matches tags that HAVE it, whatever the value.
        self.assertEqual(len(self.soup.findAll(id=None)), 1)
        self.assertEqual(len(self.soup.findAll(width=100)), 1)
        self.assertEqual(len(self.soup.findAll(junk=None)), 5)
        self.assertEqual(len(self.soup.findAll(junk=[1, None])), 5)
        self.assertEqual(len(self.soup.findAll(junk=re.compile('.*'))), 0)
        self.assertEqual(len(self.soup.findAll(junk=True)), 0)
        self.assertEqual(len(self.soup.findAll(junk=True)), 0)
        self.assertEqual(len(self.soup.findAll(href=True)), 1)
    def testFindallByClass(self):
        # A bare string matches against the CSS class attribute,
        # including one class name among several.
        soup = BeautifulSoup('<b class="foo">Foo</b><a class="1 23 4">Bar</a>')
        self.assertEqual(soup.find(attrs='foo').string, "Foo")
        self.assertEqual(soup.find('a', '1').string, "Bar")
        self.assertEqual(soup.find('a', '23').string, "Bar")
        self.assertEqual(soup.find('a', '4').string, "Bar")
        # '2' is only a substring of the class '23', so no match.
        self.assertEqual(soup.find('a', '2'), None)
    def testFindAllByList(self):
        # A list of names matches any of them.
        matching = self.soup(['a', 'ac'])
        self.assertEqual(len(matching), 3)
    def testFindAllByHash(self):
        # A dict of names acts like a set of acceptable names.
        matching = self.soup({'a' : True, 'b' : True})
        self.assertEqual(len(matching), 4)
    def testFindAllText(self):
        # text= searches NavigableStrings instead of tags.
        soup = BeautifulSoup("<html>\xbb</html>")
        self.assertEqual(soup.findAll(text=re.compile('.*')),
                         [u'\xbb'])
    def testFindAllByRE(self):
        # A regular expression matches against tag names.
        import re
        r = re.compile('a.*')
        self.assertEqual(len(self.soup(r)), 3)
    def testFindAllByMethod(self):
        # A callable receives each Tag and returns a truth value.
        def matchTagWhereIDMatchesName(tag):
            return tag.name == tag.get('id')
        matching = self.soup.findAll(matchTagWhereIDMatchesName)
        self.assertEqual(len(matching), 2)
        self.assertEqual(matching[0].name, 'a')
    def testFindByIndex(self):
        """For when you have the tag and you want to know where it is."""
        tag = self.soup.find('a', id="a")
        self.assertEqual(self.soup.index(tag), 3)
        # It works for NavigableStrings as well.
        s = tag.string
        self.assertEqual(tag.index(s), 0)
        # If the tag isn't present, a ValueError is raised.
        soup2 = BeautifulSoup("<b></b>")
        tag2 = soup2.find('b')
        self.assertRaises(ValueError, self.soup.index, tag2)
    def testConflictingFindArguments(self):
        """The 'text' argument takes precedence."""
        soup = BeautifulSoup('Foo<b>Bar</b>Baz')
        self.assertEqual(soup.find('b', text='Baz'), 'Baz')
        self.assertEqual(soup.findAll('b', text='Baz'), ['Baz'])
        self.assertEqual(soup.find(True, text='Baz'), 'Baz')
        self.assertEqual(soup.findAll(True, text='Baz'), ['Baz'])
    def testParents(self):
        # findParent(s) walks up the tree, optionally filtered by attrs.
        soup = BeautifulSoup('<ul id="foo"></ul><ul id="foo"><ul><ul id="foo" a="b"><b>Blah')
        b = soup.b
        self.assertEquals(len(b.findParents('ul', {'id' : 'foo'})), 2)
        self.assertEquals(b.findParent('ul')['a'], 'b')
    # Shared fixture: four nested <b> tags, built once at class scope.
    PROXIMITY_TEST = BeautifulSoup('<b id="1"><b id="2"><b id="3"><b id="4">')
    def testNext(self):
        # findNext is stateless: asking twice gives the same answer.
        soup = self.PROXIMITY_TEST
        b = soup.find('b', {'id' : 2})
        self.assertEquals(b.findNext('b')['id'], '3')
        self.assertEquals(b.findNext('b')['id'], '3')
        self.assertEquals(len(b.findAllNext('b')), 2)
        self.assertEquals(len(b.findAllNext('b', {'id' : 4})), 1)
    def testPrevious(self):
        soup = self.PROXIMITY_TEST
        b = soup.find('b', {'id' : 3})
        self.assertEquals(b.findPrevious('b')['id'], '2')
        self.assertEquals(b.findPrevious('b')['id'], '2')
        self.assertEquals(len(b.findAllPrevious('b')), 2)
        self.assertEquals(len(b.findAllPrevious('b', {'id' : 2})), 1)
    # Shared fixture: four sibling blockquotes, three with a nested child.
    SIBLING_TEST = BeautifulSoup('<blockquote id="1"><blockquote id="1.1"></blockquote></blockquote><blockquote id="2"><blockquote id="2.1"></blockquote></blockquote><blockquote id="3"><blockquote id="3.1"></blockquote></blockquote><blockquote id="4">')
    def testNextSibling(self):
        # findNext descends into children; findNextSibling stays level.
        soup = self.SIBLING_TEST
        tag = 'blockquote'
        b = soup.find(tag, {'id' : 2})
        self.assertEquals(b.findNext(tag)['id'], '2.1')
        self.assertEquals(b.findNextSibling(tag)['id'], '3')
        self.assertEquals(b.findNextSibling(tag)['id'], '3')
        self.assertEquals(len(b.findNextSiblings(tag)), 2)
        self.assertEquals(len(b.findNextSiblings(tag, {'id' : 4})), 1)
    def testPreviousSibling(self):
        soup = self.SIBLING_TEST
        tag = 'blockquote'
        b = soup.find(tag, {'id' : 3})
        self.assertEquals(b.findPrevious(tag)['id'], '2.1')
        self.assertEquals(b.findPreviousSibling(tag)['id'], '2')
        self.assertEquals(b.findPreviousSibling(tag)['id'], '2')
        self.assertEquals(len(b.findPreviousSiblings(tag)), 2)
        self.assertEquals(len(b.findPreviousSiblings(tag, id=1)), 1)
    def testTextNavigation(self):
        # All the find* variants also work starting from a text node.
        soup = BeautifulSoup('Foo<b>Bar</b><i id="1"><b>Baz<br />Blee<hr id="1"/></b></i>Blargh')
        baz = soup.find(text='Baz')
        self.assertEquals(baz.findParent("i")['id'], '1')
        self.assertEquals(baz.findNext(text='Blee'), 'Blee')
        self.assertEquals(baz.findNextSibling(text='Blee'), 'Blee')
        self.assertEquals(baz.findNextSibling(text='Blargh'), None)
        self.assertEquals(baz.findNextSibling('hr')['id'], '1')
class SiblingRivalry(SoupTest):
    "Tests the nextSibling and previousSibling navigation."
    def testSiblings(self):
        # Three <li> items; the middle one is reached via nextSibling.
        soup = BeautifulSoup("<ul><li>1<p>A</p>B<li>2<li>3</ul>")
        second_item = soup.find('li').nextSibling
        self.assert_(second_item.name == 'li' and second_item.string == '2')
        # Text nodes and tags participate in sibling navigation alike.
        self.assertEquals(soup.find(text='1').nextSibling.name, 'p')
        paragraph = soup.find('p')
        self.assertEquals(paragraph.nextSibling, 'B')
        # Hopping back and then forward lands on the same text node.
        self.assertEquals(paragraph.nextSibling.previousSibling.nextSibling, 'B')
class TagsAreObjectsToo(SoupTest):
    "Tests the various built-in functions of Tag objects."
    def testLen(self):
        # len() of a tag counts its direct children: text, <b>, text.
        document = BeautifulSoup("<top>1<b>2</b>3</top>")
        self.assertEquals(len(document.top), 3)
class StringEmUp(SoupTest):
    "Tests the use of 'string' as an alias for a tag's only content."
    def testString(self):
        soup = BeautifulSoup("<b>foo</b>")
        self.assertEquals(soup.b.string, 'foo')
    def testLackOfString(self):
        # A tag with more than one child has no .string.
        soup = BeautifulSoup("<b>f<i>e</i>o</b>")
        self.assert_(not soup.b.string)
    def testStringAssign(self):
        soup = BeautifulSoup("<b></b>")
        tag = soup.b
        tag.string = "foo"
        contents = tag.string
        self.assertEquals(contents, "foo")
        # Assignment wraps the plain value in a NavigableString.
        self.assert_(isinstance(contents, NavigableString))
class AllText(SoupTest):
    "Tests the use of 'text' to get all of string content from the tag."
    def testText(self):
        menu = BeautifulSoup("<ul><li>spam</li><li>eggs</li><li>cheese</li>")
        # .text concatenates every descendant string...
        self.assertEquals(menu.ul.text, "spameggscheese")
        # ...and getText() accepts an explicit separator.
        self.assertEquals(menu.ul.getText('/'), "spam/eggs/cheese")
class ThatsMyLimit(SoupTest):
    "Tests the limit argument."
    def testBasicLimits(self):
        # Four identical <br> tags to slice with the limit argument.
        soup = BeautifulSoup('<br id="1" /><br id="1" /><br id="1" /><br id="1" />')
        self.assertEquals(len(soup.findAll('br')), 4)
        # limit caps the result count, via findAll or the call shortcut.
        self.assertEquals(len(soup.findAll('br', limit=2)), 2)
        self.assertEquals(len(soup('br', limit=2)), 2)
class OnlyTheLonely(SoupTest):
    "Tests the parseOnly argument to the constructor."
    def setUp(self):
        # Five <a> tags, each wrapping three <b> tags with unique ids.
        pieces = []
        for i in range(1, 6):
            pieces.append('<a id="%s">' % i)
            for j in range(100, 103):
                pieces.append('<b id="%s.%s">Content %s.%s</b>' % (i, j, i, j))
            pieces.append('</a>')
        self.x = ''.join(pieces)
    def testOnly(self):
        # Keep only the <b> tags: 5 * 3 of them.
        soup = BeautifulSoup(self.x, parseOnlyThese=SoupStrainer("b"))
        self.assertEquals(len(soup), 15)
        # Keep only tags whose id starts with "100": one per <a>.
        soup = BeautifulSoup(self.x,
                             parseOnlyThese=SoupStrainer(id=re.compile("100.*")))
        self.assertEquals(len(soup), 5)
        # Keep only text nodes matching the regular expression.
        soup = BeautifulSoup(self.x,
                             parseOnlyThese=SoupStrainer(text=re.compile("10[01].*")))
        self.assertEquals(len(soup), 10)
        # A callable strainer: keep text whose ninth character is '3'
        # (i.e. the texts from the third <a> tag).
        soup = BeautifulSoup(self.x,
                             parseOnlyThese=SoupStrainer(text=lambda x: x[8] == '3'))
        self.assertEquals(len(soup), 3)
class PickleMeThis(SoupTest):
    "Testing features like pickle and deepcopy."
    def setUp(self):
        # A realistic page; the parse tree must survive serialization.
        self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:leonardr@segfault.org">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
        self.soup = BeautifulSoup(self.page)
    def testPickle(self):
        # Round-tripping through pickle preserves class and rendering.
        import pickle
        dumped = pickle.dumps(self.soup, 2)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.__class__, BeautifulSoup)
        self.assertEqual(str(loaded), str(self.soup))
    def testDeepcopy(self):
        # deepcopy must reproduce the whole tree.
        from copy import deepcopy
        copied = deepcopy(self.soup)
        self.assertEqual(str(copied), str(self.soup))
    def testUnicodePickle(self):
        # Non-ASCII input must survive the C pickle implementation too.
        import cPickle as pickle
        html = "<b>" + chr(0xc3) + "</b>"
        soup = BeautifulSoup(html)
        dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
        loaded = pickle.loads(dumped)
        self.assertEqual(str(loaded), str(soup))
class WriteOnlyCode(SoupTest):
    "Testing the modification of the tree."
    def testModifyAttributes(self):
        # Attribute assignment, deletion, and creation render correctly.
        soup = BeautifulSoup('<a id="1"></a>')
        soup.a['id'] = 2
        self.assertEqual(soup.renderContents(), '<a id="2"></a>')
        del(soup.a['id'])
        self.assertEqual(soup.renderContents(), '<a></a>')
        soup.a['id2'] = 'foo'
        self.assertEqual(soup.renderContents(), '<a id2="foo"></a>')
    def testNewTagCreation(self):
        "Makes sure tags don't step on each others' toes."
        soup = BeautifulSoup()
        a = Tag(soup, 'a')
        ol = Tag(soup, 'ol')
        a['href'] = 'http://foo.com/'
        # Setting an attribute on one new tag must not leak to another.
        self.assertRaises(KeyError, lambda : ol['href'])
    def testNewTagWithAttributes(self):
        """Makes sure new tags can be created complete with attributes."""
        soup = BeautifulSoup()
        a = Tag(soup, 'a', [('href', 'foo')])
        b = Tag(soup, 'b', {'class':'bar'})
        soup.insert(0,a)
        soup.insert(1,b)
        self.assertEqual(soup.a['href'], 'foo')
        self.assertEqual(soup.b['class'], 'bar')
    def testTagReplacement(self):
        # Make sure you can replace an element with itself.
        text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
        soup = BeautifulSoup(text)
        c = soup.c
        soup.c.replaceWith(c)
        self.assertEquals(str(soup), text)
        # A very simple case
        soup = BeautifulSoup("<b>Argh!</b>")
        soup.find(text="Argh!").replaceWith("Hooray!")
        newText = soup.find(text="Hooray!")
        b = soup.b
        # The replacement is fully linked into the navigation chain.
        self.assertEqual(newText.previous, b)
        self.assertEqual(newText.parent, b)
        self.assertEqual(newText.previous.next, newText)
        self.assertEqual(newText.next, None)
        # A more complex case
        soup = BeautifulSoup("<a><b>Argh!</b><c></c><d></d></a>")
        soup.b.insert(1, "Hooray!")
        newText = soup.find(text="Hooray!")
        self.assertEqual(newText.previous, "Argh!")
        self.assertEqual(newText.previous.next, newText)
        self.assertEqual(newText.previousSibling, "Argh!")
        self.assertEqual(newText.previousSibling.nextSibling, newText)
        self.assertEqual(newText.nextSibling, None)
        self.assertEqual(newText.next, soup.c)
        # Replacing one existing tag with another moves it in place.
        text = "<html>There's <b>no</b> business like <b>show</b> business</html>"
        soup = BeautifulSoup(text)
        no, show = soup.findAll('b')
        show.replaceWith(no)
        self.assertEquals(str(soup), "<html>There's business like <b>no</b> business</html>")
        # Even more complex
        soup = BeautifulSoup("<a><b>Find</b><c>lady!</c><d></d></a>")
        tag = Tag(soup, 'magictag')
        tag.insert(0, "the")
        soup.a.insert(1, tag)
        b = soup.b
        c = soup.c
        theText = tag.find(text=True)
        findText = b.find(text="Find")
        # Every navigation pointer around the inserted tag is rewired.
        self.assertEqual(findText.next, tag)
        self.assertEqual(tag.previous, findText)
        self.assertEqual(b.nextSibling, tag)
        self.assertEqual(tag.previousSibling, b)
        self.assertEqual(tag.nextSibling, c)
        self.assertEqual(c.previousSibling, tag)
        self.assertEqual(theText.next, c)
        self.assertEqual(c.previous, theText)
        # Aand... incredibly complex.
        soup = BeautifulSoup("""<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""")
        f = soup.f
        a = soup.a
        c = soup.c
        e = soup.e
        weText = a.find(text="We")
        soup.b.replaceWith(soup.f)
        self.assertEqual(str(soup), "<a>We<f>refuse</f></a><e>to<g>service</g></e>")
        self.assertEqual(f.previous, weText)
        self.assertEqual(weText.next, f)
        self.assertEqual(f.previousSibling, weText)
        self.assertEqual(f.nextSibling, None)
        self.assertEqual(weText.nextSibling, f)
    def testReplaceWithChildren(self):
        # replaceWithChildren splices a tag's children into its place.
        soup = BeautifulStoneSoup(
            "<top><replace><child1/><child2/></replace></top>",
            selfClosingTags=["child1", "child2"])
        soup.replaceTag.replaceWithChildren()
        self.assertEqual(soup.top.contents[0].name, "child1")
        self.assertEqual(soup.top.contents[1].name, "child2")
    def testAppend(self):
        # Appending an existing tag MOVES it to the new parent.
        doc = "<p>Don't leave me <b>here</b>.</p> <p>Don't leave me.</p>"
        soup = BeautifulSoup(doc)
        second_para = soup('p')[1]
        bold = soup.find('b')
        soup('p')[1].append(soup.find('b'))
        self.assertEqual(bold.parent, second_para)
        self.assertEqual(str(soup),
                         "<p>Don't leave me .</p> "
                         "<p>Don't leave me.<b>here</b></p>")
    def testTagExtraction(self):
        # A very simple case
        text = '<html><div id="nav">Nav crap</div>Real content here.</html>'
        soup = BeautifulSoup(text)
        extracted = soup.find("div", id="nav").extract()
        # extract() removes the subtree and returns it intact.
        self.assertEqual(str(soup), "<html>Real content here.</html>")
        self.assertEqual(str(extracted), '<div id="nav">Nav crap</div>')
        # A simple case, a more complex test.
        text = "<doc><a>1<b>2</b></a><a>i<b>ii</b></a><a>A<b>B</b></a></doc>"
        soup = BeautifulStoneSoup(text)
        doc = soup.doc
        numbers, roman, letters = soup("a")
        self.assertEqual(roman.parent, doc)
        oldPrevious = roman.previous
        endOfThisTag = roman.nextSibling.previous
        self.assertEqual(oldPrevious, "2")
        self.assertEqual(roman.next, "i")
        self.assertEqual(endOfThisTag, "ii")
        self.assertEqual(roman.previousSibling, numbers)
        self.assertEqual(roman.nextSibling, letters)
        roman.extract()
        # The extracted tag is fully detached from the tree...
        self.assertEqual(roman.parent, None)
        self.assertEqual(roman.previous, None)
        self.assertEqual(roman.next, "i")
        self.assertEqual(letters.previous, '2')
        self.assertEqual(roman.previousSibling, None)
        self.assertEqual(roman.nextSibling, None)
        self.assertEqual(endOfThisTag.next, None)
        self.assertEqual(roman.b.contents[0].next, None)
        # ...and the remaining siblings are stitched back together.
        self.assertEqual(numbers.nextSibling, letters)
        self.assertEqual(letters.previousSibling, numbers)
        self.assertEqual(len(doc.contents), 2)
        self.assertEqual(doc.contents[0], numbers)
        self.assertEqual(doc.contents[1], letters)
        # A more complex case.
        text = "<a>1<b>2<c>Hollywood, baby!</c></b></a>3"
        soup = BeautifulStoneSoup(text)
        one = soup.find(text="1")
        three = soup.find(text="3")
        toExtract = soup.b
        soup.b.extract()
        self.assertEqual(one.next, three)
        self.assertEqual(three.previous, one)
        self.assertEqual(one.parent.nextSibling, three)
        self.assertEqual(three.previousSibling, soup.a)
    def testClear(self):
        # clear() empties a tag of all its children.
        soup = BeautifulSoup("<ul><li></li><li></li></ul>")
        soup.ul.clear()
        self.assertEqual(len(soup.ul.contents), 0)
class TheManWithoutAttributes(SoupTest):
    "Test attribute access"
    def testHasKey(self):
        # has_key reports the presence of an attribute on a tag.
        markup = "<foo attr='bar'>"
        self.assertEquals(BeautifulSoup(markup).foo.has_key('attr'), True)
class QuoteMeOnThat(SoupTest):
    "Test quoting"
    def testQuotedAttributeValues(self):
        # Single-quoted attribute values normalize to double quotes...
        self.assertSoupEquals("<foo attr='bar'></foo>",
                              '<foo attr="bar"></foo>')
        # ...unless the value itself contains double quotes.
        text = """<foo attr='bar "brawls" happen'>a</foo>"""
        soup = BeautifulSoup(text)
        self.assertEquals(soup.renderContents(), text)
        # A value containing both quote styles gets the quotes escaped.
        soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
        newText = """<foo attr='Brawls happen at "Bob&squot;s Bar"'>a</foo>"""
        self.assertSoupEquals(soup.renderContents(), newText)
        self.assertSoupEquals('<this is="really messed up & stuff">',
                              '<this is="really messed up & stuff"></this>')
        # This is not what the original author had in mind, but it's
        # a legitimate interpretation of what they wrote.
        self.assertSoupEquals("""<a href="foo</a>, </a><a href="bar">baz</a>""",
                              '<a href="foo</a>, </a><a href="></a>, <a href="bar">baz</a>')
        # SGMLParser generates bogus parse events when attribute values
        # contain embedded brackets, but at least Beautiful Soup fixes
        # it up a little.
        self.assertSoupEquals('<a b="<a>">', '<a b="<a>"></a><a>"></a>')
        self.assertSoupEquals('<a href="http://foo.com/<a> and blah and blah',
                              """<a href='"http://foo.com/'></a><a> and blah and blah</a>""")
class YoureSoLiteral(SoupTest):
    "Test literal mode."
    def testLiteralMode(self):
        # Everything inside <script> is kept verbatim, even a bare '<'.
        markup = "<script>if (i<imgs.length)</script><b>Foo</b>"
        soup = BeautifulSoup(markup)
        self.assertEqual(soup.script.contents[0], "if (i<imgs.length)")
        self.assertEqual(soup.b.contents[0], "Foo")
    def testTextArea(self):
        # <textarea> contents are likewise treated as literal text.
        markup = "<textarea><b>This is an example of an HTML tag</b><&<&</textarea>"
        soup = BeautifulSoup(markup)
        self.assertEqual(soup.textarea.contents[0],
                         "<b>This is an example of an HTML tag</b><&<&")
class OperatorOverload(SoupTest):
    "Our operators do it all! Call now!"
    def testTagNameAsFind(self):
        "Tests that referencing a tag name as a member delegates to find()."
        soup = BeautifulSoup('<b id="1">foo<i>bar</i></b><b>Red herring</b>')
        first_bold = soup.find('b')
        self.assertEqual(soup.b.i, first_bold.find('i'))
        self.assertEqual(soup.b.i.string, 'bar')
        self.assertEqual(soup.b['id'], '1')
        self.assertEqual(soup.b.contents[0], 'foo')
        # A tag that isn't in the document comes back as a false value.
        self.assert_(not soup.a)
        # The .fooTag spelling avoids clashes with real attributes.
        self.assertEqual(soup.bTag.iTag.string, 'bar')
        self.assertEqual(soup.b.iTag.string, 'bar')
        self.assertEqual(soup.find('b').find('i'), soup.bTag.iTag)
class NestableEgg(SoupTest):
    """Here we test tag nesting. TEST THE NEST, DUDE! X-TREME!"""
    def testParaInsideBlockquote(self):
        # A <p> closes an open <p>, but does nest inside <blockquote>.
        soup = BeautifulSoup('<blockquote><p><b>Foo</blockquote><p>Bar')
        self.assertEqual(soup.blockquote.p.b.string, 'Foo')
        self.assertEqual(soup.blockquote.b.string, 'Foo')
        self.assertEqual(soup.find('p', recursive=False).string, 'Bar')
    def testNestedTables(self):
        # A table inside a cell must not close the outer table.
        text = """<table id="1"><tr><td>Here's another table:
        <table id="2"><tr><td>Juicy text</td></tr></table></td></tr></table>"""
        soup = BeautifulSoup(text)
        self.assertEquals(soup.table.table.td.string, 'Juicy text')
        self.assertEquals(len(soup.findAll('table')), 2)
        self.assertEquals(len(soup.table.findAll('table')), 1)
        self.assertEquals(soup.find('table', {'id' : 2}).parent.parent.parent.name,
                          'table')
        text = "<table><tr><td><div><table>Foo</table></div></td></tr></table>"
        soup = BeautifulSoup(text)
        self.assertEquals(soup.table.tr.td.div.table.contents[0], "Foo")
        # thead/tbody/tfoot all remain direct children of the table.
        text = """<table><thead><tr>Foo</tr></thead><tbody><tr>Bar</tr></tbody>
        <tfoot><tr>Baz</tr></tfoot></table>"""
        soup = BeautifulSoup(text)
        self.assertEquals(soup.table.thead.tr.contents[0], "Foo")
    def testBadNestedTables(self):
        # Even a table opened directly inside <tr> nests rather than
        # closing the row.
        soup = BeautifulSoup("<table><tr><table><tr id='nested'>")
        self.assertEquals(soup.table.tr.table.tr['id'], 'nested')
class CleanupOnAisleFour(SoupTest):
    """Here we test cleanup of text that breaks SGMLParser or is just
    obnoxious."""
    def testSelfClosingtag(self):
        # XHTML-style self-closing tags render with a space: <br />.
        self.assertEqual(str(BeautifulSoup("Foo<br/>Bar").find('br')),
                         '<br />')
        self.assertSoupEquals('<p>test1<br/>test2</p>',
                              '<p>test1<br />test2</p>')
        # Unknown tags are only self-closing when declared as such.
        text = '<p>test1<selfclosing>test2'
        soup = BeautifulStoneSoup(text)
        self.assertEqual(str(soup),
                         '<p>test1<selfclosing>test2</selfclosing></p>')
        soup = BeautifulStoneSoup(text, selfClosingTags='selfclosing')
        self.assertEqual(str(soup),
                         '<p>test1<selfclosing />test2</p>')
    def testSelfClosingTagOrNot(self):
        # <link> is self-closing in HTML but an ordinary tag in XML.
        text = "<item><link>http://foo.com/</link></item>"
        self.assertEqual(BeautifulStoneSoup(text).renderContents(), text)
        self.assertEqual(BeautifulSoup(text).renderContents(),
                         '<item><link />http://foo.com/</item>')
    def testCData(self):
        # CDATA sections survive parsing and get their own node class.
        xml = "<root>foo<![CDATA[foobar]]>bar</root>"
        self.assertSoupEquals(xml, xml)
        r = re.compile("foo.*bar")
        soup = BeautifulSoup(xml)
        self.assertEquals(soup.find(text=r).string, "foobar")
        self.assertEquals(soup.find(text=r).__class__, CData)
    def testComments(self):
        # Comments likewise become Comment nodes.
        xml = "foo<!--foobar-->baz"
        self.assertSoupEquals(xml)
        r = re.compile("foo.*bar")
        soup = BeautifulSoup(xml)
        self.assertEquals(soup.find(text=r).string, "foobar")
        self.assertEquals(soup.find(text="foobar").__class__, Comment)
    def testDeclaration(self):
        # DOCTYPE declarations become Declaration nodes.
        xml = "foo<!DOCTYPE foobar>baz"
        self.assertSoupEquals(xml)
        r = re.compile(".*foo.*bar")
        soup = BeautifulSoup(xml)
        text = "DOCTYPE foobar"
        self.assertEquals(soup.find(text=r).string, text)
        self.assertEquals(soup.find(text=text).__class__, Declaration)
        # A namespaced DOCTYPE must not confuse the declaration parser.
        namespaced_doctype = ('<!DOCTYPE xsl:stylesheet SYSTEM "htmlent.dtd">'
                              '<html>foo</html>')
        soup = BeautifulSoup(namespaced_doctype)
        self.assertEquals(soup.contents[0],
                          'DOCTYPE xsl:stylesheet SYSTEM "htmlent.dtd"')
        self.assertEquals(soup.html.contents[0], 'foo')
    def testEntityConversions(self):
        # convertEntities selects which entity set gets translated to
        # characters: XML-only, full HTML, or XHTML.
        text = "<<sacré bleu!>>"
        soup = BeautifulStoneSoup(text)
        self.assertSoupEquals(text)
        xmlEnt = BeautifulStoneSoup.XML_ENTITIES
        htmlEnt = BeautifulStoneSoup.HTML_ENTITIES
        xhtmlEnt = BeautifulStoneSoup.XHTML_ENTITIES
        soup = BeautifulStoneSoup(text, convertEntities=xmlEnt)
        self.assertEquals(str(soup), "<<sacré bleu!>>")
        soup = BeautifulStoneSoup(text, convertEntities=xmlEnt)
        self.assertEquals(str(soup), "<<sacré bleu!>>")
        soup = BeautifulStoneSoup(text, convertEntities=htmlEnt)
        self.assertEquals(unicode(soup), u"<<sacr\xe9 bleu!>>")
        # Make sure the "XML", "HTML", and "XHTML" settings work.
        text = "<™'"
        soup = BeautifulStoneSoup(text, convertEntities=xmlEnt)
        self.assertEquals(unicode(soup), u"<™'")
        soup = BeautifulStoneSoup(text, convertEntities=htmlEnt)
        self.assertEquals(unicode(soup), u"<\u2122'")
        soup = BeautifulStoneSoup(text, convertEntities=xhtmlEnt)
        self.assertEquals(unicode(soup), u"<\u2122'")
        # A malformed numeric entity is passed through untouched.
        invalidEntity = "foo&#bar;baz"
        soup = BeautifulStoneSoup\
               (invalidEntity,
                convertEntities=htmlEnt)
        self.assertEquals(str(soup), invalidEntity)
    def testNonBreakingSpaces(self):
        soup = BeautifulSoup("<a> </a>",
                             convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
        self.assertEquals(unicode(soup), u"<a>\xa0\xa0</a>")
    def testWhitespaceInDeclaration(self):
        self.assertSoupEquals('<! DOCTYPE>', '<!DOCTYPE>')
    def testJunkInDeclaration(self):
        self.assertSoupEquals('<! Foo = -8>a', '<!Foo = -8>a')
    def testIncompleteDeclaration(self):
        self.assertSoupEquals('a<!b <p>c')
    def testEntityReplacement(self):
        self.assertSoupEquals('<b>hello there</b>')
    def testEntitiesInAttributeValues(self):
        self.assertSoupEquals('<x t="xñ">', '<x t="x\xc3\xb1"></x>')
        self.assertSoupEquals('<x t="xñ">', '<x t="x\xc3\xb1"></x>')
        soup = BeautifulSoup('<x t=">™">',
                             convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
        self.assertEquals(unicode(soup), u'<x t=">\u2122"></x>')
        # Entities in URLs are only converted when conversion is on.
        uri = "http://crummy.com?sacré&bleu"
        link = '<a href="%s"></a>' % uri
        soup = BeautifulSoup(link)
        self.assertEquals(unicode(soup), link)
        #self.assertEquals(unicode(soup.a['href']), uri)
        soup = BeautifulSoup(link, convertEntities=BeautifulSoup.HTML_ENTITIES)
        self.assertEquals(unicode(soup),
                          link.replace("é", u"\xe9"))
        uri = "http://crummy.com?sacré&bleu"
        link = '<a href="%s"></a>' % uri
        soup = BeautifulSoup(link, convertEntities=BeautifulSoup.HTML_ENTITIES)
        self.assertEquals(unicode(soup.a['href']),
                          uri.replace("é", u"\xe9"))
    def testNakedAmpersands(self):
        # A bare '&' is escaped; well-formed entities are left alone.
        html = {'convertEntities':BeautifulStoneSoup.HTML_ENTITIES}
        soup = BeautifulStoneSoup("AT&T ", **html)
        self.assertEquals(str(soup), 'AT&T ')
        nakedAmpersandInASentence = "AT&T was Ma Bell"
        soup = BeautifulStoneSoup(nakedAmpersandInASentence,**html)
        self.assertEquals(str(soup), \
                          nakedAmpersandInASentence.replace('&','&'))
        invalidURL = '<a href="http://example.org?a=1&b=2;3">foo</a>'
        validURL = invalidURL.replace('&','&')
        soup = BeautifulStoneSoup(invalidURL)
        self.assertEquals(str(soup), validURL)
        soup = BeautifulStoneSoup(validURL)
        self.assertEquals(str(soup), validURL)
class EncodeRed(SoupTest):
    """Tests encoding conversion, Unicode conversion, and Microsoft
    smart quote fixes."""
    def testUnicodeDammitStandalone(self):
        # UnicodeDammit fixes windows-1252 gremlins and can be given a
        # list of candidate encodings to try.
        markup = "<foo>\x92</foo>"
        dammit = UnicodeDammit(markup)
        self.assertEquals(dammit.unicode, "<foo>’</foo>")
        hebrew = "\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
        self.assertEquals(dammit.unicode, u'\u05dd\u05d5\u05dc\u05e9')
        self.assertEquals(dammit.originalEncoding, 'iso-8859-8')
    def testGarbageInGarbageOut(self):
        # ASCII in => ASCII out; unicode in => unicode out; utf-8 in
        # (with a hint) => utf-8 out, with originalEncoding recorded.
        ascii = "<foo>a</foo>"
        asciiSoup = BeautifulStoneSoup(ascii)
        self.assertEquals(ascii, str(asciiSoup))
        unicodeData = u"<foo>\u00FC</foo>"
        utf8 = unicodeData.encode("utf-8")
        self.assertEquals(utf8, '<foo>\xc3\xbc</foo>')
        unicodeSoup = BeautifulStoneSoup(unicodeData)
        self.assertEquals(unicodeData, unicode(unicodeSoup))
        self.assertEquals(unicode(unicodeSoup.foo.string), u'\u00FC')
        utf8Soup = BeautifulStoneSoup(utf8, fromEncoding='utf-8')
        self.assertEquals(utf8, str(utf8Soup))
        self.assertEquals(utf8Soup.originalEncoding, "utf-8")
        # Unicode input has no original encoding to report.
        utf8Soup = BeautifulStoneSoup(unicodeData)
        self.assertEquals(utf8, str(utf8Soup))
        self.assertEquals(utf8Soup.originalEncoding, None)
    def testHandleInvalidCodec(self):
        # A bogus fromEncoding falls back to detection (utf-8 here).
        for bad_encoding in ['.utf8', '...', 'utF---16.!']:
            soup = BeautifulSoup("Räksmörgås", fromEncoding=bad_encoding)
            self.assertEquals(soup.originalEncoding, 'utf-8')
    def testUnicodeSearch(self):
        # Searching with a unicode string works on a unicode tree.
        html = u'<html><body><h1>Räksmörgås</h1></body></html>'
        soup = BeautifulSoup(html)
        self.assertEqual(soup.find(text=u'Räksmörgås'),u'Räksmörgås')
    def testRewrittenXMLHeader(self):
        # The XML declaration's encoding attribute is rewritten to
        # match the converted (utf-8) output.
        euc_jp = '<?xml version="1.0 encoding="euc-jp"?>\n<foo>\n\xa4\xb3\xa4\xec\xa4\xcfEUC-JP\xa4\xc7\xa5\xb3\xa1\xbc\xa5\xc7\xa5\xa3\xa5\xf3\xa5\xb0\xa4\xb5\xa4\xec\xa4\xbf\xc6\xfc\xcb\xdc\xb8\xec\xa4\xce\xa5\xd5\xa5\xa1\xa5\xa4\xa5\xeb\xa4\xc7\xa4\xb9\xa1\xa3\n</foo>\n'
        utf8 = "<?xml version='1.0' encoding='utf-8'?>\n<foo>\n\xe3\x81\x93\xe3\x82\x8c\xe3\x81\xafEUC-JP\xe3\x81\xa7\xe3\x82\xb3\xe3\x83\xbc\xe3\x83\x87\xe3\x82\xa3\xe3\x83\xb3\xe3\x82\xb0\xe3\x81\x95\xe3\x82\x8c\xe3\x81\x9f\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe3\x81\xae\xe3\x83\x95\xe3\x82\xa1\xe3\x82\xa4\xe3\x83\xab\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\n</foo>\n"
        soup = BeautifulStoneSoup(euc_jp)
        if soup.originalEncoding != "euc-jp":
            raise Exception("Test failed when parsing euc-jp document. "
                            "If you're running Python >=2.4, or you have "
                            "cjkcodecs installed, this is a real problem. "
                            "Otherwise, ignore it.")
        self.assertEquals(soup.originalEncoding, "euc-jp")
        self.assertEquals(str(soup), utf8)
        old_text = "<?xml encoding='windows-1252'><foo>\x92</foo>"
        new_text = "<?xml version='1.0' encoding='utf-8'?><foo>’</foo>"
        self.assertSoupEquals(old_text, new_text)
    def testRewrittenMetaTag(self):
        # The charset in a <meta> content-type is likewise rewritten.
        no_shift_jis_html = '''<html><head>\n<meta http-equiv="Content-language" content="ja" /></head><body><pre>\n\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B\n</pre></body></html>'''
        soup = BeautifulSoup(no_shift_jis_html)
        # Beautiful Soup used to try to rewrite the meta tag even if the
        # meta tag got filtered out by the strainer. This test makes
        # sure that doesn't happen.
        strainer = SoupStrainer('pre')
        soup = BeautifulSoup(no_shift_jis_html, parseOnlyThese=strainer)
        self.assertEquals(soup.contents[0].name, 'pre')
        meta_tag = ('<meta content="text/html; charset=x-sjis" '
                    'http-equiv="Content-type" />')
        shift_jis_html = (
            '<html><head>\n%s\n'
            '<meta http-equiv="Content-language" content="ja" />'
            '</head><body><pre>\n'
            '\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
            '\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
            '\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B\n'
            '</pre></body></html>') % meta_tag
        soup = BeautifulSoup(shift_jis_html)
        if soup.originalEncoding != "shift-jis":
            raise Exception("Test failed when parsing shift-jis document "
                            "with meta tag '%s'."
                            "If you're running Python >=2.4, or you have "
                            "cjkcodecs installed, this is a real problem. "
                            "Otherwise, ignore it." % meta_tag)
        self.assertEquals(soup.originalEncoding, "shift-jis")
        # The stored attribute keeps a substitution placeholder...
        content_type_tag = soup.meta['content']
        self.assertEquals(content_type_tag[content_type_tag.find('charset='):],
                          'charset=%SOUP-ENCODING%')
        # ...which renders as whatever encoding is requested.
        content_type = str(soup.meta)
        index = content_type.find('charset=')
        self.assertEqual(content_type[index:index+len('charset=utf8')+1],
                         'charset=utf-8')
        content_type = soup.meta.__str__('shift-jis')
        index = content_type.find('charset=')
        self.assertEqual(content_type[index:index+len('charset=shift-jis')],
                         'charset=shift-jis')
        self.assertEquals(str(soup), (
            '<html><head>\n'
            '<meta content="text/html; charset=utf-8" '
            'http-equiv="Content-type" />\n'
            '<meta http-equiv="Content-language" content="ja" />'
            '</head><body><pre>\n'
            '\xe3\x81\x93\xe3\x82\x8c\xe3\x81\xafShift-JIS\xe3\x81\xa7\xe3'
            '\x82\xb3\xe3\x83\xbc\xe3\x83\x87\xe3\x82\xa3\xe3\x83\xb3\xe3'
            '\x82\xb0\xe3\x81\x95\xe3\x82\x8c\xe3\x81\x9f\xe6\x97\xa5\xe6'
            '\x9c\xac\xe8\xaa\x9e\xe3\x81\xae\xe3\x83\x95\xe3\x82\xa1\xe3'
            '\x82\xa4\xe3\x83\xab\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\n'
            '</pre></body></html>'))
        self.assertEquals(soup.renderContents("shift-jis"),
                          shift_jis_html.replace('x-sjis', 'shift-jis'))
        isolatin ="""<html><meta http-equiv="Content-type" content="text/html; charset=ISO-Latin-1" />Sacr\xe9 bleu!</html>"""
        soup = BeautifulSoup(isolatin)
        self.assertSoupEquals(soup.__str__("utf-8"),
                              isolatin.replace("ISO-Latin-1", "utf-8").replace("\xe9", "\xc3\xa9"))
    def testHebrew(self):
        # iso-8859-8 input converts cleanly to utf-8 output.
        iso_8859_8= '<HEAD>\n<TITLE>Hebrew (ISO 8859-8) in Visual Directionality</TITLE>\n\n\n\n</HEAD>\n<BODY>\n<H1>Hebrew (ISO 8859-8) in Visual Directionality</H1>\n\xed\xe5\xec\xf9\n</BODY>\n'
        utf8 = '<head>\n<title>Hebrew (ISO 8859-8) in Visual Directionality</title>\n</head>\n<body>\n<h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\n\xd7\x9d\xd7\x95\xd7\x9c\xd7\xa9\n</body>\n'
        soup = BeautifulStoneSoup(iso_8859_8, fromEncoding="iso-8859-8")
        self.assertEquals(str(soup), utf8)
    def testSmartQuotesNotSoSmartAnymore(self):
        # Windows smart quotes become entities by default.
        self.assertSoupEquals("\x91Foo\x92 <!--blah-->",
                              '‘Foo’ <!--blah-->')
    def testDontConvertSmartQuotesWhenAlsoConvertingEntities(self):
        # With entity conversion on, smart quotes become characters.
        smartQuotes = "Il a dit, \x8BSacré bleu!\x9b"
        soup = BeautifulSoup(smartQuotes)
        self.assertEquals(str(soup),
                          'Il a dit, ‹Sacré bleu!›')
        soup = BeautifulSoup(smartQuotes, convertEntities="html")
        self.assertEquals(str(soup),
                          'Il a dit, \xe2\x80\xb9Sacr\xc3\xa9 bleu!\xe2\x80\xba')
    def testDontSeeSmartQuotesWhereThereAreNone(self):
        # Legitimate utf-8 bytes must not be mistaken for smart quotes.
        utf_8 = "\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        self.assertSoupEquals(utf_8)
class Whitewash(SoupTest):
    """Test whitespace preservation."""
    def testPreservedWhitespace(self):
        # Whitespace inside <pre> must survive parsing untouched.
        self.assertSoupEquals("<pre> </pre>")
        self.assertSoupEquals("<pre> woo </pre>")
    def testCollapsedWhitespace(self):
        # Outside <pre>, runs of whitespace may be collapsed.
        self.assertSoupEquals("<p> </p>", "<p> </p>")
# Run the whole suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.2.0"
__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
__license__ = "New-style BSD"
# Python 2 only: sgmllib/markupbase/htmlentitydefs were all removed in
# Python 3.
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
    from htmlentitydefs import name2codepoint
except ImportError:
    # Fall back to an empty table; HTML entity conversion then simply
    # finds no named entities.
    name2codepoint = {}
try:
    set
except NameError:
    # Python < 2.4: 'set' is not a builtin yet.
    from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
# Byte encoding used whenever a string representation is requested
# without an explicit encoding.
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
# First, the classes that represent markup elements.
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""
    # Every element carries five links: parent, next/previous (document
    # order), and nextSibling/previousSibling (same parent only).
    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self
    def replaceWith(self, replaceWith):
        # Swap this element out of the tree and insert replaceWith at
        # the same position under the same parent.
        oldParent = self.parent
        myIndex = self.parent.index(self)
        if hasattr(replaceWith, "parent")\
              and replaceWith.parent is self.parent:
            # We're replacing this element with one of its siblings.
            index = replaceWith.parent.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)
    def replaceWithChildren(self):
        # Unwrap: replace this element with its own children, in order.
        myParent = self.parent
        myIndex = self.parent.index(self)
        self.extract()
        reversedChildren = list(self.contents)
        reversedChildren.reverse()
        # Inserting each child at the same fixed index, in reverse,
        # restores the children's original order.
        for child in reversedChildren:
            myParent.insert(myIndex, child)
    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                del self.parent.contents[self.parent.index(self)]
            except ValueError:
                pass
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next
        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None
        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self
    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild
    def insert(self, position, newChild):
        # Insert newChild into self.contents at 'position' and rewire
        # all next/previous/sibling pointers accordingly. Plain strings
        # are promoted to NavigableString first.
        if isinstance(newChild, basestring) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)
        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent is self:
                index = self.index(newChild)
                if index > position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()
        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild
        newChildsLastElement = newChild._lastRecursiveChild()
        if position >= len(self.contents):
            newChild.nextSibling = None
            # Walk up the ancestors to find the element that follows
            # newChild in document order, if any.
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild
        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)
    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)
    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)
    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)
    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x
    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)
    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r
    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x
    #These methods do the real heavy lifting.
    def _findOne(self, method, name, attrs, text, **kwargs):
        # Delegate to a findAll-style method with limit=1 and unwrap
        # the single-element result list.
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        # (Possibly) special case some findAll*(...) searches
        elif text is None and not limit and not attrs and not kwargs:
            # findAll*(True)
            if name is True:
                return [element for element in generator()
                        if isinstance(element, Tag)]
            # findAll*('tag-name')
            elif isinstance(name, basestring):
                return [element for element in generator()
                        if isinstance(element, Tag) and
                        element.name == name]
            else:
                strainer = SoupStrainer(name, attrs, text, **kwargs)
        # Build a SoupStrainer
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                i = g.next()
            except StopIteration:
                break
            # The generators below yield a trailing None when the chain
            # runs out; skip it here.
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results
    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    def nextGenerator(self):
        i = self
        while i is not None:
            i = i.next
            yield i
    def nextSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.nextSibling
            yield i
    def previousGenerator(self):
        i = self
        while i is not None:
            i = i.previous
            yield i
    def previousSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.previousSibling
            yield i
    def parentGenerator(self):
        i = self
        while i is not None:
            i = i.parent
            yield i
    # Utility methods
    def substituteEncoding(self, str, encoding=None):
        # Replace the %SOUP-ENCODING% placeholder with a concrete
        # encoding name (defaulting to utf-8).
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)
    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode.
        ."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            # Not a string at all: stringify first, then recurse.
            if encoding:
                s = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    # A text node: a unicode string that also carries PageElement's
    # tree-navigation links.
    def __new__(cls, value):
        """Create a new NavigableString.
        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
    def __getnewargs__(self):
        # Pickle support: reconstruct from the encoded byte string.
        return (NavigableString.__str__(self),)
    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
    def __unicode__(self):
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # With an encoding, return an encoded byte string; with
        # encoding=None, return the unicode object itself.
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    """A CDATA section; renders its text wrapped in <![CDATA[...]]>."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        body = NavigableString.__str__(self, encoding)
        return "<![CDATA[" + body + "]]>"
class ProcessingInstruction(NavigableString):
    """A processing instruction; renders its text wrapped in <?...?>."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = self
        # Substitute the real document encoding for the placeholder,
        # e.g. in an XML declaration.
        if "%SOUP-ENCODING%" in text:
            text = self.substituteEncoding(text, encoding)
        return "<?" + self.toEncoding(text, encoding) + "?>"
class Comment(NavigableString):
    """A comment; renders its text wrapped in <!--...-->."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<!--" + NavigableString.__str__(self, encoding) + "-->"
class Declaration(NavigableString):
    """A declaration (e.g. a DOCTYPE); renders its text wrapped in <!...>."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<!" + NavigableString.__str__(self, encoding) + ">"
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""
    def _invert(h):
        "Cheap function to invert a hash."
        # Deliberately not a method: it is called once at class-body
        # execution time (below) to build the reverse mapping.
        i = {}
        for k,v in h.items():
            i[v] = k
        return i
    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
                                      "quot" : '"',
                                      "amp" : "&",
                                      "lt" : "<",
                                      "gt" : ">" }
    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
    def _convertEntities(self, match):
        """Used in a call to re.sub to replace HTML, XML, and numeric
        entities with the appropriate Unicode characters. If HTML
        entities are being converted, any unrecognized entities are
        escaped."""
        x = match.group(1)
        if self.convertHTMLEntities and x in name2codepoint:
            return unichr(name2codepoint[x])
        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
            if self.convertXMLEntities:
                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
            else:
                return u'&%s;' % x
        elif len(x) > 0 and x[0] == '#':
            # Handle numeric entities
            if len(x) > 1 and x[1] == 'x':
                return unichr(int(x[2:], 16))
            else:
                return unichr(int(x[1:]))
        elif self.escapeUnrecognizedEntities:
            # NOTE(review): this branch and the one below are identical
            # here; upstream Beautiful Soup 3.2.0 escapes the ampersand
            # in this branch (u'&amp;%s;'), so this looks like a
            # transcription loss -- verify against upstream.
            return u'&%s;' % x
        else:
            return u'&%s;' % x
    def __init__(self, parser, name, attrs=None, parent=None,
                 previous=None):
        "Basic constructor."
        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected
        self.parserClass = parser.__class__
        self.isSelfClosing = parser.isSelfClosingTag(name)
        self.name = name
        if attrs is None:
            attrs = []
        elif isinstance(attrs, dict):
            attrs = attrs.items()
        # Attributes are kept as a list of (key, value) pairs so that
        # duplicates and ordering survive; attrMap is built lazily.
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        self.containsSubstitutions = False
        self.convertHTMLEntities = parser.convertHTMLEntities
        self.convertXMLEntities = parser.convertXMLEntities
        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
        # Convert any HTML, XML, or numeric entities in the attribute values.
        convert = lambda(k, val): (k,
                                   re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
                                          self._convertEntities,
                                          val))
        self.attrs = map(convert, self.attrs)
    def getString(self):
        # Only defined when this tag contains exactly one text node;
        # otherwise the property returns None implicitly.
        if (len(self.contents) == 1
            and isinstance(self.contents[0], NavigableString)):
            return self.contents[0]
    def setString(self, string):
        """Replace the contents of the tag with a string"""
        self.clear()
        self.append(string)
    string = property(getString, setString)
    def getText(self, separator=u""):
        # Concatenate every text node in this subtree, stripped, joined
        # by 'separator'.
        if not len(self.contents):
            return u""
        stopNode = self._lastRecursiveChild().next
        strings = []
        current = self.contents[0]
        while current is not stopNode:
            if isinstance(current, NavigableString):
                strings.append(current.strip())
            current = current.next
        return separator.join(strings)
    text = property(getText)
    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self._getAttrMap().get(key, default)
    def clear(self):
        """Extract all children."""
        # Iterate over a copy: extract() mutates self.contents.
        for child in self.contents[:]:
            child.extract()
    def index(self, element):
        # Identity-based (is, not ==) index into self.contents.
        for i, child in enumerate(self.contents):
            if child is element:
                return i
        raise ValueError("Tag.index: element not in tag")
    def has_key(self, key):
        return self._getAttrMap().has_key(key)
    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self._getAttrMap()[key]
    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)
    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)
    def __contains__(self, x):
        return x in self.contents
    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True
    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        # Keep both representations (attrs list and attrMap) in sync.
        self._getAttrMap()
        self.attrMap[key] = value
        found = False
        for i in range(0, len(self.attrs)):
            if self.attrs[i][0] == key:
                self.attrs[i] = (key, value)
                found = True
        if not found:
            self.attrs.append((key, value))
            self._getAttrMap()[key] = value
    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        for item in self.attrs:
            if item[0] == key:
                self.attrs.remove(item)
                #We don't break because bad HTML can define the same
                #attribute multiple times.
            self._getAttrMap()
            if self.attrMap.has_key(key):
                del self.attrMap[key]
    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        findAll() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return apply(self.findAll, args, kwargs)
    def __getattr__(self, tag):
        #print "Getattr %s.%s" % (self.__class__, tag)
        # tag.fooTag is a find('foo'); tag.foo is a deprecated alias.
        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
            return self.find(tag[:-3])
        elif tag.find('__') != 0:
            return self.find(tag)
        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
    def __eq__(self, other):
        """Returns true iff this tag has the same name, the same attributes,
        and the same contents (recursively) as the given tag.
        NOTE: right now this will return false if two tags have the
        same attributes in a different order. Should this be fixed?"""
        if other is self:
            return True
        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
            return False
        for i in range(0, len(self.contents)):
            if self.contents[i] != other.contents[i]:
                return False
        return True
    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other
    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.__str__(encoding)
    def __unicode__(self):
        return self.__str__(None)
    # Matches angle brackets, or ampersands that are not already part
    # of a numeric or named entity reference.
    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           + ")")
    def _sub_entity(self, x):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.
        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""
        encodedName = self.toEncoding(self.name, encoding)
        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isinstance(val, basestring):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)
                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            val = val.replace("'", "&squot;")
                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName
        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)
        if self.hidden:
            # Hidden tags (e.g. the root [document] tag) render only
            # their contents.
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s
    def decompose(self):
        """Recursively destroys the contents of this tree."""
        self.extract()
        if len(self.contents) == 0:
            return
        current = self.contents[0]
        while current is not None:
            next = current.next
            if isinstance(current, Tag):
                del current.contents[:]
            # Break every link so the nodes can be garbage-collected.
            current.parent = None
            current.previous = None
            current.previousSibling = None
            current.next = None
            current.nextSibling = None
            current = next
    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return self.__str__(encoding, True)
    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string.."""
        s=[]
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.__str__(encoding)
            elif isinstance(c, Tag):
                s.append(c.__str__(encoding, prettyPrint, indentLevel))
            if text and prettyPrint:
                text = text.strip()
            if text:
                if prettyPrint:
                    s.append(" " * (indentLevel-1))
                s.append(text)
                if prettyPrint:
                    s.append("\n")
        return ''.join(s)
    #Soup methods
    def find(self, name=None, attrs={}, recursive=True, text=None,
             **kwargs):
        """Return only the first child of this Tag matching the given
        criteria."""
        r = None
        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findChild = find
    def findAll(self, name=None, attrs={}, recursive=True, text=None,
                limit=None, **kwargs):
        """Extracts a list of Tag objects that match the given
        criteria. You can specify the name of the Tag and any
        attributes you want the Tag to have.
        The value of a key-value pair in the 'attrs' map can be a
        string, a list of strings, a regular expression object, or a
        callable that takes a string and returns whether or not the
        string matches for some custom definition of 'matches'. The
        same is true of the tag name."""
        generator = self.recursiveChildGenerator
        if not recursive:
            generator = self.childGenerator
        return self._findAll(name, attrs, text, limit, generator, **kwargs)
    findChildren = findAll
    # Pre-3.x compatibility methods
    first = find
    fetch = findAll
    def fetchText(self, text=None, recursive=True, limit=None):
        return self.findAll(text=text, recursive=recursive, limit=limit)
    def firstText(self, text=None, recursive=True):
        return self.find(text=text, recursive=recursive)
    #Private methods
    def _getAttrMap(self):
        """Initializes a map representation of this tag's attributes,
        if not already initialized."""
        # getattr without a default works here because __getattr__
        # above falls back to self.find('attrMap'), which returns a
        # falsy None when the attribute was never set.
        if not getattr(self, 'attrMap'):
            self.attrMap = {}
            for (key, value) in self.attrs:
                self.attrMap[key] = value
        return self.attrMap
    #Generator methods
    def childGenerator(self):
        # Just use the iterator from the contents
        return iter(self.contents)
    def recursiveChildGenerator(self):
        # NOTE(review): 'raise StopIteration' inside a generator is a
        # Python 2 idiom; under PEP 479 (Python 3.7+) it would become a
        # RuntimeError.
        if not len(self.contents):
            raise StopIteration
        stopNode = self._lastRecursiveChild().next
        current = self.contents[0]
        while current is not stopNode:
            yield current
            current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""
    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        if isinstance(attrs, basestring):
            # A bare string for attrs is shorthand for matching the
            # CSS class attribute.
            kwargs['class'] = _match_css_class(attrs)
            attrs = None
        if kwargs:
            if attrs:
                # Copy before updating so the caller's dict (and the
                # shared {} default) are never mutated.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text
    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)
    def searchTag(self, markupName=None, markupAttrs={}):
        # markupName may be a plain tag name plus an attrs map, or an
        # actual Tag object (in which case the Tag supplies both).
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        callFunctionWithTagData = callable(self.name) \
                                and not isinstance(markupName, Tag)
        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        # Build (or borrow) a dict view of the tag's
                        # attributes, lazily, on the first criterion.
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found
    def search(self, markup):
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, "__iter__") \
                and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found
    def _matches(self, markup, matchAgainst):
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst is True:
            result = markup is not None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isinstance(markup, basestring):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif hasattr(matchAgainst, '__iter__'): # list-like
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): dicts also have __iter__ and are caught
                # by the branch above, so this branch appears
                # unreachable for real dicts -- verify before relying
                # on it.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isinstance(markup, basestring):
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)
            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        """Create an empty ResultSet.

        :param source: the SoupStrainer that produced this result set;
            kept so callers can inspect what query built the list.
        """
        # Bug fix: the original called list.__init__([]), which
        # initialized a throwaway list literal instead of this
        # instance. Initialize self as an empty list instead.
        list.__init__(self)
        self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    mapping = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # A partial map: merge its entries verbatim.
            mapping.update(portion)
        elif hasattr(portion, '__iter__'): # is a list
            # A list: every item maps to the shared default.
            for key in portion:
                mapping[key] = default
        else:
            # A scalar: map it to the default.
            mapping[portion] = default
    return mapping
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.
        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.
        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.
        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:
        <br/> (No space between name of closing tag and tag close)
        <! --Comment--> (Extraneous whitespace in declaration)
        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False
        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)
        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed(isHTML=isHTML)
        except StopParsing:
            # Parsing stopped early on purpose (e.g. parseOnlyThese was
            # satisfied); whatever was parsed so far is kept.
            pass
        self.markup = None                 # The markup can now be GCed
    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser."""
        try:
            n = int(name)
        except ValueError:
            # Not a numeric reference; leave it for other handlers.
            return
        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)
    def _feed(self, inDocumentEncoding=None, isHTML=False):
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            # Byte input: let UnicodeDammit guess/convert the encoding.
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                # A bare True means "use the class default massage
                # rules"; anything iterable is used as-is.
                if not hasattr(self.markupMassage, "__iter__"):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()
        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()
    def __getattr__(self, methodName):
        """This method routes method call requests to either the SGMLParser
        superclass or the Tag superclass, depending on the method name."""
        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)
        if methodName.startswith('start_') or methodName.startswith('end_') \
               or methodName.startswith('do_'):
            # Tag-event handlers (start_*/end_*/do_*) belong to the
            # SGML parsing machinery.
            return SGMLParser.__getattr__(self, methodName)
        elif not methodName.startswith('__'):
            # Anything else (e.g. soup.bodyTag) is tree navigation,
            # handled by Tag.
            return Tag.__getattr__(self, methodName)
        else:
            # Never fabricate dunder attributes; doing so confuses
            # copy/pickle protocols.
            raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
    def reset(self):
        """Reset parser state; the soup object itself becomes the root
        of the tag stack."""
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
    def endData(self, containerClass=NavigableString):
        """Flush accumulated character data into the tree as a
        containerClass node, collapsing pure-whitespace runs."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            # Collapse whitespace-only text to a single '\n' or ' ',
            # unless we are inside a whitespace-preserving tag (pre,
            # textarea).
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            # With a SoupStrainer active, drop top-level text that it
            # doesn't match.
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)
    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The root tag is never popped.
            return
        numPops = 0
        mostRecentTag = None
        # Scan from the top of the stack downwards; index 0 (the root)
        # is deliberately excluded from the search.
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1
        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag
    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.
        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        # Walk the open-tag stack from the top down (excluding the root).
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurrence.
                popTo = name
                break
            if (nestingResetTriggers is not None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers is None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):
                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            # NOTE(review): this reassignment is overwritten at the top
            # of the next iteration, so it has no effect.
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)
    def unknown_starttag(self, name, attrs, selfClosing=0):
        """Generic start-tag handler: builds a Tag node, applying the
        smart-popping nesting rules and the SoupStrainer filter."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()
        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)
        # With a SoupStrainer active, skip top-level tags it rejects.
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return
        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            # Inside script/textarea etc., markup is treated as text.
            self.quoteStack.append(name)
            self.literal = 1
        return tag
    def unknown_endtag(self, name):
        """Generic end-tag handler: closes the matching open tag and
        maintains the literal-text quote stack."""
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)
    def handle_data(self, data):
        # Buffer character data; it is flushed into the tree by endData().
        self.currentData.append(data)
    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        # Flush pending text first, then flush *text* on its own so the
        # subclass node contains exactly this text.
        self.endData()
        self.handle_data(text)
        self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        # Try the HTML entity table first, then the five XML entities.
        if self.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass
        if not data and self.convertXMLEntities:
            data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
        if not data and self.convertHTMLEntities and \
            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
            # TODO: We've got a problem here. We're told this is
            # an entity reference, but it's not an XML entity
            # reference or an HTML entity reference. Nonetheless,
            # the logical thing to do is to pass it through as an
            # unrecognized entity reference.
            #
            # Except: when the input is "&carol;" this function
            # will be called with input "carol". When the input is
            # "AT&T", this function will be called with input
            # "T". We have no way of knowing whether a semicolon
            # was present originally, so we don't know whether
            # this is an unknown entity or just a misplaced
            # ampersand.
            #
            # The more common case is a misplaced ampersand, so I
            # escape the ampersand and omit the trailing semicolon.
            data = "&%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)
    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)
    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # Unterminated CDATA section: take everything to EOF.
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = SGMLParser.parse_declaration(self, i)
            except SGMLParseError:
                # Malformed declaration: swallow the rest of the input
                # as plain text rather than dying.
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulSoup(BeautifulStoneSoup):

    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurrence of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurrence
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
        but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # HTML documents get smart quotes converted to HTML entities
        # by default (XML output would use numeric references).
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ('br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base', 'col'))

    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center')

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        # Pull out http-equiv and content attributes, remembering where
        # the content attribute lives so it can be rewritten in place.
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        # StopParsing aborts the current (wrongly
                        # decoded) pass; _feed above already re-parsed.
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised internally to abort a parse pass that is going to be
    restarted (e.g. after discovering the document's real encoding)."""
class ICantBelieveItsBeautifulSoup(BeautifulSoup):

    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-so-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # Inline/block tags that, in this dialect, ARE allowed to nest
    # inside themselves.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big')

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)

    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # NOTE(review): other call sites pass a default value as the first
    # argument to buildTagMap; here 'noscript' is passed alone — confirm
    # it is intended as the map contents and not as the default.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        # On every pop, if the closing tag holds exactly one string
        # child, mirror it onto the parent as an attribute — but never
        # overwrite an attribute the parent already has.
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    # Enterprise-friendly alias for BeautifulStoneSoup.
    pass
class RobustHTMLParser(BeautifulSoup):
    # Enterprise-friendly alias for BeautifulSoup.
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    # Enterprise-friendly alias for ICantBelieveItsBeautifulSoup.
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    # Enterprise-friendly alias for MinimalSoup.
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    # Enterprise-friendly alias for BeautifulSOAP.
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        # Try encodings in priority order: caller overrides, the
        # in-document declaration, the BOM-sniffed guess, chardet (if
        # installed), then utf-8 and windows-1252 as last resorts.
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, unicode):
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        u = None
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break

        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        if isinstance(sub, tuple):
            # Tuple entries are (html-entity-name, hex-codepoint).
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub

    def _convertFrom(self, proposed):
        # Attempt one decoding; returns the Unicode markup on success,
        # None on failure. Each codec is tried at most once.
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''

        # strip Byte Order Mark (if present)
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding."""
        xml_encoding = sniffed_xml_encoding = None
        try:
            # Sniff the byte-order mark / first-bytes pattern, and
            # re-encode the payload to UTF-8 where needed so the regex
            # search below can run on it.
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            xml_encoding_match = None
        # Look for an explicit declaration: <?xml ... encoding="..."?>
        # or, for HTML, a meta charset attribute.
        xml_encoding_match = re.compile(
            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
        if not xml_encoding_match and isHTML:
            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
            if sniffed_xml_encoding and \
               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                 'utf16', 'u16')):
                # The declaration names a BOM-dependent family; the
                # sniffed encoding is more specific, so prefer it.
                xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding


    def find_codec(self, charset):
        # Resolve a charset name to a Python codec name, trying the
        # alias table and common punctuation variants.
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        # Return charset if Python has a codec for it, else None.
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        # Translate EBCDIC bytes to ASCII using a lazily built,
        # class-level 256-byte translation table.
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # windows-1252 "smart" punctuation bytes mapped to
    # (html-entity-name, hex-codepoint) pairs, or a plain replacement
    # string where no entity exists.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    # Read an HTML document from standard input and pretty-print it.
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| Python |
from distutils.core import setup
import unittest
import warnings
warnings.filterwarnings("ignore", "Unknown distribution option")
import sys
# patch distutils if it can't cope with the "classifiers" keyword
if sys.version < '2.2.3':
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
from BeautifulSoup import __version__
#Make sure all the tests complete.
import BeautifulSoupTests
loader = unittest.TestLoader()
result = unittest.TestResult()
suite = loader.loadTestsFromModule(BeautifulSoupTests)
suite.run(result)
if not result.wasSuccessful():
print "Unit tests have failed!"
for l in result.errors, result.failures:
for case, error in l:
print "-" * 80
desc = case.shortDescription()
if desc:
print desc
print error
print '''If you see an error like: "'ascii' codec can't encode character...", see\nthe Beautiful Soup documentation:\n http://www.crummy.com/software/BeautifulSoup/documentation.html#Why%20can't%20Beautiful%20Soup%20print%20out%20the%20non-ASCII%20characters%20I%20gave%20it?'''
print "This might or might not be a problem depending on what you plan to do with\nBeautiful Soup."
if sys.argv[1] == 'sdist':
print
print "I'm not going to make a source distribution since the tests don't pass."
sys.exit(1)
setup(name="BeautifulSoup",
version=__version__,
py_modules=['BeautifulSoup', 'BeautifulSoupTests'],
description="HTML/XML parser for quick-turnaround applications like screen-scraping.",
author="Leonard Richardson",
author_email = "leonardr@segfault.org",
long_description="""Beautiful Soup parses arbitrarily invalid SGML and provides a variety of methods and Pythonic idioms for iterating and searching the parse tree.""",
classifiers=["Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Python Software Foundation License",
"Programming Language :: Python",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Text Processing :: Markup :: XML",
"Topic :: Text Processing :: Markup :: SGML",
"Topic :: Software Development :: Libraries :: Python Modules",
],
url="http://www.crummy.com/software/BeautifulSoup/",
license="BSD",
download_url="http://www.crummy.com/software/BeautifulSoup/download/"
)
# Send announce to:
# python-announce@python.org
# python-list@python.org
| Python |
from urllib import urlopen
import simplejson
from lxml import etree
# The google translate API can be found here:
# http://code.google.com/apis/ajaxlanguage/documentation/#Examples
def translate(text, sourceLang = 'en', targetLang = 'pt'):
    """Translate *text* from *sourceLang* to *targetLang* via the
    Google Translate v2 REST API.

    Returns the translated string, or '' on any failure (network
    error, malformed response, API/quota error)."""
    try:
        # Local import keeps the module-level import list unchanged.
        from urllib import quote
        # Bug fix: the format arguments previously referenced an
        # undefined name 'target' instead of 'targetLang'; the
        # resulting NameError was swallowed by the bare except below,
        # so every call silently returned ''. The query text is also
        # URL-encoded so spaces/punctuation don't malform the request.
        url = "https://www.googleapis.com/language/translate/v2?" + \
            "key=AIzaSyARZORpTevzPWt1XhsIkJUQ0PVZlK4cYTk&q=%s&source=%s&target=%s" % (quote(text), sourceLang, targetLang)
        search_results = urlopen(url)
        json = simplejson.loads(search_results.read())
        return json['data']['translations'][0]['translatedText']
    except:
        # Best-effort API: callers treat '' as "translation unavailable".
        return ''
def webPageRead(urlLink):
    """Download the document at *urlLink* and return its raw contents."""
    connection = urlopen(urlLink)
    contents = connection.read()
    connection.close()
    return contents
# Demo: dump Hamlet act I scene i from MIT's Shakespeare site to stdout.
print webPageRead('http://shakespeare.mit.edu/hamlet/hamlet.1.1.html')
| Python |
"""
pyText2Pdf - Python script to convert plain text files into Adobe
Acrobat PDF files.
Version 1.2
Author: Anand B Pillai <abpillai at lycos dot com>
Keywords: python, tools, converter, pdf, text2pdf, adobe, acrobat,
processing.
Copyright (C) 2003-2004 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
Commentary:
Modification History:
Mon Feb 17 12:20:13 2003 Changed option parsing algorithm to use
getopt. Use __main__ calling convention.
Bug in FF character fixed.
Thu Apr 10 11:26:58 2003 Modified to use python style strings
and function objects.
July 1 2003 Fixed help string errors. Added the
Creator property.
Feb 25 2004 Rewrote argument parser to remove
                                       duplicate code. Use string.join() instead
of concatenation. Modified sys.exit()
calls to print messages.
Code:
"""
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189858
import sys, os
import string
import time
import getopt
# Extra bytes counted per '\n' when tracking the output byte position
# (0: no CR/LF translation is assumed on the output stream).
LF_EXTRA=0
# Line terminator used in the xref table (octal 015 == carriage return).
LINE_END='\015'
# form feed character (^L)
FF=chr(12)
# PDF /Encoding dictionary fragment mapping character codes to ISOLatin1
# glyph names; appended to the font object when the -I option is active.
ENCODING_STR = """\
/Encoding <<
/Differences [ 0 /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /space /exclam
/quotedbl /numbersign /dollar /percent /ampersand
/quoteright /parenleft /parenright /asterisk /plus /comma
/hyphen /period /slash /zero /one /two /three /four /five
/six /seven /eight /nine /colon /semicolon /less /equal
/greater /question /at /A /B /C /D /E /F /G /H /I /J /K /L
/M /N /O /P /Q /R /S /T /U /V /W /X /Y /Z /bracketleft
/backslash /bracketright /asciicircum /underscore
/quoteleft /a /b /c /d /e /f /g /h /i /j /k /l /m /n /o /p
/q /r /s /t /u /v /w /x /y /z /braceleft /bar /braceright
/asciitilde /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/dotlessi /grave /acute /circumflex /tilde /macron /breve
/dotaccent /dieresis /.notdef /ring /cedilla /.notdef
/hungarumlaut /ogonek /caron /space /exclamdown /cent
/sterling /currency /yen /brokenbar /section /dieresis
/copyright /ordfeminine /guillemotleft /logicalnot /hyphen
/registered /macron /degree /plusminus /twosuperior
/threesuperior /acute /mu /paragraph /periodcentered
/cedilla /onesuperior /ordmasculine /guillemotright
/onequarter /onehalf /threequarters /questiondown /Agrave
/Aacute /Acircumflex /Atilde /Adieresis /Aring /AE
/Ccedilla /Egrave /Eacute /Ecircumflex /Edieresis /Igrave
/Iacute /Icircumflex /Idieresis /Eth /Ntilde /Ograve
/Oacute /Ocircumflex /Otilde /Odieresis /multiply /Oslash
/Ugrave /Uacute /Ucircumflex /Udieresis /Yacute /Thorn
/germandbls /agrave /aacute /acircumflex /atilde /adieresis
/aring /ae /ccedilla /egrave /eacute /ecircumflex
/edieresis /igrave /iacute /icircumflex /idieresis /eth
/ntilde /ograve /oacute /ocircumflex /otilde /odieresis
/divide /oslash /ugrave /uacute /ucircumflex /udieresis
/yacute /thorn /ydieresis ]
>>
"""
# Command-line help text; interpolated with {'progname': ...} in ShowHelp().
PROG_HELP = """\
%(progname)s [options] [filename]
%(progname)s makes a 7-bit clean PDF file from any input file.
It reads from a named file, and writes the PDF file to a file specified by
the user, otherwise to a file with '.pdf' appended to the input file.
Author: Anand B Pillai.
Copyright (C) 2003-2004 Free Software Foundation, http://www.fsf.org
There are various options as follows:
-h\t\tshow this message\n
-o/-O\t\tdirect output to this file
-f<font>\tuse PostScript <font> (must be in standard 14, default: Courier)
-I\t\tuse ISOLatin1Encoding
-s<size>\tuse font at given pointsize (default 10) points\n
-v<dist>\tuse given line spacing (default 12) points
-l<lines>\tlines per page (default 60, determined automatically\n\t\tif unspecified)
-c<chars>\tmaximum characters per line (default 80)
-t<spaces>\tspaces per tab character (default 4)
-F\t\tignore formfeed characters (^L)
\t\t(i.e, accept formfeed characters as pagebreaks)\n
-A4\t\tuse A4 paper (default Letter)
-A3\t\tuse A3 paper (default Letter)
-x<width>\tindependent paper width in points
-y<height>\tindependent paper height in points
-2\t\tformat in 2 columns
-L\t\tlandscape mode
Note that where one variable is implied by two options, the second option
takes precedence for that variable. (e.g. -A4 -y500)
In landscape mode, page width and height are simply swapped over before
formatting, no matter how or when they were defined.
"""
class pyText2Pdf:
    """Convert a plain text file into a minimal PDF 1.4 document.

    The writer emits PDF objects sequentially and records the absolute byte
    offset of every object in ``self._locations`` (via ``writestr``) so that
    a valid xref table can be written at the end.  Python 2 code.
    """
    def __init__(self, sourceFile):
        # version number
        self._version="1.1.1"
        # iso encoding flag
        self._IsoEnc=0
        # formfeeds flag
        self._doFFs=0
        self._progname="PyText2Pdf"
        self._appname = "".join((self._progname, " Version ", str(self._version)))
        # default font
        self._font="/Courier"
        # default font size
        self._ptSize=10
        # default vert space
        self._vertSpace=12
        self._lines=0
        # number of characters in a row
        self._cols=80
        self._columns=1
        # page ht
        self._pageHt=792
        # page wd
        self._pageWd=612
        # input file
        self._ifile=sourceFile
        # output file
        self._ofile=""
        # default tab width
        self._tab=4
        # input file descriptor
        self._ifs=None
        # output file descriptor
        self._ofs=None
        # landscape flag
        self._landscape=0
        # marker objects
        self._curobj = 5
        self._pageObs = [0]
        self._locations = [0,0,0,0,0,0]
        self._pageNo=0
        # file position marker
        self._fpos=0
    def argsCallBack(self, argslist, listoftuples=False):
        """ Callback function called by argument parser.
        Helps to remove duplicate code """
        x = 0
        while x<len(argslist):
            item = argslist[x]
            if listoftuples:
                o, a = item
            else:
                o = item
            if o == '-h':
                self.ShowHelp()
            elif o == '-I':
                self._IsoEnc=1
            elif o == '-F':
                self._doFFs=1
            elif o == '-2':
                self._columns=2
            elif o == '-L':
                self._landscape=1
            # NOTE(review): this gate lists 'y' instead of '-y' and omits
            # '-A' and '-t', although the elif chain below handles those
            # options and expects an argument in 'a' — looks like a latent
            # bug inherited from the original recipe; confirm before relying
            # on -y/-A/-t in the non-tuple (raw args) path.
            if o in ('-f', '-s', '-l', '-x', 'y', '-c', '-v', '-o', '-O'):
                if not listoftuples:
                    x += 1
                    try:
                        a = argslist[x]
                    except:
                        msg = "Argument error for option " + o
                        sys.exit(msg)
                if a == "" or a[0] == "-":
                    msg = "Error: argument error for option " + o
                    sys.exit(msg)
                elif o == '-f':
                    self._font='/' + a
                elif o == '-A':
                    if a == '3':
                        self._pageWd=842
                        self._pageHt=1190
                    elif a =='4':
                        self._pageWd=595
                        self._pageHt=842
                    else:
                        psz=o[1]+a
                        print self._progname, ': ignoring unknown paper size ', psz
                elif o == '-s':
                    self._ptSize=int(a)
                    if self._ptSize<1:
                        self._ptSize=1
                elif o == '-v':
                    self._vertSpace=int(a)
                    if self._vertSpace<1:
                        self._vertSpace=1
                elif o == '-l':
                    self._lines=int(a)
                    if self._lines<1:
                        self._lines=1
                elif o == '-c':
                    self._cols=int(a)
                    if self._cols<4:
                        self._cols=4
                elif o == '-t':
                    self._tab=int(a)
                    if self._tab<1:
                        self._tab=1
                elif o == '-x':
                    self._pageWd=int(a)
                    if self._pageWd<72:
                        self._pageWd=72
                elif o == '-y':
                    self._pageHt=int(a)
                    if self._pageHt<72:
                        self._pageHt=72
                elif o in ('-o', '-O'):
                    self._ofile=a
                else:
                    print self._progname, ': ignoring invalid switch: ', o
            x += 1
    def parseArgs(self):
        """Parse sys.argv with getopt and apply options via argsCallBack."""
        if len(sys.argv) == 1:
            self.ShowHelp()
        arguments=sys.argv[1:]
        optlist, args = getopt.getopt(arguments, 'hIF2Lf:A:s:v:l:c:t:x:y:o:')
        # input file is the first element in arg list
        # or last element in options list (in case of an error!)
        if len(args):
            # self._ifile=args[0]
            print 'hey its me'
        else:
            l=len(optlist)
            tup=optlist[l-1]
        # parse options list
        if len(optlist):
            self.argsCallBack( optlist, listoftuples=True )
        else:
            self.argsCallBack( args )
        if self._landscape:
            print 'Landscape option on...'
        if self._columns==2:
            print 'Printing in two columns...'
        if self._doFFs:
            print 'Ignoring form feed character...'
        if self._IsoEnc:
            print 'Using ISO Latin Encoding...'
        print 'Using font', self._font[1:], ' size =', self._ptSize
    def writestr(self, str):
        """ Write string to output file descriptor.
        All output operations go through this function.
        We keep the current file position also here"""
        # update current file position
        self._fpos += len(str)
        for x in range(0, len(str)):
            if str[x] == '\n':
                self._fpos += LF_EXTRA
        try:
            self._ofs.write(str)
        except IOError, e:
            print e
            return -1
        return 0
    def Convert(self):
        """ Perform the actual conversion """
        if self._landscape:
            # swap page width & height
            tmp = self._pageHt
            self._pageHt = self._pageWd
            self._pageWd = tmp
        if self._lines==0:
            # derive lines-per-page from the page height and line spacing
            self._lines = (self._pageHt - 72)/self._vertSpace
        if self._lines < 1:
            self._lines=1
        try:
            self._ifs=open(self._ifile)
        except IOError, (strerror, errno):
            print 'Error: Could not open file to read --->', self._ifile
            sys.exit(3)
        if self._ofile=="":
            self._ofile=self._ifile + '.pdf'
        try:
            self._ofs = open(self._ofile, 'wb')
        except IOError, (strerror, errno):
            print 'Error: Could not open file to write --->', self._ofile
            sys.exit(3)
        print 'Input file =>', self._ifile
        print 'Writing pdf file', self._ofile, '...'
        self.WriteHeader(self._ifile)
        self.WritePages()
        self.WriteRest()
        print 'Wrote file', self._ofile
        self._ifs.close()
        self._ofs.close()
        return 0
    def WriteHeader(self, title):
        """Write the PDF header"""
        ws = self.writestr
        t=time.localtime()
        timestr=str(time.strftime("D:%Y%m%d%H%M%S", t))
        ws("%PDF-1.4\n")
        # object 1: document info dictionary
        self._locations[1] = self._fpos
        ws("1 0 obj\n")
        ws("<<\n")
        buf = "".join(("/Creator (", self._appname, " By Anand B Pillai )\n"))
        ws(buf)
        buf = "".join(("/CreationDate (", timestr, ")\n"))
        ws(buf)
        buf = "".join(("/Producer (", self._appname, "(\\251 Free Software Foundation, 2004))\n"))
        ws(buf)
        if title:
            buf = "".join(("/Title (", title, ")\n"))
            ws(buf)
        ws(">>\n")
        ws("endobj\n")
        # object 2: catalog pointing at the page tree (object 3)
        self._locations[2] = self._fpos
        ws("2 0 obj\n")
        ws("<<\n")
        ws("/Type /Catalog\n")
        ws("/Pages 3 0 R\n")
        ws(">>\n")
        ws("endobj\n")
        # object 4: the single Type1 font
        self._locations[4] = self._fpos
        ws("4 0 obj\n")
        ws("<<\n")
        buf = "".join(("/BaseFont ", str(self._font), " /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font >>\n"))
        ws(buf)
        if self._IsoEnc:
            ws(ENCODING_STR)
        ws(">>\n")
        ws("endobj\n")
        # object 5: shared resource dictionary
        self._locations[5] = self._fpos
        ws("5 0 obj\n")
        ws("<<\n")
        ws(" /Font << /F1 4 0 R >>\n")
        ws(" /ProcSet [ /PDF /Text ]\n")
        ws(">>\n")
        ws("endobj\n")
    def StartPage(self):
        """ Start a page of data """
        ws = self.writestr
        self._pageNo += 1
        self._curobj += 1
        # record the byte offset of the page object for the xref table
        self._locations.append(self._fpos)
        self._locations[self._curobj]=self._fpos
        self._pageObs.append(self._curobj)
        self._pageObs[self._pageNo] = self._curobj
        buf = "".join((str(self._curobj), " 0 obj\n"))
        ws(buf)
        ws("<<\n")
        ws("/Type /Page\n")
        ws("/Parent 3 0 R\n")
        ws("/Resources 5 0 R\n")
        self._curobj += 1
        buf = "".join(("/Contents ", str(self._curobj), " 0 R\n"))
        ws(buf)
        ws(">>\n")
        ws("endobj\n")
        # content stream object; its /Length refers forward to the next object
        self._locations.append(self._fpos)
        self._locations[self._curobj] = self._fpos
        buf = "".join((str(self._curobj), " 0 obj\n"))
        ws(buf)
        ws("<<\n")
        buf = "".join(("/Length ", str(self._curobj + 1), " 0 R\n"))
        ws(buf)
        ws(">>\n")
        ws("stream\n")
        strmPos = self._fpos
        ws("BT\n");
        buf = "".join(("/F1 ", str(self._ptSize), " Tf\n"))
        ws(buf)
        buf = "".join(("1 0 0 1 50 ", str(self._pageHt - 40), " Tm\n"))
        ws(buf)
        buf = "".join((str(self._vertSpace), " TL\n"))
        ws(buf)
        return strmPos
    def EndPage(self, streamStart):
        """End a page of data """
        ws = self.writestr
        ws("ET\n")
        streamEnd = self._fpos
        ws("endstream\n")
        ws("endobj\n")
        # emit the deferred /Length object with the measured stream size
        self._curobj += 1
        self._locations.append(self._fpos)
        self._locations[self._curobj] = self._fpos
        buf = "".join((str(self._curobj), " 0 obj\n"))
        ws(buf)
        buf = "".join((str(streamEnd - streamStart), '\n'))
        ws(buf)
        ws('endobj\n')
    def WritePages(self):
        """Write pages as PDF"""
        ws = self.writestr
        beginstream=0
        lineNo, charNo=0,0
        ch, column=0,0
        padding,i=0,0
        atEOF=0
        while not atEOF:
            beginstream = self.StartPage()
            column=1
            while column <= self._columns:
                column += 1
                atFF=0
                atBOP=0
                lineNo=0
                while lineNo < self._lines and not atFF and not atEOF:
                    lineNo += 1
                    ws("(")
                    charNo=0
                    while charNo < self._cols:
                        charNo += 1
                        ch = self._ifs.read(1)
                        # stop the line at newline, EOF, or (when honoured) a form feed
                        cond = ((ch != '\n') and not(ch==FF and self._doFFs) and (ch != ''))
                        if not cond:
                            break
                        if ord(ch) >= 32 and ord(ch) <= 127:
                            # printable ASCII: escape PDF string delimiters
                            if ch == '(' or ch == ')' or ch == '\\':
                                ws("\\")
                            ws(ch)
                        else:
                            if ord(ch) == 9:
                                # expand tab to the next tab stop
                                padding =self._tab - ((charNo - 1) % self._tab)
                                for i in range(padding):
                                    ws(" ")
                                charNo += (padding -1)
                            else:
                                if ch != FF:
                                    # write \xxx form for dodgy character
                                    buf = "".join(('\\', ch))
                                    ws(buf)
                                else:
                                    # dont print anything for a FF
                                    charNo -= 1
                    ws(")'\n")
                    if ch == FF:
                        atFF=1
                    if lineNo == self._lines:
                        atBOP=1
                if atBOP:
                    # peek ahead to detect EOF / swallow a page-break FF
                    pos=0
                    ch = self._ifs.read(1)
                    pos= self._ifs.tell()
                    if ch == FF:
                        ch = self._ifs.read(1)
                        pos=self._ifs.tell()
                    # python's EOF signature
                    if ch == '':
                        atEOF=1
                    else:
                        # push position back by one char
                        self._ifs.seek(pos-1)
                elif atFF:
                    ch = self._ifs.read(1)
                    pos=self._ifs.tell()
                    if ch == '':
                        atEOF=1
                    else:
                        self._ifs.seek(pos-1)
                if column < self._columns:
                    # move the text cursor to the top of the second column
                    buf = "".join(("1 0 0 1 ",
                                   str((self._pageWd/2 + 25)),
                                   " ",
                                   str(self._pageHt - 40),
                                   " Tm\n"))
                    ws(buf)
            self.EndPage(beginstream)
    def WriteRest(self):
        """Finish the file"""
        ws = self.writestr
        # object 3: the page tree, now that all page object numbers are known
        self._locations[3] = self._fpos
        ws("3 0 obj\n")
        ws("<<\n")
        ws("/Type /Pages\n")
        buf = "".join(("/Count ", str(self._pageNo), "\n"))
        ws(buf)
        buf = "".join(("/MediaBox [ 0 0 ", str(self._pageWd), " ", str(self._pageHt), " ]\n"))
        ws(buf)
        ws("/Kids [ ")
        for i in range(1, self._pageNo+1):
            buf = "".join((str(self._pageObs[i]), " 0 R "))
            ws(buf)
        ws("]\n")
        ws(">>\n")
        ws("endobj\n")
        # cross-reference table built from the recorded byte offsets
        xref = self._fpos
        ws("xref\n")
        buf = "".join(("0 ", str((self._curobj) + 1), "\n"))
        ws(buf)
        buf = "".join(("0000000000 65535 f ", str(LINE_END)))
        ws(buf)
        for i in range(1, self._curobj + 1):
            val = self._locations[i]
            buf = "".join((string.zfill(str(val), 10), " 00000 n ", str(LINE_END)))
            ws(buf)
        ws("trailer\n")
        ws("<<\n")
        buf = "".join(("/Size ", str(self._curobj + 1), "\n"))
        ws(buf)
        ws("/Root 2 0 R\n")
        ws("/Info 1 0 R\n")
        ws(">>\n")
        ws("startxref\n")
        buf = "".join((str(xref), "\n"))
        ws(buf)
        ws("%%EOF\n")
    def ShowHelp(self):
        """Show help on this program"""
        sys.exit( PROG_HELP % {'progname': self._progname} )
def main(sourceFile):
    """Convert *sourceFile* to PDF using the converter's default settings.

    Command-line parsing (parseArgs) is intentionally not invoked here.
    """
    converter = pyText2Pdf(sourceFile)
    # converter.parseArgs()
    converter.Convert()
if __name__ == "__main__":
    # BUG FIX: main() was called with no argument although main(sourceFile)
    # requires one, so direct execution always died with a TypeError.
    if len(sys.argv) > 1:
        main(sys.argv[1])
    else:
        sys.exit("Usage: %s <textfile>" % sys.argv[0])
| Python |
"""
httpExists.py
A quick and dirty way to to check whether a web file is there.
Usage:
>>> from httpExists import *
>>> httpExists('http://www.python.org/')
1
>>> httpExists('http://www.python.org/PenguinOnTheTelly')
Status 404 Not Found : http://www.python.org/PenguinOnTheTelly
0
"""
import httplib
import urlparse
def httpExists(url):
host, path = urlparse.urlsplit(url)[1:3]
found = 0
try:
connection = httplib.HTTPConnection(host) ## Make HTTPConnection Object
connection.request("HEAD", path)
responseOb = connection.getresponse() ## Grab HTTPResponse Object
if responseOb.status == 200:
found = 1
else:
print "Status %d %s : %s" % (responseOb.status, responseOb.reason, url)
except Exception, e:
print e.__class__, e, url
return found
| Python |
# -*- coding: utf-8 -*-
import translator
import recipe
import checkURL
# Map of human-readable language names to Google Translate language codes.
langDict = {'AFRIKAANS' : 'af',
            'ALBANIAN' : 'sq',
            'AMHARIC' : 'am',
            'ARABIC' : 'ar',
            'ARMENIAN' : 'hy',
            'AZERBAIJANI' : 'az',
            'BASQUE' : 'eu',
            'BELARUSIAN' : 'be',
            'BENGALI' : 'bn',
            'BIHARI' : 'bh',
            'BRETON' : 'br',
            'BULGARIAN' : 'bg',
            'BURMESE' : 'my',
            'CATALAN' : 'ca',
            'CHEROKEE' : 'chr',
            'CHINESE' : 'zh',
            'CHINESE_SIMPLIFIED' : 'zh-CN',
            'CHINESE_TRADITIONAL' : 'zh-TW',
            'CORSICAN' : 'co',
            'CROATIAN' : 'hr',
            'CZECH' : 'cs',
            'DANISH' : 'da',
            'DHIVEHI' : 'dv',
            'DUTCH': 'nl',
            'ENGLISH' : 'en',
            'ESPERANTO' : 'eo',
            'ESTONIAN' : 'et',
            'FAROESE' : 'fo',
            'FILIPINO' : 'tl',
            'FINNISH' : 'fi',
            'FRENCH' : 'fr',
            'FRISIAN' : 'fy',
            'GALICIAN' : 'gl',
            'GEORGIAN' : 'ka',
            'GERMAN' : 'de',
            'GREEK' : 'el',
            'GUJARATI' : 'gu',
            'HAITIAN_CREOLE' : 'ht',
            'HEBREW' : 'iw',
            'HINDI' : 'hi',
            'HUNGARIAN' : 'hu',
            'ICELANDIC' : 'is',
            'INDONESIAN' : 'id',
            'INUKTITUT' : 'iu',
            'IRISH' : 'ga',
            'ITALIAN' : 'it',
            'JAPANESE' : 'ja',
            'JAVANESE' : 'jw',
            'KANNADA' : 'kn',
            'KAZAKH' : 'kk',
            'KHMER' : 'km',
            'KOREAN' : 'ko',
            'KURDISH': 'ku',
            'KYRGYZ': 'ky',
            'LAO' : 'lo',
            'LATIN' : 'la',
            'LATVIAN' : 'lv',
            'LITHUANIAN' : 'lt',
            'LUXEMBOURGISH' : 'lb',
            'MACEDONIAN' : 'mk',
            'MALAY' : 'ms',
            'MALAYALAM' : 'ml',
            'MALTESE' : 'mt',
            'MAORI' : 'mi',
            'MARATHI' : 'mr',
            'MONGOLIAN' : 'mn',
            'NEPALI' : 'ne',
            'NORWEGIAN' : 'no',
            'OCCITAN' : 'oc',
            'ORIYA' : 'or',
            'PASHTO' : 'ps',
            'PERSIAN' : 'fa',
            'POLISH' : 'pl',
            'PORTUGUESE' : 'pt',
            'PORTUGUESE_PORTUGAL' : 'pt-PT',
            'PUNJABI' : 'pa',
            'QUECHUA' : 'qu',
            'ROMANIAN' : 'ro',
            'RUSSIAN' : 'ru',
            'SANSKRIT' : 'sa',
            'SCOTS_GAELIC' : 'gd',
            'SERBIAN' : 'sr',
            'SINDHI' : 'sd',
            'SINHALESE' : 'si',
            'SLOVAK' : 'sk',
            'SLOVENIAN' : 'sl',
            'SPANISH' : 'es',
            'SUNDANESE' : 'su',
            'SWAHILI' : 'sw',
            'SWEDISH' : 'sv',
            'SYRIAC' : 'syr',
            'TAJIK' : 'tg',
            'TAMIL' : 'ta',
            'TATAR' : 'tt',
            'TELUGU' : 'te',
            'THAI' : 'th',
            'TIBETAN' : 'bo',
            'TONGA' : 'to',
            'TURKISH' : 'tr',
            'UKRAINIAN' : 'uk',
            'URDU' : 'ur',
            'UZBEK' : 'uz',
            'UIGHUR' : 'ug',
            'VIETNAMESE' : 'vi',
            'WELSH' : 'cy',
            'YIDDISH' : 'yi',
            'YORUBA' : 'yo'}
# Show the user every available language and its code (Python 2 print).
for key, value in langDict.iteritems():
    print "For language:", key, " ---- Select:", value
def selectLanguage(question):
checksValid = True
while checksValid:
language = raw_input(question)
for key, value in langDict.iteritems():
if language == value:
print 'You seleced:', key
checksValid = False
break
if checksValid:
print 'Not a valid language'
return language
# Interactive driver: ask for source/target languages and a reachable URL,
# translate the page word-by-word, save the text, then render it to PDF.
langSource = selectLanguage("What is the language of the source to translate? ")
langTarget = selectLanguage("What is the language of the desired translation? ")
checksValid = True
while checksValid:
    urlLink = raw_input("What is the html link to be translated? ")
    if checkURL.httpExists(urlLink):
        checksValid = False
saveFile = raw_input("File name to be save in tmp folder: ")
# Cache of translations so each distinct word is only sent to the
# translation service once.
repeatedWords = {}
saveFile = "/tmp/" + saveFile
textFile = open(saveFile, 'w')
for line in translator.fixHTML(urlLink):
    for word in line.split(' '):
        if not repeatedWords.has_key(word):
            repeatedWords[word] = translator.translate(word, langSource, langTarget)
        print repeatedWords[word],
        textFile.write(repeatedWords[word].encode('utf-8'))
        textFile.write(' ')
    print
    textFile.write('\n')
textFile.close()
# Convert the translated text file into a PDF.
recipe.main(saveFile)
| Python |
import time
import os
import re
import sys
import logging
import logging.handlers
import yaml
import getopt
from os.path import getmtime, getsize, normpath, join
logger = None
def init_logger(verbose=True,
                filelogging=False,
                logfilename='filemonitor.out',
                logfilesize=100000,
                backupcount=5):
    """Configure and return the shared 'logger' Logger.

    Always attaches a console handler; when *filelogging* is True a
    rotating file handler (*logfilesize* bytes, *backupcount* backups)
    is attached as well.  *verbose* selects DEBUG over INFO.
    """
    log = logging.getLogger('logger')
    fmt = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    log.setLevel(logging.DEBUG if verbose else logging.INFO)
    if filelogging == True:
        # Rotating file handler, capped in size with a fixed backup count.
        fh = logging.handlers.RotatingFileHandler(logfilename,
                                                  maxBytes=logfilesize,
                                                  backupCount=backupcount)
        fh.setFormatter(fmt)
        log.addHandler(fh)
    # Console handler is always present.
    ch = logging.StreamHandler()
    ch.setFormatter(fmt)
    log.addHandler(ch)
    return log
class EventDef:
    """Configuration for one watched directory/pattern pair.

    dir               -- directory to scan
    pattern           -- regex a filename must match
    action            -- shell command template ({event}/{eventdef} fields)
    maxerrors         -- failed runs allowed before an event is disabled
    minsize           -- minimum file size required before promotion
    tracking_interval -- seconds a file must stay unchanged before firing
    """

    def __init__(self, dir, pattern, action, maxerrors=2, minsize=0, tracking_interval=10):
        self.dir = dir
        self.pattern = pattern
        self.action = action
        self.maxerrors = maxerrors
        self.minsize = minsize
        self.tracking_interval = tracking_interval

    def __eq__(self, other):
        # Identity is the (dir, pattern) pair; tuning values are ignored.
        if not other:
            return False
        return (self.dir, self.pattern) == (other.dir, other.pattern)

    def __repr__(self):
        return "EventDef: dir='%s', pattern='%s', action='%s', minsize='%s', maxerrors='%s', tracking_interval='%s'" % (
            self.dir, self.pattern, self.action, self.minsize, self.maxerrors, self.tracking_interval)
class Event:
    """One detected file moving through the monitoring pipeline.

    Lifecycle: CANDIDATE (seen, still settling) -> ACTIVE (promoted) ->
    RETRY (action failed, will retry) or DISABLED (gave up).
    """

    CANDIDATE = 'CANDIDATE'
    ACTIVE = 'ACTIVE'
    RETRY = 'RETRY'
    DISABLED = 'DISABLED'

    def __init__(self, eventdef, filename):
        self.eventdef = eventdef
        self.filename = filename
        self.state = Event.CANDIDATE
        # Bookkeeping used to detect a "quiet" (no longer changing) file.
        self.errors = 0
        self.last_mtime = 0
        self.last_filesize = 0
        self.last_changed = time.time()

    def __eq__(self, other):
        # Same file under the same definition == same event.
        return (self.filename, self.eventdef) == (other.filename, other.eventdef)

    def __repr__(self):
        return "Event/Candidate: , filename='%s', state='%s', errors='%s', eventdef='%s'" % (
            self.filename, self.state, self.errors, self.eventdef)
def remove_obsolete_events(eventdefs, existing_events, names):
    """Return only the events whose file still appears in *names*.

    Events whose filename is missing from *names* are logged and dropped.
    *eventdefs* is currently unused; kept for signature symmetry with the
    other list-maintenance helpers.
    """
    events = []
    for event in existing_events:
        if event.filename not in names:
            logger.info("%s is obsolete, file does no more exist. Removing event from event list" %
                        (event))
            pass
        else:
            events.append(event)
    return events
def find_candidates(eventdefs, existing_candidates, existing_events):
    """Scan every watched directory and rebuild the candidate/event lists.

    Returns (candidates, events): new matches become fresh candidates,
    known candidates are carried over, and events whose file disappeared
    are expired via remove_obsolete_events().
    """
    # BUG FIX: 'candidates = names = []' aliased both names to a single
    # list; with an empty eventdefs that wiped every existing event.
    candidates = []
    all_names = []  # union of listings across ALL watched directories
    logger.debug("find_candidates() with:\nexisting events = %s,\nexisting candidates = %s" %
                 (existing_events, existing_candidates))
    for eventdef in eventdefs:
        names = os.listdir(eventdef.dir)
        # BUG FIX: previously only the LAST directory's listing survived the
        # loop, so remove_obsolete_events() wrongly dropped events whose
        # files live in any other watched directory.
        all_names.extend(names)
        logger.debug("Looking for pattern %s" % (eventdef.pattern))
        for name in names:
            if re.match(eventdef.pattern, name):
                candidate = Event(eventdef, name)
                logger.debug("----> matched %s" % (name))
                if (candidate not in existing_events) and (candidate not in existing_candidates):
                    logger.info("New candidate found: %s" % (candidate,))
                    candidates.append(candidate)
                elif (candidate in existing_candidates):
                    # keep the tracked instance (it carries the bookkeeping state)
                    logger.debug("candidate already exists")
                    candidates.append(existing_candidates[existing_candidates.index(candidate)])
                else:
                    logger.debug("candidate already in existing events list")
    events = remove_obsolete_events(eventdefs, existing_events, all_names)
    logger.debug("--> New candidates list len = %d" % (
        len(candidates)))
    return (candidates, events)
def promote_candidates(existing_candidates, events, now):
    """Promote candidates whose file has been stable long enough.

    A candidate becomes an ACTIVE event (appended to *events*) once its
    mtime and size stopped changing, it meets eventdef.minsize, and
    eventdef.tracking_interval seconds have elapsed since the last
    observed change.  All others stay candidates.  Returns
    (candidates, events).
    """
    candidates = []
    logger.debug("Promoting candidates to events ...")
    logger.debug("Existing candidates list len = %d" % (
        len(existing_candidates)))
    logger.debug(existing_candidates)
    logger.debug(80*"-")
    for candidate in existing_candidates:
        abspath = normpath(join(candidate.eventdef.dir, candidate.filename))
        mtime = getmtime(abspath)
        filesize = getsize(abspath)
        logger.debug("modification time of %s = %s, "
                     "last_access_time = %s, size = %s" %
                     (candidate.filename, mtime, candidate.last_mtime, filesize))
        if (candidate.last_mtime != mtime) or (filesize != candidate.last_filesize):
            logger.debug("Not promoting %s - file was changed or just created." % (candidate))
            candidate.last_mtime = mtime
            # BUG FIX: this assigned 'last_change' while Event.__init__ (and
            # the interval check below) use 'last_changed' — the timestamp
            # written here and the one read were different attributes.
            candidate.last_changed = now
            candidate.last_filesize = filesize
            candidates.append(candidate)
        elif (candidate.eventdef.minsize > filesize):
            logger.debug("Not promoting %s - file smaller than minsize." % (candidate))
            candidates.append(candidate)
        else:
            if (now - candidate.last_changed) > candidate.eventdef.tracking_interval:
                logger.debug("Promoting %s" % (candidate))
                candidate.state = Event.ACTIVE
                events.append(candidate)
            else:
                logger.debug("Not yet promoting %s - tracking interval active." % (candidate))
                candidates.append(candidate)
    logger.debug("--> New candidates list len = %d" % (
        len(candidates)))
    return (candidates, events)
def execute_events(existing_events):
    """Run the shell action of every active event; return the events that
    must be kept for another round.

    Exit code 0 drops the event.  A non-zero code increments the error
    counter and re-queues the event as RETRY until eventdef.maxerrors is
    exceeded, after which it is marked DISABLED but retained (and then
    skipped on later rounds).
    """
    events=[]
    logger.debug("Executing events ...")
    logger.debug("Existing event list len = %d" % (
        len(existing_events)))
    logger.debug(existing_events)
    logger.debug(80*"-")
    for event in existing_events:
        if event.state == Event.DISABLED:
            logger.info("Event %s is disabled; skipping." % (event))
            events.append(event) # keep that event??
            continue
        # The action template may reference {event.*} and {eventdef.*} fields.
        action = event.eventdef.action.format(event=event, eventdef=event.eventdef)
        logger.info("Execution action ' %s ' for event %s" % (action, event))
        rc=os.system(action)
        logger.info("RC = %d" % (rc))
        if rc == 0:
            # success: simply not re-appending removes it from the list
            logger.info(" -> All OK, removing event from event list")
        elif event.errors < event.eventdef.maxerrors:
            logger.warning(" -> Error while executing action")
            event.errors += 1
            event.state = Event.RETRY
            events.append(event)
        else:
            logger.error(" -> Action failed %d times. Disabling event." % (event.errors))
            event.state=Event.DISABLED
            events.append(event)
    logger.debug("--> New event list len = %d" % (len(events)))
    return events
def monitor(config_filename, polling_interval=10,dynamic_reload=False):
    """Main polling loop: scan, promote, execute, sleep — forever.

    When *dynamic_reload* is true the YAML config is re-read on every
    iteration so definition changes take effect without a restart.
    This function never returns.
    """
    events = []
    candidates = []
    eventdefs = read_config(config_filename)
    while True:
        if dynamic_reload:
            eventdefs = read_config(config_filename)
        logger.debug(80*"#")
        (candidates, events) = find_candidates(eventdefs, candidates, events)
        (candidates, events) = promote_candidates(candidates, events, time.time())
        events = execute_events(events)
        time.sleep(polling_interval)
def read_config(filename):
    """Load the list of EventDef objects from YAML file *filename*.

    BUG FIX: uses a ``with`` block so the handle is closed even when
    parsing raises (the original leaked the file object on error).
    SECURITY NOTE: yaml.load can instantiate arbitrary Python objects;
    only feed it trusted config files.  It must remain a full load here
    because the config stores pickled-style EventDef instances written
    by yaml.dump in test mode.
    """
    with open(filename) as stream:
        return yaml.load(stream)
def usage():
    """Print the command-line help text for this script to stdout."""
    help_text = ("Usage: %s\n"
                 " [-t|--test] : Run test mode (use this to create a sample config file)\n"
                 " [-i N|--interval=N] : Polling interval of N seconds [5]\n"
                 " [-c file|--config=file] : Use YAML config file [config.yaml]\n"
                 " [-d|--dynamic] : Enable dynamic config file reload\n"
                 " [-v|--verbose] : Print debug messages\n"
                 " [-h|--help] : Print this help message\n"
                 " [-l|--logging] : Enable file logging, default is console only\n"
                 " [--backupcount=N] : Keep N logfiles (rotate) [5]\n"
                 " [--logfilesize=N] : Rotate logfile when N bytes in size [100000]\n"
                 " [--logfilename=file] : Log file name [monitor.out]\n") % (__file__)
    print(help_text)
def main():
    """Parse CLI options, configure logging, optionally write a sample
    config (--test), then enter the monitor loop (which never returns)."""
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "i:c:f:dtlvh",  # BUG FIX: 'd' was missing, so -d raised GetoptError
                                   ["interval=",
                                    "config=",
                                    "logfilename=",
                                    "logfilesize=",
                                    "backupcount=",
                                    "dynamic", "logging", "verbose", "help", "test",])
    except getopt.GetoptError as err:
        print(str(err)) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    global logger
    verbose = False
    # BUG FIX: this local was named 'logging', shadowing the logging module
    # inside main(); renamed to 'filelogging' (matches init_logger's kwarg).
    filelogging = False
    test = False
    logfilename = "monitor.out"
    config = "config.yaml"
    logfilesize = 100000
    backupcount = 5
    interval = 5
    dynamic = False
    for o, a in opts:
        if o in ("-v", "--verbose"):
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif o in ("-i", "--interval"):
            interval = int(a)
        elif o in ("-l", "--logging"):
            filelogging = True
        elif o in ("-f", "--logfilename"):
            logfilename = a
        # BUG FIX: ("--logfilesize") without a comma is a plain string, so
        # 'in' performed substring matching; made both real 1-tuples.
        elif o in ("--logfilesize",):
            logfilesize = int(a)
        elif o in ("--backupcount",):
            backupcount = int(a)
        elif o in ("-c", "--config"):
            config = a
        elif o in ("-d", "--dynamic"):
            dynamic = True
        elif o in ("-t", "--test"):
            test = True
        else:
            assert False, "unhandled option"
    logger = init_logger(verbose, filelogging, logfilename, logfilesize, backupcount)
    eventdefs = []
    if test:
        if os.name == 'posix':
            # UNIX version
            os.system("touch /tmp/huhuralf.txt /tmp/halloralf.txt /tmp/hallo1234.txt")
            e1 = EventDef("/tmp", "huhu.*\.txt", "rm /tmp >> /tmp/huhu.out")
            e2 = EventDef("/tmp", "hallo.*\.txt", "echo \"{eventdef.dir}\" >> /tmp/hallo.out; rm /tmp/{event.filename}")
        elif os.name == 'nt':
            # Windows version
            os.system("touch c:/temp/huhuralf.txt c:/temp/halloralf.txt c:/temp/hallo1234.txt")
            e1 = EventDef("c:/temp", "huhu.*\.txt", "del c:/tmp >> c:/temp/huhu.out", minsize=10000)
            e2 = EventDef("c:/temp", "hallo.*\.txt", "type \"{eventdef.dir}\" >> c:\\temp\\hallo.out & del c:\\temp\\{event.filename}")
        eventdefs.append(e1)
        eventdefs.append(e2)
        # store config
        stream = open(config, 'w')
        yaml.dump(eventdefs, stream)
        stream.close()
    monitor(config, interval, dynamic)
| Python |
import socket
import time
import thread
import threading
from Tkinter import *
import tkFileDialog
def main():
    """Build the Lan Sharer window — a 'Start Server' button, an address
    entry and a 'Send File' button — then enter the Tk event loop."""
    finestra=Tk()
    finestra.title("Lan Sharer")
    # Fix the window at exactly 300x300 pixels.
    finestra.minsize(300,300)
    finestra.maxsize(300,300)
    cornice=Frame(finestra)
    cornice.pack()
    B1=Button(cornice,text="Start Server",borderwidth=1,command=PB1)
    # NOTE(review): 'st' is a local of main(), but PB2() reads 'st' as if it
    # were global — pressing "Send File" will raise NameError at runtime;
    # confirm and hoist to module scope if the lookup is intended.
    st=StringVar()
    txt=Entry(textvariable=st,width=30)
    B2=Button(cornice,text="Send File",borderwidth=1,command=PB2)
    B1.pack()
    txt.pack()
    B2.pack()
    finestra.mainloop()
def PB1():
    """'Start Server' handler: show the local IP and start the server
    module on a background thread."""
    # UDP "connect" trick: no packet is sent, but the OS selects the
    # outgoing interface, whose address we then read back.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(('google.com', 0))
    ip=s.getsockname()[0]
    lab=Label(text="Put this adress when requested from client: ")
    lab1=Label(text=ip)
    lab.pack()
    lab1.pack()
    # start_new_thread needs an argument tuple; the one-char string is
    # converted into a 1-tuple to act as a dummy argument for serve().
    b="1"
    b=tuple(b)
    time.sleep(1)
    def serve(x):
        # Importing 'server' starts it (module runs on import).
        import server
    thread.start_new_thread(serve,b)
def PB2():
    """'Send File' handler: read the chosen file and push its contents to
    the server at the typed address on port 48364."""
    # NOTE(review): 'st' is local to main(); this lookup raises NameError
    # when the button is pressed — verify intended scoping.
    addr=st.get()
    a=tkFileDialog.askopenfilename()
    getfile=open(a,"r")
    fil=getfile.read()
    s=socket.socket()
    s.connect((addr,48364))
    # Trailing CRLF marks end-of-transmission for the receiver.
    s.send(fil+"\r\n")
    s.close()
# Build the GUI immediately when the script runs.
main()
| Python |
# HRESULT-style status codes for the Esri File Geodatabase API.
# Generic COM results:
S_OK = 0
S_FALSE = 1
E_FAIL = 2147500037
E_INVALIDARG = 2147942487
E_NOTIMPL = 2147500033
E_OUTOFMEMORY = 2147942414
E_POINTER = 2147500035
E_NOINTERFACE = 2147500034
E_UNEXPECTED = 2147549183
E_ACCESSDENIED = 2147942405

# File-system level errors:
FGDB_E_FILE_NOT_FOUND = 2147942402
FGDB_E_PATH_NOT_FOUND = 2147942403
FGDB_E_ACCESS_DENIED = 2147942405
FGDB_E_CANNOT_MAKE = 2147942482
FGDB_E_SEEK = 2147942425
FGDB_E_INVALID_HANDLE = 2147942406
FGDB_E_FILE_EXISTS = 2147942480
FGDB_E_HANDLE_DISK_FULL = 2147942439
FGDB_E_NO_PERMISSION = -2147220987
FGDB_E_NOT_SUPPORTED = -2147220989
FGDB_E_FILE_IO = -2147220975

# Field (column) errors:
FGDB_E_FIELD_NOT_FOUND = -2147219885
FGDB_E_FIELD_INVALID_NAME = -2147219886
FGDB_E_FIELD_NOT_NULLABLE = -2147219879
FGDB_E_FIELD_NOT_EDITABLE = -2147219880
FGDB_E_FIELD_INVALID_TYPE = -2147219883
FGDB_E_FIELD_ALREADY_EXISTS = -2147219884
FGDB_E_FIELDS_MULTIPLE_OIDS = -2147219707
FGDB_E_FIELDS_MULTIPLE_GEOMETRIES = -2147219706
FGDB_E_FIELDS_MULTIPLE_RASTERS = -2147219704
FGDB_E_FIELDS_MULTIPLE_GLOBALIDS = -2147219703
FGDB_E_FIELDS_EMPTY = -2147219702
FGDB_E_FIELD_CANNOT_DELETE_REQUIRED_FIELD = -2147219877

# Table / dataset errors:
FGDB_E_TABLE_INVALID_NAME = -2147220654
FGDB_E_TABLE_NOT_FOUND = -2147220655
FGDB_E_TABLE_ALREADY_EXISTS = -2147220653
FGDB_E_TABLE_NO_OID_FIELD = -2147220652
FGDB_E_DATASET_INVALID_NAME = -2147220734
FGDB_E_DATASET_ALREADY_EXISTS = -2147220733
FGDB_E_INDEX_NOT_FOUND = -2147219629

# Spatial / SQL / XML errors:
FGDB_E_GRID_SIZE_TOO_SMALL = -2147216881
FGDB_E_INVALID_GRID_SIZE = -2147216894
FGDB_E_NO_SPATIALREF = -2147216889
FGDB_E_INVALID_SQL = -2147220985
FGDB_E_XML_PARSE_ERROR = -2147215103
FGDB_E_SPATIALFILTER_INVALID_GEOMETRY = -2147216814
FGDB_E_SPATIALREF_INVALID = -2147216892

# Workspace / locking / editing errors:
FGDB_E_WORKSPACE_ALREADY_EXISTS = -2147220902
FGDB_E_INVALID_RELEASE = -2147220965
FGDB_E_LOCK_CONFLICT = -2147220947
FGDB_E_SCHEMA_LOCK_CONFLICT = -2147220970
FGDB_E_OBJECT_NOT_LOCKED = -2147220968
FGDB_E_WORKSPACE_READONLY = -2147220893
FGDB_E_CANNOT_EDIT_COMPRESSED_DATASET = -2147220113
FGDB_E_CANNOT_UPDATE_COMPRESSED_DATASET = -2147220112
FGDB_E_COMPRESSED_DATASET_NOT_INSTALLED = -2147220109
FGDB_E_NEGATIVE_FID = -2147220945
FGDB_E_FEATURE_VALUE_TYPE_MISMATCH = -2147217395

# Row-level errors:
FGDB_E_ROW_BAD_VALUE = -2147219115
FGDB_E_ROW_ALREADY_EXISTS = -2147219114
FGDB_E_ROW_NOT_FOUND = -2147219118
FGDB_E_TABLE_SIZE_EXCEEDED = -2147220951
FGDB_E_NOT_EDITING = -2147220134
FGDB_E_EDIT_OPERATION_REQUIRED = -2147220957

# Catalog item / domain errors:
FGDB_E_CANNOT_CHANGE_ITEM_VISIBILITY = -2147211770
FGDB_E_ITEM_NOT_FOUND = -2147211775
FGDB_E_ITEM_RELATIONSHIP_NOT_FOUND = -2147211762
FGDB_E_DOMAIN_NOT_FOUND = -2147209215
FGDB_E_DOMAIN_NAME_ALREADY_EXISTS = -2147209212
FGDB_E_DOMAIN_INVALID_NAME = -2147209194
| Python |
#!/usr/bin/env python
#This is the main executable. Don't change it unless you really need to.
import sys
import re
DEFAULT_PORT = 1337
def print_usage():
    """Print command-line usage help to stdout (Python 2 print statements)."""
    print
    print "Usage: ./client.py <remote IP> <options>"
    print "Remote IP can be written as <ip> or <ip>:<port>"
    print "Default port is " + str(DEFAULT_PORT)
    print
    print "If no <remote ip> is specified, the node will start in initial connection mode"
    print
    print "Options:"
    print "--help\tPrints this message."
    print "--about\tPrints credits and version information."
    print "--headless\tRuns node in headless mode without any UI."
    print "\t\tIgnores most options except remote IP and share directory."
    print "--port=<num>\tOverride the default port setting and choose your own."
    print
def print_about():
    """Print credits and an ASCII-art cat (Python 2 print statements)."""
    print "\nUnnamed mystery filesharing project!!!1"
    print 'By Sam Greene, Matt McNeill, and Anthony Work\n'
    print "     /\\___/\\"
    print "    ( o   o )"
    print "    (  =^=  )"
    print "    (        )"
    print "    (         )"
    print "    (          )))))))))))"
    print "oh god how did this get here i am not good with computers\n"
def main():
    """Parse command-line arguments and launch the file-share node with
    either the text UI or the headless UI."""
    headless = 0
    connIP = ""
    connPort = -1
    myPort = DEFAULT_PORT
    shareDirs = []
    #Checkin' arguments, woo
    if len(sys.argv) > 1:
        # Dotted-quad IPv4 address with an optional :port suffix.
        regex = '^([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})(:([0-9])+)?'
        matched = 0
        # NOTE(review): iterating sys.argv includes argv[0] (the script
        # name), which falls through to the final else and ends up in
        # shareDirs — confirm whether sys.argv[1:] was intended.
        for i in sys.argv:
            if i == "--about":
                print_about()
                return
            elif i == "--help":
                print_usage()
                return
            elif headless == 0 and i == "--headless":
                matched = 1
                headless = 1
            elif connIP == "" and re.match(regex, i):
                matched = 1
                ipStrings = i.split(":")
                connIP = ipStrings[0]
                if len(ipStrings) > 1:
                    connPort = int(ipStrings[1])
            elif myPort == DEFAULT_PORT and re.match("^--port=[0-9]+", i):
                matched = 1
                myPort = int(i.split("=")[1])
            else:
                '''We can be sure that any leftover arguments are the directories that
                the user wants to share.'''
                matched = 1
                shareDirs.append(i)
        #This should never happen now! Remove later?
        if matched == 0:
            print_usage()
            return
    #Start the UI! DO IT
    if headless == 0:
        from ui_text import TextUI
        fileshare = TextUI(myPort, connIP, connPort, shareDirs)
    else:
        from ui_headless import HeadlessUI
        fileshare = HeadlessUI(myPort, connIP, connPort, shareDirs)
# Standard entry point guard: run the CLI only when executed directly.
if __name__ == '__main__':
    main()
| Python |
"""
This is the part of the code that interfaces with Entangled. Ideally,
you'll be getting everything you need from the UI in this code to talk
to the DHT, regardless of which UI it is. Then, you'll return what you
get in some standardized way.
The uiClass passed to the constructor is the instance of whichever user
interface class is communicating with the user. All of them are required
by their AbstractUI superclass to have some functions for dealing with
the deferred objects Entangled will produce:
searchResult(result)
filePublished(result)
downloadFile(result)
Assuming your deferred object is called df, you'll set these with:
df.addCallback(searchResult)
That way, the UI will deal with whatever the Entangled network produces.
AWESOME.
"""
import os
import entangled.node
import hashlib
from twisted.internet import defer
import twisted.internet.reactor
import socket
from entangled.kademlia.datastore import SQLiteDataStore
import thread
from listen import Listener
import fileshareutils
class EntangledNetworkLayer:
    #Bridges a UI instance and the Entangled DHT: publishes shared files,
    #runs keyword searches and resolves keys, handing results back to the
    #UI's callback methods (do_searchDisplay / download).
    #These things go into the DHT as part of the value in the key/value pair
    myIP = "PlaceholderIP"
    listenPort = 0
    node = None
    uiClass = None
    listener = None
    '''Some clarification:
    uiClass: The class instance responsible for the user interface
    myPort: The port that the Entangled network will operate from.
    This is distinct from the port the listener listens on!
    listenPort: The port the listener operates from. This is put in the
    DHT as part of the value in the key/value pair.
    connectIP: The IP address of another Entangled node we're connecting to
    connectPort: The corresponding port
    '''
    def __init__(self, uiClass, myPort, listenPort, connectIP, connectPort):
        #NOTE: this constructor does not return until the Twisted reactor
        #shuts down -- it starts the listener and UI in worker threads and
        #then blocks in reactor.run().
        self.uiClass = uiClass
        self.listenPort = listenPort
        #Resolve a hostname argument to a dotted quad, if one was given.
        if connectIP != "": connectIP= socket.gethostbyname(connectIP)
        print connectIP
        '''Set up the listener, including a super awful hacky way of getting the IP!'''
        listener = Listener(listenPort)
        #Discover our outward-facing IP by connecting a throwaway socket
        #to a well-known host and reading the local endpoint address.
        s = socket.socket()
        s.connect(('google.com', 80))
        self.myIP = s.getsockname()[0]
        del s
        thread.start_new_thread(listener.run, ())
        '''Set up the node'''
        dataStore = None#SQLiteDataStore(dbFile="/tmp/fileshare%s.db"%myPort)
        self.node = entangled.node.EntangledNode(udpPort=myPort,dataStore = dataStore)
        if connectIP == "":
            print "Connection-less node started"
            ips = None
        else:
            print "Connecting to " + connectIP + " on port " + str(connectPort)
            ips = [(connectIP, connectPort)]
        self.node.joinNetwork(ips)
        '''Set up the main UI and start the reactor'''
        #The UI runs in its own thread; the reactor takes over this one.
        thread.start_new_thread(self.uiClass.run, (self,))
        twisted.internet.reactor.run()
    def error(self, error):
        #Generic errback for deferreds: just report the failure.
        print error
    #Share a single file
    def share(self, sharedFile):
        #Publish filename -> "ip,listenPort,path,md5hash" into the DHT.
        filename = os.path.split(sharedFile)[1]
        valString = self.myIP+","+str(self.listenPort)+","+sharedFile+","+fileshareutils.filehash(sharedFile)
        #df = self.node.publishData(filename, self.myIP+","+str(self.listenPort)+","+sharedFile+","+str(fileshareutils.filehash(sharedFile)))
        df = self.node.publishData(filename, valString)
        #We don't really need to deal with the deferred object, forget the callback
        #df.addErrback(self.error)
    #Perform a search on the list of keywords
    def search(self, keywords):
        #No-op on an empty keyword list.
        if len(keywords) == 0:
            return
        df = self.node.searchForKeywords(keywords)
        df.addCallback(self.displaySearchResult)
        df.addErrback(self.error)
    def displaySearchResult(self, searchResults):
        #Resolve each returned key (SHA-1 of the published name) to its
        #stored value and hand it to the UI for display.
        for i in searchResults:
            h = hashlib.sha1()
            h.update(i)
            df = self.node.iterativeFindValue(h.digest())
            df.addCallback(self.uiClass.do_searchDisplay)
            df.addErrback(self.error)
    def get(self, key):
        #Look up a single key and hand the value to the UI's download hook.
        h = hashlib.sha1()
        h.update(key)
        df = self.node.iterativeFindValue(h.digest())
        df.addCallback(self.uiClass.download)
        df.addErrback(self.error)
| Python |
"""
This is the source file for the text-based user interface. It's nice
and simple and has a shell.
"""
import sys, os
import thread
import binascii
import string
import threading
import traceback
import fileT
from network import EntangledNetworkLayer
from listen import Listener
from ui_abstract import AbstractUI
import twisted.internet.reactor
class TextUI(AbstractUI):
    #Interactive command-line UI: a tiny shell with help/share/search/get.
    noResultsStr = "No results found!"
    #searchResults groups result value-strings ("ip,port,path,hash") by
    #file hash: searchResults[category] lists every peer offering the
    #same file.  It is reset to [] before each new search in run().
    searchResults = [[[]]]
    numInResults = 0            #total individual results received
    numResultCategories = 0     #distinct file hashes seen so far
    semaphore = None            #guards searchResults against concurrent DHT callbacks
    def __init__(self, myPort, ip, port, shareDirs):
        #Prompt interactively for connection details that were not
        #supplied on the command line.
        if ip == "":
            ip = raw_input("Connect IP: ")
            print ip
        if ip != "" and port == -1:
            portStr = raw_input("Connect port: ")
            if portStr == "":
                port = 1337
            else:
                port = int(portStr)
        self.semaphore = threading.Semaphore(1)
        AbstractUI.__init__(self, myPort, ip, port, shareDirs)
    def do_searchDisplay(self, result):
        #DHT callback: merge a {key: "ip,port,path,hash"} dict into
        #searchResults, grouping duplicates of the same file hash, and
        #print each file the first time it is seen.
        self.semaphore.acquire()
        #Sanity check
        if len(result) == 0:
            self.semaphore.release()
            return
        for key,value in result.iteritems():
            printVal = 1
            splitValue = value.split(",")
            if self.numInResults == 0:
                #If we're empty, the hash check doesn't need to happen
                self.searchResults.append([value])
                self.numInResults = 1
                self.numResultCategories = 1
            else:
                loopBreak = 0
                #The results string is not empty. We have to either:
                #1. Find a preexisting search result with the same file hash, or
                #2. Insert at the end of the list
                for a in range(0,len(self.searchResults)):
                    for b in range(0,len(self.searchResults[a])):
                        if splitValue[3] == self.searchResults[a][b].split(",")[3]:
                            self.searchResults[a].append(value)
                            loopBreak = 1
                            printVal = 0 #Already in list, don't need to print
                            self.numInResults = self.numInResults + 1
                            break
                    if loopBreak: break
                if loopBreak == 0:
                    self.searchResults.append([value])
                    self.numInResults = self.numInResults + 1
                    self.numResultCategories = self.numResultCategories + 1
            if printVal:
                #Result numbers shown here are 1-based; getFile() below
                #subtracts 1 when indexing.
                print str(self.numResultCategories)+": " + os.path.split(splitValue[2])[1] + ":"
                print "\tRemote IP:Port: " + splitValue[0] + ":" + splitValue[1]
                print "\tFile hash: " + splitValue[3]
                print
        self.semaphore.release()
    def filePublished(self, result):
        print result
    #Stubs for callback methods, put more interesting things in here later
    def getFile(self, getNum):
        #'get' command handler.  getNum is the raw token list from the
        #shell; prompt when no result numbers were supplied.
        if len(getNum) == 1:
            getNum = [raw_input("Enter search result number to download file: ")]
        else: del getNum[0]
        if self.numInResults==0:
            print "No search has been performed yet!"
            return
        for i in getNum:
            #User-visible numbers are 1-based.
            self.download(self.searchResults[int(i)-1])
    def download(self, result):
        #Start a multi-source download: one (ip, port, path) tuple per
        #peer that offers this file.
        #resultList = string.split(result,',')
        #filelist = string.split(result,' ')
        #[file file file ] [ip port path hash]
        pas = []
        for i in result:
            resultList = i.split(',')
            dlTuple = (resultList[0], resultList[1], resultList[2])
            pas.append(dlTuple)
        fileT.file_recieve_request(pas).start()
    #Some private functions used by the run loop
    def __printHelp(self):
        print "\nCommands:"
        print "help - display this message"
        print "share - select a folder or file to share, display shared files"
        print "search - search for a file by keyword"
        print "get - download file by search number, filename, or hash"
        print "exit - quit program\n"
    def __getSearch(self, search):
        #Return the keyword list, prompting when only "search" was typed.
        if len(search) == 1:
            return raw_input("Enter search terms: ").split()
        del search[0]
        #Convert to all lower case since that's how Entangled works apparently
        return [i.lower() for i in search]
    def __publishAllInDirs(self, directories, network):
        #Wrapper around the shared publishing helper; prompts when only
        #"share" was typed.
        if len(directories) == 1:
            AbstractUI.__publishAllInDirs__(self, [raw_input("Directory to share: ")], network)
        else:
            del directories[0]
            AbstractUI.__publishAllInDirs__(self, directories, network)
    #Main run loop
    def run(self, network):
        #Shell loop: read a command line, dispatch on the first token.
        #Runs in its own thread (started by EntangledNetworkLayer).
        self.__printHelp()
        try:
            while(1):
                #twisted.internet.reactor.runUntilCurrent()
                #t = twisted.internet.reactor.timeout()
                #t2 = twisted.internet.reactor.running and t
                #twisted.internet.reactor.doIteration(t2)
                inputStr = raw_input("?> ").split()
                if len(inputStr) == 0:
                    continue
                if inputStr[0] == "exit":
                    break
                if inputStr[0] == "help":
                    self.__printHelp()
                    continue
                if inputStr[0] == "share":
                    self.__publishAllInDirs(inputStr, network)
                    continue
                if inputStr[0] == "search":
                    #Fresh search: clear the accumulated results first.
                    self.searchResults = []
                    self.numInResults = 0
                    network.search(self.__getSearch(inputStr))
                    continue
                if inputStr[0] == "get":
                    #I'm just using a dummy key at the moment to test this myself.
                    #It won't work on your computer. Change it to some file in a directory you share.
                    self.getFile(inputStr)
                    continue
                if inputStr[0] == "results":
                    print self.searchResults
                    continue
                print "Unknown command, type help for command list"
        except(KeyboardInterrupt, SystemExit):
            print
            return
        except:
            traceback.print_exc()
| Python |
"""
This is the abstract UI class. It should not be used except by its
subclasses.
"""
import sys
import os
import thread
from network import EntangledNetworkLayer
from listen import Listener
import socket
import time
class AbstractUI:
#don't worry about myPort not being set to DEFAULT_PORT, it gets overwritten anyway
myPort = 1337
listenPort = 1338
connectIP = ""
connectPort = -1
nl = None
shareDirs = None
def searchResult(self, result):
raise NotImplementedError
def filePublished(self, result):
raise NotImplementedError
def download(self, result):
raise NotImplementedError
def __publishAllInDirs__(self, directories, network):
for i in directories:
i = os.path.expanduser(i)
for entries in os.walk(i):
for filename in entries[2]:
network.share(os.path.join(entries[0], filename))
def __init__(self, myPort, ip, port, shareDirs):
print socket.gethostname()
self.myPort = myPort
self.listenPort = myPort + 1
self.connectIP = ip
self.connectPort = port
self.shareDirs = shareDirs
#listener = Listener(myPort)
#thread.start_new_thread(listener.run, ())
self.nl = EntangledNetworkLayer(self, self.myPort, self.listenPort, self.connectIP, self.connectPort)
| Python |
#!/usr/bin/env python
#samsterlicious when cut across the neck a sound like wailing winds is heard
'''
File transfer and receive classes.
These are called by the UIs.
'''
import socket, os, sys, threading, tempfile, re, time, math, random, string
buf = 10000
class recieve_thread(threading.Thread):
    #Receiver side of one peer connection.  Accepts the sender's TCP
    #connection and pulls file chunks until the module-global 'progress'
    #map (chunk index -> flag, shared by all receiver threads and guarded
    #by self.sema) is empty.
    # socket file sema
    def __init__(self,sockeet,file_p,semaz):
        #sockeet: listening socket to accept the peer on
        #file_p:  open output file shared by all receiver threads
        #semaz:   semaphore guarding the global 'progress' dict
        threading.Thread.__init__(self)
        self.sema = semaz
        self.sock = sockeet
        self.file = file_p
    def run(self):
        csock, addr = self.sock.accept()
        msg = csock.recv(buf)
        #message = "filename size"
        reg_msg = msg.split("::")
        filename = reg_msg[0]
        size = reg_msg[1]
        #Number of buf-sized chunks in the whole file.
        final_size = int(math.ceil((float(size)/buf)))
        self.sema.acquire()
        #First thread to get here seeds 'progress' with one entry per
        #outstanding chunk index.
        if(progress == {}):
            i = 0
            while i < final_size:
                progress[i] = 0
                i = i + 1
        self.sema.release()
        finished = False
        while finished == False:
            self.sema.acquire()
            if len(progress) == 0:
                #Every chunk has been claimed: close up and exit.
                finished = True
                self.sema.release()
                self.sock.close()
                sys.exit(1)
            else:
                #Pick a random not-yet-received chunk index between the
                #smallest and the last-iterated key.
                has_kizey = False
                while has_kizey == False:
                    starting_key = 0
                    while(not(progress.has_key(starting_key))):
                        starting_key = starting_key + 1
                    temp = 0
                    for key in progress:
                        temp = key
                    i = random.randint(starting_key,temp)
                    has_kizey = progress.has_key(i)
                print len(progress)
                self.sema.release()
                #Ask the sender for chunk i ...
                msg = str(i)
                csock.send(msg)
                #... and read the reply: 10-byte chunk-index header + payload.
                #NOTE(review): TCP is a stream, so recv(buf+10) may return a
                #partial chunk; a short read here would corrupt the file --
                #confirm against the sender's send() granularity.
                file_part = csock.recv(buf+10)
                fp = int(file_part[0:10])
                self.sema.acquire()
                progress[fp] = 1
                #Write the payload at its byte offset in the shared file.
                index = fp * buf
                self.file.seek(index)
                self.file.write(file_part[10:len(file_part)])
                if(not progress.has_key(i)):
                    #Another thread already removed our chunk: we're done.
                    finished = True
                    self.sema.release()
                    self.sock.close()
                    sys.exit(1)
                del progress[i]
                self.sema.release()
class send_thread(threading.Thread):
    #Sender side: answers a peer's "<path>::<port>" request by connecting
    #back to that port and serving whichever chunks the receiver asks for.
    def __init__(self,socketz,addr):
        #socketz: accepted control socket; addr: (ip, port) of the peer.
        threading.Thread.__init__(self)
        self.sock = socketz
        self.add = addr
    def run(self):
        #send 'filename size'
        #receiving msg requesting sam.txt port 5001
        a = self.sock.recv(buf)
        splitter = a.split('::')
        filez = open(splitter[0],'rb')
        size = os.path.getsize(splitter[0])
        msg = splitter[0] + '::' + str(size)
        total_parts = math.ceil(float(size)/float(buf))
        port_num = splitter[1]
        sockz = socket.socket()
        sockz.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        i = 0
        #Connect back to the receiver's data port and announce
        #"<filename>::<size>".
        sockz.connect((self.add[0],int(port_num)))
        sockz.send(msg)
        #NOTE(review): 'i' is never incremented, so the loop condition is
        #always true; the loop actually ends when the peer closes the
        #connection and recv returns ''.
        while i < total_parts:
            a = sockz.recv(buf)
            if(a == ''): break
            #Requested chunk index -> byte offset in the file.
            index = float(a) * float(buf)
            index = math.floor(index)
            index = int(index)
            filez.seek(index)
            file_part = filez.read(buf)
            #Reply is a 10-byte space-padded chunk index then the payload
            #(int() on the receiver side tolerates the trailing spaces).
            msg = a
            while len(msg) < 10:
                msg = msg + ' '
            sockz.send(msg+file_part)
#dict is the dictionary passed by the UI name:filename and addr:[(ip,port)]
class file_recieve_request(threading.Thread):
threadList = []
def __init__(self,list_of_tuplez):
threading.Thread.__init__(self)
#you're getting passed a tuple
global progress
global finished
progress = {}
finished = False
'''
self.full_file_name = dict['name']
splitter = dict['name'].split('/')
self.file_name = splitter[len(splitter)-1]
print self.file_name
self.new_file = open(self.file_name,'wb')
self.ip_list = dict['ip']
'''
windows_check = re.search('[\\\]',list_of_tuplez[0][2])
linux_check = re.search('/',list_of_tuplez[0][2])
if windows_check != None :
temp = string.split(list_of_tuplez[0][2],'\\')
self.file_name = temp[len(temp)-1]
elif linux_check != None :
temp = string.split(list_of_tuplez[0][2],'/')
self.file_name = temp[len(temp)-1]
else:
self.file_name = list_of_tuplez[0][2]
self.new_file = open(self.file_name,'wb')
self.list_of_tuples = list_of_tuplez
def run(self):
#send request for a file to each ip in the dictionary
x = 5000
count = 0
sema = threading.Semaphore(1)
while count < len(self.list_of_tuples):
temp_sock = socket.socket()
temp_sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
fail = True
localhost = socket.gethostbyname(socket.gethostname())
if(localhost == '127.0.0.1'):
try:
import commands
localhost = commands.getoutput("ifconfig").split("\n")[1].split()[1][5:]
except(Exception):
a = raw_input("you're using localhost type in your ip manually\n")
localhost = a
while(fail == True):
try:
temp_sock.bind((localhost,x))
fail = False
except(Exception):
x = x + 1
temp_sock.listen(5)
temp_thread = recieve_thread(temp_sock,self.new_file,sema)
temp_thread.start()
self.threadList.append(temp_thread)
count = count + 1
starting_port = x-len(self.list_of_tuples)+1
try:
for tupel in self.list_of_tuples:
ip = str(tupel[0])
port = int(tupel[1])
file_path = tupel[2]
c_sock = socket.socket()
c_sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
c_sock.connect((ip,port))
send_msg = file_path + "::" + str(starting_port)
starting_port = starting_port+1
c_sock.send(send_msg)
except(KeyboardInterrupt, SystemExit):
c_sock.close()
exit(0)
raise
for i in self.threadList:
i.join()
print "Finished downloading "+self.file_name
| Python |
#!/usr/bin/env python
#This is the main executable. Don't change it unless you really need to.
import sys
import re
DEFAULT_PORT = 1337
def print_usage():
print
print "Usage: ./client.py <remote IP> <options>"
print "Remote IP can be written as <ip> or <ip>:<port>"
print "Default port is " + str(DEFAULT_PORT)
print
print "If no <remote ip> is specified, the node will start in initial connection mode"
print
print "Options:"
print "--help\tPrints this message."
print "--about\tPrints credits and version information."
print "--headless\tRuns node in headless mode without any UI."
print "\t\tIgnores most options except remote IP and share directory."
print "--port=<num>\tOverride the default port setting and choose your own."
print
def print_about():
print "\nUnnamed mystery filesharing project!!!1"
print 'By Sam Greene, Matt McNeill, and Anthony Work\n'
print " /\___/\\"
print " ( o o )"
print " ( =^= )"
print " ( )"
print " ( )"
print " ( )))))))))))"
print "oh god how did this get here i am not good with computers\n"
def main():
    """Parse command-line arguments and launch the appropriate UI.

    Recognised arguments: --help, --about, --headless, --port=<num>,
    an optional <ip>[:<port>] of a remote node to join, and any number
    of directories to share.  With no arguments the node starts in
    initial connection mode with the text UI.
    """
    headless = 0
    connIP = ""
    connPort = -1
    myPort = DEFAULT_PORT
    shareDirs = []
    #Checkin' arguments, woo
    if len(sys.argv) > 1:
        regex = '^([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})(:([0-9])+)?'
        matched = 0
        #BUG FIX: skip sys.argv[0] -- the script path is not a user
        #argument and used to fall through to the final else branch,
        #where it was silently treated as a directory to share.
        for i in sys.argv[1:]:
            if i == "--about":
                print_about()
                return
            elif i == "--help":
                print_usage()
                return
            elif headless == 0 and i == "--headless":
                matched = 1
                headless = 1
            elif connIP == "" and re.match(regex, i):
                #First dotted-quad argument is the remote node, with an
                #optional ":port" suffix.
                matched = 1
                ipStrings = i.split(":")
                connIP = ipStrings[0]
                if len(ipStrings) > 1:
                    connPort = int(ipStrings[1])
            elif myPort == DEFAULT_PORT and re.match("^--port=[0-9]+", i):
                matched = 1
                myPort = int(i.split("=")[1])
            else:
                '''We can be sure that any leftover arguments are the directories that
                the user wants to share.'''
                matched = 1
                shareDirs.append(i)
        #This should never happen now! Remove later?
        if matched == 0:
            print_usage()
            return
    #Start the UI! DO IT (the UI constructor blocks inside the reactor)
    if headless == 0:
        from ui_text import TextUI
        fileshare = TextUI(myPort, connIP, connPort, shareDirs)
    else:
        from ui_headless import HeadlessUI
        fileshare = HeadlessUI(myPort, connIP, connPort, shareDirs)
#Script entry point.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
#samsterlicious when cut across the neck a sound like wailing winds is heard
'''
File transfer and receive classes.
These are called by the UIs.
'''
import socket, os, sys, threading, tempfile, re, time, math, random, string
buf = 10000
class recieve_thread(threading.Thread):
    #Receiver side of one peer connection.  Accepts the sender's TCP
    #connection and pulls file chunks until the module-global 'progress'
    #map (chunk index -> flag, shared by all receiver threads and guarded
    #by self.sema) is empty.
    # socket file sema
    def __init__(self,sockeet,file_p,semaz):
        #sockeet: listening socket to accept the peer on
        #file_p:  open output file shared by all receiver threads
        #semaz:   semaphore guarding the global 'progress' dict
        threading.Thread.__init__(self)
        self.sema = semaz
        self.sock = sockeet
        self.file = file_p
    def run(self):
        csock, addr = self.sock.accept()
        msg = csock.recv(buf)
        #message = "filename size"
        reg_msg = msg.split("::")
        filename = reg_msg[0]
        size = reg_msg[1]
        #Number of buf-sized chunks in the whole file.
        final_size = int(math.ceil((float(size)/buf)))
        self.sema.acquire()
        #First thread to get here seeds 'progress' with one entry per
        #outstanding chunk index.
        if(progress == {}):
            i = 0
            while i < final_size:
                progress[i] = 0
                i = i + 1
        self.sema.release()
        finished = False
        while finished == False:
            self.sema.acquire()
            if len(progress) == 0:
                #Every chunk has been claimed: close up and exit.
                finished = True
                self.sema.release()
                self.sock.close()
                sys.exit(1)
            else:
                #Pick a random not-yet-received chunk index between the
                #smallest and the last-iterated key.
                has_kizey = False
                while has_kizey == False:
                    starting_key = 0
                    while(not(progress.has_key(starting_key))):
                        starting_key = starting_key + 1
                    temp = 0
                    for key in progress:
                        temp = key
                    i = random.randint(starting_key,temp)
                    has_kizey = progress.has_key(i)
                print len(progress)
                self.sema.release()
                #Ask the sender for chunk i ...
                msg = str(i)
                csock.send(msg)
                #... and read the reply: 10-byte chunk-index header + payload.
                #NOTE(review): TCP is a stream, so recv(buf+10) may return a
                #partial chunk; a short read here would corrupt the file --
                #confirm against the sender's send() granularity.
                file_part = csock.recv(buf+10)
                fp = int(file_part[0:10])
                self.sema.acquire()
                progress[fp] = 1
                #Write the payload at its byte offset in the shared file.
                index = fp * buf
                self.file.seek(index)
                self.file.write(file_part[10:len(file_part)])
                if(not progress.has_key(i)):
                    #Another thread already removed our chunk: we're done.
                    finished = True
                    self.sema.release()
                    self.sock.close()
                    sys.exit(1)
                del progress[i]
                self.sema.release()
class send_thread(threading.Thread):
    #Sender side: answers a peer's "<path>::<port>" request by connecting
    #back to that port and serving whichever chunks the receiver asks for.
    def __init__(self,socketz,addr):
        #socketz: accepted control socket; addr: (ip, port) of the peer.
        threading.Thread.__init__(self)
        self.sock = socketz
        self.add = addr
    def run(self):
        #send 'filename size'
        #receiving msg requesting sam.txt port 5001
        a = self.sock.recv(buf)
        splitter = a.split('::')
        filez = open(splitter[0],'rb')
        size = os.path.getsize(splitter[0])
        msg = splitter[0] + '::' + str(size)
        total_parts = math.ceil(float(size)/float(buf))
        port_num = splitter[1]
        sockz = socket.socket()
        sockz.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        i = 0
        #Connect back to the receiver's data port and announce
        #"<filename>::<size>".
        sockz.connect((self.add[0],int(port_num)))
        sockz.send(msg)
        #NOTE(review): 'i' is never incremented, so the loop condition is
        #always true; the loop actually ends when the peer closes the
        #connection and recv returns ''.
        while i < total_parts:
            a = sockz.recv(buf)
            if(a == ''): break
            #Requested chunk index -> byte offset in the file.
            index = float(a) * float(buf)
            index = math.floor(index)
            index = int(index)
            filez.seek(index)
            file_part = filez.read(buf)
            #Reply is a 10-byte space-padded chunk index then the payload
            #(int() on the receiver side tolerates the trailing spaces).
            msg = a
            while len(msg) < 10:
                msg = msg + ' '
            sockz.send(msg+file_part)
#dict is the dictionary passed by the UI name:filename and addr:[(ip,port)]
class file_recieve_request(threading.Thread):
threadList = []
def __init__(self,list_of_tuplez):
threading.Thread.__init__(self)
#you're getting passed a tuple
global progress
global finished
progress = {}
finished = False
'''
self.full_file_name = dict['name']
splitter = dict['name'].split('/')
self.file_name = splitter[len(splitter)-1]
print self.file_name
self.new_file = open(self.file_name,'wb')
self.ip_list = dict['ip']
'''
windows_check = re.search('[\\\]',list_of_tuplez[0][2])
linux_check = re.search('/',list_of_tuplez[0][2])
if windows_check != None :
temp = string.split(list_of_tuplez[0][2],'\\')
self.file_name = temp[len(temp)-1]
elif linux_check != None :
temp = string.split(list_of_tuplez[0][2],'/')
self.file_name = temp[len(temp)-1]
else:
self.file_name = list_of_tuplez[0][2]
self.new_file = open(self.file_name,'wb')
self.list_of_tuples = list_of_tuplez
def run(self):
#send request for a file to each ip in the dictionary
x = 5000
count = 0
sema = threading.Semaphore(1)
while count < len(self.list_of_tuples):
temp_sock = socket.socket()
temp_sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
fail = True
localhost = socket.gethostbyname(socket.gethostname())
if(localhost == '127.0.0.1'):
try:
import commands
localhost = commands.getoutput("ifconfig").split("\n")[1].split()[1][5:]
except(Exception):
a = raw_input("you're using localhost type in your ip manually\n")
localhost = a
while(fail == True):
try:
temp_sock.bind((localhost,x))
fail = False
except(Exception):
x = x + 1
temp_sock.listen(5)
temp_thread = recieve_thread(temp_sock,self.new_file,sema)
temp_thread.start()
self.threadList.append(temp_thread)
count = count + 1
starting_port = x-len(self.list_of_tuples)+1
try:
for tupel in self.list_of_tuples:
ip = str(tupel[0])
port = int(tupel[1])
file_path = tupel[2]
c_sock = socket.socket()
c_sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
c_sock.connect((ip,port))
send_msg = file_path + "::" + str(starting_port)
starting_port = starting_port+1
c_sock.send(send_msg)
except(KeyboardInterrupt, SystemExit):
c_sock.close()
exit(0)
raise
for i in self.threadList:
i.join()
print "Finished downloading "+self.file_name
| Python |
"""
Main source file for the headless UI. It's nice and simple and does
almost nothing!
"""
import sys
import thread
from network import EntangledNetworkLayer
from listen import Listener
from ui_abstract import AbstractUI
class HeadlessUI(AbstractUI):
#Unlike the TextUI, we don't have any interactivity, so we set up
#as much as we can in the init and just let it run.
def __init__(self, myPort, ip, port, shareDirs):
if ip == "" or port == -1:
print "Warning: Node is not connected to anything! You cannot fix this from here!"
AbstractUI.__init__(self, myPort, ip, port, shareDirs)
def __publishAllInDirs(self, directories, network):
AbstractUI.__publishAllInDirs__(self, directories, network)
def run(self, network):
if len(self.shareDirs) > 0:
self.__publishAllInDirs(self.shareDirs, network)
#Main run loop
try:
while(1):
continue
except (KeyboardInterrupt, SystemExit):
print
return
#We're headless, we don't care about this crap
def searchResult(self, result):
return
def filePublished(self, result):
return
| Python |
import socket
import threading
from fileT import send_thread
class Listener(threading.Thread):
sock = None
port = 1338
def __init__(self, port):
threading.Thread.__init__(self)
self.port = port
print "listenT port: "+str(port)
self.sock = socket.socket()
self.sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
def run(self):
self.sock.bind(('', self.port))
self.sock.listen(5)
while 1:
client_sock,address = self.sock.accept()
sender = send_thread(client_sock, address)
sender.start()
| Python |
'''Collection of utility functions'''
import hashlib
import os
def filehash(path):
    """Return the hex MD5 digest of the file at *path*.

    The file is read in 1 KiB chunks so arbitrarily large files can be
    hashed without loading them entirely into memory.
    """
    md5 = hashlib.md5()
    #BUG FIX: the old code used file(path, 'r') -- text mode corrupts the
    #digest of binary files on Windows, and the handle was never closed.
    with open(path, 'rb') as inFile:
        while 1:
            fileBuf = inFile.read(1024)
            if not fileBuf:
                break
            md5.update(fileBuf)
    return md5.hexdigest()
| Python |
import os, sys, commands, ntpath, math, hashlib, unicodedata
__version__ = '1.0.0'
__author__ = "joesox@gmail.com"
__url__ = 'www.joeswammi.com'
"""
Written for Python 2.6.5
PREREQUISITES:
TrID (and its .trd file)- File Identifier: http://mark0.net/soft-trid-e.html
ssdeep: http://code.google.com/p/pyssdeep/
Pyrex: http://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/
"""
class fileinspecttools:
"""
based from 'Building a Malware Zoo' whitepaper
http://www.sans.org/reading_room/whitepapers/malicious/building-malware-zoo_33543
"""
def __repr__(self):
if not self:
return 'Attrs()'
return 'Attrs(%s)' % list.__repr__(self)
def __init__(self):
self.value = ""
self.FS_ROOT = 'c:\\'
self.TRIDPATH = os.path.join(os.getcwd(), "trid")
self.SSDEEPPATH = os.path.join(os.getcwd(), "ssdeep", "ssdeep")
self.DATABASEFILE = os.path.join(os.getcwd(), "ssdeep","database.dat")
def getSize(self, filename):
#Return file size
size = os.path.getsize(filename)
return size
def getFileType(self,filename):
#Execute "file -b <filename>"
cmd = self.TRIDPATH + " -d: "+filename
pipe = os.popen(cmd)
results = pipe.read()
return results
def getHash(self, filename):
data=open(filename, "r").read()
#Compute MD5 and SHA1 hashes
md5hash=hashlib.md5(data).hexdigest()
sha1hash=hashlib.sha1(data).hexdigest()
#Build return structure
results=[md5hash,sha1hash]
return results
def getFuzzyHash(self, filename):
match = ""
#Compute fuzzy hashes and fuzzy matches
cmd = self.SSDEEPPATH + " -bm "+ self.DATABASEFILE + " " + filename
if(os.path.isfile(self.DATABASEFILE)):
pipe = os.popen(cmd)
match = pipe.read()
else:
#create the file with the header
fo = open(self.DATABASEFILE, "wb")
fo.write("ssdeep,1.1--blocksize:hash:hash,filename\n")
fo.close()
cmd = self.SSDEEPPATH + " -b " + filename
pipe = os.popen(cmd)
fuzzyHash = pipe.read().split("\n")
#add to db
# Open a file
if(match == ""):
fo = open(self.DATABASEFILE, "a+")
fo.write(fuzzyHash[1] + "\n")
fo.close()
results = [fuzzyHash[1], match]
return results
def extractStrings(self,filename):
frag = ""
strList = []
bufLen = 2048
FRAG_LEN = 4 #Min length to report as string
fp = open(filename, "rb")
offset = 0
buf = fp.read(bufLen)
while buf:
for char in buf:
#Uses curses library to locate printable chars in binary files
try:
if (unicodedata.category(unicode(char))[0] in 'LNPS')==False:
if len(frag)>FRAG_LEN:
strList.append([hex(offset-len(frag)),frag])
frag = ""
else:
frag = frag + char
offset+=1
buf=fp.read(bufLen)
except ValueError:
pass # UnicodeDecodeError: 'ascii' codec can't decode byte
return strList
def getEntropy(self,filename):
"""
Calculate the entropy of a chunk of data.
Generally, the higher the entropy the greater
the chance the sample is compressed or encrypted
"""
entropy = 0
frag = ""
strList = []
bufLen = 2048
FRAG_LEN = 4 #Min length to report as string
fp = open(filename, "rb")
offset = 0
buf = fp.read(bufLen)
#Uses curses library to locate printable chars in binary files
try:
for x in range(256):
p_x = float(buf.count(chr(x)))/len(buf)
if p_x > 0:
entropy += - p_x*math.log(p_x,2)
except ValueError:
pass # UnicodeDecodeError: 'ascii' codec can't decode byte
fp.close()
return entropy
def Examine(self, filename):
print "Analysing " + filename
print " Filesize: " + str(self.getSize(filename)) + " bytes"
print " Filetype: " + str(self.getFileType(filename))
hashes = self.getHash(filename)
print " MD5 hash: " + str(hashes[0])
print " SHA1 hash: " + str(hashes[1])
fuzzyhashes= self.getFuzzyHash(filename)
print " Fuzzy hash: " + str(fuzzyhashes[0])
print " Previous match: " + str(fuzzyhashes[1])
print " Strings found: " + str(self.extractStrings(filename))
print " Entropy [higher=compressed or encrypt]: " + str(self.getEntropy(filename))
def test():
t=fileinspecttools()
print "*******"
t.Examine(os.path.join(os.getcwd(), "samples", "sample.bmp"))
print "*******"
t.Examine(os.path.join(os.getcwd(), "samples", "sample.7z"))
#t.Examine(os.path.join(os.getcwd(), "trid.exe"))
print "*******"
#Script entry point: run the self-test against the bundled samples.
if __name__ == '__main__':
    test()
| Python |
#
# jQuery File Tree
# Python/Django connector script
# By Martin Skou
#
import os
import urllib
def dirlist(request):
r=['<ul class="jqueryFileTree" style="display: none;">']
try:
r=['<ul class="jqueryFileTree" style="display: none;">']
d=urllib.unquote(request.POST.get('dir','c:\\temp'))
for f in os.listdir(d):
ff=os.path.join(d,f)
if os.path.isdir(ff):
r.append('<li class="directory collapsed"><a href="#" rel="%s/">%s</a></li>' % (ff,f))
else:
e=os.path.splitext(f)[1][1:] # get .ext and remove dot
r.append('<li class="file ext_%s"><a href="#" rel="%s">%s</a></li>' % (e,ff,f))
r.append('</ul>')
except Exception,e:
r.append('Could not load directory: %s' % str(e))
r.append('</ul>')
return HttpResponse(''.join(r)) | Python |
class Error:
    """One scraping error: which sample, where it lives, and what went wrong."""
    def __init__(self, sample, path, message):
        self.sample = sample
        self.path = path
        #str() also accepts exception objects; trailing whitespace is
        #stripped so dump() stays one error per line.
        self.message = str(message).rstrip()
class ErrorCollator:
def __init__(self):
self.errors = []
def add(self,error):
self.errors.append(error)
def hasErrors(self):
if len(self.errors)<=0:
return False
return True
def dump(self):
for error in self.errors:
print error.sample,error.path,error.message
def emailAlert(self, source, notifyList, smtp_server="localhost",
subject = "ICAT metadata scraping errors"):
if len(self.errors)<=0:
return
import smtplib
SERVER = smtp_server
FROM = source
TO = notifyList
SUBJECT = subject
TEXT = "This message was sent with Python's smtplib."
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
elist =[message]
for error in self.errors:
elist.append( " ".join([error.sample, error.path, error.message, "\n"]))
error_s = "".join(elist)
# Send the mail
server = smtplib.SMTP(SERVER)
server.sendmail(FROM, TO, error_s)
server.quit()
| Python |
#!/usr/bin/env python
import sys
from config import *
import logging
from errorCollator import *
LOGFILENAME = __file__ + ".log"
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
filename = LOGFILENAME, level=logging.INFO)
hiccups = ErrorCollator()
from ZSI import FaultException
from xmlMapping.dbConnect import *
class InstrumentWatcher:
def __init__(self):
self._log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
@staticmethod
def oracleEscape(self,strn):
if not strn: return ''
if strn=='none': return ''
if strn=='None': return ''
out = []
for char in strn:
if char =="'":
out.append("'")
out.append(char)
else:
out.append(char)
ret = "".join(out)
return ret
def cleanup(self):
if self.dbcnx:
self.dbcnx.finish()
self._log.info("Closed ICAT DB connection")
def main(self):
#populate remote ICAT FACILITY_USER table with these users
self._log.info("Connecting: ICAT Oracle Database")
bypass=False
try:
dbcnx = DBConnection(bypass)
dbcnx.cxInit(ICATDBHOST, ICATDBPORT, ICATDBNAME, ICATDBUSER, ICATDBPASS)
dbcnx.connect() # establish tcp communication
except Exception, e:
self._log.critical(e)
self._log.critical( "You may need to port forward over SSH to tunnel through firewall")
self._log.critical( "e.g. ssh dataminx -L 1521:localhost:1521")
hiccups.add(Error(__name__,__file__,e) )
return 2
self._log.info("Remote connections established" )
DBSCHEMA="ICAT"
TABLE = 'INSTRUMENT'
try:
columns =dbcnx.query("""SELECT column_name FROM COLS
WHERE table_name='%(TABLE)s'""" %
{'DBSCHEMA' : DBSCHEMA, 'TABLE' : TABLE } )
except Exception, msg:
#import traceback
#traceback.print_exc(file=sys.stdout)
self._log.critical("Select header from %s failed\n%s" ,TABLE, msg)
hiccups.add(Error(__name__,__file__,msg) )
return 2
header = []
if not columns or columns.ntuples < 1:
self._log.debug("no column headers exist")
else:
for row in columns:
header.append(row[0])
fileformats = []
try:
fields = ", ".join(header)
# print fields
oldformats = dbcnx.query("""SELECT %(fields)s FROM %(TABLE)s """ %
{'DBSCHEMA' : DBSCHEMA ,'fields' : fields, 'TABLE' : TABLE } )
except Exception, msg:
#import traceback
#traceback.print_exc(file=sys.stdout)
self._log.critical("Select from %s failed\n%s" ,TABLE, msg)
hiccups.add(Error(__name__,__file__,msg) )
return 2
if not oldformats or oldformats.ntuples < 1:
self._log.debug("No %s entries exist", TABLE)
else:
for row in oldformats:
# print row
d = {}
for i in range(len(header)):
d[header[i]] = row[i]
fileformats.append(d)
for code in DataFormats.ARTIFACTS.keys():
name = code
short_name = name
type = "Crystallography"
description = " "
ftype = DataFormats.FILETYPES[code]
versions = ftype['version']
if versions:
for version in versions:
match = False
for itype in fileformats:
if itype['NAME'] != ftype['format']: continue
if itype['VERSION'] != version: continue
match = True
break
if match: continue
self._log.info("No current ICAT match for %s %s" , ftype['format'], version)
self._log.info("Adding new entry ...")
injectNewFileFormat(ftype,version)
else: # no version
match = False
for itype in fileformats:
if itype['NAME'] != ftype['format']: continue
match = True
break
if match: continue
self._log.info("No match for %s " , ftype['format'])
injectNewFileFormat(ftype,'1')
self._log.info("ICAT %s and config.py DataFormats are reconciled." , TABLE)
self._log.info("Nothing else to add.")
    def injectNewFileFormat(self, format, version):
        """Insert one row into the ICAT INSTRUMENT table.

        format  -- dict with 'format', 'format_type' and 'description' keys
                   (a DataFormats entry)
        version -- version string; NOTE(review): it is written into the
                   SHORT_NAME column -- this looks copy-pasted from the
                   DATAFILE_FORMAT injector, confirm it is intended.

        Commits immediately; exits the whole process on any DB error.
        dbcnx / DBSCHEMA / oracleEscape are presumably module-level
        globals -- TODO confirm against the top of this file.
        """
        try:
            # (NAME, VERSION, FORMAT_TYPE, DESCRIPTION, SEQ_NUMBER, MOD_TIME, MOD_ID, CREATE_TIME, CREATE_ID, FACILITY_ACQUIRED, DELETED)
            # Values
            # ('nexus', '3.0.0', 'HDF5', 'Neutron and X-Ray data format.', 999, TO_TIMESTAMP('12/09/2007 13:30:16.4','DD/MM/YYYY HH24:MI:SS.FF'), 'overlord', TO_TIMESTAMP('12/09/2007 13:30:16.4','DD/MM/YYYY HH24:MI:SS.FF'), 'overlord', 'Y', 'N');
            query= """
        INSERT INTO INSTRUMENT (NAME, SHORT_NAME,
        TYPE, DESCRIPTION, SEQ_NUMBER, MOD_TIME, MOD_ID,
        CREATE_TIME, CREATE_ID, FACILITY_ACQUIRED, DELETED) VALUES
        ('%(name)s', '%(version)s', '%(type)s', '%(description)s',
        999, systimestamp, 'overlord', systimestamp,'overlord', 'Y', 'N') """ % \
            {'DBSCHEMA' : DBSCHEMA,
             'name': oracleEscape(format['format']),
             'version': oracleEscape(version),
             'type': oracleEscape(format['format_type']),
             'description':oracleEscape(format['description']),
             } # )
            print query
            res = dbcnx.query(query)
            res = dbcnx.query("COMMIT")
        except Exception, msg:
            print "Failed to inject new dataformat: ", format
            print msg
            sys.exit()
        return
if __name__ == '__main__':
    # Single cron-style run; collected errors are emailed afterwards.
    watcher = InstrumentWatcher()
    try:
        rv = watcher.main()
    except Exception, msg:
        watcher._log.critical(msg)
    watcher.cleanup()
    import socket
    # first DNS alias of this host, used in the alert sender address
    thisHost = socket.gethostbyaddr(socket.gethostname())
    fqdn = thisHost[1][0]
    if hiccups.hasErrors():
        hiccups.dump()
        hiccups.emailAlert("instrumentWatcher@"+fqdn, EMAIL_ALERT_LIST, EMAIL_SMTP_HOST,
                           "New ICAT instrument addition errors " + __file__ )
| Python |
#!/usr/bin/env python
# userWatcher: mirror new CSAF ProposalDB scientists into the ICAT
# FACILITY_USER table.  Intended to be run periodically (e.g. from cron).
from config import *
import sys
import logging
from errorCollator import *
# log to <scriptname>.log next to the script
LOGFILENAME = __file__ + ".log"
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    filename = LOGFILENAME, level=logging.INFO)
# collects non-fatal problems for a single summary email at exit
hiccups = ErrorCollator()
from pdbWrapper import PDBWS
from ZSI import FaultException
from xmlMapping.dbConnect import *
class UserWatcher:
def __init__(self):
self._log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
@staticmethod
def oracleEscape(self,strn):
if not strn: return ''
if strn=='none': return ''
if strn=='None': return ''
out = []
for char in strn:
if char =="'":
out.append("'")
out.append(char)
else:
out.append(char)
ret = "".join(out)
return ret
def cleanup(self):
if self.pdbcnx:
self.pdbcnx.logout()
self._log.info("Closed CSAF ProposalDB connection")
if self.dbcnx:
self.dbcnx.finish()
self._log.info("Closed ICAT DB connection")
def main(self):
# ping proposaldb server, get all new users since given date/time,
self._log.info("Connecting CSAF ProposalDB")
self.pdbcnx = PDBWS(PDBWSURL,PDBWSUSER,PDBWSPASS)
try:
date = (2003,1,1, 0,0,1, 0,0,0) # ZSI needs 9 elem tuple for dates
newUsers = self.pdbcnx.getAllScientistsSince(date)
scientists = newUsers['scientists']
for scientist in scientists:
self._log.debug(" %s %s %s %s ",
scientist['first_Names'],scientist['last_Name'],
scientist['login_ID'],scientist['email'] )
self._log.info(" %d new scientists since %s", len(scientists) , date)
except FaultException, e:
self._log.critical("-------------- %s ",e)
hiccups.add(Error(__name__,__file__,e) )
return 1
#populate remote ICAT FACILITY_USER table with these users
self._log.info("Connecting: ICAT Oracle Database")
bypass=False
try:
self.dbcnx = DBConnection(bypass)
self.dbcnx.cxInit(ICATDBHOST, ICATDBPORT, ICATDBNAME, ICATDBUSER, ICATDBPASS)
self.dbcnx.connect() # establish tcp communication
except Exception, e:
self._log.critical(e)
self._log.critical( "You may need to port forward over SSH to tunnel through firewall")
self._log.critical( "e.g. ssh dataminx -L 1521:localhost:1521")
hiccups.add(Error(__name__,__file__,e) )
hiccups.add(Error(__name__,__file__,
"Potentialy the database server " + ICATDBHOST + " is down, or we have misconfigured connection settings (in config.py).") )
return 2
self._log.info("Remote connections established" )
addingOnlyOneNewUser=0
for scientist in scientists:
try:
DBSCHEMA="ICAT"
res=self.dbcnx.query("""SELECT * FROM FACILITY_USER WHERE FACILITY_USER_ID='%(name)s' OR FACILITY_USER_ID='%(email)s'""" % {'DBSCHEMA' : DBSCHEMA,'name': scientist['login_ID'], 'email': scientist['email'] } )
except Exception, msg:
#import traceback
#traceback.print_exc(file=sys.stdout)
self._log.critical("Select from facility_user failed\n%s" % msg)
hiccups.add(Error(__name__,__file__,msg) )
return 3
if not res or res.ntuples > 1:
self._log.debug("uhoh many results")
self._log.debug(" %s %s %s %s ",
scientist['first_Names'],scientist['last_Name'],
scientist['login_ID'],scientist['email'] )
continue
elif res.ntuples ==1:
continue
for row in res:
self._log.debug(row)
self._log.debug(" %s %s %s %s ",
scientist['first_Names'],scientist['last_Name'],
scientist['login_ID'],scientist['email'] )
else: # res.ntuples <=0
self._log.info("no ICAT entry exists for:")
self._log.info(" %s %s %s %s ",
scientist['first_Names'],scientist['last_Name'],
scientist['login_ID'],scientist['email'] )
addingOnlyOneNewUser += 1
# if addingOnlyOneNewUser > 1:
# print "skipping ICAT add of" , scientist['login_ID']
# continue
# print scientist
first = scientist['first_Names']
parts = first.split(" ")
init = []
middle = ''
for part in parts:
init.append(part[0].upper())
init.append(".")
initial = "".join(init)
if len(first)>1:
middle = " ".join(parts[1:])
first = parts[0]
try:
query= """INSERT INTO FACILITY_USER (FACILITY_USER_ID, FEDERAL_ID,
TITLE, INITIALS, FIRST_NAME, MIDDLE_NAME, LAST_NAME,
MOD_TIME, MOD_ID, CREATE_TIME, CREATE_ID,
FACILITY_ACQUIRED, DELETED) VALUES
('%(name)s', '%(name)s', '%(title)s', '%(initials)s', '%(first)s',
'%(middle)s', '%(last)s',sysdate,'damian',sysdate,'damian', 'Y','N')""" % \
{'DBSCHEMA' : DBSCHEMA,
'name': oracleEscape(scientist['login_ID']),
'email': oracleEscape(scientist['email']),
'title': oracleEscape(scientist['title']),
'initials':oracleEscape(initial),
'first': oracleEscape(first),
'middle': oracleEscape(middle),
'last': oracleEscape(scientist['last_Name']),
} # )
self._log.debug(query)
########
res=self.dbcnx.query(query)
self._log.info("Added new ICAT user %s", scientist['login_ID'] )
########
except Exception, msg:
self._log.critical("Failed to inject new facility_user: %s\n" % scientist['email'] )
self._log.critical(msg)
hiccups.add(Error(__name__,__file__,msg) )
return 4
self.dbcnx.query("commit")
self._log.info(" %d new ICAT additions", addingOnlyOneNewUser )
if __name__ == '__main__':
    # Single cron-style run; collected errors are emailed afterwards.
    watcher = UserWatcher()
    try:
        rv = watcher.main()
    except Exception, msg:
        watcher._log.critical(msg)
    watcher.cleanup()
    import socket
    # first DNS alias of this host, used in the alert sender address
    thisHost = socket.gethostbyaddr(socket.gethostname())
    fqdn = thisHost[1][0]
    if hiccups.hasErrors():
        hiccups.dump() # stdout - should go to cron post run email
        hiccups.emailAlert("userWatcher@"+fqdn, EMAIL_ALERT_LIST, EMAIL_SMTP_HOST,
                           "New ICAT user addition errors " + __file__ )
| Python |
#!/usr/bin/env python
# icatDataformats: one-shot script that reconciles DataFormats.FILETYPES
# from config.py with the ICAT DATAFILE_FORMAT table.
import sys
from config import *
# ping proposaldb server, get all new users since given date/time,
from ZSI import FaultException
#populate remote ICAT FACILITY_USER table with these users
print "Connecting: ICAT Oracle Database"
from xmlMapping.dbConnect import *
bypass=False
# connect to the ICAT Oracle database; fatal if unreachable
try:
    dbcnx = DBConnection(bypass)
    dbcnx.cxInit(ICATDBHOST, ICATDBPORT, ICATDBNAME, ICATDBUSER, ICATDBPASS)
    dbcnx.connect() # establish tcp communication
except Exception, e:
    print e
    print "You may need to port forward over SSH to tunnel through firewall"
    print "e.g. ssh dataminx -L 1521:localhost:1521"
    sys.exit(1)
print "Connected to ",ICATDBUSER
def oracleEscape(strn):
    """Prepare a string for embedding in a single-quoted Oracle SQL literal.

    Empty/None values and the literal strings 'none'/'None' collapse to the
    empty string; every single quote is doubled (standard SQL escaping).
    """
    if not strn or strn in ('none', 'None'):
        return ''
    return strn.replace("'", "''")
DBSCHEMA="ICAT"
TABLE = 'DATAFILE_FORMAT'
# discover the column names of the table from Oracle's COLS view
try:
    columns =dbcnx.query("""SELECT column_name FROM COLS
    WHERE table_name='%(TABLE)s'""" %
                         {'DBSCHEMA' : DBSCHEMA, 'TABLE' : TABLE } )
except Exception, msg:
    import traceback
    traceback.print_exc(file=sys.stdout)
    print "Select header from ", TABLE, " failed\n%s" % msg,
    sys.exit()
header = []
if not columns or columns.ntuples < 1:
    print " no column headers exist"
else:
    for row in columns:
        header.append(row[0])
# read every existing DATAFILE_FORMAT row into a list of dicts keyed by
# column name (used by the reconciliation loop below)
fileformats = []
try:
    fields = ", ".join(header)
    # print fields
    oldformats = dbcnx.query("""SELECT %(fields)s FROM %(TABLE)s """ %
                             {'DBSCHEMA' : DBSCHEMA ,'fields' : fields, 'TABLE' : TABLE } )
except Exception, msg:
    import traceback
    traceback.print_exc(file=sys.stdout)
    print "Select from datafile_format failed\n%s" % msg,
    sys.exit()
if not oldformats or oldformats.ntuples < 1:
    print " no file format entries exist"
else:
    for row in oldformats:
        # print row
        d = {}
        for i in range(len(header)):
            d[header[i]] = row[i]
        fileformats.append(d)
def injectNewFileFormat(format, version):
    """Insert one (name, version) row into the ICAT DATAFILE_FORMAT table.

    format  -- a DataFormats.FILETYPES entry: dict with 'format',
               'format_type' and 'description' keys
    version -- string for the VERSION column

    Uses the module-level dbcnx connection and oracleEscape; commits
    immediately and aborts the whole script on any failure.
    """
    try:
        # (NAME, VERSION, FORMAT_TYPE, DESCRIPTION, SEQ_NUMBER, MOD_TIME, MOD_ID, CREATE_TIME, CREATE_ID, FACILITY_ACQUIRED, DELETED)
        # Values
        # ('nexus', '3.0.0', 'HDF5', 'Neutron and X-Ray data format.', 999, TO_TIMESTAMP('12/09/2007 13:30:16.4','DD/MM/YYYY HH24:MI:SS.FF'), 'overlord', TO_TIMESTAMP('12/09/2007 13:30:16.4','DD/MM/YYYY HH24:MI:SS.FF'), 'overlord', 'Y', 'N');
        query= """
    INSERT INTO DATAFILE_FORMAT (NAME, VERSION,
    FORMAT_TYPE, DESCRIPTION, SEQ_NUMBER, MOD_TIME, MOD_ID,
    CREATE_TIME, CREATE_ID, FACILITY_ACQUIRED, DELETED) VALUES
    ('%(name)s', '%(version)s', '%(type)s', '%(description)s',
    999, systimestamp, 'overlord', systimestamp,'overlord', 'Y', 'N') """ % \
        {'DBSCHEMA' : DBSCHEMA,
         'name': oracleEscape(format['format']),
         'version': oracleEscape(version),
         'type': oracleEscape(format['format_type']),
         'description':oracleEscape(format['description']),
         } # )
        print query
        res = dbcnx.query(query)
        res = dbcnx.query("COMMIT")
    except Exception, msg:
        print "Failed to inject new dataformat: ", format
        print msg
        sys.exit()
    return
# Reconcile: for every configured file type (and each of its versions, if
# any) make sure a matching DATAFILE_FORMAT row already exists in ICAT,
# otherwise insert it.
for code in DataFormats.FILETYPES:
    ftype = DataFormats.FILETYPES[code]
    versions = ftype['version']
    if versions:
        for version in versions:
            match = False
            for itype in fileformats:
                if itype['NAME'] != ftype['format']: continue
                if itype['VERSION'] != version: continue
                match = True
                break
            if match: continue
            print "No current ICAT match for" , ftype['format'], version
            print "Adding new entry ..."
            injectNewFileFormat(ftype,version)
    else: # no version
        match = False
        for itype in fileformats:
            if itype['NAME'] != ftype['format']: continue
            match = True
            break
        if match: continue
        print "no match for" , ftype['format']
        injectNewFileFormat(ftype,'1') # default version '1'
print "ICAT datafile_formats and config.py FILETYPES are reconciled."
print "Nothing else to add."
dbcnx.finish()
| Python |
#!/usr/bin/env python
# fileWatcher: crawls the watched data folders, harvests metadata from
# newly modified datasets and ingests it into ICAT as XML.
from config import *
import sys
import logging
# log to <scriptname>.log next to the script
LOGFILENAME = __file__ + ".log"
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    filename = LOGFILENAME, level=logging.INFO)
logger = logging.getLogger("fileWatcher")
from errorCollator import *
# collects non-fatal problems for a single summary email at exit
hiccups = ErrorCollator()
def dumpHiccups():
    """Email the accumulated error report, if any errors were collected."""
    if not hiccups.hasErrors():
        return
    import socket
    host_entry = socket.gethostbyaddr(socket.gethostname())
    sender_host = host_entry[1][0]  # first DNS alias of this machine
    # hiccups.dump()  -- stdout dump disabled; email only
    hiccups.emailAlert("fileWatcher@" + sender_host, EMAIL_ALERT_LIST, EMAIL_SMTP_HOST)
import re
import os.path
from fileHandlers.sfrmProcess import SFRM
from fileHandlers.oxfordProcess import OXFORD
logger.info("Connecting CSAF ProposalDB")
from ZSI import FaultException
from pdbWrapper import PDBWS
# proposal-database web-service connection; fatal if unreachable
try:
    pdbcnx = PDBWS(PDBWSURL,PDBWSUSER,PDBWSPASS)
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to establish communication with " +
                      PDBWSURL + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
from icatAdminWrapper import ICATAWS
from icatWrapper import ICATWS
from ZSI import FaultException
# ICAT web-service handles; populated by icatReconnect()
icatacnx = None
sessionId = None
icatcnx = None
def icatReconnect():
    """(Re)establish the ICAT admin and user web-service connections.

    Sets the module globals icatacnx, icatcnx and sessionId.
    Returns True on success, False otherwise (errors are logged and
    added to hiccups).
    """
    global icatacnx
    global sessionId
    global icatcnx
    logger.info("Connecting: ICAT admin")
    try:
        icatacnx = ICATAWS(ICATADMINURL,ICATADMINUSER,ICATADMINPASS)
    except Exception, e:
        logger.critical(e)
        hiccups.add(Error(__name__,__file__,e) )
        hiccups.add(Error(__name__,__file__,"Unable to establish communication with " +
                          ICATADMINURL + " Aborting.") )
        return False
    sessionId = icatacnx.sessionId
    logger.info("Connecting: ICAT WS")
    try:
        icatcnx = ICATWS(ICATURL,ICATUSER,ICATPASS,sessionId)
    except Exception, e:
        logger.critical(e)
        hiccups.add(Error(__name__,__file__,e) )
        hiccups.add(Error(__name__,__file__,"Unable to establish communication with " +
                          ICATURL + " Aborting.") )
        return False
    # refresh the session id with an explicit login
    sessionId = icatcnx.login(ICATUSER,ICATPASS);
    return True
# the initial ICAT connection is mandatory
if not icatReconnect():
    dumpHiccups()
    sys.exit(1)
# dump some reference data at debug level to prove the connection works
logger.debug(str(sessionId) )
logger.debug(str(icatcnx.listInstruments(sessionId)) )
logger.debug(str(icatcnx.listDatasetTypes(sessionId)) )
logger.debug(str(icatcnx.listInvestigationTypes(sessionId)) )
logger.debug(str(icatcnx.listDatasetStatus(sessionId)) )
#logger.info(icatcnx.listParameters(sessionId)
#logger.info(icatcnx.listDatafileFormats(sessionId)
#logger.info(icatcnx.listRoles(sessionId)
#logger.info(icatcnx.searchByUserSurname(sessionId,"Turner")
#invs = icatcnx.getMyInvestigations(sessionId)
#logger.info(invs
#invHolder = icatcnx.getInvestigation(sessionId,5700)
def methodDump(holder):
    """Debug-log the getter names and data attributes of *holder*'s class."""
    func_type = type(methodDump)
    variables = {}
    getters = {}
    setters = {}
    sundries = {}
    for name, member in holder.__class__.__dict__.iteritems():
        if type(member) != func_type:
            variables[name] = member
        elif name.startswith("set_"):
            setters[name] = member
        elif name.startswith("get_"):
            getters[name] = member
        else:
            sundries[name] = member
    logger.debug(str(getters.keys()))
    logger.debug(str(variables.keys()))
def datasetsDump(list):
    """Debug-log a summary of ICAT dataset holders and their first datafile."""
    logger.debug("datasets: %d" , len(list))
    for dataset in list:
        logger.debug("%s %s %s", dataset._id, dataset._name, dataset._datasetType)
        if not hasattr(dataset, "_datafileCollection"): continue
        datafiles = dataset._datafileCollection
        logger.debug("datafiles %d" , len(datafiles))
        logger.debug("")
        if len(datafiles)<=0: continue
        # only the first datafile is sampled for the log
        datafile0 = datafiles[0]
        try:
            logger.debug("%s %s" , datafile0._name, datafile0._datafileVersion )
            logger.debug(str( vars(datafile0._datafileFormat._datafileFormatPK)) )
            logger.debug(" %s" , datafile0._datafileModifyTime)
        except Exception, e:
            logger.warning(e)
logger.info("Connecting: Bruker Database")
from xmlMapping.dbConnect import *
bypass=False
# Bruker sample database (psycopg2); fatal if unreachable
try:
    dbcnx = DBConnection(bypass)
    dbcnx.psycopg2Init(BRKDBHOST, BRKDBPORT, BRKDBNAME, BRKDBUSER, BRKDBPASS)
    dbcnx.connect() # establish tcp communication
    #dbcnx.query("SET search_path TO scd,public") # only for psycopg2?
    dbcnx.cursor.execute("SET search_path TO scd,public") # only for psycopg2?
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to establish communication with " +
                      BRKDBHOST + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
# Three XML-generation templates: new APEX ingest, APEX amend, NOVA ingest.
# Any load failure is fatal.
ICATTEMPLATE = "map-csaf_apex2icat.xml"
logger.info("Loading ICAT XML generation template %s", ICATTEMPLATE)
from xmlMapping.templateParser import TemplateParser
try:
    newIcatTemplateWalker = TemplateParser()
    newIcatTemplateWalker.registerDBConnection(dbcnx)
    newIcatTemplateWalker.initTemplate(ICATTEMPLATE)
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to load XML template " +
                      ICATTEMPLATE + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
OLDICATTEMPLATE = "map-csaf_apex2icat-upd.xml"
logger.info("Loading ICAT XML amend template %s", OLDICATTEMPLATE)
try:
    oldIcatTemplateWalker = TemplateParser()
    oldIcatTemplateWalker.registerDBConnection(dbcnx)
    oldIcatTemplateWalker.initTemplate(OLDICATTEMPLATE)
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to load XML template " +
                      OLDICATTEMPLATE + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
NOVATEMPLATE = "map-csaf_nova2icat.xml"
logger.info("Loading NOVA ICAT XML template %s", NOVATEMPLATE )
try:
    novaIcatTemplateWalker = TemplateParser()
    novaIcatTemplateWalker.initTemplate(NOVATEMPLATE)
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to load XML template " +
                      NOVATEMPLATE + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
def myDataFolderHandler(path, sample, mode, collection):
    """Callback registered with FolderRecurser for each harvested dataset.

    path       -- folder path of the dataset collection
    sample     -- sample / investigation name derived from the folder
    mode       -- "create" for a newly seen collection, else an update
    collection -- recurser collection object (hierarchy, dataFiles,
                  dataFormat, lastModified, ...)

    Looks the sample up in the Bruker and proposal databases, extracts
    per-file metadata (SFRM / OXFORD / CIF), generates ICAT XML via the
    matching template and ingests it.  Returns True when the collection
    was handled; returns None on the various early-exit error paths.
    """
    name = None
    if not collection.dataFilesExist:
        logger.warning("Not processing %s until datafiles present.",path)
        if mode == "create":
            # Or should we set a bogus timestamp in our records:
            collection.lastModified = 0 # Jan 1 1970 - well before first pass
            # record initial alert for email to manager
            hiccups.add(Error(sample,path,"No data files exist, yet ..."))
            # and pretend we handled it:
            return True
        else:
            return
    # we just need a datafile to get the version number ...
    datasetFolders = collection.dataFiles
    for folder in datasetFolders.keys():
        recent = datasetFolders[folder]['recentFileList']
        expired = datasetFolders[folder]['expiredFileList']
        if len(recent)>0:
            name = recent[0]
            break
        elif len(expired)>0:
            name = expired[0]
            break
    logger.debug("%s %s %s", mode , sample, name)
    # if collection.dataFormat != DataFormats.SMART:
    #     logger.info("==================SKIPPING NON SMART============"
    #     return
    # logger.info("==================BYPASSING HANDLER============"
    # return
    # BRUKER SPECIFIC
    if collection.dataFormat in [DataFormats.SMART, DataFormats.APEX]:
        # look up all revisions of this sample in the Bruker database
        try:
            res = dbcnx.query("SELECT samples_id, sample_name, revision, when_created FROM samples WHERE sample_name='%s' ORDER by revision DESC" % sample)
        except DBConnectSQLSyntaxException, msg:
            hiccups.add(Error(sample,path,msg))
            logger.info("Select from samples failed\n%s" , msg )
            return
        except DBConnectSQLResultException, msg:
            hiccups.add(Error(sample,path,msg))
            logger.info("Select from samples failed\n%s" , msg )
            return
        fnames = [ 'samples_id', 'sample_name', 'revision', 'when_created']
        for i in range(len(fnames)):
            # NOTE(review): four %-15s placeholders but a single argument --
            # this debug call raises a formatting error that the logging
            # module swallows; probably meant logger.debug('%-15s', fnames[i])
            logger.debug('%-15s%-15s%-15s%-15s' , fnames[i] )
        # Next, print out the instances.
        samples = []
        for i in range(res.ntuples):
            samples.append([])
            strn = ""
            for j in range(res.nfields):
                samples[i].append(res.getvalue(i, j))
                try:
                    strn = strn + "%-15s" % res.getvalue(i, j)
                except:
                    # NOTE(review): strn is a string here, so strn(...) raises
                    # TypeError inside this handler -- likely meant str(...)
                    logger.info(strn(res.getvalue(i, j)) )
            logger.debug(strn)
        res.clear()
    # End of Bruker specific
    # resolve the sample to a proposal-system log entry and its scientists
    try:
        l_id = int(pdbcnx.xgetLogIdFromDataset(sample)['l_id'])
        log = pdbcnx.xgetLogFromId(l_id)
        #print log
        s_id = int(log['s_id'])
        sampledata = pdbcnx.xgetSampleFromId(s_id)
        logger.info(" submissionID: %s",sampledata['submissionid'])
        users = pdbcnx.xgetScientistBeans(l_id)
        scientists = users['scientists']
        for scientist in scientists:
            logger.debug(" %s %s",scientist['first_Names']," ",scientist['last_Name'])
    except FaultException, e:
        hiccups.add(Error(sample,path,e))
        hiccups.add(Error(sample,path,"No logged users associated with sample" ))
        logger.warning(e)
        return
    logger.debug(str(log) )
    logger.debug(str(sampledata))
    #
    # Pull metadata from file
    # -----------------------
    #
    fname = os.path.split(name)[-1]
    file= open(name)
    if collection.dataFormat in [DataFormats.SMART, DataFormats.APEX]:
        sf = SFRM(file)
        meta = sf.getMetadata()
        # NOTE(review): three placeholders but a single tuple argument --
        # the formatting fails silently inside logging
        logger.debug(" V:%2s user: %-21s %s", (meta["VERSION"], meta["USER"], fname) )
    elif collection.dataFormat == DataFormats.NOVA:
        sf = OXFORD(file)
        meta = sf.getMetadata()
        logger.debug(" V:%2s user: %-21s %s", (meta["version"], meta["manufact"], fname) )
    file.close()
    # for NOVA data, also look for a refined structure CIF in struct/tmp
    cif = None
    if collection.dataFormat == DataFormats.NOVA and \
           collection.hierarchy['folders'].has_key('struct/tmp'):
        cifholder = collection.hierarchy['folders']['struct/tmp']
        from fileHandlers.cifHandler import ODCIF
        from os.path import split
        for recent in cifholder['recentFileList']:
            logger.debug(str(recent))
            if recent[2] != 'cif':
                continue
            dir,cifname = split(recent[0])
            if not cifname == sample + '.cif':
                continue
            ciffile = open(recent[0])
            cfh = ODCIF(ciffile)
            cif = cfh.getMetadata()
            break
    logger.info("CIF file: %s" , cif)
    #
    # Hit ICAT for existing entries
    # -----------------------------
    #
    investigationName = sample
    logger.info("Retrieving ICAT investigation data for %s", investigationName)
    from icatWS.ICATService_types import ns0
    advSearchDetails= ns0.advancedSearchDetails_Def("dummy")
    advSearchDetails._caseSensitive=True
    advSearchDetails._investigationName = investigationName
    #advSearchDetails._investigationInclude = "DATASETS_ONLY"
    advSearchDetails._investigationInclude = "DATASETS_AND_DATAFILES"
    try:
        invHolderList = icatcnx.searchByAdvanced(sessionId,advSearchDetails)
    except Exception, e:
        hiccups.add(Error(sample,path,e))
        #hiccups.add(Error(sample,path,"No logged users associated with sample" ))
        # possibly the sessionId has timed out. it had an hour
        # but this could be slow ...
        # maybe we could respawn the connection?
        logger.warning("Attempting ICAT reconnection..." )
        if not icatReconnect():
            hiccups.add(Error(sample,path,e))
            logger.critical("Unexpected error: %s", sys.exc_info()[0] )
            logger.critical(e)
            # this is fatal!
            dumpHiccups()
            sys.exit(1)
        logger.warning("Sucess during ICAT reconnection..." )
        # now try query again ...
        try:
            invHolderList = icatcnx.searchByAdvanced(sessionId,advSearchDetails)
        except Exception, e:
            hiccups.add(Error(sample,path,e))
            logger.error("Unexpected error: %s", sys.exc_info()[0] )
            logger.error(e)
            # maybe this should also be fatal?
    # did we get a match? If not, then its a new investigation ...
    if invHolderList:
        logger.info("Need to update existing records for %s .......",sample)
        logger.debug(type(invHolderList) )
        invHolder = invHolderList[0]
        logger.debug("facility = %s",invHolder.Facility)
        logger.debug(str(vars(invHolder)) )
        datasets = invHolder.DatasetCollection
        datasetsDump(datasets)
    else:
        logger.info("No prior records for %s in ICAT!", sample)
        datasets = []
    # Which template to choose?
    # new investigation -> ingest template; existing -> amend template
    # (NOVA always uses its own template)
    if not datasets:
        if collection.dataFormat == DataFormats.NOVA:
            templateWalker = novaIcatTemplateWalker
        else:
            templateWalker = newIcatTemplateWalker
    else:
        if collection.dataFormat == DataFormats.NOVA:
            templateWalker = novaIcatTemplateWalker
        else:
            templateWalker = oldIcatTemplateWalker
    logger.info("Template walking: %s", templateWalker.templateFile )
    # these parameters are all accessible to the template parser
    params = {
        'LOG': log, # proposal system
        'USERS': scientists, # proposal system
        'DATASETS': datasets, # ICAT prior injected datafiles
        'PATH': path,
        'SAMPLE': sample,
        'CIF': cif,
        'SAMPLEID': int(log['l_id']),
        'FILEDATA': collection,
        'MODE': mode,
        'PURGE': [], # query this on return for files to delete
        'NAME': sample }
    if collection.dataFormat in [DataFormats.SMART, DataFormats.APEX]:
        params['SAMPLEDATA'] = sampledata # proposal system
        if len(samples)>0:
            params['SAMPLEID'] = samples[0][0] # Bruker db
            params['SNAME'] = samples[0][1] # Bruker db
            params['REVISION'] = samples[0][2] # Bruker db
        else: # no bruker database entry
            params['SNAME'] = sample
            params['REVISION'] = 1
    logger.debug("params: %s", str(params) )
    try:
        # apply XML generating template to
        # everything we know about the current sample
        templateWalker.applyTemplate(params)
    except Exception, msg:
        import traceback
        exc_type, exc_value, exc_traceback = sys.exc_info()
        slist = traceback.format_tb(exc_traceback )
        # traceback.print_exc(file=sys.stdout)
        logger.critical(str(slist[-1]))
        logger.critical(msg)
        hiccups.add(Error(sample,path,str(slist[-1])))
        hiccups.add(Error(sample,path,msg))
        hiccups.add(Error(sample,path,"Couldn't walk the template. Error in expectations"))
        # potentially this is a common template failure for many samples...?
        return
    logger.info("Walked template %s", templateWalker.templateFile )
    xmlDataDict = templateWalker.genData
    # build a filename for dumping the XML to disk
    # in principle we could
    sname = params['SAMPLE']
    rev = "1"
    if params.has_key('REVISION') and params['REVISION'] >1:
        rev = str(params['REVISION'])
        sname = sname + '-' + rev
    fname = os.path.join(ICATXMLDATA , sname +"-01" + ".xml" )
    upd = 1
    #build a version number
    while os.access(fname, os.F_OK): # already exists - so preserve previous files
        upd = upd + 1
        tname = sname + "-%02d" % (upd)
        fname = os.path.join(ICATXMLDATA , tname + ".xml" )
    DEBUG=0
    if DEBUG & 32:
        if xmlDataDict != None:
            import pprint
            pprint.pprint(xmlDataDict, indent=1)
    if not xmlDataDict:
        # something unexpected went wrong. No idea what.
        logger.warning("No data for %s ?????????", sample)
        return
    # otherwise
    # save the datastructure as XML to disc.
    icatxmlfile = writeXMLFile(fname,xmlDataDict)
    logger.info("Wrote XML instance %s", icatxmlfile)
    # NOTE(review): this early return makes the purge loop and the ingest
    # below unreachable whenever PURGE is non-empty -- looks like a
    # temporary abort left in place; confirm before removing.
    if len(params['PURGE'])>0:
        logger.info("%d old files to be purged from ICAT", len(params['PURGE']) )
        logger.warning("Aborting at purge - before ingest...")
        return True
    if len(params['PURGE'])>0:
        #gotta purge these before we can re-ingest modified
        for file in params['PURGE']:
            datafileId = file['datafile']
            try:
                #remove = icatcnx.deleteDataFile(sessionId,datafileId)
                remove = icatcnx.removeDataFile(sessionId,datafileId)
                logger.debug("purged %s %s", datafileId, file['filename'] )
            except Exception, e:
                logger.debug("purge failed for %s", datafileId )
                logger.debug(e)
                hiccups.add(Error(sample,file,"can't purge modified file from ICAT "))
    # in principle, now we could
    file = open(icatxmlfile)
    xmldata = file.read()
    file.close()
    start = time.time()
    success = "AAAAHhhhhh bugger!"
    try:
        logger.info("Ingesting at: %s", time.ctime(start) )
        success = icatcnx.ingestMetadata(sessionId,xmldata)
        stop = time.time()
    except Exception, e:
        stop = time.time()
        logger.warning("Ingest FAILED: %s", time.ctime(stop) )
        # NOTE(review): time.strftime expects a struct_time, not the float
        # stop-start -- this line raises inside logging; verify intent
        logger.warning("elapsed time: %s ", time.strftime("%d $%H:%M:%S",stop-start) )
        hiccups.add(Error(sample,path,"Couldn't ingest metadata for file "+icatxmlfile))
        logger.info(e)
        # move file to failed folder
        failed = os.path.join(ICATXMLDATA , FAILED)
        if not os.access(failed,os.F_OK):
            os.mkdir(failed, 0755)
        last_part = os.path.split(icatxmlfile)[1]
        os.rename(icatxmlfile, os.path.join(failed, last_part))
        return
    logger.info("ingest complete at: %s", time.ctime(stop))
    logger.info("elapsed time: %s", time.strftime("%d %H:%M:%S",stop-start) )
    logger.info(success)
    logger.info("Done!\n\n")
    return True # all else is None
def writeXMLFile(filepath, xmlMappedDict):
    """Serialise *xmlMappedDict* to *filepath* as UTF-8 XML; return the path."""
    from xml.sax.saxutils import XMLGenerator
    from xmlMapping.xml2dict import BinXMLSAXParser
    parser = BinXMLSAXParser()
    parser.d = xmlMappedDict
    # XMLGenerator defaults to iso-8859-1, so force UTF-8 explicitly
    out = open(filepath, 'w')
    parser.setContentHandler(XMLGenerator(out, 'UTF-8'))
    parser._cont_handler.startDocument()
    parser.__parse__(xmlMappedDict)
    parser._cont_handler.endDocument()
    out.close()
    return filepath
if __name__ == '__main__':
    import os
    import sys
    import cPickle
    import time
    from folders import *
    stopTime = time.time() # now - UNIX seconds since epoch
    verbose=True
    if len(sys.argv)>1:
        if sys.argv[1]=='-q': verbose=False
    # DATAMINX is the pickled {folder path: last-modified time} record;
    # its own mtime marks when the previous harvest ran
    try:
        minxTime=os.path.getmtime(DATAMINX)
        # if the next call fails then we can't save state and
        # checking becomes very inefficient
        # actually, minxTime would never get reset.
        # so we would be endlessly trying to reharvest same data
        os.utime(DATAMINX,(minxTime,minxTime))
    except Exception, e:
        # probably the file doesn't exist
        minxTime=0
        logger.critical("Unable to stat/touch record file: %s", DATAMINX)
        logger.critical("Aborting!")
        hiccups.add(Error(__name__,__file__,e) )
        dumpHiccups()
        sys.exit(1)
    try:
        dataFolderMap = cPickle.load(open(DATAMINX, 'rb'))
        # dict map of full folderpathname: last modified time
        if type(dataFolderMap) != type({}):
            dataFolderMap = {}
    except EOFError:
        logger.warning("Empty record file %s", DATAMINX )
        dataFolderMap = {}
    except Exception, e:
        logger.info(sys.exc_info()[0] )
        # probably the file doesn't exist
        logger.critical("Unable to open record file %s", DATAMINX)
        logger.critical("Aborting! " )
        hiccups.add(Error(__name__,__file__,e) )
        dumpHiccups()
        sys.exit(1)
    # sanity check that all watched folders are reachable before recursing
    for folder in FOLDERS:
        try:
            info = os.stat(folder)
            logger.info("Watching %s", folder)
        except Exception, e:
            logger.critical("Unable to stat folder: %s", folder)
            logger.critical("Aborting!")
            hiccups.add(Error(__name__,__file__,e) )
            hiccups.add(Error(__name__,__file__,"Maybe the data file system isn't mounted?") )
            dumpHiccups()
            sys.exit(1)
    period = (minxTime,stopTime) # harvest within this time frame
    fr = FolderRecurser(period, dataFolderMap)
    fr.verbose = verbose
    fr.dataCollectionHandler = myDataFolderHandler # register
    fr.assessFolder("/",FOLDERS) # pretend FOLDERS are children of /
    # write newly modified map back to disc, then quit
    cPickle.dump(dataFolderMap,open(DATAMINX ,'wb'))
    # touch to set last access/modified time
    os.utime(DATAMINX,(stopTime,stopTime))
    # that was equivalent to shell touch /var/lib/dataMINX
    logger.info("previous %s", time.ctime(minxTime) )
    logger.info(" started %s", time.ctime(stopTime) )
    logger.info("      now %s", time.ctime(time.time()) )
    # best-effort close of all remote connections
    try:
        dbcnx.finish()
        pdbcnx.logout()
        icatcnx.logout(sessionId)
        icatacnx.logout(sessionId)
    except:
        pass
    dumpHiccups()
    # all done
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# just a dummy class to pull metadata from oxford Diffraction .img image file
# shamelessly hacked from CCP$ DiffractionImage
# http://www.ccp4.ac.uk/ccp4bin/viewcvs/ccp4/lib/DiffractionImage/DiffractionImageOxford.cpp?rev=1.15&content-type=text/vnd.viewcvs-markup
import struct
from string import strip
import re
# matches "key=value" pairs in the ASCII header lines
REGEXP=re.compile(r"(\S+)=\s*(\S+\s+\S[^=]+(?!=)|\S*?(?=\S+=)|\S+\s*(?!\S+=))")
DEBUG = 0
class OXFORD:
    """Minimal reader for Oxford Diffraction .img frame file headers.

    Only the metadata header is parsed, never the pixel data.  Binary
    field offsets follow CCP4's DiffractionImageOxford.cpp (see module
    header for the URL).
    """
    def __init__(self,file):
        # file: an open (binary) file object positioned at frame start
        self.tbufsize = 80
        self.file = file
    def getMetadata(self):
        """Parse the ASCII + binary header and return a metadata dict.

        Raises Exception when the file does not start with the 'OD '
        magic string.
        """
        meta = {}
        meta['format'] = "CRYSALIS";
        meta['manufact'] = "OXFORD-DIFFRACTION";
        meta['serialNo'] = "N/A";
        meta['version'] = '3.0' # ?
        meta['rapidAxis'] = [1,0,0]
        meta['slowAxis'] = [0,-1,0]
        meta['serialNo'] = "N/A";
        meta['twoTheta'] = 0
        meta['scanaxis'] = -1
        buf1 = self.file.readline()
        if not buf1.startswith("OD "):
            raise Exception("File is not Oxford Diffraction .img format")
        # base pixel size (mm) depends on the detector model letter
        if buf1[3] == 'R': # for 'Ruby' detector
            meta['pixelX'] = 0.048
            meta['pixelY'] = 0.048
        elif buf1[3] == 'S' and buf1[13]=='3': # for sapphire 3 who has 0.03
            meta['pixelX'] = 0.03
            meta['pixelY'] = 0.03
        else: # The other detectors, Onyx and Sapphire have a pixel size of 0.06
            meta['pixelX'] = 0.06
            meta['pixelY'] = 0.06
        if DEBUG:
            print buf1
        # four more ASCII header lines of key=value pairs, then one fixed
        # 80-byte record
        header= []
        for i in range(4):
            header.append(REGEXP.findall(self.file.readline().rstrip()))
        buf = self.file.read(80)
        header.append(REGEXP.findall(buf))
        if DEBUG:
            print header
            # print buf
        if ord(buf[-1]) != 26: # last char is ctrl-z
            print "bugger", ord(buf[-1])
        meta['width'] = int(header[1][0][1]) # NX
        meta['height'] = int(header[1][1][1]) # NY
        # NG/NS/NK are the sizes of the three binary header sections
        originalHeaderLength= int(header[2][0][1])
        NG= int(header[2][1][1])
        NS= int(header[2][2][1])
        NK= int(header[2][3][1])
        headblk = NG+NS+NK
        meta['headblk'] = headblk
        # raise Exception(".sfrm file parsing error")
        """
        Types from the struct docs:
        Format  C Type          Python  Notes
        x       pad byte        no value
        c       char            string of length 1
        b       signed char     integer
        B       unsigned char   integer 1 byte
        h       short           integer (2 bytes)
        H       unsigned short  integer (2 bytes)
        i       int             integer 4 bytes
        I       unsigned int    long 4 bytes
        l       long            integer
        L       unsigned long   long
        q       long long       long (1)
        Q       unsigned long long long (1)
        f       float           float 4 bytes
        d       double          float 8 bytes
        s       char[]          string
        p       char[]          string
        P       void *          integer
        """
        Nbytes = (NG+NS+NK)
        if DEBUG:
            print Nbytes
        t= {'short':'h','double':'d' ,'int':'i','float':'f' }
        t1= {'h':2,'d':8 ,'i':4,'f':4 }
        # piecewise struct layout of the binary header; each entry is
        # (struct format char, element count)
        record= [('h', 2), # 0 short 2 * 2 bytes
                 ('h', 2), # 1 short pad to double
                 ('d',(NG-(2+2)*2)/8), # 2 end of NG
                 ('d',(NS)/8), # 3 end of NS
                 ('d', 35), # 4 NG+NS +71*4
                 ('i', 16), # 5 NG+NS +81*4 = 81+6 - 71
                 ('d',(NK-(35*8+16*4))/8) # 6 end of NK double
                 ]
        format=""
        index =[0]
        bytes =[0]
        # build the combined struct format plus running element/byte offsets
        for typ,num in record:
            format=format+str(num)+typ
            index.append(index[-1]+num)
            bytes.append(bytes[-1] + t1[typ]*num)
        if DEBUG:
            print format
        buf = self.file.read(Nbytes)
        magic = struct.unpack("<"+format, buf) # little endian?
        if DEBUG:
            for i in range(len(index)-1):
                print magic[index[i] :index[i+1]]
            print record
            print index
            print bytes
        # numeric offsets below are field positions from the CCP4 source
        meta['pixelX'] = meta['pixelX'] * magic[0]
        meta['pixelY'] = meta['pixelY'] * magic[1]
        meta['exposureTime'] = magic[ index[3] + 60]
        shift= (35+16/2)
        meta['mulfactor'] = magic[ index[6] + 46-shift]
        meta['wavelength'] = magic[ index[6] + 73-shift]
        meta['beamX'] = magic[ index[6] + 83-shift] # NG+NS + 83*8
        meta['beamY'] = magic[ index[6] + 84-shift] # NG+NS + 84*8
        meta['distance'] = magic[ index[6] + 89-shift]
        # convert beam centre from pixels to mm
        meta['beamX'] = meta['beamX'] *meta['pixelX']
        meta['beamY'] = meta['beamY'] *meta['pixelY']
        # the scan axis is whichever goniometer angle changed during the scan
        scanaxis = 6
        startangles = [0.,0.,0.,0.,0.,0.]
        endangles = [0.,0.,0.,0.,0.,0.]
        for i in range(6):
            startangles[i]= magic[ index[5]+ 0 + i ] # float
            endangles[i] = magic[ index[5]+10 + i ] # float
            if startangles[i] != endangles[i]:
                scanaxis=i
        """
        startangles[i]= (NG+NS)/4 +71+i # int
        endangles[i] = (NG+NS)/4 +81+i # int
        """
        oscAxes =["omega","theta","kappa / chi","phi","omega'","theta'","unknown"]
        oscAxis= oscAxes[scanaxis]
        RADTODEG = 180.0/3.141592627
        # angles are stored in radians scaled by mulfactor; report degrees
        if scanaxis != 6:
            meta['oscEnd'] = endangles[scanaxis] * RADTODEG * meta['mulfactor']
            meta['oscStart'] = startangles[scanaxis] * RADTODEG * meta['mulfactor']
        for i in range(6):
            startangles[i]= startangles[i] * RADTODEG * meta['mulfactor']
            endangles[i]= endangles[i] * RADTODEG * meta['mulfactor']
        meta['oscAxis'] = oscAxis
        meta['scanaxis'] = scanaxis
        meta['startangles'] = startangles
        meta['endangles'] = endangles
        return meta
if __name__ == '__main__':
    # manual test: dump the parsed metadata of each file on the command line
    import sys
    fileList = sys.argv[1:]
    print fileList
    for arg in fileList:
        print arg
        file= open(arg,'rb')
        sf = OXFORD(file)
        meta = sf.getMetadata()
        file.close()
        for key in meta.keys():
            print "%7s : %s" % (key,meta[key])
| Python |
#!/usr/bin/env python
# just a dummy class to pull metadata from Bruker Frame (.sfrm) files
from string import strip
class SFRM:
    """Minimal reader for the ASCII header of Bruker frame (.sfrm) files.

    The header is a sequence of fixed-width 80-character "KEY    :value"
    records, terminated by a Ctrl-Z/Ctrl-D marker, after which the binary
    image data begins.
    """
    def __init__(self,file):
        # file: open file object positioned at the start of the header
        self.tbufsize = 80   # fixed record width of an .sfrm header field
        self.file = file
    def getMetadata(self):
        """Parse the header records into a dict (repeated keys are joined
        with newlines) and return it.  Raises Exception if the stream does
        not start with the .sfrm "FORMAT :" magic."""
        buf = self.file.read(self.tbufsize)
        if not buf.startswith("FORMAT :"):
            raise Exception("File is not .sfrm format")
        meta = {}
        version = 0
        while True:
            if not buf:
                # BUGFIX: EOF before an end-of-metadata marker (truncated
                # file) used to raise IndexError on buf[-1] below.
                break
            if buf[0:8] =='VERSION:':
                version = buf[8:].strip()
            if buf[0:2] =='\x1a\x04': # end of metadata
                break
            if buf[-1] =='\x04': # end of metadata for version 15
                break
            # BUGFIX: split on the FIRST colon only -- values such as the
            # CREATED timestamp legitimately contain further colons and
            # were being truncated by an unbounded split.
            parts = buf.split(':', 1)
            if len(parts)<2:
                print(parts)
                raise Exception(".sfrm file parsing error")
            key=parts[0].strip()
            value = parts[1].strip()
            if key in meta and meta[key] != '':
                # repeated key: accumulate values on separate lines
                meta[key] = meta[key] + '\n' + value
            else:
                meta[key]= value
            buf = self.file.read(self.tbufsize)
        self.meta = meta
        return meta
if __name__ == '__main__':
    # Ad-hoc CLI: dump the parsed .sfrm header of each file named
    # on the command line.
    import sys
    paths = sys.argv[1:]
    print(paths)
    for path in paths:
        print(path)
        handle = open(path)
        metadata = SFRM(handle).getMetadata()
        handle.close()
        for field in metadata.keys():
            print("%7s : %s" % (field, metadata[field]))
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# just an adHoc hack to pull metadata from an OD CIF
# for proper CIF reading, you would want to use PyCifRW ...
DEBUG = 0
class ODCIF:
    """Ad-hoc extractor for a fixed set of items from an Oxford
    Diffraction CIF file.  (For proper CIF parsing use PyCifRW.)

    Updated for Python 2.6+/3 portability (``except ... as``, print
    function-style calls, ``in`` instead of ``has_key``) -- the parsing
    behaviour is unchanged.
    """
    def __init__(self,file):
        # file: open (text-mode) file object for the CIF document
        self.tbufsize = 80   # kept for parity with the sibling readers; unused here
        self.file = file
    def getMetadata(self):
        """Scan forward through the file for each required CIF tag, in
        order, and return a dict of tag -> cleaned value string.

        A tag that is absent (or appears out of order) silently consumes
        the rest of the stream, so later tags will be missing too.
        """
        meta = {}
        # get these lines, in this order
        required = [
            '_cell_length_a',
            '_cell_length_b',
            '_cell_length_c',
            '_cell_angle_alpha',
            '_cell_angle_beta',
            '_cell_angle_gamma',
            # NOTE(review): trailing space appears deliberate -- it forces
            # a whole-word startswith() match on the temperature line.
            '_diffrn_ambient_temperature ',
            '_diffrn_radiation_wavelength',
            '_diffrn_radiation_type',
            '_diffrn_reflns_number',
            '_diffrn_reflns_av_R_equivalents',
            '_diffrn_reflns_av_sigmaI/netI',
            '_diffrn_reflns_theta_min',
            '_diffrn_reflns_theta_max',
            '_symmetry_space_group_name_H-M',
            '_symmetry_cell_setting',
        ]
        buf = True
        for field in required:
            while buf:
                buf = self.file.readline()
                if not buf.startswith(field):
                    continue
                # got a match
                try:
                    value = " ".join(buf.split()[1:])
                    # strip standard uncertainties "(n)"/"(nn)" from numbers
                    if value[-1] == ")":
                        if value[-3] == "(":
                            value = value[0:-3]
                        elif value[-4] == "(":
                            value = value[0:-4]
                    # strip single quotes from strings
                    elif value[0] == "'" and value[-1] == "'":
                        value = value[1:-1]
                    # strip double quotes from strings
                    elif value[0] == '"' and value[-1] == '"':
                        value = value[1:-1]
                    meta[field] = value
                except Exception as e:
                    # e.g. a tag with an empty value: report and move on
                    print("%s %s" % (field, e))
                break
            if DEBUG:
                if field not in meta:
                    print("Uh Oh. Missed CIF field  %s" % field)
        return meta
if __name__ == '__main__':
    # Ad-hoc CLI: dump the extracted CIF items for each file named
    # on the command line.
    import sys
    paths = sys.argv[1:]
    print(paths)
    for path in paths:
        print(path)
        handle = open(path, 'rb')
        metadata = ODCIF(handle).getMetadata()
        handle.close()
        for field in metadata.keys():
            print("%7s : %s" % (field, metadata[field]))
| Python |
#!/usr/bin/env python
# just a dummy class to pull metadata from Bruker Frame (.sfrm) files
from string import strip
class SFRM:
    """Minimal reader for the ASCII header of Bruker frame (.sfrm) files.

    The header is a sequence of fixed-width 80-character "KEY    :value"
    records, terminated by a Ctrl-Z/Ctrl-D marker, after which the binary
    image data begins.
    """
    def __init__(self,file):
        # file: open file object positioned at the start of the header
        self.tbufsize = 80   # fixed record width of an .sfrm header field
        self.file = file
    def getMetadata(self):
        """Parse the header records into a dict (repeated keys are joined
        with newlines) and return it.  Raises Exception if the stream does
        not start with the .sfrm "FORMAT :" magic."""
        buf = self.file.read(self.tbufsize)
        if not buf.startswith("FORMAT :"):
            raise Exception("File is not .sfrm format")
        meta = {}
        version = 0
        while True:
            if not buf:
                # BUGFIX: EOF before an end-of-metadata marker (truncated
                # file) used to raise IndexError on buf[-1] below.
                break
            if buf[0:8] =='VERSION:':
                version = buf[8:].strip()
            if buf[0:2] =='\x1a\x04': # end of metadata
                break
            if buf[-1] =='\x04': # end of metadata for version 15
                break
            # BUGFIX: split on the FIRST colon only -- values such as the
            # CREATED timestamp legitimately contain further colons and
            # were being truncated by an unbounded split.
            parts = buf.split(':', 1)
            if len(parts)<2:
                print(parts)
                raise Exception(".sfrm file parsing error")
            key=parts[0].strip()
            value = parts[1].strip()
            if key in meta and meta[key] != '':
                # repeated key: accumulate values on separate lines
                meta[key] = meta[key] + '\n' + value
            else:
                meta[key]= value
            buf = self.file.read(self.tbufsize)
        self.meta = meta
        return meta
if __name__ == '__main__':
    # Ad-hoc CLI: dump the parsed .sfrm header of each file named
    # on the command line.
    import sys
    paths = sys.argv[1:]
    print(paths)
    for path in paths:
        print(path)
        handle = open(path)
        metadata = SFRM(handle).getMetadata()
        handle.close()
        for field in metadata.keys():
            print("%7s : %s" % (field, metadata[field]))
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# just a dummy class to pull metadata from oxford Diffraction .img image file
# shamelessly hacked from CCP4 DiffractionImage
# http://www.ccp4.ac.uk/ccp4bin/viewcvs/ccp4/lib/DiffractionImage/DiffractionImageOxford.cpp?rev=1.15&content-type=text/vnd.viewcvs-markup
import struct
from string import strip
import re
REGEXP=re.compile(r"(\S+)=\s*(\S+\s+\S[^=]+(?!=)|\S*?(?=\S+=)|\S+\s*(?!\S+=))")
DEBUG = 0
class OXFORD:
    """Reader for Oxford Diffraction / CrysAlis .img frame headers.

    Parses the ASCII preamble and the binary general/special/KM4 header
    blocks (sizes NG, NS, NK from the ASCII part) into a metadata dict.
    Adapted from CCP4's DiffractionImageOxford.cpp.
    """
    def __init__(self,file):
        # file: open *binary* file object positioned at the start of the image.
        self.tbufsize = 80   # nominal ASCII header line width (not used below)
        self.file = file
    def getMetadata(self):
        """Return a dict of beam/scan metadata pulled from the .img header.

        Raises Exception if the first line lacks the "OD " magic.  Relies
        on module-level REGEXP (key=value tokenizer), DEBUG and struct.
        """
        meta = {}
        # Fixed identification fields plus defaults overwritten below.
        meta['format'] = "CRYSALIS";
        meta['manufact'] = "OXFORD-DIFFRACTION";
        meta['serialNo'] = "N/A";
        meta['version'] = '3.0' # ?
        meta['rapidAxis'] = [1,0,0]
        meta['slowAxis'] = [0,-1,0]
        meta['serialNo'] = "N/A";
        meta['twoTheta'] = 0
        meta['scanaxis'] = -1
        buf1 = self.file.readline()
        if not buf1.startswith("OD "):
            raise Exception("File is not Oxford Diffraction .img format")
        # Detector model (4th char of the magic line) fixes the pixel size in mm;
        # it is scaled by the binning factors unpacked from the binary header later.
        if buf1[3] == 'R': # for 'Ruby' detector
            meta['pixelX'] = 0.048
            meta['pixelY'] = 0.048
        elif buf1[3] == 'S' and buf1[13]=='3': # for sapphire 3 who has 0.03
            meta['pixelX'] = 0.03
            meta['pixelY'] = 0.03
        else: # The other detectors, Onyx and Sapphire have a pixel size of 0.06
            meta['pixelX'] = 0.06
            meta['pixelY'] = 0.06
        if DEBUG:
            print buf1
        # Four more ASCII "key=value" lines, then a fixed 80-byte fifth record.
        header= []
        for i in range(4):
            header.append(REGEXP.findall(self.file.readline().rstrip()))
        buf = self.file.read(80)
        header.append(REGEXP.findall(buf))
        if DEBUG:
            print header
        #        print buf
        if ord(buf[-1]) != 26: # last char is ctrl-z
            print "bugger", ord(buf[-1])
        # Image dimensions and the sizes of the three binary header blocks.
        meta['width'] = int(header[1][0][1]) # NX
        meta['height'] = int(header[1][1][1]) # NY
        originalHeaderLength= int(header[2][0][1])
        NG= int(header[2][1][1])
        NS= int(header[2][2][1])
        NK= int(header[2][3][1])
        headblk = NG+NS+NK
        meta['headblk'] = headblk
#        raise Exception(".sfrm file parsing error")
        """
        Types from the struct docs:
        Format  C Type          Python  Notes
        x       pad byte        no value
        c       char            string of length 1
        b       signed char     integer
        B       unsigned char   integer 1 byte
        h       short           integer (2 bytes)
        H       unsigned short  integer (2 bytes)
        i       int             integer 4 bytes
        I       unsigned int    long 4 bytes
        l       long            integer
        L       unsigned long   long
        q       long long       long (1)
        Q       unsigned long long long (1)
        f       float           float 4 bytes
        d       double          float 8 bytes
        s       char[]          string
        p       char[]          string
        P       void *          integer
        """
        Nbytes = (NG+NS+NK)
        if DEBUG:
            print Nbytes
        t= {'short':'h','double':'d' ,'int':'i','float':'f' }
        t1= {'h':2,'d':8 ,'i':4,'f':4 }    # byte size per struct type code
        # Layout of the NG+NS+NK binary region as (type, count) runs; the
        # run boundaries give the index[] offsets used for lookups below.
        record= [('h', 2), # 0 short 2 * 2 bytes
                 ('h', 2), # 1 short pad to double
                 ('d',(NG-(2+2)*2)/8), # 2 end of NG
                 ('d',(NS)/8), # 3 end of NS
                 ('d', 35), # 4 NG+NS +71*4
                 ('i', 16), # 5 NG+NS +81*4 = 81+6 - 71
                 ('d',(NK-(35*8+16*4))/8) # 6 end of NK double
                 ]
        # Build the struct format string plus cumulative element/byte offsets.
        format=""
        index =[0]
        bytes =[0]
        for typ,num in record:
            format=format+str(num)+typ
            index.append(index[-1]+num)
            bytes.append(bytes[-1] + t1[typ]*num)
        if DEBUG:
            print format
        buf = self.file.read(Nbytes)
        magic = struct.unpack("<"+format, buf) # little endian?
        if DEBUG:
            for i in range(len(index)-1):
                print magic[index[i] :index[i+1]]
            print record
            print index
            print bytes
        # First two shorts are the X/Y binning factors.
        meta['pixelX'] = meta['pixelX'] * magic[0]
        meta['pixelY'] = meta['pixelY'] * magic[1]
        meta['exposureTime'] = magic[ index[3] + 60]
        # shift re-bases the documented NK offsets onto the run-6 doubles
        # (35 doubles + 16 ints == 35 + 8 double-equivalents already consumed).
        shift= (35+16/2)
        meta['mulfactor'] = magic[ index[6] + 46-shift]
        meta['wavelength'] = magic[ index[6] + 73-shift]
        meta['beamX'] = magic[ index[6] + 83-shift] # NG+NS + 83*8
        meta['beamY'] = magic[ index[6] + 84-shift] # NG+NS + 84*8
        meta['distance'] = magic[ index[6] + 89-shift]
        # Beam centre: convert from pixels to mm.
        meta['beamX'] = meta['beamX'] *meta['pixelX']
        meta['beamY'] = meta['beamY'] *meta['pixelY']
        # Whichever of the six goniometer angles differs between start and
        # end is the scan axis; 6 means "none found" -> unknown.
        scanaxis = 6
        startangles = [0.,0.,0.,0.,0.,0.]
        endangles = [0.,0.,0.,0.,0.,0.]
        for i in range(6):
            startangles[i]= magic[ index[5]+ 0 + i ] # float
            endangles[i] = magic[ index[5]+10 + i ] # float
            if startangles[i] != endangles[i]:
                scanaxis=i
        """
        startangles[i]= (NG+NS)/4 +71+i # int
        endangles[i] = (NG+NS)/4 +81+i # int
        """
        oscAxes =["omega","theta","kappa / chi","phi","omega'","theta'","unknown"]
        oscAxis= oscAxes[scanaxis]
        # NOTE(review): the pi digits are slightly off (3.141592627 vs
        # 3.14159265...); the relative error is ~1e-8, i.e. negligible here.
        RADTODEG = 180.0/3.141592627
        if scanaxis != 6:
            # Angles are stored in radians scaled by 'mulfactor'.
            meta['oscEnd'] = endangles[scanaxis] * RADTODEG * meta['mulfactor']
            meta['oscStart'] = startangles[scanaxis] * RADTODEG * meta['mulfactor']
        for i in range(6):
            startangles[i]= startangles[i] * RADTODEG * meta['mulfactor']
            endangles[i]= endangles[i] * RADTODEG * meta['mulfactor']
        meta['oscAxis'] = oscAxis
        meta['scanaxis'] = scanaxis
        meta['startangles'] = startangles
        meta['endangles'] = endangles
        return meta
if __name__ == '__main__':
    # Ad-hoc CLI: dump the parsed header metadata of each image file
    # named on the command line.
    import sys
    paths = sys.argv[1:]
    print(paths)
    for path in paths:
        print(path)
        handle = open(path, 'rb')
        metadata = OXFORD(handle).getMetadata()
        handle.close()
        for field in metadata.keys():
            print("%7s : %s" % (field, metadata[field]))
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# just an adHoc hack to pull metadata from an OD CIF
# for proper CIF reading, you would want to use PyCifRW ...
DEBUG = 0
class ODCIF:
    """Ad-hoc extractor for a fixed set of items from an Oxford
    Diffraction CIF file.  (For proper CIF parsing use PyCifRW.)

    Updated for Python 2.6+/3 portability (``except ... as``, print
    function-style calls, ``in`` instead of ``has_key``) -- the parsing
    behaviour is unchanged.
    """
    def __init__(self,file):
        # file: open (text-mode) file object for the CIF document
        self.tbufsize = 80   # kept for parity with the sibling readers; unused here
        self.file = file
    def getMetadata(self):
        """Scan forward through the file for each required CIF tag, in
        order, and return a dict of tag -> cleaned value string.

        A tag that is absent (or appears out of order) silently consumes
        the rest of the stream, so later tags will be missing too.
        """
        meta = {}
        # get these lines, in this order
        required = [
            '_cell_length_a',
            '_cell_length_b',
            '_cell_length_c',
            '_cell_angle_alpha',
            '_cell_angle_beta',
            '_cell_angle_gamma',
            # NOTE(review): trailing space appears deliberate -- it forces
            # a whole-word startswith() match on the temperature line.
            '_diffrn_ambient_temperature ',
            '_diffrn_radiation_wavelength',
            '_diffrn_radiation_type',
            '_diffrn_reflns_number',
            '_diffrn_reflns_av_R_equivalents',
            '_diffrn_reflns_av_sigmaI/netI',
            '_diffrn_reflns_theta_min',
            '_diffrn_reflns_theta_max',
            '_symmetry_space_group_name_H-M',
            '_symmetry_cell_setting',
        ]
        buf = True
        for field in required:
            while buf:
                buf = self.file.readline()
                if not buf.startswith(field):
                    continue
                # got a match
                try:
                    value = " ".join(buf.split()[1:])
                    # strip standard uncertainties "(n)"/"(nn)" from numbers
                    if value[-1] == ")":
                        if value[-3] == "(":
                            value = value[0:-3]
                        elif value[-4] == "(":
                            value = value[0:-4]
                    # strip single quotes from strings
                    elif value[0] == "'" and value[-1] == "'":
                        value = value[1:-1]
                    # strip double quotes from strings
                    elif value[0] == '"' and value[-1] == '"':
                        value = value[1:-1]
                    meta[field] = value
                except Exception as e:
                    # e.g. a tag with an empty value: report and move on
                    print("%s %s" % (field, e))
                break
            if DEBUG:
                if field not in meta:
                    print("Uh Oh. Missed CIF field  %s" % field)
        return meta
if __name__ == '__main__':
    # Ad-hoc CLI: dump the extracted CIF items for each file named
    # on the command line.
    import sys
    paths = sys.argv[1:]
    print(paths)
    for path in paths:
        print(path)
        handle = open(path, 'rb')
        metadata = ODCIF(handle).getMetadata()
        handle.close()
        for field in metadata.keys():
            print("%7s : %s" % (field, metadata[field]))
| Python |
#!/usr/bin/env python
from config import *
import sys
import logging
# Log to a file named after this script, INFO level and up.
LOGFILENAME = __file__ + ".log"
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    filename = LOGFILENAME, level=logging.INFO)
logger = logging.getLogger("fileWatcher")
from errorCollator import *
# Collects non-fatal problems during a run for a single end-of-run email alert.
hiccups = ErrorCollator()
def dumpHiccups():
    """If any errors were collated, email them to the alert list,
    identifying this host's FQDN in the From address."""
    if not hiccups.hasErrors():
        return
    import socket
    host_info = socket.gethostbyaddr(socket.gethostname())
    fqdn = host_info[1][0]
    hiccups.emailAlert("fileWatcher@" + fqdn, EMAIL_ALERT_LIST, EMAIL_SMTP_HOST)
import re
import os.path
from fileHandlers.sfrmProcess import SFRM
from fileHandlers.oxfordProcess import OXFORD
logger.info("Connecting CSAF ProposalDB")
from ZSI import FaultException
from pdbWrapper import PDBWS
# The proposal-database web service is mandatory: bail out (after emailing
# the collected errors) if it cannot be reached at startup.
try:
    pdbcnx = PDBWS(PDBWSURL,PDBWSUSER,PDBWSPASS)
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to establish communication with " +
            PDBWSURL + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
from icatAdminWrapper import ICATAWS
from icatWrapper import ICATWS
from ZSI import FaultException
# ICAT connection state shared module-wide; (re)populated by icatReconnect().
icatacnx = None
sessionId = None
icatcnx = None
def icatReconnect():
    """(Re-)establish the ICAT admin and ICAT WS connections, refreshing
    the module-level session id.

    Returns True on success; on failure records the problem in `hiccups`
    and returns False.
    """
    global icatacnx
    global sessionId
    global icatcnx
    logger.info("Connecting: ICAT admin")
    try:
        icatacnx = ICATAWS(ICATADMINURL, ICATADMINUSER, ICATADMINPASS)
    except Exception as exc:
        logger.critical(exc)
        hiccups.add(Error(__name__, __file__, exc))
        hiccups.add(Error(__name__, __file__,
                          "Unable to establish communication with " +
                          ICATADMINURL + " Aborting."))
        return False
    sessionId = icatacnx.sessionId
    logger.info("Connecting: ICAT WS")
    try:
        icatcnx = ICATWS(ICATURL, ICATUSER, ICATPASS, sessionId)
    except Exception as exc:
        logger.critical(exc)
        hiccups.add(Error(__name__, __file__, exc))
        hiccups.add(Error(__name__, __file__,
                          "Unable to establish communication with " +
                          ICATURL + " Aborting."))
        return False
    # a fresh login supersedes the admin session id
    sessionId = icatcnx.login(ICATUSER, ICATPASS)
    return True
# A working ICAT connection is mandatory; the debug listings below double
# as a smoke test of the freshly created session.
if not icatReconnect():
    dumpHiccups()
    sys.exit(1)
logger.debug(str(sessionId) )
logger.debug(str(icatcnx.listInstruments(sessionId)) )
logger.debug(str(icatcnx.listDatasetTypes(sessionId)) )
logger.debug(str(icatcnx.listInvestigationTypes(sessionId)) )
logger.debug(str(icatcnx.listDatasetStatus(sessionId)) )
#logger.info(icatcnx.listParameters(sessionId)
#logger.info(icatcnx.listDatafileFormats(sessionId)
#logger.info(icatcnx.listRoles(sessionId)
#logger.info(icatcnx.searchByUserSurname(sessionId,"Turner")
#invs = icatcnx.getMyInvestigations(sessionId)
#logger.info(invs
#invHolder = icatcnx.getInvestigation(sessionId,5700)
def methodDump(holder):
    """Debug aid: classify a holder object's class attributes into
    getters, setters, other callables, and plain variables, then log the
    getter and variable names."""
    attribs = holder.__class__.__dict__
    variables = {}
    getters = {}
    setters = {}
    sundries = {}
    func_type = type(methodDump)   # the plain-function type
    for attrib, member in attribs.items():
        if type(member) != func_type:
            variables[attrib] = member
        elif attrib.startswith("set_"):
            setters[attrib] = member
        elif attrib.startswith("get_"):
            getters[attrib] = member
        else:
            sundries[attrib] = member
    logger.debug(str(getters.keys()))
    logger.debug(str(variables.keys()))
def datasetsDump(list):
    """Debug aid: log a summary of a list of ICAT dataset holders, plus a
    peek at the first datafile of each dataset."""
    logger.debug("datasets: %d", len(list))
    for ds in list:
        logger.debug("%s %s %s", ds._id, ds._name, ds._datasetType)
        if not hasattr(ds, "_datafileCollection"):
            continue
        dfiles = ds._datafileCollection
        logger.debug("datafiles %d", len(dfiles))
        logger.debug("")
        if not dfiles:
            continue
        first = dfiles[0]
        try:
            logger.debug("%s %s", first._name, first._datafileVersion)
            logger.debug(str(vars(first._datafileFormat._datafileFormatPK)))
            logger.debug(" %s", first._datafileModifyTime)
        except Exception as exc:
            # holder attributes vary by ICAT version; don't let a debug
            # dump kill the run
            logger.warning(exc)
logger.info("Connecting: Bruker Database")
from xmlMapping.dbConnect import *
bypass=False
# The Bruker Apex PostgreSQL database is also mandatory at startup.
try:
    dbcnx = DBConnection(bypass)
    dbcnx.psycopg2Init(BRKDBHOST, BRKDBPORT, BRKDBNAME, BRKDBUSER, BRKDBPASS)
    dbcnx.connect() # establish tcp communication
    #dbcnx.query("SET search_path TO scd,public") # only for psycopg2?
    dbcnx.cursor.execute("SET search_path TO scd,public") # only for psycopg2?
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to establish communication with " +
            BRKDBHOST + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
# Load the three XML-generation templates up front; a failure is fatal
# because nothing can be ingested without them.
# "new" template: first ingest of an investigation (Bruker formats).
ICATTEMPLATE = "map-csaf_apex2icat.xml"
logger.info("Loading ICAT XML generation template %s", ICATTEMPLATE)
from xmlMapping.templateParser import TemplateParser
try:
    newIcatTemplateWalker = TemplateParser()
    newIcatTemplateWalker.registerDBConnection(dbcnx)
    newIcatTemplateWalker.initTemplate(ICATTEMPLATE)
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to load XML template " +
            ICATTEMPLATE + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
# "amend" template: used when the investigation already has datasets in ICAT.
OLDICATTEMPLATE = "map-csaf_apex2icat-upd.xml"
logger.info("Loading ICAT XML amend template %s", OLDICATTEMPLATE)
try:
    oldIcatTemplateWalker = TemplateParser()
    oldIcatTemplateWalker.registerDBConnection(dbcnx)
    oldIcatTemplateWalker.initTemplate(OLDICATTEMPLATE)
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to load XML template " +
            OLDICATTEMPLATE + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
# NOVA (Oxford) template; note it gets no DB connection registered.
NOVATEMPLATE = "map-csaf_nova2icat.xml"
logger.info("Loading NOVA ICAT XML template %s", NOVATEMPLATE )
try:
    novaIcatTemplateWalker = TemplateParser()
    novaIcatTemplateWalker.initTemplate(NOVATEMPLATE)
except Exception, e:
    logger.critical(e)
    hiccups.add(Error(__name__,__file__,e) )
    hiccups.add(Error(__name__,__file__,"Unable to load XML template " +
            NOVATEMPLATE + " Aborting.") )
    dumpHiccups()
    sys.exit(1)
def myDataFolderHandler(path, sample, mode, collection):
    """ We need to register this as a callback func to simplify
        the FolderRecurser.  I.e. we try vainly to localize the
        file format specific handling to here.

        path       -- folder path of the sample's data collection
        sample     -- sample name (doubles as the ICAT investigation name)
        mode       -- "create" on first sighting, otherwise an update pass
        collection -- FolderRecurser aggregate describing the files found

        Returns True when the collection was fully handled; any other
        return tells the recurser to retry on a later pass.
    """
    import time  # BUGFIX: 'time' was only imported under __main__, so this
                 # function raised NameError when the module was imported
    name = None
    if not collection.dataFilesExist:
        logger.warning("Not processing %s until datafiles present.",path)
        if mode == "create":
            # Or should we set a bogus timestamp in our records:
            collection.lastModified = 0 # Jan 1 1970 - well before first pass
            # record initial alert for email to manager
            hiccups.add(Error(sample,path,"No data files exist, yet ..."))
            # and pretend we handled it:
            return True
        else:
            return
    # we just need a datafile to get the version number ...
    # NOTE(review): if every folder's recent/expired lists are empty, name
    # stays None and the open(name) below will fail.
    datasetFolders = collection.dataFiles
    for folder in datasetFolders.keys():
        recent = datasetFolders[folder]['recentFileList']
        expired = datasetFolders[folder]['expiredFileList']
        if len(recent)>0:
            name = recent[0]
            break
        elif len(expired)>0:
            name = expired[0]
            break
    logger.debug("%s %s %s", mode , sample, name)
    # BRUKER SPECIFIC
    if collection.dataFormat in [DataFormats.SMART, DataFormats.APEX]:
        # NOTE(review): sample is interpolated straight into the SQL; this
        # is only safe while sample names come from our own folder layout.
        try:
            res = dbcnx.query("SELECT samples_id, sample_name, revision, when_created FROM samples WHERE sample_name='%s' ORDER by revision DESC" % sample)
        except DBConnectSQLSyntaxException as msg:
            hiccups.add(Error(sample,path,msg))
            logger.info("Select from samples failed\n%s" , msg )
            return
        except DBConnectSQLResultException as msg:
            hiccups.add(Error(sample,path,msg))
            logger.info("Select from samples failed\n%s" , msg )
            return
        fnames = [ 'samples_id', 'sample_name', 'revision', 'when_created']
        # BUGFIX: the header format has four placeholders, so log all four
        # column names at once (was one name per loop iteration).
        logger.debug('%-15s%-15s%-15s%-15s', *fnames)
        # Next, print out the instances.
        samples = []
        for i in range(res.ntuples):
            samples.append([])
            strn = ""
            for j in range(res.nfields):
                samples[i].append(res.getvalue(i, j))
                try:
                    strn = strn + "%-15s" % res.getvalue(i, j)
                except:
                    # BUGFIX: was logger.info(strn(...)) -- strn is a
                    # string, so calling it raised TypeError here.
                    logger.info(str(res.getvalue(i, j)) )
            logger.debug(strn)
        res.clear()
    # End of Bruker specific
    # Cross-reference the sample against the proposal database.
    try:
        l_id = int(pdbcnx.xgetLogIdFromDataset(sample)['l_id'])
        log = pdbcnx.xgetLogFromId(l_id)
        s_id = int(log['s_id'])
        sampledata = pdbcnx.xgetSampleFromId(s_id)
        logger.info(" submissionID: %s",sampledata['submissionid'])
        users = pdbcnx.xgetScientistBeans(l_id)
        scientists = users['scientists']
        for scientist in scientists:
            # BUGFIX: format had two placeholders but three args were passed
            logger.debug(" %s %s",scientist['first_Names'],scientist['last_Name'])
    except FaultException as e:
        hiccups.add(Error(sample,path,e))
        hiccups.add(Error(sample,path,"No logged users associated with sample" ))
        logger.warning(e)
        return
    logger.debug(str(log) )
    logger.debug(str(sampledata))
    #
    # Pull metadata from file
    # -----------------------
    #
    fname = os.path.split(name)[-1]
    datafh = open(name)
    if collection.dataFormat in [DataFormats.SMART, DataFormats.APEX]:
        meta = SFRM(datafh).getMetadata()
        # BUGFIX: the args were wrapped in a tuple, which broke the lazy
        # %-substitution (three placeholders, one tuple argument).
        logger.debug(" V:%2s user: %-21s %s", meta["VERSION"], meta["USER"], fname)
    elif collection.dataFormat == DataFormats.NOVA:
        meta = OXFORD(datafh).getMetadata()
        logger.debug(" V:%2s user: %-21s %s", meta["version"], meta["manufact"], fname)
    datafh.close()
    # For NOVA data, also harvest the refined-structure CIF if one exists.
    cif = None
    if collection.dataFormat == DataFormats.NOVA and \
       'struct/tmp' in collection.hierarchy['folders']:
        cifholder = collection.hierarchy['folders']['struct/tmp']
        from fileHandlers.cifHandler import ODCIF
        from os.path import split
        for recent in cifholder['recentFileList']:
            logger.debug(str(recent))
            if recent[2] != 'cif':
                continue
            dir,cifname = split(recent[0])
            if not cifname == sample + '.cif':
                continue
            ciffile = open(recent[0])
            cif = ODCIF(ciffile).getMetadata()
            ciffile.close()   # BUGFIX: the CIF file handle was never closed
            break
        logger.info("CIF file: %s" , cif)
    #
    # Hit ICAT for existing entries
    # -----------------------------
    #
    investigationName = sample
    logger.info("Retrieving ICAT investigation data for %s", investigationName)
    from icatWS.ICATService_types import ns0
    advSearchDetails= ns0.advancedSearchDetails_Def("dummy")
    advSearchDetails._caseSensitive=True
    advSearchDetails._investigationName = investigationName
    advSearchDetails._investigationInclude = "DATASETS_AND_DATAFILES"
    # BUGFIX: pre-initialise so a failed retry below cannot leave the name
    # unbound (the "if invHolderList:" test used to raise NameError then).
    invHolderList = None
    try:
        invHolderList = icatcnx.searchByAdvanced(sessionId,advSearchDetails)
    except Exception as e:
        hiccups.add(Error(sample,path,e))
        # possibly the sessionId has timed out. it had an hour
        # but this could be slow ...
        logger.warning("Attempting ICAT reconnection..." )
        if not icatReconnect():
            hiccups.add(Error(sample,path,e))
            logger.critical("Unexpected error: %s", sys.exc_info()[0] )
            logger.critical(e)
            # this is fatal!
            dumpHiccups()
            sys.exit(1)
        logger.warning("Success during ICAT reconnection..." )
        # now try query again ...
        try:
            invHolderList = icatcnx.searchByAdvanced(sessionId,advSearchDetails)
        except Exception as e:
            hiccups.add(Error(sample,path,e))
            logger.error("Unexpected error: %s", sys.exc_info()[0] )
            logger.error(e)
            # maybe this should also be fatal?
    # did we get a match? If not, then its a new investigation ...
    if invHolderList:
        logger.info("Need to update existing records for %s .......",sample)
        logger.debug(type(invHolderList) )
        invHolder = invHolderList[0]
        logger.debug("facility = %s",invHolder.Facility)
        logger.debug(str(vars(invHolder)) )
        datasets = invHolder.DatasetCollection
        datasetsDump(datasets)
    else:
        logger.info("No prior records for %s in ICAT!", sample)
        datasets = []
    # Which template to choose?  NOVA always uses its own template; the
    # Bruker formats pick create vs amend depending on prior datasets.
    # (The original duplicated the NOVA case in both arms.)
    if collection.dataFormat == DataFormats.NOVA:
        templateWalker = novaIcatTemplateWalker
    elif not datasets:
        templateWalker = newIcatTemplateWalker
    else:
        templateWalker = oldIcatTemplateWalker
    logger.info("Template walking: %s", templateWalker.templateFile )
    # these parameters are all accessible to the template parser
    params = {
        'LOG': log,            # proposal system
        'USERS': scientists,   # proposal system
        'DATASETS': datasets,  # ICAT prior injected datafiles
        'PATH': path,
        'SAMPLE': sample,
        'CIF': cif,
        'SAMPLEID': int(log['l_id']),
        'FILEDATA': collection,
        'MODE': mode,
        'PURGE': [],           # query this on return for files to delete
        'NAME': sample }
    if collection.dataFormat in [DataFormats.SMART, DataFormats.APEX]:
        params['SAMPLEDATA'] = sampledata      # proposal system
        if len(samples)>0:
            params['SAMPLEID'] = samples[0][0] # Bruker db
            params['SNAME'] = samples[0][1]    # Bruker db
            params['REVISION'] = samples[0][2] # Bruker db
        else: # no bruker database entry
            params['SNAME'] = sample
            params['REVISION'] = 1
    logger.debug("params: %s", str(params) )
    try:
        # apply XML generating template to
        # everything we know about the current sample
        templateWalker.applyTemplate(params)
    except Exception as msg:
        import traceback
        exc_type, exc_value, exc_traceback = sys.exc_info()
        slist = traceback.format_tb(exc_traceback )
        logger.critical(str(slist[-1]))
        logger.critical(msg)
        hiccups.add(Error(sample,path,str(slist[-1])))
        hiccups.add(Error(sample,path,msg))
        hiccups.add(Error(sample,path,"Couldn't walk the template. Error in expectations"))
        # potentially this is a common template failure for many samples...?
        return
    logger.info("Walked template %s", templateWalker.templateFile )
    xmlDataDict = templateWalker.genData
    # build a filename for dumping the XML to disk
    sname = params['SAMPLE']
    rev = "1"
    if 'REVISION' in params and params['REVISION'] >1:
        rev = str(params['REVISION'])
        sname = sname + '-' + rev
    fname = os.path.join(ICATXMLDATA , sname +"-01" + ".xml" )
    upd = 1
    # bump the two-digit suffix until an unused name is found, so earlier
    # XML instances for this sample are preserved
    while os.access(fname, os.F_OK):
        upd = upd + 1
        tname = sname + "-%02d" % (upd)
        fname = os.path.join(ICATXMLDATA , tname + ".xml" )
    DEBUG=0
    if DEBUG & 32:
        if xmlDataDict != None:
            import pprint
            pprint.pprint(xmlDataDict, indent=1)
    if not xmlDataDict:
        # something unexpected went wrong. No idea what.
        logger.warning("No data for %s ?????????", sample)
        return
    # otherwise save the datastructure as XML to disc.
    icatxmlfile = writeXMLFile(fname,xmlDataDict)
    logger.info("Wrote XML instance %s", icatxmlfile)
    # NOTE(review): this early return makes the purge/ingest code below
    # unreachable whenever PURGE is non-empty -- presumably a deliberate
    # safety stop; remove it to enable purge + re-ingest.
    if len(params['PURGE'])>0:
        logger.info("%d old files to be purged from ICAT", len(params['PURGE']) )
        logger.warning("Aborting at purge - before ingest...")
        return True
    if len(params['PURGE'])>0:
        # gotta purge these before we can re-ingest modified
        for stale in params['PURGE']:
            datafileId = stale['datafile']
            try:
                remove = icatcnx.removeDataFile(sessionId,datafileId)
                logger.debug("purged %s %s", datafileId, stale['filename'] )
            except Exception as e:
                logger.debug("purge failed for %s", datafileId )
                logger.debug(e)
                hiccups.add(Error(sample,stale,"can't purge modified file from ICAT "))
    # read the XML back and push it at the ICAT ingest service
    xmlfh = open(icatxmlfile)
    xmldata = xmlfh.read()
    xmlfh.close()
    start = time.time()
    success = "AAAAHhhhhh bugger!"
    try:
        logger.info("Ingesting at: %s", time.ctime(start) )
        success = icatcnx.ingestMetadata(sessionId,xmldata)
        stop = time.time()
    except Exception as e:
        stop = time.time()
        logger.warning("Ingest FAILED: %s", time.ctime(stop) )
        # BUGFIX: time.strftime() takes a struct_time, not a float; the old
        # call raised TypeError here and skipped the move-to-failed cleanup
        logger.warning("elapsed time: %s ", time.strftime("%H:%M:%S", time.gmtime(stop-start)) )
        hiccups.add(Error(sample,path,"Couldn't ingest metadata for file "+icatxmlfile))
        logger.info(e)
        # move file to failed folder
        failed = os.path.join(ICATXMLDATA , FAILED)
        if not os.access(failed,os.F_OK):
            os.mkdir(failed, 0o755)
        last_part = os.path.split(icatxmlfile)[1]
        os.rename(icatxmlfile, os.path.join(failed, last_part))
        return
    logger.info("ingest complete at: %s", time.ctime(stop))
    # BUGFIX: same struct_time requirement as above; the old call crashed
    # AFTER a successful ingest and made the whole pass look failed
    logger.info("elapsed time: %s", time.strftime("%H:%M:%S", time.gmtime(stop-start)) )
    logger.info(success)
    logger.info("Done!\n\n")
    return True # all else is None
def writeXMLFile(filepath, xmlMappedDict):
    """Serialise the template-generated dict to `filepath` as XML via the
    BinXMLSAXParser -> XMLGenerator SAX pipeline; returns filepath."""
    from xml.sax.saxutils import XMLGenerator
    from xmlMapping.xml2dict import BinXMLSAXParser
    parser = BinXMLSAXParser()
    parser.d = xmlMappedDict
    # XMLGenerator defaults to iso-8859-1, so request UTF-8 explicitly
    out = open(filepath, 'w')
    parser.setContentHandler(XMLGenerator(out, 'UTF-8'))
    parser._cont_handler.startDocument()
    parser.__parse__(xmlMappedDict)
    parser._cont_handler.endDocument()
    out.close()
    return filepath
if __name__ == '__main__':
    import os
    import sys
    import cPickle
    import time
    from folders import *
    # One harvesting pass: everything modified between the previous run's
    # timestamp (mtime of the DATAMINX state file) and "now".
    stopTime = time.time() # now - UNIX seconds since epoch
    verbose=True
    if len(sys.argv)>1:
        if sys.argv[1]=='-q': verbose=False
    try:
        minxTime=os.path.getmtime(DATAMINX)
        # if the next call fails then we can't save state and
        # checking becomes very inefficient
        # actually, minxTime would never get reset.
        # so we would be endlessly trying to reharvest same data
        os.utime(DATAMINX,(minxTime,minxTime))
    except Exception, e:
        # probably the file doesn't exist
        minxTime=0
        logger.critical("Unable to stat/touch record file: %s", DATAMINX)
        logger.critical("Aborting!")
        hiccups.add(Error(__name__,__file__,e) )
        dumpHiccups()
        sys.exit(1)
    # Restore the folder -> last-modified map persisted by the previous run.
    try:
        dataFolderMap = cPickle.load(open(DATAMINX, 'rb'))
        # dict map of full folderpathname: last modified time
        if type(dataFolderMap) != type({}):
            dataFolderMap = {}
    except EOFError:
        logger.warning("Empty record file %s", DATAMINX )
        dataFolderMap = {}
    except Exception, e:
        logger.info(sys.exc_info()[0] )
        # probably the file doesn't exist
        logger.critical("Unable to open record file %s", DATAMINX)
        logger.critical("Aborting! " )
        hiccups.add(Error(__name__,__file__,e) )
        dumpHiccups()
        sys.exit(1)
    # Sanity-check every watched root before starting the walk.
    for folder in FOLDERS:
        try:
            info = os.stat(folder)
            logger.info("Watching %s", folder)
        except Exception, e:
            logger.critical("Unable to stat folder: %s", folder)
            logger.critical("Aborting!")
            hiccups.add(Error(__name__,__file__,e) )
            hiccups.add(Error(__name__,__file__,"Maybe the data file system isn't mounted?") )
            dumpHiccups()
            sys.exit(1)
    period = (minxTime,stopTime) # harvest within this time frame
    fr = FolderRecurser(period, dataFolderMap)
    fr.verbose = verbose
    fr.dataCollectionHandler = myDataFolderHandler # register
    fr.assessFolder("/",FOLDERS) # pretend FOLDERS are children of /
    # write newly modified map back to disc, then quit
    cPickle.dump(dataFolderMap,open(DATAMINX ,'wb'))
    # touch to set last access/modified time
    os.utime(DATAMINX,(stopTime,stopTime))
    # that was equivalent to shell touch /var/lib/dataMINX
    logger.info("previous %s", time.ctime(minxTime) )
    logger.info(" started %s", time.ctime(stopTime) )
    logger.info("     now %s", time.ctime(time.time()) )
    # Best-effort teardown of all remote connections; failures here are
    # irrelevant since the pass is complete.
    try:
        dbcnx.finish()
        pdbcnx.logout()
        icatcnx.logout(sessionId)
        icatacnx.logout(sessionId)
    except:
        pass
    dumpHiccups()
    # all done
| Python |
#
# This file is imported by the XML template, for instrument-specific handling.
#
# Hopefully OxfordDatasetBuilder is a bit more coherent than BrukerFrameBuilder.
#
from config import DataFormats
from datasetBuilder import DatasetBuilder
class OxfordDatasetBuilder(DatasetBuilder):
""" The idea here is to make the garbage we extract from
the Bruker ApexII PostgreSQL database as pallatable to the
XML template parser as possible. """
def __init__(self,dataFormat=DataFormats.NOVA):
super(OxfordDatasetBuilder,self).__init__(dataFormat)
# self.dataFormat = dataFormat
self.protonums = []
def buildDatasetsFromAggregatedFiles(self, filesets, priordata=None, purgelist=None):
"""This method builds a datastructure that the XML template method can
iterate over to dump whole datasets of files and parameters.
Made an executive decision to treat each scan as a dataset
For each image, there should be a corresponding \work\image.raw
integrated intensity file. should we archive those derived files too?
"""
self.priordata = priordata # local ref
self.purgelist = purgelist # local ref
# from ntpath import split # for windows
from os.path import split # for windows
import os.path
import re, types
dsetnum_re = re.compile(r".*\D(\d+)$") # trailing digits from
if isinstance(filesets, list):
# uh oh
print "No frame file data"
return None
keys = filesets.keys()
keys.sort()
dsets = []
# in principle, filesets are aggregated lists of recently
# modified or created files - i.e. files that have changed.
# priordata, on the otherhand, has already been injected
# into ICAT. reinjecting without prior removal is not allowed
print "priordata", priordata
for key in keys: # loop over file groups
# for key = SUNDRIES and INDEXING, the split doesn't work
# in particular,there will be no dsetnum
dir, dsetname = split(key) # ntpath split
filelist = filesets[key]
filelist.sort() # ensure ordered filelist
dsetnum = dsetnum_re.match(key)
if not dsetnum:
# handle this dataset some other way???
print key
if key == DataFormats.INDEXING:
scan = "indexing"
else:
scan = "unknown"
dset = {'proto':None, 'num':None, 'mach':None,
'dsetname':dsetname,
'description': "additional files",
'params':[] , 'scan':scan }
files = []
for file in filelist:
dir , filename = split(file)
print dir, filename
version = "?"
elements = [ #{'name': 'description',
# 'content': '' },
# {'name': 'datafile_version',
# 'content': '1' },
# {'name': 'datafile_version_comment',
# 'content': '' },
{'name': 'datafile_format',
'content': 'unknown' },
{'name': 'datafile_format_version',
'content': version },
]
self.addCommonFileData(file, elements)
file = {'dir':dir, 'name':filename, 'params': [ ],
'elements': elements }
# probably we need to specify file types etc
# and or remove ICAT unsupported/registered format types
self.purgeCheck(file)
files.append(file)
dset['files'] = files
dsets.append(dset)
continue
dsetnum = dsetnum.group(1)
sid = int(dsetnum) # convert string to number
if sid <1 or sid > len(self.operations):
""" So iether the Apex bruker software went wrong, OR,
more likely, this is older SMART data.
"""
# handle this dataset some other way???
# just as a group of files ??? TBD...
##### print "No Bruker Postgres Scan operation info for ", key
print "Building metadata for scan",key
# smart has fixed chi, so only omega scan is possible, mainly.
dset = {'proto':None, 'num':None, 'mach':None,
'dsetname':dsetname,
'description': "Scan frame data",
'params':[] , 'scan':"omega scan" }#assumed. no idea what kind
files = []
version = None
file0 = filelist[0]
file1 = filelist[-1]
meta0 = None; meta1 = None
try:
from fileHandlers.oxfordProcess import OXFORD
filepath = dir + "/" + file0
f = open(filepath)
sf = OXFORD(f)
meta0 = sf.getMetadata()
f.close()
filepath = dir + "/" + file1
f = open(filepath)
sf = OXFORD(f)
meta1 = sf.getMetadata()
f.close()
except Exception, e:
print e
if file0 and file1 and file0!= file1 and meta0 and meta1:
version = meta0['version']
width = float(meta0['oscEnd'])- float(meta0['oscStart'])
delta = width # float(meta0['INCREME'])
axis = int(meta0['scanaxis'])
axisname = meta0['oscAxis']
distance = float(meta0['distance'])
wavelen = float(meta0['wavelength'])
time = float(meta1['exposureTime'])
startang = meta0['startangles'][1:5]
endang = meta1['endangles'][1:5]
frames = len(filelist) # meta1['NUMBER']
range = float(endang[axis-1]) - float(startang[axis-1])
speed = delta /time
types = {1:"Theta scan",2:"Omega scan",3:"Chi scan", 4:"Phi scan",9:"Still scan",
12:"Phi360 scan",14:"Theta/2Theta scan", 15:"Chi scan" }
params = [
{'name': '_diffrn_measurement.method',
'sval': axisname + " scan",
'units': 'n/a'},
{'name': '_diffrn_measurement.sample_detector_distance',
'nval': distance,
'units': 'mm'},
{'name': '_diffrn_scan.integration_time',
'nval': time,
'units': 's'},
{'name': '_diffrn_scan.SPEED',
'nval': speed,
'units': u'\xb0/s'},
{'name': '_diffrn_scan.id',
'sval': sid,
'units': 'n/a'},
# {'name': '_diffrn_scan.frames',
# 'nval': self.protonums[n],
# 'units': 'n/a'},
{'name': '_diffrn_scan.AXIS_ANGLE_RANGE',
'nval': range,
'units': u'\xb0'},
{'name': '_diffrn_scan.AXIS_ANGLE_DIRECTION',
'nval': -1 if delta <0.0 else +1 ,
'units': 'n/a'},
{'name': '_diffrn_scan.AXIS_ANGLE_INCREMENT',
'nval': delta,
'units': u'\xb0'},
{'name': '_diffrn_scan.DETECTOR_AXIS_ANGLE_TWOTHETA',
'nval': startang[0],
'units': u'\xb0'},
{'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_OMEGA',
'nval': startang[1],
'units': u'\xb0'},
{'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_CHI',
'nval': startang[2],
'units': u'\xb0'},
{'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_PHI',
'nval': startang[3],
'units': u'\xb0'},
]
else:
version = "?"
params = { }
for file in filelist:
dir1, filename = split(file)
filepath = dir + "/" + file
"""
if not version: # get version number only for first data file
try:
from sfrmProcess import SFRM
f = open(filepath)
sf = SFRM(f)
meta = sf.getMetadata()
f.close()
version = meta['VERSION']
except Exception, e:
print e
version = "?"
"""
elements = [
# {'name': 'datafile_version_comment',
# 'content': '' },
{'name': 'datafile_format',
'content': 'oxford img' },
{'name': 'datafile_format_version',
'content': version },
]
self.addCommonFileData(filepath,elements)
file = {'dir':dir, 'name':filename, 'params': [ ],
'elements': elements }
# probably we need to specify file types etc
# and or remove ICAT unsupported/registered format types
self.purgeCheck(file)
files.append(file)
dset['params'].append( {'name': '_diffrn_scan.frames',
'nval': len(filelist),
'units': 'n/a'} )
dset['params'].extend(params)
dset['files'] = files
dsets.append(dset)
continue
ops = self.operations[sid-1]
# set = protolist[i]
# if not set: continue # rationalised from existance
# num = self.protonums[i]
num = len(filelist)
set = "ppppp"
#if set[-1]=='sfrm':
if filelist[0].endswith('.sfrm'):
type = 'Apex'
else:
type = 'SMART'
scantype = ops[0]['sval']
ops.append( {'name': '_diffrn_scan.frames',
'nval': len(filelist),
'units': 'n/a'} )
siter = sid
dset = {'proto':set, 'num':len(filelist), 'mach':type,
'dsetname':dsetname,
'description': "Scan frame data",
'params':ops, 'scan':scantype }
print dsetname
files = []
version = None
for file in filelist:
filepath = dir + "/" + file
filename = file
# dir,filename = split(file)
if type == 'Apex':
i = int(''.join(filename[-9:-5]))
else:
i = int(''.join(filename[-3:]))
if not version: # get version number only for first data file
try:
from fileHandlers.sfrmProcess import SFRM
f = open(filepath)
sf = SFRM(f)
meta = sf.getMetadata()
f.close()
version = meta['VERSION']
except Exception, e:
print e
version = "?"
elements = [ {'name': 'description',
'content': 'frame' },
{'name': 'datafile_version',
'content': '1' },
{'name': 'datafile_version_comment',
'content': '' },
{'name': 'datafile_format',
'content': 'bruker sfrm' },
{'name': 'datafile_format_version',
'content': version },
]
self.addCommonFileData(filepath,elements)
file = {'dir':dir, 'name':filename,
'elements': elements,
'params': [
{'name': '_diffrn_scan_frame.frame_id',
'sval': filename,
'units': 'n/a'},
{'name': '_diffrn_scan_frame.scan_id',
'nval': sid,
'units': 'n/a'},
{'name': '_diffrn_scan_frame.frame_number',
'nval': i,
'units': 'n/a'},
],
}
# Maybe should add extra params here, scan, frame id, angles
#_diffrn_scan_frame.GONIOMETER_OMEGA # at start of scan step
#_diffrn_scan_frame.GONIOMETER_KAPPA # at start of scan step kappa/phi
#_diffrn_scan_frame.GONIOMETER_CHI # at start of scan or chi/phi'
#_diffrn_scan_frame.GONIOMETER_PHI # at start of scan step
#_diffrn_scan_frame.GONIOMETER_PHIP # at start of scan step phi'
#_diffrn_scan_frame.DETECTOR_TWO_THETA # at start of scan step
#_diffrn_scan_frame.DETECTOR_DISTANCE # at start of scan
#_diffrn_scan_frame.SCAN_TYPE # omega or phi i.e what angle changed?
# # don't know about omega/2theta scans. I'm not sure if they are used?
#_diffrn_scan_frame.SCAN_END # end angle after scan.
# # maybe it should just be the +/- incr/decr
# # because there are two ways to get there?
self.purgeCheck(file)
files.append(file)
dset['files'] = files
dset['_diffrn_scan.frame_id_start'] = files[0]['name']
dset['_diffrn_scan.frame_id_end'] = files[-1]['name']
dsets.append(dset)
return dsets
def buildDatasets(self, protolist):
"""Made an executive decision to treat each scan as a dataset
For each image, there should be a corresponding \work\image.raw
integrated intensity file. should we archive those derived files too?
"""
protolist = self.rationaliseDatasetData(protolist)
pnum = len(self.protonums) # num of each scan
if pnum != len(protolist): # file protos for each scan
raise Exception, "mismatch between SAINT framename and integrate_nframes " + fileproto
dsets = []
for i in range(0,pnum):
set = protolist[i]
if not set: continue # rationalised from existance
num = self.protonums[i]
if set[-1]=='sfrm':
type = 'Apex'
sfld = -3
else:
type = 'SMART'
sfld = -1
sid = int(set[sfld-2])
print sid, len(self.operations)
if sid>0 and sid<=len(self.operations): # apparently the database is screwed up sometimes.
ops = self.operations[sid-1]
scantype = ops[0]['sval']
else:
scantype = 'unknown'
ops = []
ops.append( {'name': '_diffrn_scan.frames',
'nval': num,
'units': 'n/a'} )
siter = set[sfld]
iter = int(siter)
lit = len(siter)
dset = {'proto':set, 'num':num, 'mach':type,
'dsetname':"".join(set[1:sfld-1]),
'params':ops, 'scan':scantype }
#for key in ops.keys():
# dset[key] = ops[key] # copy
files = []
format = "%%0%dd" % lit
for idx in range(iter,iter+num):
dir = set[0]
sequ = format % idx
new = set[1:]
new[sfld] = sequ
filename =''.join(new)
file = {'dir':dir, 'name':filename, 'params':
[ {'name': '_diffrn_scan_frame.frame_id',
'sval': filename,
'units': 'n/a'},
{'name': '_diffrn_scan_frame.scan_id',
'nval': i,
'units': 'n/a'},
{'name': '_diffrn_scan_frame.frame_number',
'nval': idx,
'units': 'n/a'},
]
}
# Maybe should add extra params here, scan, frame id, angles
#_diffrn_scan_frame.GONIOMETER_OMEGA # at start of scan step
#_diffrn_scan_frame.GONIOMETER_KAPPA # at start of scan step kappa/phi
#_diffrn_scan_frame.GONIOMETER_CHI # at start of scan or chi/phi'
#_diffrn_scan_frame.GONIOMETER_PHI # at start of scan step
#_diffrn_scan_frame.GONIOMETER_PHIP # at start of scan step phi'
#_diffrn_scan_frame.DETECTOR_TWO_THETA # at start of scan step
#_diffrn_scan_frame.DETECTOR_DISTANCE # at start of scan
#_diffrn_scan_frame.SCAN_TYPE # omega or phi i.e what angle changed?
# # don't know about omega/2theta scans. I'm not sure if they are used?
#_diffrn_scan_frame.SCAN_END # end angle after scan.
# # maybe it should just be the +/- incr/decr
# # because there are two ways to get there?
files.append(file)
dset['files'] = files
dset['_diffrn_scan.frame_id_start'] = files[0]['name']
dset['_diffrn_scan.frame_id_end'] = files[-1]['name']
dsets.append(dset)
return dsets
if __name__ == '__main__':
    # Ad-hoc test harness: exercises deduceFrames()/buildDatasets() against
    # the Bruker Postgres database for a single hard-coded sample.
    # NOTE(review): 'frames', 'dbcnx', 'getStrategyScans' (and presumably
    # 'sys') all come from the star-import below / earlier in this file --
    # verify mapConfig actually defines them.
    #import and possibly override, default config info
    from mapConfig import *
    bfb = OxfordDatasetBuilder()
    for sample in frames:
        sid = sample[0]
        # Debugging restriction: only process sample id 3.
        if sid != 3: continue
        rev = sample[1]
        protonums = sample[2]
        fileproto = sample[3]
        # Column header for the operations rows printed below.
        fields = "operation_types_id, time, dx, sweep, direction, width, theta, omega, chi, phi, speed"
        try:
            print sid, fileproto
            #ops = getOperations(dbcnx,sid,rev)
            ops = getStrategyScans(dbcnx,sid,rev)
            print fields
            for op in ops:
                print op
            # Expand the filename prototypes into per-scan frame lists.
            zzz = bfb.deduceFrames(fileproto,protonums, ops)
            if zzz:
                for it in zzz:
                    print "   ", it
                yyy = bfb.buildDatasets(zzz)
                #print yyy
                print protonums
            else:
                print None
        except Exception, m:
            # Report the failure for this sample and keep going.
            import traceback
            traceback.print_exc(file=sys.stdout)
            print sid, m
| Python |
#
# file: $Id: templateParser.py 78 2010-08-17 00:42:53Z duboulay $
#
# Author: Doug du Boulay <boulay_d@chem.usyd.edu.au>
# Copyright (c) 2009, The University of Sydney
# All rights reserved.
#
# This software is distributed under the BSD license.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
from string import atoi
from xml import sax
from xml.sax import make_parser
import logging
# this is one of ours
from xml2dict import XMLDictionaryHandler, XMLMarshal
from dbConnect import *
DEBUG = 1+ 2 + 4 + 8 + 16 + 32 + 64 + 128 + 256
DEBUG = 0
class Status:
    """Pair a success flag with the associated eval/exec/SQL result.

    Lets callers report failure without raising: ``success`` is the
    boolean outcome, ``value`` carries the payload (note the attribute
    is named ``value`` although the constructor argument is ``result``).
    """
    def __init__(self, success, result):
        self.value = result
        self.success = success
class TemplateParser:
    """Class to read a Bruker postgres database and an ICAT xml template
    file and populate the template with data from the database.

    The template is held in the nested ``[attrs, text, children]``
    dictionary form produced by xml2dict.XMLDictionaryHandler.  Elements
    whose names start with ``map:`` are directives (sql, py, for-each,
    if, attribute, element) evaluated against a stack of substitution
    contexts; everything else is copied through.  The populated tree
    ends up in ``self.genData`` after applyTemplate().
    """
    def __init__(self):
        self._log = logging.getLogger('%s.%s' %
                        (__name__, self.__class__.__name__))
        self.dbcnx = None           # database connection, see registerDBConnection()
        self.templateFile = None    # path of the loaded XML template
        self.template = {}          # template as an XMLDictionaryHandler dict
        self.globalContext = None   # top-level substitution parameters
        self.contextList = []       # stack of evaluation contexts
        self.currentContext = None  # always contextList[-1]
        self.ancestors = []         # stack of partially built output nodes
        self.genData = None         # final generated tree (set by applyTemplate)
        self.injectData = None      # buffer filled by injectXML() from map:py exec
        self.ctnode = None          # template node currently being processed (debug aid)
    def initTemplate(self, file):
        """Parse the XML template *file* into ``self.template``."""
        self.templateFile = file
        p = make_parser()
        h = XMLDictionaryHandler()
        p.setContentHandler(h)
        # NOTE(review): the file handle handed to parse() is never
        # explicitly closed.
        p.parse(open(file))
        self.template = h.d
        if DEBUG & 1:
            self._log.debug("Template loaded from file: %s",file)
    def registerDBConnection(self, dbconnect):
        """Register the database connection used by map:sql directives."""
        self.dbcnx = dbconnect
    def applyTemplate(self, params):
        """Populate the loaded template using *params* as global context.

        On success the generated tree is left in ``self.genData``.
        :raises Exception: if no template has been loaded.
        """
        if self.template == None:
            raise Exception, "No XML template supplied"
        self.globalContext = params
        self.contextList.append(params)
        self.currentContext = self.contextList[-1]
        res = self.parseTemplate(self.template, None )
        if res.success:
            self.genData = res.value
    def parseTemplate(self, inNode, outParent,push=True):
        """Recursively transform *inNode*, evaluating any map: directives.

        inNode is not necessarily an XML element: value hash -
        it maybe an attribute hash or a list of children or a string value too.
        *outParent* is the output node results attach to; *push* controls
        whether a fresh context is pushed while walking a child list.
        Returns a Status whose value is the transformed node.
        """
        self.ctnode = inNode
        if type(inNode) == type({}):
            data = {}
            self.ancestors.append(data)
            for elem, value in inNode.items():
                if elem.find(u'map:')==0:
                    if elem == u'map:key': # only if a map attrib on icat element
                        self.currentContext[value] = self.globalContext[value] # redundant
                        continue
                    res = self.evalMap(elem, value, self.currentContext,outParent)
                    if res.success:
                        if res.value != None: data[elem] = res.value
                    else: # failed to eval. Abort
                        #print elem, value, res.value
                        self.ancestors.pop()
                        return Status(False,[{},res.value,[]]) # replace with dummy "node"
                else:
                    data[elem] = None # so the kiddies know what their great grandparent was
                    # print elem
                    res = self.parseTemplate(value,data)
                    if res.success:
                        data[elem] = res.value
                    else:
                        # hopefully a dummy result
                        data[elem] = res.value
                        pass #????????????
            self.ancestors.pop()
            data = self.postFixElementHash(data);
            return Status(True,data)
        elif type(inNode) == type([]): # a list of child elements as hashes?
            data = []
            self.ancestors.append(data)
            if push:
                ctxt = self.cloneCtxt()
            for value in inNode:
                res = self.parseTemplate(value,data)
                if res.success:
                    if res.value != None:
                        data.append(res.value)
                    #else:
                    #    parent[1] = '?'
                else: # possible subsequent tests may depend on this result
                    self.ancestors.pop()
                    if push:
                        self.popCtxt()
                    return res
            if push:
                self.popCtxt()
            self.ancestors.pop()
            return Status(True,data)
        elif type(inNode) == type(''):
            return Status(True,inNode)
        elif type(inNode) == type(u''):
            return Status(True,inNode)
        # NOTE(review): the three-expression raise form below supplies
        # inNode as the (pre-Py2.3-style) traceback argument, which would
        # itself raise a TypeError -- probably meant Exception(msg, inNode).
        raise Exception, "Unexpected node type", inNode
    def postFixElementHash(self, data):
        """We have to postfix the generated node tree to be as
        simple as possible and still consistent with a generated
        XMLDictionaryHandler() XML data structure.

        Collapses a hash that consists solely of a single map: result into
        its parent/grandparent nodes (text, attribute or child-list), using
        the ancestors stack to find where the value belongs.
        """
        mapped = 0
        nall = 0
        for elem, value in data.items():
            if elem.find(u'map:')==0:
                mapped += 1
            else:
                nall += 1
        if not mapped:
            return data
        #
        #check = {}
        check = None
        # print "woot " , data.keys(), mapped, nall
        if mapped + nall == mapped:
            if mapped == 1:
                p = self.ancestors[-1]
                gp = self.ancestors[-2]
                ggp = self.ancestors[-3]
                # print "no way!" , type(p), type(gp)
                if type(p) == type([]):
                    # print ggp
                    # print gp
                    # print p
                    # print data
                    if type(gp) == type([]):
                        #print "no way!" , len(gp), gp[1]
                        if len(gp)>=2 and gp[1] == '': # no data added yet, so just 2!
                            for elem, value in data.items(): # there is only one
                                if elem== u'map:attribute': # purge map:attrib
                                    for att, val in data[elem].items(): # there is only one
                                        gp[0][att] =val
                                    return check
                                if type(value) == type(''):
                                    gp[1] = value
                                    #print gp
                                elif type(value) == type([]):
                                    p.extend(value) # append list of elements to existing ...
                                    if DEBUG & 8:
                                        self._log.debug("Extending existing list")
                                else:
                                    gp[1] = value
                                    #print " what the?" , value, " ", type(value)
                            # print "result: ", gp
                            return check
                        elif len(gp)==2 : #
                            self._log.debug("whats going on? %s %s", str(gp),str(data))
                            for elem, value in data.items(): # there is only one
                                if type(value) == type(''):
                                    p.append(value) # append string to child list
                                elif type(value) == type([]):
                                    p.extend(value) # append list of elements to existing ...
                                    if DEBUG & 8:
                                        self._log.debug("Extending existing list")
                                else:
                                    gp[1] = value
                                    self._log.debug("what the? %s %s", str(value),str(type(value)))
                            # NOTE(review): the debug call below passes str(gp)
                            # without a %s placeholder in the format string.
                            self._log.debug("result: ", str(gp))
                            return check
                        else:
                            for elem, value in data.items(): # there is only one
                                check = value
                            return check
                    else:
                        self._log.error("expected grandparent to be a list")
                else:
                    self._log.error("expected parent to be a list")
        else:
            # ummm not sure how this could happen
            p = self.ancestors[-1]
            gp = self.ancestors[-2]
            self._log.error("Unexpected xml hierarchy mangling condition for: %s" ,
                data)
        return data
    def evalMap(self, elem, value, context,parent):
        """Dispatch a ``map:*`` directive *elem* to its handler.

        Supported: map:sql, map:py (terminal/leaf directives) and
        map:for-each, map:if, map:attribute, map:element (recursive).
        Returns the handler's Status; unknown directives yield
        Status(False, None).
        """
        if elem.find(u'map:')!=0:
            # NOTE(review): 'key' is undefined here -- this raise would
            # itself fail with a NameError if ever reached.
            raise Exception, "template parsing error on: " + elem + ": " + value + ": " + key
        func = elem[4:]
        if func == u'sql': # this is a terminal/leaf node value is a list
            res = self.evalSQL(elem, value, context, parent)
            return res
        elif func == u'py': # this is a terminal/leaf node value is a list
            res = self.evalPy(elem, value, context, parent)
            return res
        # this is not a terminal leaf node
        elif func == u'for-each': # this has to recurse like parseTemplate
            res = self.__foreach(elem, value, context, parent)
            return res
        # this is not a terminal leaf node
        elif func == u'if': # this has to recurse like parseTemplate
            res = self.__if(elem, value, context, parent)
            return res
        # this is not a terminal leaf node
        elif func == u'attribute': # this has to recurse like parseTemplate
            res = self.__attribute(elem, value, context, parent)
            return res
        elif func == u'element': # this has to recurse like parseTemplate
            res = self.__element(elem, value, context, parent)
            return res
        self._log.warning("unknown map function %s : %s :" ,func ,value )
        return Status(False,None)
    def evalSQL(self, elem, value, context, parent):
        """Evaluate a ``map:sql`` directive.

        value is the [attrs, text, children] triple of the map:sql node:
        value[1] is the query text (with %(name)s context substitutions);
        value[0] may carry ``map:key`` (store the result in the context
        instead of returning it) and ``map:none`` ('skip'/'break')
        controlling behaviour when the query yields nothing.  Empty
        results are turned into '?' or 'N/A' depending on whether the
        great-great-grandparent node is a string_value or numeric_value
        element (CIF vs oracle convention).
        """
        if self.dbcnx == None:
            self._log.debug("%s %s", self.ctnode, context)
            self.dumpMap(context)
            raise Exception, "No database connection"
        try:
            val = self.contextSubstitute(value[1], context)#apply substitutions
        except Exception, msg:
            if u'map:none' in value[0] and value[0][u'map:none'] == 'skip':
                if u'map:key' in value[0]:
                    context[value[0][u'map:key']] = None
                return Status(True,None)
            self._log.debug("%s %s", self.ctnode, context)
            raise Exception, msg
        try:
            res = self.dbcnx.query(val) # [ attribs, text, sub elements ]
        except DBConnectSQLSyntaxException, msg:
            self._log.debug("%s %s", self.ctnode, context)
            self._log.debug(msg)
            raise DBConnectSQLSyntaxException, msg
        except DBConnectSQLResultException, msg:
            self._log.debug("%s %s", self.ctnode, context)
            self._log.debug(msg)
            raise DBConnectSQLResultException, msg
        ret = None
        if res.nfields <= 0:
            if u'map:key' in value[0] and \
               u'map:none' in value[0] and value[0][u'map:none'] == 'skip':
                context[value[0][u'map:key']] = None
                return Status(True,None)
            gggp = self.ancestors[-4]
            if u'string_value' in gggp:
                return Status(False, '?') ######### this is a CIF thing ....
            elif u'numeric_value' in gggp:
                return Status(False, 'N/A' ) ######### this is an oracle thing ....
                ######### hope it doesn't kill anything else!
            return Status(False,'N/A') # query failed to return a result
        if u'map:key' in value[0]: # add value to context
            if res.ntuples == 1 and res.nfields == 1: # scalar
                if res.getvalue(0,0) == None:
                    if u'map:none' in value[0]:
                        if value[0][u'map:none'] == 'break':
                            # elegant stop on this item, but continue
                            gggp = self.ancestors[-4]
                            if u'string_value' in gggp:
                                return Status(False, '?') ######### this is a CIF thing ....
                            elif u'numeric_value' in gggp:
                                return Status(False, 'N/A' ) ######### this is an oracle thing ....
                                ######### hope it doesn't kill anything else!
                            return Status(False,'N/A') # query failed to return a result
                        if value[0][u'map:none'] == 'skip':
                            context[value[0][u'map:key']] = None ###
                            return Status(True,None) # success, but no result
                        raise DBConnectSQLSyntaxException, \
                            "don't know what to do with map:none= "+ value[0][u'map:none']
                #else, come what may ...
                context[value[0][u'map:key']] = res.getvalue(0,0)
                self._log.debug(res.getvalue(0,0) )
            elif res.nfields == 1: # simple list
                l = []
                for i in range(res.ntuples):
                    for j in range(res.nfields):
                        l.append(res.getvalue(i, j))
                if len(l)<=0:
                    if u'map:none' in value[0] and value[0][u'map:none'] == 'break':
                        # elegant stop on this item, but continue
                        gggp = self.ancestors[-4]
                        if u'string_value' in gggp:
                            return Status(False, '?') ######### this is a CIF thing ....
                        elif u'numeric_value' in gggp:
                            return Status(False, 'N/A' ) ######### this is an oracle thing ....
                            ######### hope it doesn't kill anything else!
                        return Status(False,'N/A') # query failed to return a result
                context[value[0][u'map:key']] = l
            else: # an array
                l = []
                for i in range(res.ntuples):
                    l.append([])
                    for j in range(res.nfields):
                        l[i].append(res.getvalue(i, j))
                if len(l)<=0:
                    if u'map:none' in value[0] and value[0][u'map:none'] == 'break':
                        # elegant stop on this item, but continue
                        gggp = self.ancestors[-4]
                        if u'string_value' in gggp:
                            return Status(False, '?') ######### this is a CIF thing ....
                        elif u'numeric_value' in gggp:
                            return Status(False, 'N/A' ) ######### this is an oracle thing ....
                            ######### hope it doesn't kill anything else!
                        return Status(False,'N/A') # query failed to return a result
                context[value[0][u'map:key']] = l
            # NOTE(review): this early return skips the res.clear() below.
            return Status(True,None) # success, but no result
            # end of map:key
        else: # this is "the" result.
            if res.ntuples == 1 and res.nfields == 1: # scalar
                ret = res.getvalue(0,0)
                if ret == None or ret == "n/a" or ret == "N/A" or ret == '':
                    # Handling no result is target specific
                    # somehow this has to be outsourced to be general.
                    # print gggp, elem
                    gggp = self.ancestors[-4]
                    if u'string_value' in gggp:
                        ret = "?" ######### this is a CIF thing ....
                    elif u'numeric_value' in gggp:
                        ret = "N/A" ######### this is an oracle thing ....
                        ######### hope it doesn't kill anything else!
                    else: ret = '?'
                else:
                    #convert numbers to strings - strings remain strings
                    ret = str(ret)
            else:
                ret = []
                for i in range(res.ntuples):
                    ret.append([])
                    for j in range(res.nfields):
                        ret[i].append(res.getvalue(i, j))
        res.clear()
        if DEBUG & 8:
            self._log.debug("sql: %s : %s" , value , ret)
            self._log.debug("--------------- %s" , context)
        return Status(True,ret) # success, and a result
    def my_os_path_basename(self, filename):
        """basename() replacement that handles Windows-style paths on unix:
        strips a drive letter and converts backslashes before delegating to
        the saved os.path.origbasename (installed by evalPy)."""
        if not filename: return filename
        if type(filename) != type(''):
            raise Exception , "error getting basename from non-string"
        if len(filename)<2: return filename
        if filename[1] == ":":
            filename = filename[2:]
        filename = filename.replace("\\", "/")
        import os
        return os.path.origbasename(filename)
    def evalPy(self, elem, value, context, parent):
        """Evaluate a ``map:py`` directive: try ``eval`` of value[1] first,
        falling back to ``exec`` on a SyntaxError (statements).

        With a ``map:key`` attribute the eval result is stored in the
        context instead of returned; exec-ed code may call InjectXML()
        (exposed in the context) to inject parsed XML, which is then
        returned via self.injectData.
        SECURITY NOTE: this evaluates arbitrary template-supplied python
        with 'os' in scope -- templates must be trusted.
        """
        import os
        # futz with basename because we are on unix but looking at windows paths
        os.path.__setattr__('origbasename', os.path.basename)
        if os.name == 'posix':
            os.path.__setattr__('basename', self.my_os_path_basename)
        context['InjectXML'] = self.injectXML
        context['os'] = os # apparently this is very dangerous!
        # because anyone could add a python expression to the template
        # to trash the disk or something.
        try:
            ret = eval(value[1], globals(), context)
            if DEBUG & 8:
                self._log.debug("py-eval: %s : %s" , value , ret)
            if u'map:key' in value[0]: # add value to context
                context[value[0][u'map:key']] = ret
                ret = None # success, but no result
            #ddb #ret = str(ret)
            os.path.__setattr__('basename', os.path.origbasename)
            return Status(True,ret) # success, and a result
        except SyntaxError, msg:
            pass
        except Exception, msg:
            self._log.error(msg)
            self._log.error("py-eval: %s :" , value )
            self.dumpMap(context)
            import sys, traceback
            exc_type, exc_value, exc_traceback = sys.exc_info()
            slist = traceback.format_tb(exc_traceback)
            self._log.error(str(slist[-1]))
            raise
            #percolate to the top
            #from sys import exit
            #exit()
        try:
            if u'map:key' in value[0]:
                self._log.error("py-exec: %s :" , value )
                self.dumpMap(context)
                self._log.critical("Template Error. Cannot apply map:key to an python 'exec' expression." )
                self._log.critical("It only works for 'eval' expressions.")
                raise Exception, "Fatal Exception Template error FIXUPmsg"
                #from sys import exit
                #exit()
            exec value[1] in globals(), context
            if DEBUG & 8:
                self._log.error("py-exec: %s" , value )
            if self.injectData != None:
                tmp = self.injectData
                self.injectData = None
                if DEBUG & 8:
                    self._log.error("Yo-katta!")
                os.path.__setattr__('basename', os.path.origbasename)
                return Status(True,tmp)
            os.path.__setattr__('basename', os.path.origbasename)
            return Status(True,None) # success, and a result
        except Exception, msg:
            self._log.error(self.ctnode)
            self.dumpMap(context)
            self._log.error("py-exec: %s %s" , msg.__class__ , msg)
            self._log.error("premature termination")
            import traceback
            import sys
            traceback.print_exc(file=sys.stdout)
            raise Exception, msg
            #sys.exit(1)
            #return Status(False,None) # success, and a result
        # NOTE(review): unreachable -- every path above returns or raises,
        # so this final basename restore never runs (and an exception path
        # leaves os.path.basename patched).
        os.path.__setattr__('basename', os.path.origbasename)
        # um, did it work?
    def __foreach(self, elem, value, context, parent):
        """ The idea is to remove all trace of this "map:for-each" node from the target tree
        and replace it with the results of its children.
        This is gunna fail if the map:from is, say a SQL 1 row tuple
        rather than a multirow list! bummer!

        map:from names the iterable (evaluated in the context); map:key
        names the loop variable bound into a cloned context for each pass.
        NOTE(review): if map:from is absent, '_from' is undefined and the
        first use below raises NameError.
        """
        # get the foreach "list"
        if u'map:from' in value[0]: # add value to context
            _listName = value[0][u'map:from']
            try:
                _from = eval(_listName, globals(), context)
            except Exception, msg:
                self.dumpMap(context)
                self._log.error(msg)
                self._log.error("py-eval: %s" , value )
                #raise Exception, msg
                #percolate to the top
                self._log.critical("It only works for 'eval' expressions.")
                raise Exception, "Fatal Exception Template error FIXUPmsg"
                #from sys import exit
                #exit()
        # make a list out of it so we can iterate
        if _from== None:
            _from = []
        elif type(_from) != type([]) and type(_from) != type(()):
            _from = [_from]
        # get the foreach "key"
        if not u'map:key' in value[0]: # add value to context
            self.dumpMap(context)
            raise Exception, "map:foreach loop with no iterator 'key'"
        _iterName = value[0][u'map:key']
        # print "\nancestor ",self.ancestors
        # print "\nparent ",parent
        # self.ancestors.append(data)
        ctxt = self.cloneCtxt() # make a copy of current and push on the stack
        # print elem
        # print value
        for val in _from:
            ctxt[_iterName] = val # technically this is self.currentContext
            #print "context1" ,type(context),
            #self.dumpMap(context)
            #print "context2" ,
            #self.dumpMap(self.currentContext)
            # print type(elem)
            # print "hello", val
            res = self.parseTemplate(value[2],parent) # value2 is a list of child nodes
            if res.success:
                if res.value != None:
                    for v in res.value: # for each result of the child list
                        parent.append(v) # append as a child of our (for-each) parent
            else: # no children so ?
                # should we break the loop?
                # or throw an exception or what?
                break
        ctxt = self.popCtxt() # make a copy of current and push on the stack
        #return Status(False,[{},"",[]]) # replace with dummy "node"
        return Status(True,None) # already injected
    def __if(self, elem, value, context, parent):
        """Evaluate a "map:if" node: remove all trace of it from the
        target tree and, when the map:test expression evaluates true in
        the current context, splice the results of its children directly
        into *parent* (the current context is retained, not cloned)."""
        # get the if "test"
        if not u'map:test' in value[0]: # add value to context
            raise Exception, "map:if with no test? that ain't right!"
        _test = value[0][u'map:test']
        try:
            ret = eval(_test, globals(), context)
            if DEBUG & 16:
                self._log.debug("map:if: %s : %s" , _test , ret)
        except Exception, msg:
            self._log.error(msg)
            self._log.error("map:if: %s " , value )
            self.dumpMap(context)
            raise Exception, msg
        if not ret: # doesn't evaluate true in python sense
            return Status(True,None) # successfully evaluated but no result
        # print "\nancestor ",self.ancestors
        # print "\nparent ",parent
        # self.ancestors.append(data)
        # print elem
        # print value
        # print type(elem)
        # print "hello", val
        #ddb ctxt = self.cloneCtxt() # make a copy of current and push on the stack
        res = self.parseTemplate(value[2],parent,False) # value2 is a list of child nodes
        # retain current context
        if res.success:
            if res.value != None:
                for v in res.value: # for each result of the child list
                    parent.append(v) # append as a child of our (for-each) parent
        else: # no children so ?
            # should we break the loop?
            # or throw an exception or what?
            pass
        #ddb ctxt = self.popCtxt() # pop context from stack
        return Status(True,None) # already injected
    def __element(self, elem, value, context, outParent):
        """Evaluate a "map:element" node: generate a new output element
        whose tag name is the map:name expression evaluated in the current
        context, append it to *outParent*, and attach the results of the
        directive's children as its children."""
        # get the if "test"
        if not u'map:name' in value[0]: # add value to context
            raise Exception, "map:element with no attribute name!"
        _name = value[0][u'map:name']
        try:
            ret = eval(_name, globals(), context)
            if DEBUG & 16:
                self._log.debug("map:element: %s : %s" , _name , ret)
        except Exception, msg:
            self._log.error(msg)
            self._log.error("map:element: %s " , value )
            self.dumpMap(context)
            raise Exception, msg
        if not ret:
            raise Exception, "map:element generates null name!"
        ctxt = self.cloneCtxt() # make a copy of current and push on the stack
        # global DEBUG
        # savedebug = DEBUG
        # DEBUG = 1+ 2 + 4 + 8 + 16 + 32 + 64 + 128 + 256
        # element = {ret: value } # new output parent XML element node
        children = []
        element = {ret: [{},'',children] } # new output parent XML element node
        self.ancestors.append([]) # otherwise map:py children don't apply
        # parseTemplate(self, inNode, outParent,push=True)
        res = self.parseTemplate(value[2],children,False) # value2 is a list of child nodes
        self.ancestors.pop()
        outParent.append(element)
        # The logic here defies me, but it seems to work for simple case ....
        """
        print "ancestor ",self.ancestors[-3:]
        print "parent " , outParent
        print "elem in" , elem
        print "element " , element
        print "element out" , ret
        print "children" , children
        """
        if res.success:
            if res.value != None:
                for v in res.value: # for each result of the child list
                    # print "append " , v
                    children.append(v) # append as a child of our ('element') parent
        else: # no children so ?
            # should we break the loop?
            # or throw an exception or what?
            pass
        ctxt = self.popCtxt() # pop context from stack
        # DEBUG = savedebug
        #return Status(True,element) # already injected
        return Status(True,None) # already injected
    def __attribute(self, elem, value, context, parent):
        """ The idea is to remove all trace of this "map:attribute" node from the target tree
        and replace/add a new attribute in its parent - with a
        value taken from result of its children.  The attribute name comes
        from map:atname; the value is the concatenated text of the child
        results.  Returns the {name: value} dict for postFixElementHash to
        merge into the parent element. """
        # get the if "test"
        if not u'map:atname' in value[0]: # add value to context
            self._log.error("map:attribute with no attribute name ")
            # NOTE(review): 'msg' is undefined here -- this raise would
            # surface as a NameError rather than the intended Exception.
            raise Exception, msg
        _name = value[0][u'map:atname']
        ctxt = self.cloneCtxt() # make a copy of current and push on the stack
        dummy = [{},"",value[2]] # value2 is a list of child nodes
        res = self.parseTemplate(dummy,parent)
        ret = None
        if res.success:
            if res.value != None:
                sbuf = [res.value[1]] # pull out string val
                for v in res.value[2]: # for each hash in the child list
                    self._log.debug(res.value)
                    if v: # not empty dict
                        key=v.keys()[0]
                        sbuf.append(v[key][1]) # get string component
                ret = {_name : "".join(sbuf) }
        else: # no children so ?
            # should we break the loop?
            # or throw an exception or what?
            pass
        ctxt = self.popCtxt() # pop context from stack
        return Status(True,ret) # postFix this baby
    def dumpMap(self,map):
        """Debug-log every key/value of *map*, suppressing the bulky
        'H5FILE' entry."""
        self._log.debug("CONTEXT DUMP")
        for key in map.keys():
            if key!="H5FILE":
                self._log.debug("    %s %s", key, str(map[key]) )
            else:
                self._log.debug("    %s --SUPPRESSED--", key )
    def cloneCtxt(self):
        """Push a shallow copy of the current context onto the stack and
        make it current; returns the new context."""
        ctxt = {}
        for key, val in self.contextList[-1].items():
            ctxt[key] = val
        self.contextList.append(ctxt)
        self.currentContext = ctxt
        return ctxt
    def popCtxt(self):
        """Pop the top context and restore the previous one as current.
        Note: returns None (callers assigning the result get None)."""
        self.contextList.pop()
        self.currentContext = self.contextList[-1]
    def contextSubstitute(self, string, context ):
        """Apply %(name)s substitutions from *context* to *string*;
        raises KeyError/ValueError on missing keys or bad formats."""
        if DEBUG & 2:
            self._log.debug("%s ======== %s" ,string, context)
        return string % context
    def injectXML(self, content ):
        """Parse *content* (an XML string, typically produced by map:py
        exec code) and append its dict form to self.injectData, which
        evalPy() later returns as the directive's result."""
        if DEBUG & 16:
            self._log.debug("injected %s" ,content)
        if self.injectData == None:
            self.injectData= []
        p = make_parser()
        h = XMLDictionaryHandler()
        p.setContentHandler(h)
        q = sax.parseString(content,h)
        if DEBUG & 64:
            import pprint
            pprint.pprint(h.d, indent=1)
        self.injectData.append(h.d)
if __name__ == '__main__':
    # Stand-alone test driver: populate ICATTEMPLATE (expected to come from
    # an earlier config import -- verify it is defined) and write the
    # generated XML alongside it as "<template>.out".
    parser = TemplateParser()
    parser.initTemplate(ICATTEMPLATE)
    try:
        params = {} # null params
        parser.applyTemplate(params)
    except Exception as msg:
        # BUGFIX: the original tested "inp[0] != 'b'" here, but 'inp' is
        # never defined, so any template failure died with a NameError
        # instead of being reported.
        print(msg)
        sys.exit(1)
    fname = ICATTEMPLATE + ".out"
    if DEBUG & 32:
        if parser.genData != None:
            import pprint
            pprint.pprint(parser.genData, indent=1)
    if parser.genData != None:
        # Serialize the generated dictionary tree back out as XML.
        from xml.sax.saxutils import XMLGenerator
        from xml2dict import BinXMLSAXParser
        p = BinXMLSAXParser()
        p.d = parser.genData
        out = open(fname, 'w')
        p.setContentHandler(XMLGenerator(out))
        p._cont_handler.startDocument()
        p.__parse__(parser.genData)
        p._cont_handler.endDocument()
        out.close()  # BUGFIX: the output file handle was never closed
        print(fname)
    # BUGFIX: TemplateParser defines no disconnect(); the unconditional
    # call always raised AttributeError at the end of a successful run.
    if hasattr(parser, 'disconnect'):
        parser.disconnect()
| Python |
#
# file: $Id: xml2dict.py 72 2009-10-18 23:25:15Z duboulay $
#
# This code is in the public domain.
# see http://code.activestate.com/recipes/415983/
# Recipe 415983: Simple XML serlializer/de-serializer using
# Python dictionaries and marshalling
#
# - though modified somewhat for the purposes of the DataMINX project
# by Doug du Boulay (2009), The University of Sydney.
#
"""Simple XML marshaling (serializing) and
unmarshaling(de-serializing) module using Python
dictionaries and the marshal module.
"""
from xml.sax.handler import ContentHandler
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import XMLReader
from xml.sax import make_parser
import marshal
import os,sys,zlib
import pprint
class XMLDictionaryHandler(ContentHandler):
"""SAX Handler class which converts an XML
file to a corresponding Python dictionary """
def __init__(self):
self.curr=''
self.parent=''
self.count=0
self.d = {}
self.currd = {}
self.parentd = {}
self.stack = []
self.stack2 = []
self.depth = 0
self.prevEntity = False
def startElement(self, name, attrs):
""" Start element handler """
if self.count==0:
self.parent=name
self.d[name] = [dict(attrs),
'',
[]]
self.currd = self.d
else:
chld={name: [dict(attrs),
'',
[] ]}
self.parent = self.stack[-1]
self.parentd = self.stack2[-1]
chldlist = (self.parentd[self.parent])[2]
chldlist.append(chld)
self.currd = chld
self.stack.append(name)
self.stack2.append(self.currd)
self.curr=name
self.count += 1
def endElement(self, name):
""" End element handler """
if self.stack[-1] ==name:
self.stack.pop()
else :
raise 'BinXMLSAXParserException: XML parsing exception'
item =self.stack2[-1]
if item.has_key(name):
self.stack2.pop()
else :
raise 'BinXMLSAXParserException: XML parsing exception'
#for item in self.stack2:
# if item.has_key(name):
# self.stack2.remove(item)
def characters(self, content): # content is probably unicode
""" Character handler """
entity = False
if ord(content[0]) > 127:
entity = True
content = content.encode('utf-8')
#content = (content.encode('utf-8')).strip()
if content.strip()!='':
#print "con --",content
myd=((self.parentd[self.parent])[2])[-1]
currcontent = (myd[self.curr])[1]
if currcontent.strip():
#(myd[self.curr])[1] = "\n".join((currcontent, content))
if content[0] == ']':
(myd[self.curr])[1] = currcontent + content
elif entity:
(myd[self.curr])[1] = currcontent + content
elif self.prevEntity :
(myd[self.curr])[1] = currcontent + content
else:
(myd[self.curr])[1] = currcontent + "\n" + content
else:
(myd[self.curr])[1] = content
self.prevEntity = entity
def endDocument(self):
""" End document handler """
# Compress all text items
#self.packtext(self.d)
def packtext(self, map):
for key, value in map.items():
text = value[1]
value[1] = zlib.compress(text)
children = value[2]
for submap in children:
self.packtext(submap)
def dump(self,node):
"""
print "hello"
if type(node) == type({}):
for key, value in node.items():
if type(value) == type({}):
self.depth += 1
print key ": { \n" + " "*self.depth
for child in value:
self.dump(child)
self.depth -= 1
print "}"
elif type(value) == type([]):
print key + "\n" + " "*self.depth + "["
for child in value:
self.depth += 1
print key + "\n" + " "*self.depth + "["
if type(value) == type({}):
self.dump(child)
self.depth -= 1
print "]"
else:
print key,value;
"""
class BinXMLSAXParser(XMLReader):
"""A parser for Python binary marshal files representing
XML information using SAX interfaces """
def __init__(self):
XMLReader.__init__(self)
self.depth = 0
def parse(self, stream):
""" Parse Method """
# Check if it is a file object
if type(stream) is file:
try:
self.d = marshal.load(stream)
except Exception, e:
sys.exit(e)
# Check if it is a file path
elif os.path.exists(stream):
try:
self.d = marshal.load(open(stream,'rb'))
except Exception, e:
sys.exit(e)
else:
raise 'BinXMLSAXParserException: Invalid Input Source'
self._cont_handler.startDocument()
self.__parse__(self.d)
self._cont_handler.endDocument()
def __parse__(self, map):
""" Recursive parse method for
XML dictionary """
if type(map) != type({}):
if type(map) == type(u'') or type(map) ==type(''):
self._cont_handler.characters(map)
else:
print "failing on:", map
return
for key, value in map.items():
# For pretty printing
self._cont_handler.ignorableWhitespace(" "*self.depth)
attrs = value[0]
text = value[1]
children = value[2]
# Fire startElement handler event for key
self._cont_handler.startElement(key, attrs)
# Fire character handler event for value
#self._cont_handler.characters(zlib.decompress(text))
encoding = "UTF-8"
if type(text) != type("") and type(text) != type(u""):
text = str(text)
try:
self._cont_handler.characters(text) # content must me unicode
except:
text = unicode(text,encoding)
self._cont_handler.characters(text) # content must me unicode
# Nested element, recursively call
# this function...
self.depth += 1
# For pretty printing
# self._cont_handler.ignorableWhitespace('\n')
for child in children:
self.__parse__(child)
self.depth -= 1
# For pretty printing
# self._cont_handler.ignorableWhitespace(" "*self.depth)
# Fire end element handler event
self._cont_handler.endElement(key)
# For pretty printing
self._cont_handler.ignorableWhitespace('\n')
class XMLMarshal(object):
""" The XML marshalling class """
def dump(stream, xmlfile):
""" Serialize XML data to a file """
try:
p=make_parser()
h = XMLDictionaryHandler()
p.setContentHandler(h)
p.parse(open(xmlfile))
# print h.d
marshal.dump(h.d, stream)
except Exception, e:
sys.exit(e)
def dumps(stream, xmlfile):
""" Serialize XML data to a string """
try:
p=make_parser()
p.setContentHandler()
h = XMLDictionaryHandler()
p.parse(open(xmlfile))
return marshal.dumps(h.d, stream)
except Exception, e:
sys.exit(e)
return None
def load(stream, out=sys.stdout):
""" Load an XML binary stream
and send XML text to the output
stream 'out' """
try:
p=BinXMLSAXParser()
p.setContentHandler(XMLGenerator(out))
p.parse(stream)
except Exception, e:
sys.exit(e)
def loads(stream):
""" Load an XML binary stream
and return XML text as string """
import cStringIO
c=cStringIO.StringIO()
try:
p=BinXMLSAXParser()
p.setContentHandler(XMLGenerator(c))
p.parse(stream)
except Exception, e:
sys.exit(e)
return c.getvalue()
dump=staticmethod(dump)
dumps=staticmethod(dumps)
load=staticmethod(load)
loads=staticmethod(loads)
if __name__ == '__main__':
fname = 'icatmap.xml'
binname = os.path.splitext(fname)[0] + '.bin'
## Dump XML text to binary
#XMLMarshal.dump(open(binname,'wb'), fname)
## Dump XML binary to text
#XMLMarshal.load(open(binname,'rb'), open('sample.xml','w'))
p=make_parser()
h = XMLDictionaryHandler()
p.setContentHandler(h)
p.parse(open(fname))
pprint.pprint(h.d, indent=1)
# format_string = '%0' + str(4) + 'd'
# print format_string % 10
| Python |
#
# file: $Id: mapConfig.py 67 2009-08-28 06:01:54Z duboulay $
#
# Author: Doug du Boulay <boulay_d@chem.usyd.edu.au>
# Copyright (c) 2009, The University of Sydney
# All rights reserved.
#
# This software is distributed under the BSD license.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Here we define some dummy config parameters and hope they
will be overridden by real ones in an other config file -
ostensibly ../mapping-alt-conf/altMapConfig.py"""
import sys
ALTCONFIGDIR = "mapping-alt-conf"
ALTCONFIGFILE = "altMapConfig"
def importAltConfigHack():
"""Add a new config directory to the import path"""
import os
pathname = os.path.dirname(sys.argv[0])
abspath = os.path.abspath(pathname)
altconfig = os.path.join(abspath,"..",ALTCONFIGDIR)
sys.path.append(altconfig)
# define some global system variables as dummies
DBNAME = 'fred'
DBHOST = "flinstones.gov.bedrock"
DBPORT = "1521"
DBUSER = "BARNEY"
DBSCHEMA = "WILMA."
DBPASS = "pebbles"
ICATMETADATADIR = "icatdata"
ICATTEMPLATE = 'icatmap-dflt.xml'
#DEBUG = 1+ 2 + 4 + 8 + 16 + 32 + 64 + 128 + 256
DEBUG = 0
BYPASSDBCONN = True
# now override the above
importAltConfigHack()
try:
from altMapConfig import *
except:
print "No overriding config files. Using dummy settings"
| Python |
# file: $Id: dbConnect.py 68 2009-08-31 00:12:51Z duboulay $
#
# Author: Doug du Boulay <boulay_d@chem.usyd.edu.au>
# Copyright (c) 2009, The University of Sydney
# All rights reserved.
#
# This software is distributed under the BSD license.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
# these are non standard python extension modules
#import cx_Oracle
#import pyPgSQL
#from pyPgSQL import libpq
# see http://kb.parallels.com/en/1133 for info on how to set
# $PGSQL_DATA_D/postgresql.conf listen_addresses = '*'
# and add:
# $PGSQL_DATA_D/pg_hba.conf host all all 0.0.0.0/0 md5
# to permit remote tcpip access
DEBUG = 0
class DBConnectException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class DBConnectSQLSyntaxException(Exception):
def __init__(self, value):
if hasattr(Exception, '__init__'):
Exception.__init__(self,value)
class DBConnectSQLResultException(Exception):
def __init__(self, value):
if hasattr(Exception, '__init__'):
Exception.__init__(self,value)
class DBConnection:
""" This is a wrapper around iether pyPgSQL / libpq or
cx_Oracle so that we can try to provide a single map:sql interface
in the absence of dbi drivers unixODBC.
"""
def __init__(self,dummy=None):
self.debug = DEBUG
self.simMode = None # db simulation mode
if dummy == None or dummy != False:
self.dbmode = "db_bypass"
else:
self.dbmode = None
def pqInit(self, dbhost, dbport, dbname, dbuser, dbpass):
if self.dbmode == None:
self.dbmode = "libpq"
else:
self.simMode = "libpq"
self.dbname = dbname
self.dbhost = dbhost
self.dbport = dbport
self.dbuser = dbuser
self.dbpass = dbpass
self.dbcnx = None
def psycopg2Init(self, dbhost, dbport, dbname, dbuser, dbpass):
if self.dbmode == None:
self.dbmode = "psycopg2"
else:
self.simMode = "psycopg2"
self.dbname = dbname
self.dbhost = dbhost
self.dbport = dbport
self.dbuser = dbuser
self.dbpass = dbpass
self.dbcnx = None
def cxInit(self, dbhost, dbport, dbname, dbuser, dbpass):
if self.dbmode == None:
self.dbmode = "cx_Oracle"
else:
self.simMode = "cx_Oracle"
self.dbname = dbname
self.dbhost = dbhost
self.dbport = dbport
self.dbuser = dbuser
self.dbpass = dbpass
self.dbstr1 = dbuser + "/" + dbpass + "@" + \
"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=" + dbhost + \
")(PORT=" + dbport + "))(CONNECT_DATA=(SID=" + dbname +" )))"
self.dbcnx = None
self.cursor = None
def connect(self):
import sys
# Make a connection to the database and check to see if it succeeded.
if self.dbmode == "libpq":
from pyPgSQL import libpq
try:
#self.dbcnx = libpq.connect('%s:%s:%s:%s:%s::' %
self.dbcnx = libpq.PQconnectdb('host=%s port=%s dbname=%s user=%s password=%s' %
(self.dbhost, self.dbport, self.dbname, self.dbuser, self.dbpass) )
except libpq.Error, msg:
print "Connection to database '%s' failed" % self.dbname
print msg,
sys.exit()
elif self.dbmode == "psycopg2":
import psycopg2
try:
#self.dbcnx = libpq.connect('%s:%s:%s:%s:%s::' %
self.dbcnx = psycopg2.connect("host='%s' port='%s' dbname='%s' user='%s' password='%s'" %
(self.dbhost, self.dbport, self.dbname, self.dbuser, self.dbpass) )
self.cursor = self.dbcnx.cursor()
#self.cursor.execute("SET search_path TO scd,public")
except:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
print "Connection to database '%s' failed" % self.dbname
print exceptionType, exceptionValue,
sys.exit()
elif self.dbmode == "cx_Oracle":
import cx_Oracle
try:
self.dbcnx = cx_Oracle.connect(self.dbstr1)
self.cursor = self.dbcnx.cursor()
except cx_Oracle.Error, msg:
print "Connection to database '%s' failed" % self.dbname
print msg,
if __name__ == '__main__':
sys.exit()
raise DBConnectException, msg
elif self.dbmode == "db_bypass":
self.dbcnx = DummyDB(self.simMode)
else:
raise DBConnectException, "Unrecognised database connection mode" + self.dbmode
if self.debug & 1:
print "Connected to: ", self.dbname
def query(self,queryString):
" evaluate the query and pre-assemble the result, cast failures to our custom exceptions "
if self.dbmode == "libpq":
from pyPgSQL import libpq
# obviously this doesn't work transparently yet!
try:
res = self.dbcnx.query(queryString) # [ attribs, text, sub elements ]
except libpq.Error, msg:
print "SQL query failed\n%s" % msg,
raise DBConnectSQLSyntaxException(msg)
#sys.exit()
if res.resultStatus != libpq.TUPLES_OK:
raise DBConnectSQLResultException("ERROR: SQL query failed")
#print self.ctnode
return SQLResult(self.dbmode, res)
elif self.dbmode == "psycopg2":
#import libpq
# obviously this doesn't work transparently yet!
try:
res = self.cursor.execute(queryString) # [ attribs, text, sub elements ]
except:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
print exceptionTraceback
print "SQL query failed\n%s" % exceptionValue,
raise DBConnectSQLSyntaxException(exceptionValue)
#sys.exit()
#if res.resultStatus != libpq.TUPLES_OK:
# raise DBConnectSQLResultException("ERROR: SQL query failed")
# #print self.ctnode
return SQLResult(self.dbmode, self.cursor)
elif self.dbmode == "cx_Oracle":
import cx_Oracle
if self.debug & 1:
print queryString
try:
res = self.cursor.execute(queryString) # [ attribs, text, sub elements ]
except cx_Oracle.Error, msg:
raise DBConnectSQLResultException, msg
return SQLResult(self.dbmode, res)
elif self.dbmode == "db_bypass":
res = self.dbcnx.query(queryString) # [ attribs, text, sub elements ]
return SQLResult(self.dbmode, res)
else:
raise DBConnectException("Unrecognised database connection mode" + self.dbmode )
def finish(self):
if self.dbmode == "libpq":
if self.dbcnx:
self.dbcnx.finish()
elif self.dbmode == "cx_Oracle" or self.dbmode == "psycopg2":
self.cursor = None
if self.dbcnx:
self.dbcnx.close()
elif self.dbmode == "db_bypass":
if self.dbcnx:
self.dbcnx.close()
else:
raise DBConnectException("Unrecognised database connection mode" + self.dbmode )
if self.debug & 1:
print self.dbname , " disconnected!\n"
self.dbcnx = None
class SQLResult:
" we need to transparently wrap libpq/cx_oracle sql result lists somehow. "
def __init__(self, mode, result ):
self.result = result
self.dbmode = mode
self.rcnt = -1
if self.dbmode == "libpq":
self.resultStatus = result.resultStatus
self.nfields = result.nfields # table columns
self.ntuples = result.ntuples # table rows
# self.getvalue = result.getvalue # func call(i,j) - hopefully a reference??
elif self.dbmode == "cx_Oracle" or self.dbmode == "psycopg2":
"""
You can fetch all the rows into a list, but this can have some bad side effects if the result set is large.
You have to wait for the entire result set to be returned to your client process.
You may eat up a lot of memory in your client to hold the built-up list.
It may take a while for Python to construct and deconstruct the list which you are going to immediately discard anyways.
"""
self.ntuples = 0
self.nfields = 0
self.cachedata = []
if result==None: return
data = result.fetchall() # potentially very expensive!!
self.cachedata = data
if data == None: pass
elif type(data) == type([]):
self.ntuples = len(data)
if self.ntuples <=0 or data[0] == None:
self.nfields = 0
elif type(data[0]) == type([]) or type(data[0]) == type(()):
self.nfields = len(data[0])
else:
self.nfields = 1
else:
self.ntuples = 1
self.nfields = 1
#print self.ntuples, self.nfields
#self.resultStatus = result.resultStatus
elif self.dbmode == "db_bypass":
self.cachedata = result
#print "\nresult " , result
self.ntuples = 0
self.nfields = 0
if result == None: pass
elif type(result) == type([]):
self.ntuples = len(result)
if self.ntuples <=0 or result[0] == None:
self.nfields = 0
elif type(result[0]) == type(()):
self.nfields = len(result[0])
else:
self.nfields = 1
else:
self.ntuples = 1
self.nfields = 1
self.cachedata = [[result]]
#print self.ntuples, self.nfields
else:
raise DBConnectSQLResultException("unknown database mode for result")
def __iter__(self):
return self # simplest iterator creation
def next(self):
if self.dbmode == "libpq":
return self.result.next()
elif self.dbmode == "cx_Oracle" or self.dbmode == "psycopg2":
self.rcnt += 1 # increment current row count
if self.rcnt >= len(self.cachedata):
raise StopIteration
return self.cachedata[self.rcnt]
elif self.dbmode == "db_bypass":
self.rcnt += 1 # increment current row count
#print self.rcnt
#if self.rcnt >= len(self.cachedata):
if self.rcnt >= len(self.cachedata):
raise StopIteration
return self.cachedata[self.rcnt]
raise StopIteration
def getvalue(self,i,j):
if self.dbmode == "libpq":
return self.result.getvalue(i,j)
elif self.dbmode == "cx_Oracle" or self.dbmode == "psycopg2":
return self.cachedata[i][j]
elif self.dbmode == "db_bypass":
return self.cachedata[i][j]
return None # or throw an exception ???
def clear(self):
if self.dbmode == "libpq":
return self.result.clear()
elif self.dbmode == "cx_Oracle" or self.dbmode == "psycopg2":
self.cachedata = None
result = self.result
self.result = None
#return result.close()
return None
elif self.dbmode == "db_bypass":
self.cachedata = None
self.result = None
class DummyDB:
""" This class tries to emulate a database connection for offline testing.
- as per Carlos' request.
Its inevitable that something will break when we return an inconsistent
result, given the infinite possible complexities of SQL queries and results.
"""
def __init__(self, mode):
self.simMode = mode # we are trying to simulate this one
self.mode = "db_bypass" # but htis is who we really are
import re
self.split= re.compile("select\s(?P<fields>.+?)\sfrom\s(?P<tables>.*)", re.IGNORECASE)
def close(self):
pass
def query(self, sqlString):
""" Emulate an SQL query, but just return rubbish """
print "DummyDB ",sqlString
m = self.split.match(sqlString)
fieldst = m.group('fields')
tables = m.group('tables')
#print "tables",tables
fields = fieldst.split(',')
fields = self.fixFields(fields)
#print fields
if len(fields) == 1:
if fields[0].endswith("_id") or fields[0].endswith("_ID"):
return 55
else:
return "wibble"
else:
res = ()
for f in fields:
if f.find("TO_DATE")!=-1:
import datetime
#import date
res += (datetime.date.today(), )
elif f.endswith("_id") or f.endswith("_ID"):
res += (77,)
else:
res += ("wibble",)
if tables.find("BOOKINGS")!=-1:
res = [res] # dummy list
return res
return None
def fixFields(self, list):
""" function calls with multiple comma delimited fields screw up
the initial splitting. Now we gotta fix it. """
out = []
save = ""
for elem in list:
save = save + elem
q = save.count('\'')
dq = save.count('"')
op = save.count('(')
cp = save.count(')')
if q % 2 == 0 and dq %2 == 0 and op - cp == 0:
out.append(save)
save = ""
if save != "":
print list
raise DBConnectException("DummyDB cant parse the SQL query" )
return out
| Python |
from config import *
import os
import os.path
import logging
class Collection:
def __init__(self,folder,contents,period, map):
self._log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
self.parent = folder # a pathname string
#self.lastModified = os.path.getctime(folder)
self.lastModified = os.path.getmtime(folder) # unix last change time : win create time
self.hierarchy = {
'files' : {
'recentFileList': [],
'expiredFileList': [],
},
'folders' : {
},
}
self.dataFiles = { }
self.dataFilesExist = False
self.dataCollectionMap = map
self.period = period
self.heldBackTime = self.period[0] # expected start time
if map.has_key(folder):
info = map[folder]
self.heldBackTime = info['time']# last upd time(fudged to 0 if datapending)
self.flist = contents
self.dataFormat = None
self.dataFolder = None
self.tempDataStore = { }
self.recentFileList = []
self.expiredFileList = []
self.recentFileCount = 0
self.expiredFileCount = 0
self.subFolderList = []
self.verbose = False
self.dataCollectionHandler = None
def checkCollectionFolderTimes(self):
justTheFiles = []
instrument = None
for f in self.flist:
if os.path.isdir(f):
if not self.dataCollectionMap.has_key(f):
# its new, or old, not a datafolder and needs
# recursive subfolder checking
self.subFolderList.append(f);
else:
# its an OLD COLLECTION FOLDER
# lets check (sub?)folder modification times
collection = self.dataCollectionMap[f]
if collection['time'] >= self.period[0] or collection['time']==0:
#collection parent has been modified since our last scan
# or, we fudged the time on previous pass
self.subFolderList.append(f);
else:
instrument = collection['type']
# check all subfolders
subFolders = Artifacts.getInstrumentFolders(instrument)
for folder in subFolders:
pathname = os.path.join(f, folder)
if os.path.exists(pathname):
modtime = os.path.getmtime(pathname)
if modtime >= self.period[0]: # xxx
# recent modification to at least one subfolder
if self.lastModified < modtime:
self.lastModified = modtime
self.subFolderList.append(f)
break
# end of OLD COLLECTION FOLDER
# else:
# # its an old collection and not been recently changed
# if self.verbose:
# print "--notnew -- bypassed -- -- -- %s --" % ( f)
elif os.path.isfile(f):
justTheFiles.append(f)
else:
# not a file or folder. WTF?
continue
collectType = None
if self.dataCollectionMap.has_key( self.parent):
collectType = self.dataCollectionMap[self.parent]['type']
if not collectType:
# cant actually decide without getting a subfolder filelist
collectType = self.decideInstrument(justTheFiles)
if not collectType:
return
self.dataFormat = collectType
def decideInstrument(self, topLevelFiles):
"""Status: we don't know which instrument file layout we are dealing with
or which are data files and recognised subfolders.
We also need to be able to reject/ignore non-collection folders that
are just hierarchical ancestors.
This will be horribly heuristic.
What could possibly go wrong?
"""
subFolders = self.subFolderList
subFolderNames = map(lambda a : os.path.basename(a), subFolders)
insts = Artifacts.getInstruments() # possibles
#scores = [ [0]*3 for x in range(len(insts)) ]
scores = {}
for inst in insts:
idealFolders = Artifacts.getInstrumentFolders(inst)
dataFolder = Artifacts.getInstrumentDataFolder(inst)
if not dataFolder:
raise Exception, "No data folder for instrument inst! " + inst
dfname, attribs = dataFolder
datatypes = attribs['data']
primaryData = datatypes[0]
dataRegex = Artifacts.getREforFileType(primaryData[0])
# 0 = non "." datafolder exists
# 1 = other hierarchy unique folders exist
# 2 = datafiles exist
# score plausability of this being a collection of "instrument" type
cnt = 0
for fold in idealFolders:
if fold in subFolderNames:
cnt = cnt +1
ctot = len(idealFolders)
scores[inst] = {
'fcnt': cnt,
'ftot': len(idealFolders),
'dfexists': False,
'dfcnt': 0,
}
if not self.tempDataStore.has_key(dfname):
if dfname == ".": # non-specific data folder
dataFolderFiles = topLevelFiles
else: # maybe specific data folder!
if not (dfname in subFolderNames):
# definitely not this instrument
continue
dataFolder = os.path.join(self.parent, dfname)
dataFolderFiles = self.getFolderContents(dataFolder)
dataFolderFiles = map(lambda a : (a,os.path.getmtime(a)), dataFolderFiles)
self.tempDataStore[dfname] = dataFolderFiles
dataFolderFiles = self.tempDataStore[dfname]
test = lambda a : int(bool(re.match(dataRegex,a[0])))
count = sum(map(test, dataFolderFiles))
scores[inst]['dfexists'] = True
scores[inst]['dfcnt'] = count
if count > 20:
# unequivecobaly this inst type.
# and we have lost no information
return inst
# now rummage about with the scores to decide on the inst
for inst in insts:
stats = scores[inst]
frac = float(stats['fcnt']) / float(stats['ftot'])
if frac > 0.5 and stats['ftot']> 5:
if stats['dfexists']:
return inst
if frac > 0.8 and stats['ftot']> 5:
return inst
# check subdirectories for matching files
# Shouldn't be that many other files, so efficiency is not paramount
for inst in insts:
scores[inst]['mcnt'] = 0
if scores[inst]['fcnt']<=0:
continue
idealFolders = Artifacts.getInstrumentFolders(inst)
matchFileCnt = 0
for fold in idealFolders:
if not (fold in subFolderNames):
continue
filePatts = Artifacts.getFolderFileREs(inst,fold)
folder = os.path.join(self.parent, fold)
folderFiles = self.getFolderContents(folder)
for f in folderFiles:
for format, regex in filePatts:
if regex.match(f):
matchFileCnt = matchFileCnt + 1
scores[inst]['mcnt'] = matchFileCnt
# now rummage about with the scores to decide on the inst
maxmcnt = 0; maxinst = None
lastinst = None
instkeys = scores.keys()
def comp(a,b): # largest to smallest
return scores[b]['mcnt'] - scores[a]['mcnt']
instkeys.sort(cmp=comp)
maxInst = instkeys[0]
nextInst = instkeys[1]
# totally heuristic
if scores[maxInst]['mcnt'] > 10:
if scores[maxInst]['mcnt'] > scores[nextInst]['mcnt'] + 10:
# a clear majority
return maxInst
# we could check file versions as a last fallback
# totally ambiguous
self._log.warning("No instrument match for folder: %s", self.parent)
self._log.warning("Heuristic scores: %s", str(scores))
return None
def getFolderContents(self,folder):
""" return a list of absolute filepath strings """
contents = os.listdir(folder)
contents.sort()
contents = map(lambda a: os.path.join(folder,a),contents)
return contents
def processFolderContents(self, contents,rePatternList,justTheFiles=True):
""" return lists of relevant old and new files from list """
recentFileList = []
expiredFileList = []
for f, mtime in contents:
if os.path.isdir(f):
# we don't care about subfolders? seems shotsighted ...
continue
# mtime = os.path.getmtime(f)
# it may well be that new files were created in a folder between
# the last MINXTIME (start time) and the time of folder lastModification
# because we don't scrape the whole hierarchy instantaneously!
# MINXTIME - - - - - OLD.lastModified - - - - - Now
# If we try to add such files to ICAT, they would already ihave been injected
# and it would spit the dummy.
# self.heldBackTime # last recorded upd time (fudged to 0 if datapending)
if mtime < self.period[0] and self.heldBackTime != 0:
for format, regex in rePatternList:
if regex.match(f):
expiredFileList.append(f)
break
#else: ignorable
elif mtime < self.period[1]:
for format, regex in rePatternList:
if regex.match(f):
if justTheFiles:
recentFileList.append( f )
else:
recentFileList.append( (f,mtime,format) )
break
#else: ignorable
else:
# created very very recently i.e. during the
# execution of this process, so ignore
# we'll pick them up next time this process is run!
continue
recentFileList.sort()
return {
'recentFileList': recentFileList,
'expiredFileList': expiredFileList,
}
def filesToBeProcessedQ(self):
"""Get New and old filenames and times for all collection folders.
Stick partitioned dataset files in self.dataFiles dict.
Put all others in self.hierarchy
Return True when new files exist and not excluded
"""
if not (self.dataFormat in Artifacts.getInstruments()):
return False
newDataFilesExist = False
anyDataFilesExist = None
instrument = self.dataFormat
newFiles = 0
expiredFiles = 0
datafolderInfo = Artifacts.getInstrumentDataFolder(instrument)
dataFolder = datafolderInfo[0]
dataFileTypes = datafolderInfo[1]['data'][0]
dataFileType = dataFileTypes[0]
dataFolderRE = Artifacts.FILETYPES[dataFileType]['regexp']
dataFileREs = [(dataFileType, dataFolderRE) ]
# this is true only if folder has no record in DATAMINX file.
if self.tempDataStore.has_key(dataFolder):
folderFiles = self.tempDataStore[dataFolder]
filtered = {True:[], False:[]}
# split list into matched/unmatched filename sub lists
filter = lambda a : filtered[bool(dataFolderRE.match(a[0]))].append(a)
map(filter,folderFiles) # hope its efficient
dataFiles = filtered[True]
extraFiles = filtered[False]
partndDataFiles = self.processFolderContents(dataFiles,dataFileREs)
self.dataFiles[dataFolder] = partndDataFiles
newFiles = newFiles + len(partndDataFiles['recentFileList'])
expiredFiles = expiredFiles + len(partndDataFiles['expiredFileList'])
anyDataFilesExist = newFiles + expiredFiles
# now check all other files in folder
filePatts = Artifacts.getFolderFileREs(instrument,dataFolder)
# created with 'data' as first pattern
if filePatts[0][1] == dataFolderRE:
filePatts = filePatts[1:]
partndFiles = self.processFolderContents(extraFiles,filePatts,False)
self.hierarchy['folders'][dataFolder] = partndFiles
newFiles = newFiles + len(partndFiles['recentFileList'])
expiredFiles = expiredFiles + len(partndFiles['expiredFileList'])
# folder = "."
# rootLevelFiles = self.tempDataStore[folder]
subFolders = Artifacts.getInstrumentFolders(instrument)
for folder in subFolders:
if folder == dataFolder and self.tempDataStore.has_key(dataFolder):
# handled separately earlier
continue
pathname = os.path.join(self.parent, folder)
if os.path.exists(pathname):
subFolCont = self.getFolderContents(pathname)
if not subFolCont: # empty folder
continue
subFolCont = map(lambda a : (a,os.path.getmtime(a)), subFolCont)
filePatts = Artifacts.getFolderFileREs(instrument,folder)
if folder == dataFolder:
filtered = {True:[], False:[]}
filter = lambda a : filtered[bool(dataFolderRE.match(a[0]))].append(a)
map(filter,subFolCont) # hope its efficient
dataFiles = filtered[True]
subFolCont = filtered[False]
partndDataFiles = self.processFolderContents(dataFiles,[dataFolderRE])
self.dataFiles[folder] = partndDataFiles
newDFiles = len(partndDataFiles['recentFileList'])
expiredDFiles = len(partndDataFiles['expiredFileList'])
anyDataFilesExist = newDFiles + expiredDFiles
newFiles = newFiles + newDFiles
expiredFiles = expiredFiles + expiredDFiles
# now check all other files in folder
# created with 'data' as first pattern
if filePatts[0][1] == dataFolderRE:
filePatts = filePatts[1:]
else:
pass
# erk! what if it isn't ????
partndFiles =self.processFolderContents(subFolCont,filePatts,False)
self.hierarchy['folders'][folder] = partndFiles
newFiles = newFiles + len(partndFiles['recentFileList'])
expiredFiles = expiredFiles + len(partndFiles['expiredFileList'])
self.recentFileCount = newFiles
self.expiredFileCount = expiredFiles
self.dataFilesExist = anyDataFilesExist
del(self.tempDataStore) # no need to retain - unless called again?
if newFiles > 0:
return True
return False
def isADataCollectionQ(self):
if self.dataFormat and \
(self.dataFormat in Artifacts.getInstruments()):
return True
return False
class FolderRecurser:
    """Recursively walks folder trees looking for data collections.

    Each folder is wrapped in a Collection and assessed; folders holding
    new data files are handed to ``dataCollectionHandler`` (a callable the
    owner assigns after construction), and successfully handled collections
    are recorded in ``dataCollectionMap`` so later runs can skip them.
    """

    def __init__(self, period, dataCollectionMap):
        self._log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.verbose = False  # verbose
        self.period = period  # (start, end) timestamp window of interest
        self.dataCollectionMap = dataCollectionMap  # folder -> {'time','type'}
        # Bug fix: this attribute is read in processDataFiles() but was never
        # initialised, raising AttributeError unless a caller assigned it
        # externally first.  Default to None ("no handler registered").
        self.dataCollectionHandler = None

    def processSubFolders(self, folders):
        """ We avoid this bit for the root folder list, but need it
        for every subsubfolder
        """
        # note that in creating the folder list we already filtered out
        # data folders that are too old or too new
        for folder in folders:
            # this is the major bottleneck!
            contents = os.listdir(folder)
            contents.sort()
            # build absolute paths; a comprehension yields a real list under
            # both Python 2 and 3 (map() is an iterator on Python 3)
            contents = [os.path.join(folder, name) for name in contents]
            self.assessFolder(folder, contents)

    def assessFolder(self, folder, contents):
        """ We do not go into subfolders of folders that contain data
        """
        # analyse the folder and its contents
        f = Collection(folder, contents, self.period, self.dataCollectionMap)
        f.verbose = self.verbose
        f.checkCollectionFolderTimes()
        newFiles = f.filesToBeProcessedQ()
        if newFiles:
            ## we have NEW datafiles!!!!
            # no need to descend further in FS hierarchy
            return self.processDataFiles(f)
        # otherwise no new data files
        if f.isADataCollectionQ():
            if self.verbose:
                self._log.info("--%6s -- ignoring --%3d--%6d-- %s --%d",
                    f.dataFormat, len(f.subFolderList), len(f.recentFileList), folder, len(f.expiredFileList))
            return
        # recurse down non-datafile containing subfolders
        # note that subFolderList already eliminates previously
        # recognised but unchanged data folders
        self.processSubFolders(f.subFolderList)
        if self.verbose:
            self._log.info("--nodata -- ignoring --%3d--%6d-- %s --%d",
                len(f.subFolderList), f.recentFileCount, folder, f.expiredFileCount)
        return

    def processDataFiles(self, f):
        """Hand a collection with new data files to the registered handler
        and, on success, record it in dataCollectionMap for future runs."""
        folder = f.parent
        mode = "create"
        if folder in self.dataCollectionMap:  # was has_key() (Python 2 only)
            mode = "append"
        if not f.dataFilesExist:
            format = "--%6s --datapending --%3d--%6d-- %s --%d"
        else:
            format = "--%6s -- processing --%3d--%6d-- %s --%d"
        self._log.info(format, f.dataFormat,
            len(f.subFolderList), f.recentFileCount, folder, f.expiredFileCount)
        sample = folder.split(os.sep)[-1]
        handled = False
        if self.dataCollectionHandler:
            handled = self.dataCollectionHandler(folder, sample, mode, f)
            # call registered func
        if not handled:
            # If all we did was identify the collection type (dataFormat)
            # there would be a slight saving in identification time if we
            # recorded it for next invokation, but otherwise, would still
            # need to assess whole collection then anyway.
            # i.e. we could fudge the 'time' parameter to preceed the collection
            # creation time, but it doesn't really save us much
            return
        # If the collection was handled properly, to completion,
        # then we need to record it in the DATAMINX record for next
        # invokation. We also record a "last useful check" time
        if f.lastModified < self.period[1]:
            self.dataCollectionMap[folder] = {
                'time': f.lastModified,
                'type': f.dataFormat}
        else:
            self.dataCollectionMap[folder] = {
                'time': self.period[1],
                'type': f.dataFormat}
        return
| Python |
from config import DataFormats
import logging
class DatasetBuilder(object):
    """ This is a base class to be extended by various
    instrument/manufacturer specific instance classes.
    This should provide some generic file metadata handling mechanisms
    """

    def __init__(self, dataFormat):
        self._log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.dataFormat = dataFormat       # instrument key into DataFormats.ARTIFACTS
        self.indexingTemplate = None       # e.g. 'name_##' pattern marking indexing frames
        self.operations = []
        self.protonums = []
        self.priordata = None              # previously registered ICAT datasets
        self.purgelist = []                # overwritten - reset as reference to supplied list

    def purgeCheck(self, file):
        """Record prior ICAT datafiles whose name matches file['name'].

        self.priordata is a list of retrieved ICAT dataset_and_datafiles;
        self.purgelist is a reference to the caller's params['PURGE'] list,
        so entries must be append()ed, never rebound (it is currently the
        only way state is passed back to the calling function).
        """
        if not self.priordata:
            return
        for dataset in self.priordata:
            if not hasattr(dataset, "_datafileCollection"):
                continue
            for datafile in dataset._datafileCollection:
                if datafile._name != file['name']:
                    continue
                # NOTE(review): the <10 cap appears to bound both the debug
                # chatter and the purge list itself -- confirm intent.
                if len(self.purgelist) < 10:
                    self._log.debug("ICAT match on %s dataset %s file %s",
                                    file['name'], dataset._name, datafile._id)
                    self.purgelist.append({'dataset': dataset._name,
                                           'filename': datafile._name,
                                           'datafile': datafile._id})
        return

    def addCommonFileData(self, filepath, elements=None):
        """Append common ICAT datafile metadata for *filepath* to *elements*.

        Adds create/modify timestamps, file size and a sha1 checksum.  The
        passed-in list is mutated in place.
        """
        import hashlib
        import os
        from datetime import datetime
        if elements is None:
            # Bug fix: the default used to be a shared mutable list ([]),
            # silently accumulating entries across calls.
            elements = []
        # Bug fix: getctime/getmtime were swapped.  Creation time comes from
        # getctime and modification time from getmtime.  (Assumes the data
        # originates on Windows, where getctime is creation time -- the
        # ntpath usage elsewhere suggests so; TODO confirm.)
        created = os.path.getctime(filepath)
        createdate = datetime.fromtimestamp(created)
        created = createdate.strftime("%Y-%m-%dT%H:%M:%S")
        modified = os.path.getmtime(filepath)
        modifieddate = datetime.fromtimestamp(modified)
        modified = modifieddate.strftime("%Y-%m-%dT%H:%M:%S")
        size = os.path.getsize(filepath)
        # hash in binary chunks: correct for binary frame files under
        # Python 3 (the old text-mode whole-file read was not) and keeps
        # memory bounded on large frames
        h = hashlib.sha1()
        f = open(filepath, 'rb')
        try:
            while True:
                chunk = f.read(65536)
                if not chunk:
                    break
                h.update(chunk)
        finally:
            f.close()
        checksum = "sha1: " + str(h.hexdigest())
        elements.extend([
            {'name': 'datafile_create_time',
             'content': str(created)},
            {'name': 'datafile_modify_time',
             'content': str(modified)},
            {'name': 'file_size',
             'content': str(size)},
            {'name': 'checksum',
             'content': checksum},
        ])

    def remapDir(self, folder):
        """Strip a Windows drive-letter or UNC '\\\\' prefix from *folder*.

        really don't know what the ultimate form aught to be
        at the moment. Should we map to unix FSH?
        """
        part = folder.split(':')
        if len(part) == 2:
            return part[1]                       # 'C:\\x' -> '\\x'
        if folder.startswith('\\\\'):
            part = folder.split('\\')
            if len(part) > 3:
                return '\\'.join(part[2:])       # drop only the leading '\\\\'
        return folder

    def deduceDatasetsFromFiles(self, collection, indexingTemplate=None):
        """
        collection = list of recently modified/created files.
        indexingTemplate = a template to exclude files used for indexing purposes
        """
        self.indexingTemplate = indexingTemplate
        datafileFolders = collection.dataFiles
        datasets = {}
        for folder in datafileFolders.keys():
            filelist = datafileFolders[folder]['recentFileList']
            protos = self.genProtosFromFileList(filelist)
            datasets.update(protos)
        return datasets

    def genProtosFromFileList(self, filelist):
        """ Aggregate datafiles into groups based on their
        filenames and a pattern matching RE.
        Return a dictionary mapping scan key -> datafile list.

        NOTE: the supplied *filelist* is consumed (emptied) by this method,
        preserving the original pop() behaviour.
        """
        if not filelist:
            # Bug fix: used to return [] here, but callers iterate the
            # result as a dict -- return an empty dict instead.
            return {}
        from ntpath import split  # Windows-style paths in the harvested data
        import re
        indexing_re = None
        if self.indexingTemplate:
            indexing = str.replace(self.indexingTemplate, '#', '\\d')
            indexing_re = re.compile('.*' + indexing)
            self._log.debug("\nCheck index against %s", indexing)
        if not self.dataFormat or \
           self.dataFormat not in DataFormats.ARTIFACTS:
            # Bug fix: the old message concatenated the builtin `file` type
            # object (TypeError) and used Python-2-only raise syntax.
            raise Exception("unknown image frame type " + str(self.dataFormat))
        collectInfo = DataFormats.ARTIFACTS[self.dataFormat]
        frame_re = collectInfo['data_re']
        field_sep = collectInfo['data_field_sep']
        dset = {}  # receptacle for amalgamated files
        # loop over all new files in folder (consuming the list, as before)
        while filelist:
            filepath = filelist.pop()
            # preliminary check to isolate indexing files
            if indexing_re and indexing_re.search(filepath):
                dset.setdefault(DataFormats.INDEXING, []).append(filepath)
                continue
            # check for ordinary scan datafile frames to aggregate by scan
            dir, file = split(filepath)
            part = file.split(field_sep)
            # not a frame file or not part of a coherent dataset
            if (not frame_re.search(file)) or len(part) <= 1:
                dset.setdefault(DataFormats.SUNDRIES, []).append(filepath)
                continue
            # normal data scan frame
            scan = dir + "/" + field_sep.join(part[0:-1])
            # NOTE(review): frames are stored by bare filename while
            # INDEXING/SUNDRIES keep full paths -- preserved as-is; confirm
            # downstream expectations before unifying.
            dset.setdefault(scan, []).append(file)
        # log the datasets
        for key in sorted(dset.keys()):
            self._log.debug("base %s files %d", key, len(dset[key]))
        return dset

    def buildFolderSetsFromCollection(self, collection, priordata=None, purgelist=None):
        """
        For arbitrary, non-data folders.
        This method builds a datastructure that the XML template method can
        iterate over to dump whole folders of files and parameters as datasets.
        """
        from os.path import split
        self.priordata = priordata   # local ref
        self.purgelist = purgelist   # local ref
        folders = collection.hierarchy['folders']
        foldernames = sorted(folders.keys())  # was keys()+sort() (py2 only)
        dsets = []  # receptacle for amalgamated files
        if not foldernames:
            self._log.debug("No subfolders ????")
            return dsets
        filetypes = DataFormats.FILETYPES
        for folder in foldernames:
            filelist = folders[folder]['recentFileList']
            files = []
            # loop over all new files in folder
            for fileinfo in filelist:
                file, mtime, fclass = fileinfo
                dir, filename = split(file)
                self._log.debug("%s %s %s %s", dir, filename, mtime, fclass)
                fclassinfo = filetypes[fclass]
                format = fclassinfo['format']
                versions = fclassinfo['version']
                if isinstance(versions, list) and len(versions) == 1:
                    version = versions[0]
                else:
                    # ambiguous (several candidate versions) or missing
                    version = "?"
                elements = [
                    {'name': 'datafile_format',
                     'content': format},
                    {'name': 'datafile_format_version',
                     'content': version},
                ]
                self.addCommonFileData(file, elements)
                file = {'dir': dir, 'name': filename, 'params': [],
                        'elements': elements}
                # probably we need to specify file types etc
                # and or remove ICAT unsupported/registered format types
                self.purgeCheck(file)
                files.append(file)
            if folder == ".":
                dsetname = DataFormats.SUNDRIES
            else:
                dsetname = folder
            dset = {'proto': None, 'num': None, 'mach': None,
                    'dsetname': dsetname,
                    'description': "additional files",
                    'params': [], 'type': "derived",
                    'files': files,
                    }
            dsets.append(dset)
        return dsets
| Python |
#!/usr/bin/env python
import sys
from config import *
# ping proposaldb server, get all new users since given date/time,
from ZSI import FaultException
#populate remote ICAT FACILITY_USER table with these users
print "Connecting: ICAT Oracle Database"
from xmlMapping.dbConnect import *
# bypass flag handed to DBConnection -- presumably skips the live DB when
# True (dry run); confirm against xmlMapping.dbConnect
bypass=False
try:
    dbcnx = DBConnection(bypass)
    # connection details come from config (imported at top of file via *)
    dbcnx.cxInit(ICATDBHOST, ICATDBPORT, ICATDBNAME, ICATDBUSER, ICATDBPASS)
    dbcnx.connect() # establish tcp communication
except Exception, e:
    print e
    print "You may need to port forward over SSH to tunnel through firewall"
    print "e.g. ssh dataminx -L 1521:localhost:1521"
    sys.exit(1)
print "Connected to ",ICATDBUSER
def oracleEscape(strn):
    """Escape a value for embedding in an Oracle SQL string literal.

    Single quotes are doubled; None, the empty string and the literal
    strings 'none'/'None' all collapse to ''.
    """
    if not strn or strn in ('none', 'None'):
        return ''
    return strn.replace("'", "''")
DBSCHEMA="ICAT"
TABLE = 'DATAFILE_FORMAT'
# Fetch the column names of DATAFILE_FORMAT from the Oracle data
# dictionary (COLS) so the SELECT below can list them explicitly.
try:
    columns =dbcnx.query("""SELECT column_name FROM COLS
                 WHERE table_name='%(TABLE)s'""" %
                         {'DBSCHEMA' : DBSCHEMA, 'TABLE' : TABLE } )
except Exception, msg:
    import traceback
    traceback.print_exc(file=sys.stdout)
    print "Select header from ", TABLE, " failed\n%s" % msg,
    sys.exit()
header = []
if not columns or columns.ntuples < 1:
    print " no column headers exist"
else:
    for row in columns:
        header.append(row[0])
# Read every existing DATAFILE_FORMAT row into `fileformats`, a list of
# dicts keyed by column name, for the reconciliation loop further down.
fileformats = []
try:
    fields = ", ".join(header)
    # print fields
    oldformats = dbcnx.query("""SELECT %(fields)s FROM %(TABLE)s """ %
                             {'DBSCHEMA' : DBSCHEMA ,'fields' : fields, 'TABLE' : TABLE } )
except Exception, msg:
    import traceback
    traceback.print_exc(file=sys.stdout)
    print "Select from datafile_format failed\n%s" % msg,
    sys.exit()
if not oldformats or oldformats.ntuples < 1:
    print " no file format entries exist"
else:
    for row in oldformats:
        # print row
        d = {}
        for i in range(len(header)):
            d[header[i]] = row[i]
        fileformats.append(d)
def injectNewFileFormat(format, version):
    """Insert one DATAFILE_FORMAT row for (format, version) and COMMIT.

    *format* is a DataFormats.FILETYPES entry (dict with 'format',
    'format_type', 'description'); *version* is the version string.
    Exits the whole process on any DB error.

    NOTE(review): the SQL is built by string interpolation; values are
    quote-escaped via oracleEscape(), but bind parameters would be safer
    if the driver supports them.
    """
    try:
        # target columns:
        # (NAME, VERSION, FORMAT_TYPE, DESCRIPTION, SEQ_NUMBER, MOD_TIME, MOD_ID, CREATE_TIME, CREATE_ID, FACILITY_ACQUIRED, DELETED)
        # Values
        # ('nexus', '3.0.0', 'HDF5', 'Neutron and X-Ray data format.', 999, TO_TIMESTAMP('12/09/2007 13:30:16.4','DD/MM/YYYY HH24:MI:SS.FF'), 'overlord', TO_TIMESTAMP('12/09/2007 13:30:16.4','DD/MM/YYYY HH24:MI:SS.FF'), 'overlord', 'Y', 'N');
        query= """
        INSERT INTO DATAFILE_FORMAT (NAME, VERSION,
        FORMAT_TYPE, DESCRIPTION, SEQ_NUMBER, MOD_TIME, MOD_ID,
        CREATE_TIME, CREATE_ID, FACILITY_ACQUIRED, DELETED) VALUES
        ('%(name)s', '%(version)s', '%(type)s', '%(description)s',
        999, systimestamp, 'overlord', systimestamp,'overlord', 'Y', 'N') """ % \
        {'DBSCHEMA' : DBSCHEMA,
         'name': oracleEscape(format['format']),
         'version': oracleEscape(version),
         'type': oracleEscape(format['format_type']),
         'description':oracleEscape(format['description']),
         } # )
        print query
        res = dbcnx.query(query)
        res = dbcnx.query("COMMIT")
    except Exception, msg:
        print "Failed to inject new dataformat: ", format
        print msg
        sys.exit()
    return
# Reconcile config.py FILETYPES against the DATAFILE_FORMAT rows read
# above: insert any (format, version) pair ICAT does not yet know about.
for code in DataFormats.FILETYPES:
    ftype = DataFormats.FILETYPES[code]
    versions = ftype['version']
    if versions:
        for version in versions:
            match = False
            for itype in fileformats:
                if itype['NAME'] != ftype['format']: continue
                if itype['VERSION'] != version: continue
                match = True
                break
            if match: continue
            print "No current ICAT match for" , ftype['format'], version
            print "Adding new entry ..."
            injectNewFileFormat(ftype,version)
    else: # no version
        match = False
        for itype in fileformats:
            if itype['NAME'] != ftype['format']: continue
            match = True
            break
        if match: continue
        print "no match for" , ftype['format']
        # version defaults to '1' when config supplies none
        injectNewFileFormat(ftype,'1')
print "ICAT datafile_formats and config.py FILETYPES are reconciled."
print "Nothing else to add."
dbcnx.finish()
| Python |
#
# Timestamp sentinel: experiment data folders modified more recently than
# this file's mtime are candidates for harvesting.
#
# Look for experiment data folders modified more recently than
# the timestamp of this file. For testing purposes, adjust as
#     touch -t 199501131000 /var/lib/dataMINX
# Additionally it contains a hashtable of experiment folder mod times
DATAMINX="/var/lib/dataMINX"
#mount -t cifs //192.168.97.140/csaf_others /mnt
# NOTE(review): only the LAST of these FOLDERS assignments takes effect --
# the earlier ones look like developer test targets left in place.
FOLDERS=["/mnt/avon"]
FOLDERS=["/mnt/avon/mht11jp01"]
FOLDERS=["/mnt/Frames/csaf" ]
FOLDERS=["/mnt/Frames/csaf/act06pj14" ]
FOLDERS=["/mnt/Frames/Paul/afm09pj4"]
FOLDERS=["/mnt/Frames" ]
# folder for stashing generated ICAT XML files
ICATXMLDATA = "/home/icatdata"
# subfolder for failed XML ingest files
FAILED = "failed"
# recipients for screwed up email messages
EMAIL_ALERT_LIST=["someone@mail.host.net"]
EMAIL_SMTP_HOST="smtp.mail.host.net"
# Oracle ICAT database connection details
# ssh dataminx -L 1521:localhost:1521
#ICATDBHOST = "localhost" # via port-forward SSH tunnel
ICATDBHOST = "my.oracle.host.net"
ICATDBNAME = "XE"
ICATDBPORT = "1521"
ICATDBUSER = "icat"
ICATDBPASS = "pass"
# ICAT admin web-service endpoint and credentials
ICATADMINURL = "https://icat.admin.ws.host.net/ICATAdminService/ICATAdmin"
ICATADMINUSER = "admin"
ICATADMINPASS = "pass"
# ICAT user web-service endpoint and credentials
ICATURL = "https://icat.ws.host.net/ICATService/ICAT"
ICATUSER = "super"
ICATPASS = "pass"
# proposals-database SOAP service
PDBWSURL = "http://proposalsdb.ws.host.net/?q=services/soap"
PDBWSUSER = "admin"
PDBWSPASS = "pass"
# Bruker instrument PostgreSQL database
BRKDBNAME = 'BAXSdb'
BRKDBHOST = "bruker.postgresql.host.net"
BRKDBPORT = "5432"
BRKDBUSER = "BrukerPGSQL"
BRKDBPASS = "BrukerPGSQLpass"
import os
import os.path
import re
# matches SMART-style numeric frame extensions such as '.123'
SMARTRE= re.compile('\.\d\d\d$')
# concerned about different versions of same file
# two separate uses of a single file extension
# distinguishing between SMART and APEX and (Oxford) SUPERNOVA data
# formats are all lower case for easy string comparison
class DataFormats:
    """Singleton class object? Not quite.

    Pure configuration.  FILETYPES catalogues individual file kinds
    (filename regexp, known versions, ICAT format name/type, optional
    handler, description).  ARTIFACTS describes, per instrument key, which
    folders hold which file kinds, how raw data frames are recognised
    (data_re) and how frame filenames are split into scan groups
    (data_field_sep).
    """
    # ---------------------------
    # changing these will lead to incompatible /var/lib/DATAMINX pickled py file
    APEX = "fr591"
    SMART = "smart"
    NOVA = "nova"
    # ---------------------------
    # pseudo dataset names for uncategorised leftovers and indexing files
    SUNDRIES ="SUNDRIES"
    INDEXING ="INDEXING"
    FILETYPES = {
        'raw' : {
            'regexp' : r'.*\.raw$',
            'version' : ['1'],
            'format' : 'bruker raw',
            'format_type' : 'ASCII',
            'description' : 'Bruker raw intensities harvested from one scan set',
        } ,
        'hkl' : {
            'regexp' : r'.*\.hkl$',
            'version' : ['1'], # HKLF3 /4 ???
            'format' : 'bruker hkl',
            'format_type' : 'ASCII',
            'description' : 'Interpolated reflection intensities from one or more scans ',
        } ,
        '_ls' : {
            'regexp' : r'.*\._ls$',
            'version' : ['1'],
            'format' : 'bruker _ls',
            'format_type' : 'ASCII',
            'description' : 'Bruker SAINT output listing ',
        } ,
        'sfrm' : {
            'regexp' : r'.*\.sfrm$',
            'version' : ['11','13','15'],
            'format' : 'bruker sfrm',
            'format_type' : 'binary',
            'handler' : ('sfrmProcess', 'SFRM'),
            'description' : 'Bruker .SFRM image frame format',
        } ,
        'bg_snap' : {
            'regexp' : r'.*bg_snap_\d+_\d+\.sfrm$',
            'version' : ['11','13','15'],
            'format' : 'bruker sfrm',
            'format_type' : 'binary',
            'handler' : ('sfrmProcess', 'SFRM'),
            'description' : 'Bruker .SFRM image background CCD frame',
        } ,
        'smart' : {
            'regexp' : r'.*\d\.\d\d\d$',
            'version' : ['9'],
            'format' : 'bruker smart',
            'handler' : ('sfrmProcess', 'SFRM'),
            'format_type' : 'binary',
            'description' : 'Bruker SMART image frame format',
        } ,
        'bgsnap' : {
            'regexp' : r'.*bgsnap\d\d\.\d\d\d$',
            'version' : ['9'],
            'format' : 'bruker smart',
            'handler' : ('sfrmProcess', 'SFRM'),
            'format_type' : 'binary',
            'description' : 'Bruker SMART image background frame format',
        } ,
        '_am' : {
            'regexp' : r'.*\._am$',
            'version' : ['9'],
            'format' : 'bruker _am',
            'handler' : ('sfrmProcess', 'SFRM'),
            'format_type' : 'binary',
            'description' : 'Bruker Active Mask in .SFRM format',
        } ,
        '_ib' : {
            'regexp' : r'.*\._ib$',
            'version' : ['9'],
            'format' : 'bruker _ib',
            'handler' : ('sfrmProcess', 'SFRM'),
            'format_type' : 'binary',
            'description' : 'Bruker Initial Background in .SFRM format',
        } ,
        '_dk' : {
            'regexp' : r'.*\._dk$',
            'version' : ['9'],
            'format' : 'bruker _dk',
            'handler' : ('sfrmProcess', 'SFRM'),
            'format_type' : 'binary',
            'description' : 'Bruker DARK frame in .SFRM format',
        } ,
        '_fl' : {
            'regexp' : r'.*\._fl$',
            'version' : ['9'],
            'format' : 'bruker _fl',
            'handler' : ('sfrmProcess', 'SFRM'),
            'format_type' : 'binary',
            'description' : 'Bruker Flood Table in .SFRM format',
        } ,
        'smart.ini' : {
            'regexp' : r'.*smart.ini$',
            'version' : ['1'],
            'format' : 'bruker smart.ini',
            'format_type' : 'ASCII',
            'description' : 'Bruker smart.ini config file',
        } ,
        '_pr' : {
            'regexp' : r'.*\._pr$',
            'version' : ['1'],
            'format' : 'bruker _pr',
            'format_type' : 'ASCII',
            'description' : 'Bruker pseudo .ins/.sfrm-header text file',
        } ,
        'abs' : {
            'regexp' : r'.*\.abs$',
            'version' : ['1'],
            'format' : 'bruker abs',
            'format_type' : 'ASCII',
            'description' : 'Bruker SADABS scaling and absorption correction log',
        } ,
        'prp' : {
            'regexp' : r'.*\.prp$',
            'version' : ['1'],
            'format' : 'bruker prp',
            'format_type' : 'ASCII',
            'description' : 'Bruker XPREP listing, text format',
        } ,
        'xht' : {
            'regexp' : r'.*\.xht$',
            'version' : ['1'],
            'format' : 'bruker xht',
            'format_type' : 'ASCII',
            'description' : 'Bruker scan analysis control file',
        } ,
        'cif' : {
            'regexp' : r'.*\.cif$',
            'version' : ['1.0'],
            'format' : 'cif',
            'format_type' : 'ASCII', # CIF 2 will be UTF8
            'description' : 'Crystallographic Information File',
        } ,
        'pcf' : {
            'regexp' : r'.*\.pcf$',
            'version' : ['1.0'],
            'format' : 'bruker pcf',
            'format_type' : 'ASCII',
            'description' : 'Bruker Crystallographic Information File',
        } ,
        'res' : {
            'regexp' : r'.*\.res$',
            'version' : ['1'],
            'format' : 'shelx res',
            'format_type' : 'ASCII',
            'description' : 'SHELX result file',
        } ,
        'ins' : {
            'regexp' : r'.*\.ins$',
            'version' : ['1'],
            'format' : 'shelx ins',
            'format_type' : 'ASCII',
            'description' : 'SHELX ins file',
        } ,
        'p4p' : {
            'regexp' : r'.*\.p4p$',
            'version' : ['1'],
            'format' : 'bruker p4p',
            'format_type' : 'ASCII',
            'description' : 'Bruker SHELX ini file',
        } ,
        'eps' : {
            'regexp' : r'.*\.eps$',
            'version' : ['3.0'],
            'format' : 'epsf',
            'format_type' : 'ASCII',
            'description' : 'adobe postscript',
        } ,
        'png' : {
            'regexp' : r'.*\.png$',
            'version' : ['1'],
            'format' : 'png',
            'format_type' : 'binary',
            'description' : 'Portable network graphics',
        } ,
        'vzs' : {
            'regexp' : r'.*\.vzs$',
            'version' : ['1'],
            'format' : 'bruker vzs',
            'format_type' : 'binary',
            'description' : 'Bruker zipped mpeg?',
        } ,
        # ----------------- Oxford Diffraction file kinds -----------------
        'img' : {
            'regexp' : r'.*\.img$',
            'version' : ['3.0'],
            'format' : 'oxford img',
            'handler' : ('oxfordProcess', 'OXFORD'),
            'format_type' : 'binary',
            'description' : 'Oxford Diffraction .img CCD frame file',
        } ,
        'ini' : {
            'regexp' : r'.*\.ini$',
            'version' : ['1'],
            'format' : 'oxford ini',
            'format_type' : 'ASCII',
            'description' : 'Oxford Diffraction .ini configuration info',
        } ,
        'ini_report' : {
            'regexp' : r'.*\.ini_report$',
            'version' : ['1'],
            'format' : 'oxford ini_report',
            'format_type' : 'ASCII',
            'description' : 'Oxford Diffraction .ini configuration info',
        } ,
        'ccd' : {
            'regexp' : r'.*\.ccd$',
            'version' : ['1'],
            'format' : 'oxford ccd',
            'format_type' : 'binary',
            'description' : 'Oxford Diffraction .ccd file',
        } ,
        'cif_od' : {
            'regexp' : r'.*\.cif_od$',
            'version' : ['1.0'],
            'format' : 'cif',
            'format_type' : 'ASCII',
            'description' : 'Crystallographic Information File',
        } ,
        'par' : {
            'regexp' : r'.*\.par$',
            'version' : ['1.0'],
            'format' : 'oxford par',
            'format_type' : 'ASCII',
            'description' : 'XCALIBUR PARAMETER FILE',
        } ,
        'jpg' : {
            'regexp' : r'.*\.jpg$',
            'version' : ['1'],
            'format' : 'jpeg',
            'format_type' : 'binary',
            'description' : 'Joint Photographic Experts Group image file'
        } ,
        'jpr' : {
            'regexp' : r'.*\.jpr$',
            'version' : ['1.000'],
            'format' : 'km4abs jpr',
            'format_type' : 'ASCII',
            'description' : 'Oxford KM4ABS microscope movie settings',
        } ,
    }
    # per-instrument layout: folder -> optional 'data' list (frame kinds)
    # and 'files' list of (filetype, version[, include?]) tuples
    ARTIFACTS = {
        APEX : {
            'xml_template_new' : 'map-csaf_apex2icat.xml',
            'xml_template_upd' : 'map-csaf_apex2icat-upd.xml',
            'data_re' : re.compile('\.sfrm$') ,
            'data_field_sep' : '_',
            'folders' : {
                '.' : {
                    'data' : [ ('sfrm' , ['11','13','15'] ) ] ,
                    'files' : [
                        ('vzs' , '' ),
                    ],
                },
                'work' : {
                    'files' : [
                        ('bg_snap' , ['11','13','15'], False ), # ignore it
                        ('raw' , '1' ),
                        ('hkl' , '1' ),
                        ('_ls' , '1' ),
                        ('_am' , '9' ),
                        ('_ib' , '9' ),
                        ('_dk' , '9' ),
                        ('_fl' , '9' ),
                        ('_pr' , '1' ),
                        ('abs' , '1' ),
                        ('prp' , '1' ),
                        ('xht' , '1' ),
                        ('cif' , '1.0' ),
                        ('pcf' , '1.0' ),
                        ('ins' , '1' ),
                        ('res' , '1' ),
                        ('p4p' , '1' ),
                        ('eps' , '3.0' ),
                        ('png' , '1' ),
                        ('vzs' , '1' ),
                    ],
                }, # end of work subfolder
                'struct' : { # subfolder
                    'files' : [ ('cif' , '1.0'), ('res' , '1'), ('hkl' , '1') ],
                },
            }, # end of folder list
        },
        SMART : {
            'xml_template_new' : 'map-csaf_apex2icat.xml',
            'xml_template_upd' : 'map-csaf_apex2icat-upd.xml',
            'data_re' : re.compile('\.\d\d\d$'),
            'data_field_sep' : '.',
            'folders' : {
                '.' : {
                    'data' : [ ('smart' , '9') ] ,
                    'files' : [
                        ('vzs' , '' ),
                        ('p4p' , '1' ),
                        ('smart.ini' , '1' ),
                    ],
                },
                'work' : {
                    'files' : [
                        ('raw' , '1' ),
                        ('bgsnap', '9', False), # ignore it
                        ('hkl' , '1' ),
                        ('_ls' , '1' ),
                        ('_am' , '9' ),
                        ('_ib' , '9' ),
                        ('_dk' , '9' ),
                        ('_fl' , '9' ),
                        ('_pr' , '1' ),
                        ('abs' , '1' ),
                        ('prp' , '1' ),
                        ('xht' , '1' ),
                        ('cif' , '1.0' ),
                        ('pcf' , '1.0' ),
                        ('ins' , '1' ),
                        ('res' , '1' ),
                        ('p4p' , '1' ),
                        ('eps' , '3.0' ),
                        ('png' , '1' ),
                        ('vzs' , '1' ),
                    ],
                }, # end of work
                'struct' : { # subfolder
                    'files' : [ ('cif' , '1.0'), ('res' , '1'), ('hkl' , '1') ],
                },
            }, # end of folder list
        },
        NOVA : {
            'xml_template_new' : 'map-csaf_nova2icat.xml',
            'xml_template_upd' : 'map-csaf_nova2icat.xml',
            'data_re' : re.compile('\.img$') ,
            'data_field_sep' : '_',
            'folders' : {
                '.' : {
                    'files' : [
                        ('ccd' , '' ),
                        # ('red' , '' ),
                        ('cif' , '' ),
                        ('cif_od' , '' ),
                        ('hkl' , '' ),
                        ('ins' , '' ),
                        ('p4p' , '' ),
                        ('par' , '' ),
                    ],
                },
                'frames' : {
                    'data' : [ ('img' , '3') ] ,
                },
                'movie' : {
                    'files' : [
                        ('jpg' , '' ),
                        ('jpr' , '' ),
                    ],
                },
                'expinfo' : {
                    'files' : [
                        ('ini' , '' ),
                        ('ini_report' , '' ),
                    ],
                },
                'log' : {
                    'files' : [
                    ],
                },
                'plots_dc' : {
                    'files' : [
                    ],
                },
                'plots_red' : {
                    'files' : [
                    ],
                },
                'struct/tmp' : {
                    'files' : [
                        ('cif' , '' ),
                    ],
                },
            }, # end of folder list
        }, # end of NOVA
    }
def someClassFunc():
    """Placeholder -- does nothing (yet)."""
    pass
# Bruker scan-type code -> human-readable scan name
bruker_scan_types = {
    1: "None",
    2: "Omega scan",
    3: "Phi scan",
    9: "Still scan",
    12: "Phi360 scan",
    14: "Theta/2Theta scan",
    15: "Chi scan"
}
# template record for a prototype dataset entry; None fields are filled in
# by the builders (see the trailing comments on each key)
basic_dataset = {
    'proto': None,
    'num': None, # len(filelist)
    'mach': None, # type
    'dsetname': None, # dsetname,
    'files' : [],
    'params': [] ,
    'scan': None, # scantype
}
"""
dset['_diffrn_scan.frame_id_start'] = files[0]['name']
dset['_diffrn_scan.frame_id_end'] = files[-1]['name']
"""
# imgCIF-style per-scan parameter skeleton; 'sval'/'nval' placeholders are
# filled from the scan operation record (see trailing comments)
bruker_scan_params = [
    {'name': '_diffrn_measurement.method',
     'sval': None, #types[op[0]],
     'units': 'n/a'},
    {'name': '_diffrn_measurement.sample_detector_distance',
     'nval': None, #op[2],
     'units': 'mm'},
    {'name': '_diffrn_scan.integration_time',
     'nval': None, #op[1],
     'units': 's'},
    {'name': '_diffrn_scan.SPEED',
     'nval': None, #speed,
     'units': u'\xb0/s'},
    {'name': '_diffrn_scan.id',
     'sval': None, #n+1,
     'units': 'n/a'},
    {'name': '_diffrn_scan.AXIS_ANGLE_RANGE',
     'nval': None, #op[3],
     'units': u'\xb0'},
    {'name': '_diffrn_scan.AXIS_ANGLE_DIRECTION',
     'nval': None, #op[4],
     'units': 'n/a'},
    {'name': '_diffrn_scan.AXIS_ANGLE_INCREMENT',
     'nval': None, #op[5],
     'units': u'\xb0'},
    {'name': '_diffrn_scan.DETECTOR_AXIS_ANGLE_TWOTHETA',
     'nval': None, #op[6],
     'units': u'\xb0'},
    {'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_OMEGA',
     'nval': None, #op[7],
     'units': u'\xb0'},
    {'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_CHI',
     'nval': None, #op[8],
     'units': u'\xb0'},
    {'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_PHI',
     'nval': None, #op[9],
     'units': u'\xb0'},
    {'name': '_diffrn_scan.frames',
     'nval': None, #len(filelist),
     'units': 'n/a'},
]
# template record for a generic (non-frame) file entry
basic_file = {
    'dir': None, # file or string?
    'name': None, # file or string?
    'params': [ ],
    'elements': [ ]
}
# template record for a single data frame file with per-frame scan params
bruker_data_file = {
    'dir': None,
    'name': None,
    'params': [
        {'name': '_diffrn_scan_frame.frame_id',
         'sval': None, # filename,
         'units': 'n/a'},
        {'name': '_diffrn_scan_frame.scan_id',
         'nval': None, # sid,
         'units': 'n/a'},
        {'name': '_diffrn_scan_frame.frame_number',
         'nval': None, # i,
         'units': 'n/a'},
    ]
}
class Artifacts(DataFormats):
    """Singleton class object -- static query helpers over the DataFormats
    configuration tables (ARTIFACTS / FILETYPES)."""

    @staticmethod
    def getInstruments():
        """Return the list of instrument keys, or None if none configured."""
        if not DataFormats.ARTIFACTS:
            return None
        return list(DataFormats.ARTIFACTS.keys())

    @staticmethod
    def getInstrumentFolders(instrument):
        """Return the folder names configured for *instrument*, or None."""
        if instrument not in DataFormats.ARTIFACTS:  # was has_key (py2 only)
            return None
        return list(DataFormats.ARTIFACTS[instrument]['folders'].keys())

    @staticmethod
    def getInstrumentDataFolder(instrument):
        """Primary folder holding bulk of raw data.

        Returns (folder_name, folder_config), or None for an unknown
        instrument or one with no 'data' folder configured.
        """
        if instrument not in DataFormats.ARTIFACTS:
            return None
        inst = DataFormats.ARTIFACTS[instrument]
        if 'data' in inst:
            return (".", inst)  # top directory
        folders = inst['folders']
        for folder in folders.keys():
            if 'data' in folders[folder]:
                return (folder, folders[folder])
        return None

    @staticmethod
    def getREforFileType(ftype):
        """Return the regexp configured for file type *ftype*."""
        filetypes = DataFormats.FILETYPES
        if ftype not in filetypes:
            # the config edit has gone wrong somewhere!
            # Bug fix: `raise Exception, msg` is a SyntaxError on Python 3.
            raise Exception("DataFormats.FILETYPES has no type: " + str(ftype))
        return filetypes[ftype]['regexp']

    @staticmethod
    def getFolderArtifacts(instrument, folder):
        """Return the config dict for *folder* of *instrument*; the whole
        instrument record when *folder* is falsy; None when unknown."""
        if instrument not in DataFormats.ARTIFACTS:
            return None
        inst = DataFormats.ARTIFACTS[instrument]
        if not folder:
            return inst  # just return collection root
        folders = inst['folders']
        if folder not in folders:
            return None
        return folders[folder]

    @staticmethod
    def getFolderFileREs(instrument, folder):
        """Return [(type, regexp), ...] for every 'data' and 'files' kind
        configured in *folder* of *instrument* (data kinds first).

        Robustness fix: an unknown instrument/folder pair now raises a
        descriptive Exception instead of an AttributeError on None.
        """
        artifacts = Artifacts.getFolderArtifacts(instrument, folder)
        if artifacts is None:
            raise Exception("No artifact config for %s/%s" % (instrument, folder))
        filetypes = DataFormats.FILETYPES
        re_list = []
        # 'data' entries are (type, version); 'files' entries may carry an
        # extra include-flag -- info[0] is the type in both cases
        for section in ('data', 'files'):
            for info in artifacts.get(section, []):
                type = info[0]
                if type not in filetypes:
                    # the config edit has gone wrong somewhere!
                    raise Exception("DataFormat.FILETYPES has no type: " + type)
                re_list.append((type, filetypes[type]['regexp']))
        return re_list
# Pre-compile every FILETYPES regexp once, at import time, for efficiency.
for _ftinfo in DataFormats.FILETYPES.values():
    _ftinfo['regexp'] = re.compile(_ftinfo['regexp'])
| Python |
#!/usr/bin/env python
import sys
from config import *
import logging
from errorCollator import *
# log file lives next to this script, one per tool
LOGFILENAME = __file__ + ".log"
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
    filename = LOGFILENAME, level=logging.INFO)
# collects non-fatal errors for a single summary email at exit
hiccups = ErrorCollator()
from ZSI import FaultException
from xmlMapping.dbConnect import *
class InstrumentWatcher:
def __init__(self):
self._log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
@staticmethod
def oracleEscape(self,strn):
if not strn: return ''
if strn=='none': return ''
if strn=='None': return ''
out = []
for char in strn:
if char =="'":
out.append("'")
out.append(char)
else:
out.append(char)
ret = "".join(out)
return ret
def cleanup(self):
if self.dbcnx:
self.dbcnx.finish()
self._log.info("Closed ICAT DB connection")
def main(self):
#populate remote ICAT FACILITY_USER table with these users
self._log.info("Connecting: ICAT Oracle Database")
bypass=False
try:
dbcnx = DBConnection(bypass)
dbcnx.cxInit(ICATDBHOST, ICATDBPORT, ICATDBNAME, ICATDBUSER, ICATDBPASS)
dbcnx.connect() # establish tcp communication
except Exception, e:
self._log.critical(e)
self._log.critical( "You may need to port forward over SSH to tunnel through firewall")
self._log.critical( "e.g. ssh dataminx -L 1521:localhost:1521")
hiccups.add(Error(__name__,__file__,e) )
return 2
self._log.info("Remote connections established" )
DBSCHEMA="ICAT"
TABLE = 'INSTRUMENT'
try:
columns =dbcnx.query("""SELECT column_name FROM COLS
WHERE table_name='%(TABLE)s'""" %
{'DBSCHEMA' : DBSCHEMA, 'TABLE' : TABLE } )
except Exception, msg:
#import traceback
#traceback.print_exc(file=sys.stdout)
self._log.critical("Select header from %s failed\n%s" ,TABLE, msg)
hiccups.add(Error(__name__,__file__,msg) )
return 2
header = []
if not columns or columns.ntuples < 1:
self._log.debug("no column headers exist")
else:
for row in columns:
header.append(row[0])
fileformats = []
try:
fields = ", ".join(header)
# print fields
oldformats = dbcnx.query("""SELECT %(fields)s FROM %(TABLE)s """ %
{'DBSCHEMA' : DBSCHEMA ,'fields' : fields, 'TABLE' : TABLE } )
except Exception, msg:
#import traceback
#traceback.print_exc(file=sys.stdout)
self._log.critical("Select from %s failed\n%s" ,TABLE, msg)
hiccups.add(Error(__name__,__file__,msg) )
return 2
if not oldformats or oldformats.ntuples < 1:
self._log.debug("No %s entries exist", TABLE)
else:
for row in oldformats:
# print row
d = {}
for i in range(len(header)):
d[header[i]] = row[i]
fileformats.append(d)
for code in DataFormats.ARTIFACTS.keys():
name = code
short_name = name
type = "Crystallography"
description = " "
ftype = DataFormats.FILETYPES[code]
versions = ftype['version']
if versions:
for version in versions:
match = False
for itype in fileformats:
if itype['NAME'] != ftype['format']: continue
if itype['VERSION'] != version: continue
match = True
break
if match: continue
self._log.info("No current ICAT match for %s %s" , ftype['format'], version)
self._log.info("Adding new entry ...")
injectNewFileFormat(ftype,version)
else: # no version
match = False
for itype in fileformats:
if itype['NAME'] != ftype['format']: continue
match = True
break
if match: continue
self._log.info("No match for %s " , ftype['format'])
injectNewFileFormat(ftype,'1')
self._log.info("ICAT %s and config.py DataFormats are reconciled." , TABLE)
self._log.info("Nothing else to add.")
    def injectNewFileFormat(self, format, version):
        """Insert a row for *format*/*version* into the ICAT INSTRUMENT table.

        format  -- dict with 'format', 'format_type' and 'description' keys
                   (a config.py DataFormats entry).
        version -- version string; stored in the SHORT_NAME column.

        NOTE(review): despite its name this inserts into INSTRUMENT (the
        format name goes to NAME, the version to SHORT_NAME) -- confirm
        that column mapping is intended.  On any failure it prints the
        error and calls sys.exit(), terminating the whole process.
        """
        try:
            # (NAME, VERSION, FORMAT_TYPE, DESCRIPTION, SEQ_NUMBER, MOD_TIME, MOD_ID, CREATE_TIME, CREATE_ID, FACILITY_ACQUIRED, DELETED)
            # Values
            # ('nexus', '3.0.0', 'HDF5', 'Neutron and X-Ray data format.', 999, TO_TIMESTAMP('12/09/2007 13:30:16.4','DD/MM/YYYY HH24:MI:SS.FF'), 'overlord', TO_TIMESTAMP('12/09/2007 13:30:16.4','DD/MM/YYYY HH24:MI:SS.FF'), 'overlord', 'Y', 'N');
            query= """
            INSERT INTO INSTRUMENT (NAME, SHORT_NAME,
            TYPE, DESCRIPTION, SEQ_NUMBER, MOD_TIME, MOD_ID,
            CREATE_TIME, CREATE_ID, FACILITY_ACQUIRED, DELETED) VALUES
            ('%(name)s', '%(version)s', '%(type)s', '%(description)s',
            999, systimestamp, 'overlord', systimestamp,'overlord', 'Y', 'N') """ % \
            {'DBSCHEMA' : DBSCHEMA,
            'name': oracleEscape(format['format']),
            'version': oracleEscape(version),
            'type': oracleEscape(format['format_type']),
            'description':oracleEscape(format['description']),
            } # )
            # NOTE(review): 'DBSCHEMA' is not referenced by the query template
            # and does not appear to be defined in this scope -- verify.
            print query
            res = dbcnx.query(query)
            res = dbcnx.query("COMMIT")  # explicit COMMIT; presumably no autocommit -- confirm
        except Exception, msg:
            print "Failed to inject new dataformat: ", format
            print msg
            sys.exit()
        return
if __name__ == '__main__':
    # Run the watcher; log any uncaught exception as critical so the
    # failure is visible, then always clean up.
    watcher = InstrumentWatcher()
    try:
        rv = watcher.main()
    except Exception, msg:
        watcher._log.critical(msg)
    watcher.cleanup()
    # Build a host name for the alert sender address.
    import socket
    thisHost = socket.gethostbyaddr(socket.gethostname())
    # NOTE(review): [1] is the alias list from gethostbyaddr; [1][0] will
    # raise IndexError if the host has no aliases -- confirm.
    fqdn = thisHost[1][0]
    # Email any errors accumulated during the run to the alert list.
    if hiccups.hasErrors():
        hiccups.dump()
        hiccups.emailAlert("instrumentWatcher@"+fqdn, EMAIL_ALERT_LIST, EMAIL_SMTP_HOST,
                 "New ICAT instrument addition errors " + __file__ )
| Python |
#! /usr/bin/env python
from DrupalSoap_client import *
from ZSI import FaultException
import sys
# WSDL locations (neither filename nor FILENAME is used below).
filename ="VBLMetaMan.wsdl"
FILENAME = "https://vbl.synchrotron.org.au/MetaMan/VBLMetaMan.wsdl"
# Drupal SOAP endpoint under test.
URL = "http://cima.example.com:8080/?q=services/soap"
# Credentials are deliberately blank; fill in before running.
LOGNAME=""
PASSWORD=""
# Dataset name used for the proposal-database lookups below.
DATASETID="10tdk013"
def getFromListResponse(key,list):
    """Return the 'value' of the first dict in *list* whose 'title' is *key*.

    The Drupal SOAP service returns results as a list of
    {'title': ..., 'value': ...} dicts; non-dict entries are skipped.
    Returns None when no entry matches.  (The original returned the
    undefined name ``none`` here, which raised NameError instead.)
    """
    for entry in list:
        if isinstance(entry, dict) and 'title' in entry \
                and entry['title'] == key:
            return entry['value']
    return None
def mapList2Hash(list):
    """Fold a list of {'title': ..., 'value': ...} dicts into one dict.

    Raises Exception when any element is not a dict carrying both keys,
    so malformed SOAP responses fail loudly rather than being dropped.
    """
    result = {}
    for entry in list:
        if isinstance(entry, dict) and 'title' in entry \
                and 'value' in entry:
            result[entry['title']] = entry['value']
        else:
            # call-form raise works on both Python 2 and 3
            raise Exception("trouble parsing dict %s" % entry)
    return result
if __name__ == '__main__':
    # End-to-end smoke test against a live Drupal SOAP proposal-database
    # service: login, dataset -> log -> sample lookups, then exit.
    print "locating the service"
    loc = DrupalSoapLocator()
    # prints messages sent and received if tracefile is set
    import sys
    kw = { # 'tracefile' : sys.stdout,
        'user': LOGNAME, 'password':PASSWORD,
        'url': URL,
        'auth' : ( ZSI.client.AUTH.httpbasic, LOGNAME, PASSWORD ) }
    # NOTE(review): only FaultException is imported from ZSI above; the bare
    # name 'ZSI' must come from DrupalSoap_client's wildcard import -- confirm.
    print "binding ..."
    binding = loc.getDrupalSoapPort(**kw) # returns SOAP object
    # --- login ---
    print "\ntry to login ..."
    request = user_loginRequest()
    request._username = LOGNAME
    request._password = PASSWORD
    response = binding.user_login(request)
    resp = response._return
    print "got resp: " , type(resp)
    hash = mapList2Hash(resp)
    print "got response: " , hash
    # --- dataset name -> log id ---
    print "\nget log Id..."
    request = proposaldb_xgetLogIdFromDatasetRequest()
    request._dataset_name=DATASETID
    try:
        response = binding.proposaldb_xgetLogIdFromDataset(request)
        print "did we get anything? "
    except FaultException, e:
        print "uh oh", e
        exit(1)
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    hash = mapList2Hash(resp)
    l_id = int(hash['l_id'])
    print "Log Id " , l_id
    # --- log id -> log record ---
    print "\nget log record"
    request = proposaldb_xgetLogFromIdRequest()
    request._l_id = l_id
    response = binding.proposaldb_xgetLogFromId(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    hash = mapList2Hash(resp)
    print "Log " , hash
    # --- sample id (from the log record) -> sample record ---
    print "\nget sample Id..."
    request = proposaldb_xgetSampleFromIdRequest()
    request._s_id = int(hash['s_id'])
    response = binding.proposaldb_xgetSampleFromId(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    hash = mapList2Hash(resp)
    print "sample " , hash
    import sys
    sys.exit()
    # NOTE(review): everything below this exit is unreachable dead code,
    # apparently kept for reference.
    print "\nget scientists ..."
    import datetime
    date = list(datetime.datetime.now().timetuple())
    date[6]=0
    print date
    date = [2009, 11, 26, 15, 41, 3, 0, 330, -1]
    request = proposaldb_getAllScientistsSinceRequest()
    request._date=date
    # request._date=[2010, 11, 26, 15, 41, 3, 0, 330, -1]
    response = binding.proposaldb_getAllScientistsSince(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    hash = mapList2Hash(resp)
    print "got resp: " , hash
    print "\ntry to connect ..."
    request = system_connectRequest()
    response = binding.system_connect(request)
    print "did we connect? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    print "\nget scientists ..."
    request = proposaldb_getAllScientistsSinceRequest()
    import datetime
    date = list(datetime.datetime.now().timetuple())
    date[6]=0
    date = [2004, 1, 1, 0, 0, 0, 0, 330, -1]
    request._date=date
    # request._date=[1, 11, 26, 15, 41, 3, 0, 330, -1]
    print date
    response = binding.proposaldb_getAllScientistsSince(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    print "\ntry to logout ..."
    request = user_logoutRequest()
    #request._username = LOGNAME
    #request._password = PASSWORD
    response = binding.user_logout(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got response: " , resp
    print "\ngetting Prop Echo ..."
    request = proposaldb_echoRequest()
    request._message = "The bloody thing worked"
    response = binding.proposaldb_echo(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got result: " , resp
| Python |
#! /usr/bin/env python
from DrupalSoap_client import *
from ZSI import FaultException
import sys
# WSDL locations (neither filename nor FILENAME is used below).
filename ="VBLMetaMan.wsdl"
FILENAME = "https://vbl.synchrotron.org.au/MetaMan/VBLMetaMan.wsdl"
# Drupal SOAP endpoint under test.
URL = "http://cima.example.com:8080/?q=services/soap"
# Credentials are deliberately blank; fill in before running.
LOGNAME=""
PASSWORD=""
# Dataset name used for the proposal-database lookups below.
DATASETID="10tdk013"
def getFromListResponse(key,list):
    """Return the 'value' of the first dict in *list* whose 'title' is *key*.

    The Drupal SOAP service returns results as a list of
    {'title': ..., 'value': ...} dicts; non-dict entries are skipped.
    Returns None when no entry matches.  (The original returned the
    undefined name ``none`` here, which raised NameError instead.)
    """
    for entry in list:
        if isinstance(entry, dict) and 'title' in entry \
                and entry['title'] == key:
            return entry['value']
    return None
def mapList2Hash(list):
    """Fold a list of {'title': ..., 'value': ...} dicts into one dict.

    Raises Exception when any element is not a dict carrying both keys,
    so malformed SOAP responses fail loudly rather than being dropped.
    """
    result = {}
    for entry in list:
        if isinstance(entry, dict) and 'title' in entry \
                and 'value' in entry:
            result[entry['title']] = entry['value']
        else:
            # call-form raise works on both Python 2 and 3
            raise Exception("trouble parsing dict %s" % entry)
    return result
if __name__ == '__main__':
    # End-to-end smoke test against a live Drupal SOAP proposal-database
    # service (this whole section duplicates the preceding script).
    print "locating the service"
    loc = DrupalSoapLocator()
    # prints messages sent and received if tracefile is set
    import sys
    kw = { # 'tracefile' : sys.stdout,
        'user': LOGNAME, 'password':PASSWORD,
        'url': URL,
        'auth' : ( ZSI.client.AUTH.httpbasic, LOGNAME, PASSWORD ) }
    # NOTE(review): only FaultException is imported from ZSI above; the bare
    # name 'ZSI' must come from DrupalSoap_client's wildcard import -- confirm.
    print "binding ..."
    binding = loc.getDrupalSoapPort(**kw) # returns SOAP object
    # --- login ---
    print "\ntry to login ..."
    request = user_loginRequest()
    request._username = LOGNAME
    request._password = PASSWORD
    response = binding.user_login(request)
    resp = response._return
    print "got resp: " , type(resp)
    hash = mapList2Hash(resp)
    print "got response: " , hash
    # --- dataset name -> log id ---
    print "\nget log Id..."
    request = proposaldb_xgetLogIdFromDatasetRequest()
    request._dataset_name=DATASETID
    try:
        response = binding.proposaldb_xgetLogIdFromDataset(request)
        print "did we get anything? "
    except FaultException, e:
        print "uh oh", e
        exit(1)
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    hash = mapList2Hash(resp)
    l_id = int(hash['l_id'])
    print "Log Id " , l_id
    # --- log id -> log record ---
    print "\nget log record"
    request = proposaldb_xgetLogFromIdRequest()
    request._l_id = l_id
    response = binding.proposaldb_xgetLogFromId(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    hash = mapList2Hash(resp)
    print "Log " , hash
    # --- sample id (from the log record) -> sample record ---
    print "\nget sample Id..."
    request = proposaldb_xgetSampleFromIdRequest()
    request._s_id = int(hash['s_id'])
    response = binding.proposaldb_xgetSampleFromId(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    hash = mapList2Hash(resp)
    print "sample " , hash
    import sys
    sys.exit()
    # NOTE(review): everything below this exit is unreachable dead code,
    # apparently kept for reference.
    print "\nget scientists ..."
    import datetime
    date = list(datetime.datetime.now().timetuple())
    date[6]=0
    print date
    date = [2009, 11, 26, 15, 41, 3, 0, 330, -1]
    request = proposaldb_getAllScientistsSinceRequest()
    request._date=date
    # request._date=[2010, 11, 26, 15, 41, 3, 0, 330, -1]
    response = binding.proposaldb_getAllScientistsSince(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    hash = mapList2Hash(resp)
    print "got resp: " , hash
    print "\ntry to connect ..."
    request = system_connectRequest()
    response = binding.system_connect(request)
    print "did we connect? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    print "\nget scientists ..."
    request = proposaldb_getAllScientistsSinceRequest()
    import datetime
    date = list(datetime.datetime.now().timetuple())
    date[6]=0
    date = [2004, 1, 1, 0, 0, 0, 0, 330, -1]
    request._date=date
    # request._date=[1, 11, 26, 15, 41, 3, 0, 330, -1]
    print date
    response = binding.proposaldb_getAllScientistsSince(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    print "\ntry to logout ..."
    request = user_logoutRequest()
    #request._username = LOGNAME
    #request._password = PASSWORD
    response = binding.user_logout(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got response: " , resp
    print "\ngetting Prop Echo ..."
    request = proposaldb_echoRequest()
    request._message = "The bloody thing worked"
    response = binding.proposaldb_echo(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got result: " , resp
| Python |
#! /usr/bin/env python
from DrupalSoap_services import *
import sys
# WSDL locations (neither filename nor FILENAME is used below).
filename ="VBLMetaMan.wsdl"
FILENAME = "https://vbl.synchrotron.org.au/MetaMan/VBLMetaMan.wsdl"
# Drupal SOAP endpoint under test.
URL = "http://cima.chem.usyd.edu.au:8080/?q=services/soap"
# SECURITY(review): credentials hard-coded in source -- move to config/env.
LOGNAME="admin"
PASSWORD="CSAFPropPass"
if __name__ == '__main__':
    # Smoke test for the DrupalSoap_services generated bindings, with
    # full request/response tracing to stdout ('tracefile' enabled).
    print "locating the service"
    loc = DrupalSoapLocator()
    # prints messages sent and received if tracefile is set
    import sys
    kw = { 'tracefile' : sys.stdout,
        'user': LOGNAME, 'password':PASSWORD,
        'url': URL,
        'auth' : ( ZSI.client.AUTH.httpbasic, LOGNAME, PASSWORD ) }
    # NOTE(review): 'ZSI' is not imported by name here; it must come from
    # DrupalSoap_services' wildcard import -- confirm.
    print "binding ..."
    binding = loc.getDrupalSoapPortType(**kw) # returns SOAP object
    # --- login ---
    print "\ntry to login ..."
    request = user_loginRequest()
    request._username = LOGNAME
    request._password = PASSWORD
    response = binding.user_login(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got response: " , resp
    # --- scientists modified since a fixed date ---
    print "\nget scientists ..."
    import datetime
    date = list(datetime.datetime.now().timetuple())
    date[6]=0
    print date
    date = [2009, 11, 26, 15, 41, 3, 0, 330, -1]
    request = proposaldb_getAllScientistsSinceRequest()
    request._date=date
    # request._date=[2010, 11, 26, 15, 41, 3, 0, 330, -1]
    response = binding.proposaldb_getAllScientistsSince(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    # --- dataset name -> log id ---
    print "\nget log Id..."
    # NOTE(review): every other request object here is named *Request;
    # this is probably meant to be proposaldb_xgetLogIdFromDatasetRequest()
    # -- verify against the generated DrupalSoap_services module.
    request = proposaldb_xgetLogIdFromDataset()
    request._dataset_name="mm03"
    response = binding.proposaldb_xgetLogIdFromDataset(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    import sys
    sys.exit()
    # NOTE(review): everything below this exit is unreachable dead code.
    print "\ntry to connect ..."
    request = system_connectRequest()
    response = binding.system_connect(request)
    print "did we connect? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    print "\nget scientists ..."
    request = proposaldb_getAllScientistsSinceRequest()
    import datetime
    date = list(datetime.datetime.now().timetuple())
    date[6]=0
    date = [2004, 1, 1, 0, 0, 0, 0, 330, -1]
    request._date=date
    # request._date=[1, 11, 26, 15, 41, 3, 0, 330, -1]
    print date
    response = binding.proposaldb_getAllScientistsSince(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    print "\ntry to logout ..."
    request = user_logoutRequest()
    #request._username = LOGNAME
    #request._password = PASSWORD
    response = binding.user_logout(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got response: " , resp
    print "\ngetting Prop Echo ..."
    request = proposaldb_echoRequest()
    request._message = "The bloody thing worked"
    response = binding.proposaldb_echo(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got result: " , resp
| Python |
#! /usr/bin/env python
from DrupalSoap_services import *
import sys
# WSDL locations (neither filename nor FILENAME is used below).
filename ="VBLMetaMan.wsdl"
FILENAME = "https://vbl.synchrotron.org.au/MetaMan/VBLMetaMan.wsdl"
# Drupal SOAP endpoint under test.
URL = "http://cima.chem.usyd.edu.au:8080/?q=services/soap"
# SECURITY(review): credentials hard-coded in source -- move to config/env.
LOGNAME="admin"
PASSWORD="CSAFPropPass"
if __name__ == '__main__':
    # Smoke test for the DrupalSoap_services generated bindings, with
    # full request/response tracing to stdout ('tracefile' enabled).
    # (This whole section duplicates the preceding script.)
    print "locating the service"
    loc = DrupalSoapLocator()
    # prints messages sent and received if tracefile is set
    import sys
    kw = { 'tracefile' : sys.stdout,
        'user': LOGNAME, 'password':PASSWORD,
        'url': URL,
        'auth' : ( ZSI.client.AUTH.httpbasic, LOGNAME, PASSWORD ) }
    # NOTE(review): 'ZSI' is not imported by name here; it must come from
    # DrupalSoap_services' wildcard import -- confirm.
    print "binding ..."
    binding = loc.getDrupalSoapPortType(**kw) # returns SOAP object
    # --- login ---
    print "\ntry to login ..."
    request = user_loginRequest()
    request._username = LOGNAME
    request._password = PASSWORD
    response = binding.user_login(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got response: " , resp
    # --- scientists modified since a fixed date ---
    print "\nget scientists ..."
    import datetime
    date = list(datetime.datetime.now().timetuple())
    date[6]=0
    print date
    date = [2009, 11, 26, 15, 41, 3, 0, 330, -1]
    request = proposaldb_getAllScientistsSinceRequest()
    request._date=date
    # request._date=[2010, 11, 26, 15, 41, 3, 0, 330, -1]
    response = binding.proposaldb_getAllScientistsSince(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    # --- dataset name -> log id ---
    print "\nget log Id..."
    # NOTE(review): probably meant proposaldb_xgetLogIdFromDatasetRequest()
    # (every other request object here is named *Request) -- verify.
    request = proposaldb_xgetLogIdFromDataset()
    request._dataset_name="mm03"
    response = binding.proposaldb_xgetLogIdFromDataset(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    import sys
    sys.exit()
    # NOTE(review): everything below this exit is unreachable dead code.
    print "\ntry to connect ..."
    request = system_connectRequest()
    response = binding.system_connect(request)
    print "did we connect? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    print "\nget scientists ..."
    request = proposaldb_getAllScientistsSinceRequest()
    import datetime
    date = list(datetime.datetime.now().timetuple())
    date[6]=0
    date = [2004, 1, 1, 0, 0, 0, 0, 330, -1]
    request._date=date
    # request._date=[1, 11, 26, 15, 41, 3, 0, 330, -1]
    print date
    response = binding.proposaldb_getAllScientistsSince(request)
    print "did we get anything? "
    resp = response._return
    print "got resp: " , type(resp)
    print "got resp: " , resp
    print "\ntry to logout ..."
    request = user_logoutRequest()
    #request._username = LOGNAME
    #request._password = PASSWORD
    response = binding.user_logout(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got response: " , resp
    print "\ngetting Prop Echo ..."
    request = proposaldb_echoRequest()
    request._message = "The bloody thing worked"
    response = binding.proposaldb_echo(request)
    resp = response._return
    print "got resp: " , type(resp)
    print "got result: " , resp
| Python |
# Exploratory script against the ICAT admin + main web services: logs in
# to both, lists lookup tables, searches for one investigation, then
# tries to add a dummy investigator.
INVESTIGATIONTITLE = "lflj134"
ADMINURL = "https://icat.admin.ws.host.net/ICATAdminService/ICATAdmin"
ICATURL = "https://icat.ws.host.net/ICATService/ICAT"
# SECURITY(review): placeholder credentials in source -- move to config/env.
ICATUSER = "user"
ICATPASS = "pass"
ADMINUSER ="admin"
ADMINPASS ="pass"
from icatWS.ICATAdminService_client import *
from icatWS.ICATService_client import *
from icatWS.ICATService_types import ns0
from ZSI import FaultException
import sys
#login to admin
loc = ICATAdminServiceLocator()
kw = { # 'tracefile' : sys.stdout,
    'user': ADMINUSER, 'password':ADMINPASS,
    'auth' : ( ZSI.client.AUTH.httpbasic, ADMINUSER, ADMINPASS ) }
# NOTE(review): bare 'ZSI' must come from a wildcard import above -- confirm.
adminBinding = loc.getICATAdminPort(ADMINURL, **kw) # returns SOAP object
#
request = loginAdmin()
request._username = ICATUSER
response = adminBinding.loginAdmin(request)
adminSessionId = response._return
print adminSessionId
#login to icat
loc = ICATServiceLocator()
kw = { # 'tracefile' : sys.stdout,
    'user': ICATUSER, 'password':ICATPASS,
    'url': ICATURL,
    'auth' : ( ZSI.client.AUTH.httpbasic, ICATUSER, ICATPASS) }
#
icatBinding = loc.getICATPort(**kw) # returns SOAP object
request = login()
request._username = ICATUSER
request._password = ICATPASS
response = icatBinding.login(request)
sessionId = response._return
print sessionId
#list instruments
request = listInstruments()
request._sessionId = sessionId
response = icatBinding.listInstruments(request)
print response._return
#list dataset types
request = listDatasetTypes()
request._sessionId = sessionId
response = icatBinding.listDatasetTypes(request)
print response._return
#list investigation types
request = listInvestigationTypes()
request._sessionId = sessionId
response = icatBinding.listInvestigationTypes(request)
print response._return
#list listDatasetStatus
request = listDatasetStatus()
request._sessionId = sessionId
response = icatBinding.listDatasetStatus(request)
print response._return
# --- advanced search for one investigation (datasets only) ---
print "Retrieving ICAT investigation data for ", INVESTIGATIONTITLE
advSearchDetails= ns0.advancedSearchDetails_Def("dummy")
advSearchDetails._caseSensitive=True
advSearchDetails._investigationName = INVESTIGATIONTITLE
advSearchDetails._investigationInclude = "DATASETS_ONLY"
request = searchByAdvanced()
request._sessionId = sessionId
request._advancedSearchDetails = advSearchDetails
try:
    response = icatBinding.searchByAdvanced(request)
    invHolderList = response._return
except:
    import sys
    print "Unexpected error:", sys.exc_info()[0]
    # possibly the sessionId has timed out. it had an hour
    # but this could be slow ...
    # maybe we could respawn the connection?
    sys.exit(1)
# did we get a match? If not, then its a new investigation ...
if invHolderList:
    print "Need to update records for ",INVESTIGATIONTITLE, " .........."
    print type(invHolderList)
    invHolder = invHolderList[0]
    print "facility = ",invHolder.Facility
    print vars(invHolder)
    datasets = invHolder.DatasetCollection
    print "datasets: " , len(datasets)
    # Dump each dataset and a sample of its datafiles.
    for dataset in datasets:
        print dataset._id, dataset._name, dataset._datasetType
        if not hasattr(dataset, "_datafileCollection"): continue
        datafiles = dataset._datafileCollection
        print "datafiles " , len(datafiles)
        print ""
        if len(datafiles)<=0: continue
        datafile0 = datafiles[0]
        print " " , datafile0._name, datafile0._datafileVersion
        print " " , vars(datafile0._datafileFormat._datafileFormatPK)
        print " " , datafile0._datafileModifyTime
else:
    print "No prior records for ",INVESTIGATIONTITLE, "in ICAT!"
# retrieve specific, basic
#invs = icatcnx.getMyInvestigations(sessionId)
#print invs
#invHolder = icatcnx.getInvestigation(sessionId,5700)
# try to generate a new user in facility users table
user= ns0.facilityUser_Def("dummy")
# required entity base bean fields ... # WTF????
user._facilityAcquiredData = True
#user._icatRole = "?"
user._selected = False
user._uniqueId = "new_dummy_user"
# investigator extensions ...
# NOTE(review): the fields below are set WITHOUT the leading underscore,
# unlike the four above -- verify which naming the generated types expect.
user.facilityUserId = "new_dummy_user"
user.federalId = "new_dummy_user"
user.firstName = "New"
user.initials = "N D"
user.lastName = "User"
user.middleName = "Dummy"
user.title = "Mr"
investPK = ns0.investigatorPK_Def("dummy")
# required primaryKey base bean fields ...
# investPK extensions
investPK.facilityUserId = "new_dummy_user"
investPK.investigationId = 1
invest = ns0.investigator_Def("dummy")
# required entity base bean fields ...
invest._facilityAcquiredData = True # WTF????
#invest._icatRole = "?"
invest._selected = False
invest._uniqueId = "thisUniqueId"
# investigator extensions ...
invest._facilityUser = user
invest._investigatorPK = investPK
invest._role = "INVESTIGATOR"
request = addInvestigator()
request._sessionId = sessionId
request._investigator = invest
# NOTE(review): '_investigationdId' has an extra 'd' -- verify against the
# generated addInvestigator type; it may silently set an unused attribute.
request._investigationdId = 1
print "adding new user..."
response = icatBinding.addInvestigator(request)
print "added new user? ", response._return
request = logout()
request._sessionId = sessionId
response = icatBinding.logout(request)
print "logged out: ", response._return
# no admin logout function
| Python |
#
# This file is imported by the XML template for instrument-specific
# handling.
#
# BrukerFrameBuilder is a bit confused. It started off as a trial
# method of generating metadata for ICAT based purely on records
# from the Bruker PostgreSQL database. Subsequently it has
# been adapted to generate metadata based on real image files
# aggregated into a collection.
# It was also modified to do this for SMART files which predate
# the Bruker PostgreSQL approach.
# Thereafter it seems reasonable to try and extract as much
# common/generic behaviour into a base class (DatasetBuilder)
# for re-use by other instrument datacollection strategies.
#
from config import DataFormats
from datasetBuilder import DatasetBuilder
class BrukerFrameBuilder(DatasetBuilder):
    """Make the data we extract from the Bruker ApexII PostgreSQL
    database (and, for older SMART data, the frame files) as palatable
    to the XML template parser as possible."""
    def __init__(self,dataFormat=DataFormats.APEX):
        """Initialise with a DataFormats code; defaults to APEX."""
        # Generic setup (logging etc.) lives in DatasetBuilder.
        super(BrukerFrameBuilder,self).__init__(dataFormat)
        # self.dataFormat = dataFormat
        # Frame count per scan, populated later by parseNums().
        self.protonums = []
    def deduceFrames(self, fileproto,protonums, operations, scans=None):
        """
        From the Bruker PostgreSQL database:
        fileproto = the BAXSdb.saint.integrate_framename field
        protonums = the BAXSdb.saint.integrate_nframes field
        operations/scans = per-scan parameter rows used to build metadata
        Here we generate the datasets, the filenames, (and the metadata).
        Returns the filename prototypes from parseFileTemplate().
        """
        self._log.info(fileproto)
        self._log.info( protonums)
        # Per-scan frame counts -> self.protonums
        self.parseNums(protonums)
        # Per-scan CIF-style parameter dicts -> self.operations
        self.buildScanMetadataFromDatabase(operations,scans)
        # Expand the SAINT framename template into filename prototypes.
        res = self.parseFileTemplate(fileproto)
        return res
def parseNums(self, protonums):
if protonums=='' or protonums=='"': # empty list
self.protonums = []
return
list = protonums.split(',')
#self.protonums = map(list, int )
#print list
self.protonums = map(int, list )
    def parseFileTemplate(self, fileproto):
        """Expand a SAINT integrate_framename field into filename prototypes.

        fileproto -- Windows-style path template; three shapes are handled:
          * a single scan path,
          * a comma-separated list of scans (first entry carries the
            directory and extension),
          * a "[a-b]" numeric-range template.
        Frame type is guessed from the extension: '.sfrm' -> Apex,
        otherwise SMART ('.00n').
        Returns a list of prototypes; each prototype is a list of path
        pieces [dir, stem, sep, runid, ..., '.', ext] that callers join.
        Raises Exception on any framename pattern it cannot recognise.
        """
        if fileproto=='' or fileproto=='"': return [] # No info
        #print fileproto
        #from os.ntpath import split # for windows
        from ntpath import split # for windows
        instrument = None
        if fileproto.find('.sfrm')>0:
            # its apex
            instrument = "Apex"
        else:
            # assume its SMART .00n
            instrument = "SMART"
        # Which of the three template shapes is this?
        comma = fileproto.find(',')
        lb =fileproto.find('[')
        rb =fileproto.find(']')
        protos = [ ]
        if comma <0 and lb<0: # just a single scan run?
            dir, name = split(fileproto) # ntpath split
            dir = self.remapDir(dir)
            if instrument=="Apex":
                name,ext = name.split('.')
                part = name.split('_')
                if len(part)>3:
                    fproto = [dir,'_'.join(part[0:-2]),'_',part[-2],'_',part[-1],'.', ext ]
                elif len(part)==3:
                    fproto = [ dir, part[0],'_',part[1],'_',part[2],'.', ext ]
                else:
                    raise Exception, "Unknown SAINT framename pattern (1): " + fileproto
            elif instrument=="SMART":
                part = name.split('_')
                # SMART: run number is embedded before the numeric extension.
                bits = part[-1].split('.')
                if len(bits)!=2:
                    raise Exception, "unknown SAINT framename pattern (2): " + fileproto
                part[-1] = bits[-2]
                part.append(bits[-1])
                if len(part)>3:
                    fproto = [dir,'_'.join(part[0:-2]),'_',part[-2],'.',part[-1]]
                elif len(part)==3:
                    fproto = [ dir, part[0],'_',part[-2],'.',part[-1]]
                else:
                    raise Exception, "unknown SAINT framename pattern (3): " + fileproto
            else:
                raise Exception, "unknown image frame type" + fileproto
            #print fproto
            protos.append(fproto)
        elif comma >0 and lb<0: # its a comma delimited list of scans
            scans = fileproto.split(',')
            ext = ""
            bits = []
            c = -1
            for scan in scans:
                c += 1
                if c == 0: # first has folder path prepended
                    dir, name = split(scan) # ntpath
                    dir = self.remapDir(dir)
                else:
                    name = scan
                if instrument=="Apex":
                    if c == 0: # first has filename extension
                        name,ext = name.split('.')
                    part = name.split('_')
                    if len(part)>3:
                        fproto = [dir,'_'.join(part[0:-2]),'_',part[-2],'_',part[-1],'.', ext ]
                    elif len(part)==3:
                        fproto = [ dir, part[0],'_',part[1],'_',part[2],'.', ext ]
                    else:
                        raise Exception, "Unknown SAINT framename pattern (1): " + fileproto
                if instrument=="SMART":
                    part = name.split('_')
                    if c == 0: # first has filename extension
                        bits = part[-1].split('.')
                        if len(bits)!=2:
                            raise Exception, "unknown SAINT framename pattern (2): " + fileproto
                    part[-1] = bits[-2]
                    part.append(bits[-1])
                    if len(part)>3:
                        fproto = [dir,'_'.join(part[0:-2]),'_',part[-2],'.',part[-1]]
                    elif len(part)==3:
                        fproto = [ dir, part[0],'_',part[-2],'.',part[-1]]
                    elif len(part)==2:
                        fproto = [ dir, part[0],'.',part[-1]]
                    else:
                        raise Exception, "unknown SAINT framename pattern (3): " + fileproto
                protos.append(fproto)
        elif comma <0 and lb >= 0 and rb>lb:
            # its a [a-b] template
            dir, name = split(fileproto)
            dir = self.remapDir(dir)
            ext = ""
            lb = name.find('[')
            rb = name.find(']')
            rnge = name[lb+1:rb]
            rnge = rnge.split('-')
            rnge= map(int,rnge)
            # Zero-pad the run index only when the range reaches 10+.
            if rnge[1]>9: ten_plus=True
            else: ten_plus=False
            trailer = name[rb+1: ]
            if instrument == 'Apex':
                pre, ext = trailer.split('.')
                part = pre.split('_')
                if len(part)!=2:
                    raise Exception, "unknown SAINT framename pattern (2): " + fileproto
                if part[0]!='':
                    raise Exception, "unknown SAINT framename pattern (2): " + fileproto
            elif instrument == 'SMART':
                part = trailer.split('.')
                if len(part)!=2:
                    raise Exception, "unknown SAINT framename pattern (2): " + fileproto
            for idx in range(rnge[0],rnge[1]+1): # loop for the scans
                if ten_plus:
                    sidx = "%02d" % idx
                else:
                    sidx = "%d" % idx
                if instrument == 'Apex':
                    fproto = [ dir, name[0:lb], sidx, '_',part[1],'.',ext ]
                elif instrument == 'SMART':
                    fproto = [ dir, name[0:lb], sidx, '.',part[1] ]
                #print fproto
                protos.append(fproto)
        else:
            raise Exception, "unknown SAINT framename pattern (4): " + fileproto
        #print protos
        return protos
    def buildScanMetadataFromDatabase(self, operators, scans):
        """Build self.operations: one CIF-style parameter list per scan.

        operators/scans are per-scan parameter rows from the Bruker
        database; whichever list is present (or longer, on mismatch) is
        used.  Row layout differs by source: in "scan" mode speed is
        op[5]/op[1]; in "op" mode it is op[10]/60 (deg/min -> deg/s).
        Stores [] and only warns when both inputs are empty.
        """
        if not operators and not scans:
            self.operations = []
            #raise Exception, "Empty scan list!"
            self._log.warning("Empty scan list!") # no metadata ...?
            return
        # Pick the data source: prefer whichever table is non-empty;
        # on a length mismatch use the longer one and warn.
        if operators and not scans:
            operations = operators
            mode = "op"
        elif scans and not operators:
            operations = scans
            mode = "scan"
        elif len(operators)> len(scans):
            self._log.warning("operation/scan table mismatch: %d %d",
                len(operators), len(scans) )
            operations = operators
            mode = "op"
        elif len(operators)< len(scans):
            self._log.warning("operation/scan table mismatch: %d %d",
                len(operators), len(scans) )
            operations = scans
            mode = "scan"
        elif len(operators)== len(scans):
            operations = scans
            mode = "scan"
        else:
            raise Exception, "How could this happen?"
        # NOTE(review): 'fields' is never used below -- documentation of the
        # expected "op" row layout only.
        fields = "operation_types_id, time, dx, sweep, direction, width, theta, omega, chi, phi, speed"
        ops = []
        # from the Bruker database operation_types table
        types = {1:"None",2:"Omega scan",3:"Phi scan",9:"Still scan",
            12:"Phi360 scan",14:"Theta/2Theta scan", 15:"Chi scan" }
        n = -1
        for op in operations:
            self._log.debug("op: %s", op)
            n += 1
            if mode == "scan":
                speed = op[5]/op[1]
            else:
                speed = op[10]/60. # deg / min -> deg /sec
            # anything in CAPITALS ddb just made up
            params = [
                {'name': '_diffrn_measurement.method',
                 'sval': types[op[0]],
                 'units': 'n/a'},
                {'name': '_diffrn_measurement.sample_detector_distance',
                 'nval': op[2],
                 'units': 'mm'},
                {'name': '_diffrn_scan.integration_time',
                 'nval': op[1],
                 'units': 's'},
                {'name': '_diffrn_scan.SPEED',
                 'nval': speed,
                 'units': u'\xb0/s'},
                {'name': '_diffrn_scan.id',
                 'sval': n+1,
                 'units': 'n/a'},
                # {'name': '_diffrn_scan.frames',
                # 'nval': self.protonums[n],
                # 'units': 'n/a'},
                {'name': '_diffrn_scan.AXIS_ANGLE_RANGE',
                 'nval': op[3],
                 'units': u'\xb0'},
                {'name': '_diffrn_scan.AXIS_ANGLE_DIRECTION',
                 'nval': op[4],
                 'units': 'n/a'},
                {'name': '_diffrn_scan.AXIS_ANGLE_INCREMENT',
                 'nval': op[5],
                 'units': u'\xb0'},
                {'name': '_diffrn_scan.DETECTOR_AXIS_ANGLE_TWOTHETA',
                 'nval': op[6],
                 'units': u'\xb0'},
                {'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_OMEGA',
                 'nval': op[7],
                 'units': u'\xb0'},
                {'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_CHI',
                 'nval': op[8],
                 'units': u'\xb0'},
                {'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_PHI',
                 'nval': op[9],
                 'units': u'\xb0'},
                ]
            #params['_diffrn_scan.date_start'] '2001-11-18T03:26:42'
            #params['_diffrn_scan.date_end'] '2001-11-18T03:36:45'
            #params['_diffrn_scan.frame_id_start'] mad_L2_000
            #params['_diffrn_scan.frame_id_end'] mad_L2_200
            ops.append(params)
        self.operations = ops
def rationaliseDatasetData(self, protolist):
pnum = len(self.protonums) # num of each scan
if pnum != len(protolist): # file protos for each scan
raise Exception, "mismatch between SAINT framename and integrate_nframes " + fileproto
dsets = []
# Now we have to do a sanity check
# Some folk do wierd things, like doubly including files
# in their SAINT analysis??????? What the ...?
# first parse ...
for i in range(0,pnum):
set = protolist[i]
num = self.protonums[i]
if set[-1]=='sfrm':
type = 'Apex'
sfld = -3
else:
type = 'SMART'
sfld = -1
sid = int(set[sfld-2])
siter = set[sfld]
iter = int(siter)
dsetname = "".join(set[1:sfld-1])
first = iter
last = iter + num
dsets.append( [dsetname, first, last] )
droplist= []
for i in range(0,pnum): # advance loop
iname = dsets[i][0]
ifirst = dsets[i][1]
ilast = dsets[i][2]
for j in range(0,i): # back loop over prev
if dsets[j][0] != iname: continue
if dsets[j][1] > ifirst:
dsets[j][1] = ifirst
if dsets[j][2] < ilast:
dsets[j][2] = ilast
# what about potential gaps in between ???
# too bad so sad.
self._log.debug("dropped %d %s",i, dsets[i] )
dsets[i][0] = ''
droplist.append(i)
# ideally we could test for file existance and cull if not.
if len(droplist)>0:
for drop in droplist:
protolist[drop]= None
return protolist
def buildDatasetsFromAggregatedFiles(self, filesets, priordata=None, purgelist=None):
"""This method builds a datastructure that the XML template method can
iterate over to dump whole datasets of files and parameters.
Made an executive decision to treat each scan as a dataset
For each image, there should be a corresponding \work\image.raw
integrated intensity file. should we archive those derived files too?
"""
self.priordata = priordata # local ref
self.purgelist = purgelist # local ref
# from ntpath import split # for windows
from os.path import split # for windows
import os.path
import re, types
dsetnum_re = re.compile(r".*\D(\d+)$") # trailing digits from
if isinstance(filesets, list):
# uh oh
self._log.info("No frame file data")
return None
keys = filesets.keys()
keys.sort()
dsets = []
# in principle, filesets are aggregated lists of recently
# modified or created files - i.e. files that have changed.
# priordata, on the otherhand, has already been injected
# into ICAT. reinjecting without prior removal is not allowed
self._log.debug("priordata %s", priordata )
for key in keys: # loop over file groups
# for key = SUNDRIES and INDEXING, the split doesn't work
# in particular,there will be no dsetnum
dir, dsetname = split(key) # ntpath split
filelist = filesets[key]
filelist.sort() # ensure ordered filelist
dsetnum = dsetnum_re.match(key)
if not dsetnum:
# handle this dataset some other way???
self._log.debug("building dset: %s", key )
if key == DataFormats.INDEXING:
scan = "indexing"
else:
scan = "unknown"
dset = {'proto':None, 'num':None, 'mach':None,
'dsetname':dsetname,
'description': "additional files",
'params':[] , 'scan':scan }
files = []
for file in filelist:
dir , filename = split(file)
self._log.debug("%s %s", dir, filename )
version = "?"
elements = [ #{'name': 'description',
# 'content': '' },
# {'name': 'datafile_version',
# 'content': '1' },
# {'name': 'datafile_version_comment',
# 'content': '' },
{'name': 'datafile_format',
'content': 'unknown' },
{'name': 'datafile_format_version',
'content': version },
]
self.addCommonFileData(file, elements)
file = {'dir':dir, 'name':filename, 'params': [ ],
'elements': elements }
# probably we need to specify file types etc
# and or remove ICAT unsupported/registered format types
self.purgeCheck(file)
files.append(file)
dset['files'] = files
dsets.append(dset)
continue
dsetnum = dsetnum.group(1)
sid = int(dsetnum) # convert string to number
if sid <1 or sid > len(self.operations):
""" So iether the Apex bruker software went wrong, OR,
more likely, this is older SMART data.
"""
# handle this dataset some other way???
# just as a group of files ??? TBD...
self._log.warning("No Bruker Postgres Scan operation info for %s", key)
# smart has fixed chi, so only omega scan is possible, mainly.
dset = {'proto':None, 'num':None, 'mach':None,
'dsetname':dsetname,
'description': "Scan frame data",
'params':[] , 'scan':"omega scan" }#assumed. no idea what kind
files = []
version = None
file0 = filelist[0]
file1 = filelist[-1]
meta0 = None; meta1 = None
try:
from fileHandlers.sfrmProcess import SFRM
filepath = dir + "/" + file0
f = open(filepath)
sf = SFRM(f)
meta0 = sf.getMetadata()
f.close()
filepath = dir + "/" + file1
f = open(filepath)
sf = SFRM(f)
meta1 = sf.getMetadata()
f.close()
except Exception, e:
self._log.error(e)
if file0 and file1 and file0!= file1 and meta0 and meta1:
version = meta0['VERSION']
width = float(meta0['RANGE'])
delta = float(meta0['INCREME'])
axis = int(meta0['AXIS'])
distance = float(meta0['DISTANC'].split()[0]) #2 values??
wavelen = float(meta0['WAVELEN'].split()[0])
time = float(meta1['ELAPSDR'].split()[0]) #2 value ?? v13
startang = meta0['ANGLES'].split()
endang = meta1['ENDING'].split()
frames = meta1['NUMBER']
range = float(endang[axis-1]) - float(startang[axis-1])
speed = delta /time
types = {1:"None",2:"Omega scan",3:"Phi scan",9:"Still scan",
12:"Phi360 scan",14:"Theta/2Theta scan", 15:"Chi scan" }
params = [
{'name': '_diffrn_measurement.method',
'sval': types[axis],
'units': 'n/a'},
{'name': '_diffrn_measurement.sample_detector_distance',
'nval': distance *10.,
'units': 'mm'},
{'name': '_diffrn_scan.integration_time',
'nval': time,
'units': 's'},
{'name': '_diffrn_scan.SPEED',
'nval': speed,
'units': u'\xb0/s'},
{'name': '_diffrn_scan.id',
'sval': sid,
'units': 'n/a'},
# {'name': '_diffrn_scan.frames',
# 'nval': self.protonums[n],
# 'units': 'n/a'},
{'name': '_diffrn_scan.AXIS_ANGLE_RANGE',
'nval': range,
'units': u'\xb0'},
{'name': '_diffrn_scan.AXIS_ANGLE_DIRECTION',
'nval': -1 if delta <0.0 else +1 ,
'units': 'n/a'},
{'name': '_diffrn_scan.AXIS_ANGLE_INCREMENT',
'nval': delta,
'units': u'\xb0'},
{'name': '_diffrn_scan.DETECTOR_AXIS_ANGLE_TWOTHETA',
'nval': startang[0],
'units': u'\xb0'},
{'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_OMEGA',
'nval': startang[1],
'units': u'\xb0'},
{'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_CHI',
'nval': startang[2],
'units': u'\xb0'},
{'name': '_diffrn_scan.GONIOMETER_AXIS_ANGLE_PHI',
'nval': startang[3],
'units': u'\xb0'},
]
else:
version = "?"
params = { }
for file in filelist:
dir1, filename = split(file)
filepath = dir + "/" + file
"""
if not version: # get version number only for first data file
try:
from fileHandlers.sfrmProcess import SFRM
f = open(filepath)
sf = SFRM(f)
meta = sf.getMetadata()
f.close()
version = meta['VERSION']
except Exception, e:
print e
version = "?"
"""
elements = [ {'name': 'description',
'content': 'frame' },
{'name': 'datafile_version',
'content': '1' },
# {'name': 'datafile_version_comment',
# 'content': '' },
{'name': 'datafile_format',
'content': 'bruker smart' },
{'name': 'datafile_format_version',
'content': version },
]
self.addCommonFileData(filepath,elements)
file = {'dir':dir, 'name':filename, 'params': [ ],
'elements': elements }
# probably we need to specify file types etc
# and or remove ICAT unsupported/registered format types
self.purgeCheck(file)
files.append(file)
dset['params'].append( {'name': '_diffrn_scan.frames',
'nval': len(filelist),
'units': 'n/a'} )
dset['params'].extend(params)
dset['files'] = files
dsets.append(dset)
continue
ops = self.operations[sid-1]
# set = protolist[i]
# if not set: continue # rationalised from existance
# num = self.protonums[i]
num = len(filelist)
set = "ppppp"
#if set[-1]=='sfrm':
if filelist[0].endswith('.sfrm'):
type = 'Apex'
else:
type = 'SMART'
scantype = ops[0]['sval']
ops.append( {'name': '_diffrn_scan.frames',
'nval': len(filelist),
'units': 'n/a'} )
siter = sid
dset = {'proto':set, 'num':len(filelist), 'mach':type,
'dsetname':dsetname,
'description': "Scan frame data",
'params':ops, 'scan':scantype }
self._log.debug(dsetname)
files = []
version = None
for file in filelist:
filepath = dir + "/" + file
filename = file
# dir,filename = split(file)
if type == 'Apex':
i = int(''.join(filename[-9:-5]))
else:
i = int(''.join(filename[-3:]))
if not version: # get version number only for first data file
try:
from fileHandlers.sfrmProcess import SFRM
f = open(filepath)
sf = SFRM(f)
meta = sf.getMetadata()
f.close()
version = meta['VERSION']
except Exception, e:
self._log.error(e)
version = "?"
elements = [ {'name': 'description',
'content': 'frame' },
{'name': 'datafile_version',
'content': '1' },
{'name': 'datafile_version_comment',
'content': '' },
{'name': 'datafile_format',
'content': 'bruker sfrm' },
{'name': 'datafile_format_version',
'content': version },
]
self.addCommonFileData(filepath,elements)
file = {'dir':dir, 'name':filename,
'elements': elements,
'params': [
{'name': '_diffrn_scan_frame.frame_id',
'sval': filename,
'units': 'n/a'},
{'name': '_diffrn_scan_frame.scan_id',
'nval': sid,
'units': 'n/a'},
{'name': '_diffrn_scan_frame.frame_number',
'nval': i,
'units': 'n/a'},
],
}
# Maybe should add extra params here, scan, frame id, angles
#_diffrn_scan_frame.GONIOMETER_OMEGA # at start of scan step
#_diffrn_scan_frame.GONIOMETER_KAPPA # at start of scan step kappa/phi
#_diffrn_scan_frame.GONIOMETER_CHI # at start of scan or chi/phi'
#_diffrn_scan_frame.GONIOMETER_PHI # at start of scan step
#_diffrn_scan_frame.GONIOMETER_PHIP # at start of scan step phi'
#_diffrn_scan_frame.DETECTOR_TWO_THETA # at start of scan step
#_diffrn_scan_frame.DETECTOR_DISTANCE # at start of scan
#_diffrn_scan_frame.SCAN_TYPE # omega or phi i.e what angle changed?
# # don't know about omega/2theta scans. I'm not sure if they are used?
#_diffrn_scan_frame.SCAN_END # end angle after scan.
# # maybe it should just be the +/- incr/decr
# # because there are two ways to get there?
self.purgeCheck(file)
files.append(file)
dset['files'] = files
dset['_diffrn_scan.frame_id_start'] = files[0]['name']
dset['_diffrn_scan.frame_id_end'] = files[-1]['name']
dsets.append(dset)
return dsets
    def buildDatasets(self, protolist):
        """Build one dataset description per SAINT scan.

        Made an executive decision to treat each scan as a dataset.
        For each image, there should be a corresponding \\work\\image.raw
        integrated intensity file. should we archive those derived files too?

        Arguments:
        protolist -- per-scan filename "prototype" sequences; entries may be
                     None after rationalisation and are then skipped.

        Returns a list of dataset dicts (proto/num/mach/dsetname/params/scan,
        plus 'files' and frame_id start/end keys).
        """
        protolist = self.rationaliseDatasetData(protolist)
        pnum = len(self.protonums) # num of each scan
        if pnum != len(protolist): # file protos for each scan
            # NOTE(review): 'fileproto' is not defined in this method; if this
            # branch is ever taken it raises NameError, not this Exception.
            raise Exception, "mismatch between SAINT framename and integrate_nframes " + fileproto
        dsets = []
        for i in range(0,pnum):
            set = protolist[i]
            if not set: continue # rationalised from existance
            num = self.protonums[i]
            # Apex software writes .sfrm frames; otherwise assume older SMART
            # data. sfld = index of the running-sequence field in the proto.
            if set[-1]=='sfrm':
                type = 'Apex'
                sfld = -3
            else:
                type = 'SMART'
                sfld = -1
            sid = int(set[sfld-2])    # scan id embedded in the filename proto
            self._log.debug("%s %d", sid, len(self.operations) )
            if sid>0 and sid<=len(self.operations): # apparently the database is screwed up sometimes.
                ops = self.operations[sid-1]
                scantype = ops[0]['sval']
            else:
                scantype = 'unknown'
                ops = []
            ops.append( {'name': '_diffrn_scan.frames',
                         'nval': num,
                         'units': 'n/a'} )
            siter = set[sfld]       # starting frame number, as zero-padded text
            iter = int(siter)
            lit = len(siter)        # pad width for regenerating frame numbers
            dset = {'proto':set, 'num':num, 'mach':type,
                    'dsetname':"".join(set[1:sfld-1]),
                    'params':ops, 'scan':scantype }
            #for key in ops.keys():
            #    dset[key] = ops[key] # copy
            files = []
            format = "%%0%dd" % lit   # e.g. lit==4 -> "%04d"
            # Regenerate each frame filename by substituting the sequence field.
            for idx in range(iter,iter+num):
                dir = set[0]
                sequ = format % idx
                new = set[1:]
                new[sfld] = sequ
                filename =''.join(new)
                file = {'dir':dir, 'name':filename, 'params':
                    [ {'name': '_diffrn_scan_frame.frame_id',
                    'sval': filename,
                    'units': 'n/a'},
                    {'name': '_diffrn_scan_frame.scan_id',
                    'nval': i,
                    'units': 'n/a'},
                    {'name': '_diffrn_scan_frame.frame_number',
                    'nval': idx,
                    'units': 'n/a'},
                    ]
                    }
                # Maybe should add extra params here, scan, frame id, angles
                #_diffrn_scan_frame.GONIOMETER_OMEGA # at start of scan step
                #_diffrn_scan_frame.GONIOMETER_KAPPA # at start of scan step kappa/phi
                #_diffrn_scan_frame.GONIOMETER_CHI # at start of scan or chi/phi'
                #_diffrn_scan_frame.GONIOMETER_PHI # at start of scan step
                #_diffrn_scan_frame.GONIOMETER_PHIP # at start of scan step phi'
                #_diffrn_scan_frame.DETECTOR_TWO_THETA # at start of scan step
                #_diffrn_scan_frame.DETECTOR_DISTANCE # at start of scan
                #_diffrn_scan_frame.SCAN_TYPE # omega or phi i.e what angle changed?
                # # don't know about omega/2theta scans. I'm not sure if they are used?
                #_diffrn_scan_frame.SCAN_END # end angle after scan.
                # # maybe it should just be the +/- incr/decr
                # # because there are two ways to get there?
                files.append(file)
            dset['files'] = files
            dset['_diffrn_scan.frame_id_start'] = files[0]['name']
            dset['_diffrn_scan.frame_id_end'] = files[-1]['name']
            dsets.append(dset)
        return dsets
def getSamples(dbcnx):
    """Fetch all rows from the Bruker 'samples' table.

    Arguments:
    dbcnx -- open DB connection wrapper exposing query() and result objects

    Returns a list of rows, each a list of column values in the order
    samples_id, sample_name, revision, when_created. Echoes each row to
    stdout as a side effect and exits the whole process on SQL errors.
    """
    try:
        res = dbcnx.query("SELECT samples_id, sample_name, revision, when_created FROM samples ORDER by samples_id")
    except DBConnectSQLSyntaxException, msg:
        print "Select from samples failed\n%s" % msg,
        sys.exit()
    except DBConnectSQLResultException, msg:
        print "Select from samples failed\n%s" % msg,
        sys.exit()
    samples = []
    for i in range(res.ntuples):
        samples.append([])
        for j in range(res.nfields):
            samples[i].append(res.getvalue(i, j))
            try:
                # fixed-width echo; fall back to a plain print for values
                # that refuse to %-format
                sys.stdout.write('%-15s' % res.getvalue(i, j))
            except:
                print res.getvalue(i, j)
        print
    res.clear()
    return samples
def getSaint(dbcnx):
    """Fetch SAINT integration info for every sample.

    Arguments:
    dbcnx -- open DB connection wrapper

    Returns rows of (samples_id, revision, integrate_nframes,
    integrate_framename). Echoes each row to stdout and exits the whole
    process on SQL errors.
    """
    try:
        res = dbcnx.query("SELECT samples_id, revision, integrate_nframes, integrate_framename FROM saint ORDER by samples_id")
    # NOTE(review): the error text says "samples" but this queries the
    # 'saint' table -- message copied from getSamples.
    except DBConnectSQLSyntaxException, msg:
        print "Select from samples failed\n%s" % msg,
        sys.exit()
    except DBConnectSQLResultException, msg:
        print "Select from samples failed\n%s" % msg,
        sys.exit()
    frames = []
    for i in range(res.ntuples):
        frames.append([])
        for j in range(res.nfields):
            frames[i].append(res.getvalue(i, j))
            try:
                # fixed-width echo; fall back to plain print when %-format fails
                sys.stdout.write('%-15s' % res.getvalue(i, j))
            except:
                print res.getvalue(i, j)
        print
    res.clear()
    return frames
def getOperations(dbcnx, SAMPLEID,REVISION):
    """Fetch active scan operations for one sample revision.

    Arguments:
    dbcnx -- open DB connection wrapper
    SAMPLEID, REVISION -- sample key; interpolated into the SQL as quoted
                          strings

    Only operation_types_id 2 and 3 are selected (presumably omega/phi
    scans, matching the type table used elsewhere in this file -- confirm).
    Returns rows as lists of column values; exits the process on SQL errors.
    """
    try:
        res = dbcnx.query("SELECT operation_types_id, time, dx, sweep, direction, width, theta, omega, chi, phi, speed FROM operations WHERE samples_id='%(SAMPLEID)s' AND active='t' AND revision='%(REVISION)s' AND (operation_types_id=2 OR operation_types_id=3)" % {'SAMPLEID':SAMPLEID, 'REVISION':REVISION })
    except DBConnectSQLSyntaxException, msg:
        print "Select from operations failed\n%s" % msg,
        sys.exit()
    except DBConnectSQLResultException, msg:
        print "Select from operations failed\n%s" % msg,
        sys.exit()
    operations = []
    for i in range(res.ntuples):
        operations.append([])
        for j in range(res.nfields):
            operations[i].append(res.getvalue(i, j))
            #try:
            #    sys.stdout.write('%-15s' % res.getvalue(i, j))
            #except:
            #    print res.getvalue(i, j)
        #print
    res.clear()
    return operations
def getStrategyScans(dbcnx, SAMPLEID,REVISION):
    """Fetch active strategy scans for one sample revision.

    Arguments:
    dbcnx -- open DB connection wrapper
    SAMPLEID, REVISION -- sample key. NOTE(review): interpolated with %d
                          (unquoted integers), unlike getOperations which
                          quotes them with %s -- this raises TypeError if
                          the DB returned them as strings; verify callers.

    Returns rows as lists of column values; exits the process on SQL errors.
    """
    try:
        res = dbcnx.query("SELECT operation_types_id, time, dx, sweep, direction, width, theta, omega, chi, phi, start_angle, end_angle FROM strategy_scans WHERE samples_id=%(SAMPLEID)d AND active='t' AND revision=%(REVISION)d AND (operation_types_id=2 OR operation_types_id=3)" % {'SAMPLEID':SAMPLEID, 'REVISION':REVISION })
    except DBConnectSQLSyntaxException, msg:
        print "Select from operations failed\n%s" % msg,
        sys.exit()
    except DBConnectSQLResultException, msg:
        print "Select from operations failed\n%s" % msg,
        sys.exit()
    operations = []
    for i in range(res.ntuples):
        operations.append([])
        for j in range(res.nfields):
            operations[i].append(res.getvalue(i, j))
            #try:
            #    sys.stdout.write('%-15s' % res.getvalue(i, j))
            #except:
            #    print res.getvalue(i, j)
        #print
    res.clear()
    return operations
if __name__ == '__main__':
    # Ad-hoc driver: connect to the Bruker Postgres DB, walk the SAINT
    # rows and exercise BrukerFrameBuilder on each sample.
    #import and possibly override, default config info
    from xmlMapping.dbConnect import *
    from mapConfig import *
    dbcnx = DBConnection(False)
    dbcnx.pqInit(DBHOST, DBPORT, DBNAME, DBUSER, DBPASS)
    dbcnx.connect()
    samples = getSamples(dbcnx)
    frames = getSaint(dbcnx)
    bfb = BrukerFrameBuilder()
    for sample in frames:
        sid = sample[0]
        # NOTE(review): debugging filter -- only sample 3 is processed. If the
        # DB returns ids as strings this comparison never matches; confirm.
        if sid != 3: continue
        rev = sample[1]
        protonums = sample[2]
        fileproto = sample[3]
        fields = "operation_types_id, time, dx, sweep, direction, width, theta, omega, chi, phi, speed"
        try:
            print sid, fileproto
            #ops = getOperations(dbcnx,sid,rev)
            ops = getStrategyScans(dbcnx,sid,rev)
            print fields
            for op in ops:
                print op
            zzz = bfb.deduceFrames(fileproto,protonums, ops)
            if zzz:
                for it in zzz:
                    print " ", it
                yyy = bfb.buildDatasets(zzz)
                #print yyy
                print protonums
            else:
                print None
        except Exception, m:
            # keep going with the next sample; dump the traceback for this one
            import traceback
            traceback.print_exc(file=sys.stdout)
            print sid, m
| Python |
#!/usr/bin/env python
from config import *
import sys
import logging
from errorCollator import *
# Log to a file that sits alongside this script.
LOGFILENAME = __file__ + ".log"
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    filename = LOGFILENAME, level=logging.INFO)
# Global collector of errors; emailed out at the end of the run (see __main__).
hiccups = ErrorCollator()
from pdbWrapper import PDBWS
from ZSI import FaultException
from xmlMapping.dbConnect import *
class UserWatcher:
def __init__(self):
self._log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
@staticmethod
def oracleEscape(self,strn):
if not strn: return ''
if strn=='none': return ''
if strn=='None': return ''
out = []
for char in strn:
if char =="'":
out.append("'")
out.append(char)
else:
out.append(char)
ret = "".join(out)
return ret
def cleanup(self):
if self.pdbcnx:
self.pdbcnx.logout()
self._log.info("Closed CSAF ProposalDB connection")
if self.dbcnx:
self.dbcnx.finish()
self._log.info("Closed ICAT DB connection")
def main(self):
# ping proposaldb server, get all new users since given date/time,
self._log.info("Connecting CSAF ProposalDB")
self.pdbcnx = PDBWS(PDBWSURL,PDBWSUSER,PDBWSPASS)
try:
date = (2003,1,1, 0,0,1, 0,0,0) # ZSI needs 9 elem tuple for dates
newUsers = self.pdbcnx.getAllScientistsSince(date)
scientists = newUsers['scientists']
for scientist in scientists:
self._log.debug(" %s %s %s %s ",
scientist['first_Names'],scientist['last_Name'],
scientist['login_ID'],scientist['email'] )
self._log.info(" %d new scientists since %s", len(scientists) , date)
except FaultException, e:
self._log.critical("-------------- %s ",e)
hiccups.add(Error(__name__,__file__,e) )
return 1
#populate remote ICAT FACILITY_USER table with these users
self._log.info("Connecting: ICAT Oracle Database")
bypass=False
try:
self.dbcnx = DBConnection(bypass)
self.dbcnx.cxInit(ICATDBHOST, ICATDBPORT, ICATDBNAME, ICATDBUSER, ICATDBPASS)
self.dbcnx.connect() # establish tcp communication
except Exception, e:
self._log.critical(e)
self._log.critical( "You may need to port forward over SSH to tunnel through firewall")
self._log.critical( "e.g. ssh dataminx -L 1521:localhost:1521")
hiccups.add(Error(__name__,__file__,e) )
hiccups.add(Error(__name__,__file__,
"Potentialy the database server " + ICATDBHOST + " is down, or we have misconfigured connection settings (in config.py).") )
return 2
self._log.info("Remote connections established" )
addingOnlyOneNewUser=0
for scientist in scientists:
try:
DBSCHEMA="ICAT"
res=self.dbcnx.query("""SELECT * FROM FACILITY_USER WHERE FACILITY_USER_ID='%(name)s' OR FACILITY_USER_ID='%(email)s'""" % {'DBSCHEMA' : DBSCHEMA,'name': scientist['login_ID'], 'email': scientist['email'] } )
except Exception, msg:
#import traceback
#traceback.print_exc(file=sys.stdout)
self._log.critical("Select from facility_user failed\n%s" % msg)
hiccups.add(Error(__name__,__file__,msg) )
return 3
if not res or res.ntuples > 1:
self._log.debug("uhoh many results")
self._log.debug(" %s %s %s %s ",
scientist['first_Names'],scientist['last_Name'],
scientist['login_ID'],scientist['email'] )
continue
elif res.ntuples ==1:
continue
for row in res:
self._log.debug(row)
self._log.debug(" %s %s %s %s ",
scientist['first_Names'],scientist['last_Name'],
scientist['login_ID'],scientist['email'] )
else: # res.ntuples <=0
self._log.info("no ICAT entry exists for:")
self._log.info(" %s %s %s %s ",
scientist['first_Names'],scientist['last_Name'],
scientist['login_ID'],scientist['email'] )
addingOnlyOneNewUser += 1
# if addingOnlyOneNewUser > 1:
# print "skipping ICAT add of" , scientist['login_ID']
# continue
# print scientist
first = scientist['first_Names']
parts = first.split(" ")
init = []
middle = ''
for part in parts:
init.append(part[0].upper())
init.append(".")
initial = "".join(init)
if len(first)>1:
middle = " ".join(parts[1:])
first = parts[0]
try:
query= """INSERT INTO FACILITY_USER (FACILITY_USER_ID, FEDERAL_ID,
TITLE, INITIALS, FIRST_NAME, MIDDLE_NAME, LAST_NAME,
MOD_TIME, MOD_ID, CREATE_TIME, CREATE_ID,
FACILITY_ACQUIRED, DELETED) VALUES
('%(name)s', '%(name)s', '%(title)s', '%(initials)s', '%(first)s',
'%(middle)s', '%(last)s',sysdate,'damian',sysdate,'damian', 'Y','N')""" % \
{'DBSCHEMA' : DBSCHEMA,
'name': oracleEscape(scientist['login_ID']),
'email': oracleEscape(scientist['email']),
'title': oracleEscape(scientist['title']),
'initials':oracleEscape(initial),
'first': oracleEscape(first),
'middle': oracleEscape(middle),
'last': oracleEscape(scientist['last_Name']),
} # )
self._log.debug(query)
########
res=self.dbcnx.query(query)
self._log.info("Added new ICAT user %s", scientist['login_ID'] )
########
except Exception, msg:
self._log.critical("Failed to inject new facility_user: %s\n" % scientist['email'] )
self._log.critical(msg)
hiccups.add(Error(__name__,__file__,msg) )
return 4
self.dbcnx.query("commit")
self._log.info(" %d new ICAT additions", addingOnlyOneNewUser )
if __name__ == '__main__':
    watcher = UserWatcher()
    try:
        rv = watcher.main()
    except Exception, msg:
        watcher._log.critical(msg)
    # Always release remote connections, even after a failure above.
    watcher.cleanup()
    # Build this host's fully-qualified name for the alert sender address.
    import socket
    thisHost = socket.gethostbyaddr(socket.gethostname())
    fqdn = thisHost[1][0]
    if hiccups.hasErrors():
        hiccups.dump() # stdout - should go to cron post run email
        hiccups.emailAlert("userWatcher@"+fqdn, EMAIL_ALERT_LIST, EMAIL_SMTP_HOST,
                       "New ICAT user addition errors " + __file__ )
| Python |
import urllib
import webapp2
from google.appengine.ext import blobstore,db
from google.appengine.ext.blobstore import BlobInfo
from google.appengine.ext.webapp import blobstore_handlers
class FileRecord(db.Model):
	# Datastore entity: one row per uploaded file, holding a reference to
	# the blob stored in the blobstore.
	blob = blobstore.BlobReferenceProperty()
class MainHandler(webapp2.RequestHandler):
	"""Render the file listing page together with an upload form."""
	def get(self):
		upload_url = blobstore.create_upload_url('/upload')
		# Assemble the page as a list of fragments and join once at the end.
		parts = ['<html><body>']
		records = FileRecord.all()
		if records.count():
			parts.append('<table border="0">')
			for record in records:
				blob = record.blob
				created = str(blob.creation)
				record_id = record.key().id()
				size_label = str(round(float(blob.size) / 1024 / 1024,3)) + ' Mb'
				parts.append('<tr><td>%s</td><td>%s</td><td><a href="/get/%s">' % (created,size_label,record_id))
				parts.append('%s</a></td><td><a href="/delete/%s">' % (blob.filename,record_id))
				parts.append('Delete</a></td></tr>')
			parts.append('</table><br>')
		parts.append('<form action="%s" method="POST" enctype="multipart/form-data">' % upload_url)
		parts.append("""Upload File: <input type="file" name="file"><br> <input type="submit" name="submit"
			value="Submit"></form></body></html>""")
		self.response.out.write(''.join(parts))
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
	"""Persist an uploaded blob by attaching it to a new FileRecord."""
	def post(self):
		uploaded = self.get_uploads('file')
		# Only the first uploaded blob is kept, matching the single file input.
		FileRecord(blob = uploaded[0]).put()
		self.redirect('/')
class GetHandler(blobstore_handlers.BlobstoreDownloadHandler):
	"""Serve a stored blob as a download, keyed by FileRecord id."""
	def get(self, blob_key):
		record_id = int(str(urllib.unquote(blob_key)))
		record = FileRecord.get_by_id(record_id)
		# save_as forces the browser to download with the original filename.
		self.send_blob(record.blob, save_as=record.blob.filename)
class DeleteHandler(webapp2.RequestHandler):
	"""Delete a stored file (blob and datastore record) by record id."""
	def get(self,blob_key):
		try:
			record_id = int(urllib.unquote(blob_key))
			record = FileRecord.get_by_id(record_id)
			record.blob.delete()
			record.delete()
		except (ValueError, AttributeError):
			# Non-numeric or unknown id: report 404 and STOP.
			# BUG FIX: the original fell through to the unconditional
			# redirect below, which clobbered the 404 status; it also used
			# a bare 'except:' that hid real datastore failures.
			self.error(404)
			return
		self.redirect('/')
# WSGI application: routes map the listing page, the blobstore upload
# endpoint, and per-record download/delete handlers (the trailing path
# segment is the FileRecord id).
app = webapp2.WSGIApplication(
    [('/', MainHandler),
     ('/upload', UploadHandler),
     ('/delete/([^/]+)?', DeleteHandler),
     ('/get/([^/]+)?', GetHandler),
    ], debug=False)
#===istalismanplugin===
# -*- coding: utf-8 -*-
# Registers Plugin
def register_plugin_join(groupchat, nick, afl, role):
	# Announce a user joining the conference.
	if time.time()-INFO['start']>10: # 10-second startup "pause" so the bot ignores the initial presence flood
		msg(groupchat, u'К нам присоединился '+nick+u', он '+afl+'/'+role)
def register_plugin_leave(groupchat, nick, reason, code):
	# Announce a user leaving the conference, with reason and status code.
	if time.time()-INFO['start']>10: # 10-second startup "pause" so the bot ignores the initial presence flood
		msg(groupchat, u'Нас покинул '+nick+u', по причине '+reason+u' с кодом: '+str(code))
def register_plugin_message(raw, type, source, parameters):
	# Echo every conference message (handler registration is disabled below).
	if time.time()-INFO['start']>10: # 10-second startup "pause" so the bot ignores the initial backlog
		groupchat = source[1] # source is a 3-tuple; element 1 is the conference...
		nick = source[2] # ...and element 2 is the sender's nick
		msg(groupchat, u'Юзер '+nick+u', написал сообщение '+parameters+u' типа '+type)
register_join_handler(register_plugin_join) # register the handler for joins to a conference
register_leave_handler(register_plugin_leave) # register the handler for leaving a conference
#register_message_handler(register_plugin_message) # handler for every conference message -- deliberately disabled
| Python |
from distutils.core import setup
import os
import fileprocessor
currentFileDirectory = os.path.dirname(__file__)

# Use the README next to this file as the long description shown on PyPI.
with open(os.path.join(currentFileDirectory, "README"), "r") as f:
	readme = f.read()

setup(
	name="fileprocessor",
	version=fileprocessor.VERSION,
	description="Harness that makes bulk processing files quick and easy",
	long_description=readme,
	author="Donald Whyte",
	author_email="donaldwhyte0@gmail.com",
	url="http://code.google.com/p/fileprocessor",
	classifiers=[
		# BUG FIX: a missing comma previously concatenated the first two
		# entries into one invalid classifier; the trove names were also
		# malformed ("3 - Alpha Development Status", "Python 3" without
		# the ':: 3' separator) and would be rejected by PyPI.
		"Development Status :: 3 - Alpha",
		"Intended Audience :: Developers",
		"Programming Language :: Python :: 3",
		"Programming Language :: Python :: 3.2",
		"Programming Language :: Python :: 3.3",
	],
	keywords="batch file processing generic filesystem component lightweight utility",
	license="MIT",
	packages=("fileprocessor",),
	data_files=[ (".", ["LICENCE"]) ]
)
import sys
import os
import unittest
def runTests(testDirectory, projectDirectory = None, testName = None):
	"""Run unit tests from files with filenames of the form test_*.py.

	Arguments:
	testDirectory -- Absolute path to directory containing all the tests

	Keyword arguments:
	projectDirectory -- Path to directory containing project to test. If
	                    None, then no extra paths will be added to the
	                    system path for module imports. (default: None)
	testName -- Name of specific test to run. If None, then all the test
	            files within the test directory will used. (default: None)
	"""
	# Remember the original value (None when unset) so the environment can
	# be restored no matter what happens during the test run.
	oldValue = os.environ.get("PROJECT_ROOT_DIRECTORY")
	try:
		# If test name provided, search for specific test;
		# otherwise select all tests in the directory
		if testName:
			searchPattern = "test_{}.py".format(testName)
		else:
			searchPattern = "test_*.py"
		# Before processing tests, ensure desired project directory has been set
		if projectDirectory:
			os.environ["PROJECT_ROOT_DIRECTORY"] = projectDirectory
		# Perform the search for the desired tests and run them all
		suites = unittest.defaultTestLoader.discover(testDirectory, pattern=searchPattern)
		mainSuite = unittest.TestSuite(suites)
		unittest.TextTestRunner(stream=sys.stdout).run(mainSuite)
	finally:
		# BUG FIX: the variable was previously restored only when it had a
		# prior value, so a run that SET it (oldValue None) leaked it into
		# the caller's environment. Remove it in that case instead.
		if oldValue is not None:
			os.environ["PROJECT_ROOT_DIRECTORY"] = oldValue
		elif "PROJECT_ROOT_DIRECTORY" in os.environ:
			del os.environ["PROJECT_ROOT_DIRECTORY"]
def main(projectDirectory, testName):
	"""Entry point of test script.

	Uses this file's own directory as the root for test discovery.
	"""
	scriptDirectory = os.path.dirname(__file__)
	runTests(os.path.abspath(scriptDirectory), projectDirectory, testName)
if __name__ == "__main__":
	# Parse command line arguments
	# Ensure arguments after program name are EVEN so it's all pairs
	if len(sys.argv) == 0:
		# NOTE(review): sys.argv always contains at least the program name,
		# so this branch is effectively dead.
		arguments = []
	else:
		arguments = sys.argv[1:]
	if (len(arguments) % 2) == 1: # if odd
		arguments = arguments[:-1] # take last arg off
	# Look in pairs for desired information
	projectDirectory = None
	testName = None
	for i in range(0, len(arguments), 2):
		if arguments[i] == "-d":
			projectDirectory = arguments[i + 1]
		elif arguments[i] == "-t":
			testName = arguments[i + 1]
	# Run program
	main(projectDirectory, testName)
"""Recursviely searches through directory and displays any image URLs
inside web pages.
Created to provide an example of how to use the fileprocessor module.
"""
import sys
import re
from fileprocessor import FileProcessor, searchers, filterers, extractors
class ImageURLExtractor(extractors.TextExtractor):
	"""Extract image URLs from a web page."""

	# Pre-compiled regexes used to locate image references.
	# BUG FIX: the original <img> pattern required the XHTML-style '/>'
	# terminator and so missed plain HTML <img ...> tags; its '.*?' could
	# also wander across tag boundaries. '[^>]*?' keeps each match inside
	# one tag and '/?>' accepts both closing forms.
	IMAGE_TAG_REGEX = re.compile( r"<img[^>]*?src=([\"\'])(.*?)\1[^>]*?/?>" )
	CSS_URL_REGEX = re.compile( r"url\( ?([\"\'])(.*?)\1 ?\)" )

	def extractFromString(self, data):
		"""Return list of found image URLs in a web page.

		Arguments:
		data -- The full web page to scan as a string
		"""
		# Collect matches from HTML <img> tags and CSS url() references
		matches = list(self.IMAGE_TAG_REGEX.finditer(data))
		matches += list(self.CSS_URL_REGEX.finditer(data))
		# Group 2 is the URL itself (group 1 is the quote character)
		imageURLs = []
		for match in matches:
			imageURLs.append( match.group(2) )
		return imageURLs
def main(directoriesToSearch, showAll):
	"""Run image resource extraction process.

	Arguments:
	directoriesToSearch -- List containing all of the
	                       directories containing web page
	                       soure code that need to be scanned
	showAll -- If set to True, then all scanned files will
	           be displayed, even if no image URLs were
	           extracted from them. This means if this is
	           False, then any files where no data was
	           found are omitted.
	"""
	# Build components to use for file processor
	searcher = searchers.FileSearcher(True)
	filterer = filterers.ExtensionFilterer( ["html", "htm", "shtml", "php", "css", "js"] )
	extractor = ImageURLExtractor()
	processor = FileProcessor(searcher, [ filterer ], extractor)
	# Perform the URL extraction and display findings.
	# BUG FIX: the parameter was previously named 'directory' and never
	# used -- the body read the module-level global 'directoriesToSearch'
	# instead, so importing this module and calling main() failed.
	extractedURLs = processor.process(directoriesToSearch)
	for filename, imageURLs in extractedURLs.items():
		# If nothing was found in this file and the
		# appropriate flag is set, skip this file
		if len(imageURLs) == 0 and not showAll:
			continue
		imageURLLines = ""
		for url in imageURLs:
			imageURLLines += "\t{}\n".format(url)
		# Remove last newline
		if len(imageURLLines) > 0:
			imageURLLines = imageURLLines[:-1]
		print("{}\n{}".format(filename, imageURLLines))
if __name__ == "__main__":
	# Parse command line arguments
	if len(sys.argv) < 3:
		sys.exit("Usage: python {} {{ -d <directory> }} {{-a}}".format(sys.argv[0]))
	directoriesToSearch = [] # store all directories requested
	for i in range(1, len(sys.argv)):
		# NOTE(review): rebinding 'i' inside a range-for does NOT skip the
		# next iteration; harmless here unless a directory is literally "-d".
		if sys.argv[i] == "-d" and i < (len(sys.argv) - 1):
			i += 1 # go to next argument, the actual directory
			directoriesToSearch.append( sys.argv[i] )
	showAll = ("-a" in sys.argv)
	if len(directoriesToSearch) == 0:
		sys.exit("No directories to search specified")
	main(directoriesToSearch, showAll)
"""Generates checksums for every file within a directory (recursively),
displaying those checksums through stdout.
Created to provide an example of how to use the fileprocessor module.
"""
import sys
import hashlib
from fileprocessor import FileProcessor, searchers, filterers, extractors
class ChecksumGenerator(extractors.ByteStreamExtractor):
	"""Generates SHA-1 checksums from byte streams, reading block-by-block."""

	def __init__(self, blockSize = 65536):
		"""Construct instance of ChecksumGenerator.

		Keyword arguments:
		blockSize -- Amount of data to read in at once when
		             generating checksum. Should be fairly
		             low if the machine does not have much
		             memory. (default: 65536)
		"""
		self.blockSize = blockSize

	def extractFromStream(self, data):
		"""Generate and return the SHA-1 hex digest of a stream of byte data.

		Arguments:
		data -- Byte stream containing data to generate checksum for
		"""
		digest = hashlib.sha1()
		# Feed the hash one block at a time so large files never have to
		# be held in memory all at once.
		while True:
			chunk = data.read(self.blockSize)
			if not chunk:
				break
			digest.update(chunk)
		return digest.hexdigest()
def main(directoriesToSearch):
	"""Run checksum generation process.

	Arguments:
	directoriesToSearch -- List containing all of the
	                       directories containing files
	                       to generate checksums for
	"""
	# Wire the searcher and checksum extractor into a processor (no filterers).
	processor = FileProcessor(searchers.FileSearcher(True), [], ChecksumGenerator())
	# Generate and display a checksum for every file found.
	for path, digest in processor.process(directoriesToSearch).items():
		print("{}\n\t{}".format(path, digest))
if __name__ == "__main__":
	# Parse command line arguments
	if len(sys.argv) < 3:
		sys.exit("Usage: python {} {{-d <directory> }}".format(sys.argv[0]))
	directoriesToSearch = [] # store all directories requested
	for i in range(1, len(sys.argv)):
		# NOTE(review): rebinding 'i' inside a range-for does NOT skip the
		# next iteration; harmless here unless a directory is literally "-d".
		if sys.argv[i] == "-d" and i < (len(sys.argv) - 1):
			i += 1 # go to next argument, the actual directory
			directoriesToSearch.append( sys.argv[i] )
	if len(directoriesToSearch) == 0:
		sys.exit("No directories to search specified")
	main(directoriesToSearch)
"""Contains all built-in Filterer classes."""
import collections
import collections.abc
import fnmatch
import os

from fileprocessor.abstracts import Filterer
class ExcludeListFilterer(Filterer):
	"""Filterer which removes files matching any of a set of glob patterns."""

	def __init__(self, excludeList):
		"""Construct instance of ExcludeListFilterer.

		Arguments:
		excludeList -- List of glob patterns which will be used as
		               as a black list to remove files in the list
		               from the final listing.

		Raises TypeError if excludeList is not iterable.
		"""
		# FIX: collections.Iterable was removed in Python 3.10; the ABCs
		# live in collections.abc.
		if not isinstance(excludeList, collections.abc.Iterable):
			raise TypeError("Exclusion list should be an iterable collection of strings")
		self.excludeList = excludeList

	def filter(self, fileListing):
		"""Filter file listing based on stored glob patterns.

		Returns NEW list containing the files which passed the filter;
		the input listing is left untouched.

		Arguments:
		fileListing -- A list containing the absolute paths of the
		               files to filter.

		Raises TypeError if fileListing is not iterable.
		"""
		if not isinstance(fileListing, collections.abc.Iterable):
			raise TypeError("List of files to filter should be an iterable collection of strings")
		# Keep only paths matching none of the exclusion patterns.
		# (Replaces the original delete-while-scanning index loop, which
		# repeatedly shifted list elements and was easy to get wrong.)
		return [path for path in fileListing
		        if not any(fnmatch.fnmatch(path, pattern)
		                   for pattern in self.excludeList)]
class IncludeListFilterer(Filterer):
	"""Filterer which filters files that don't match glob patterns."""

	def __init__(self, includeList):
		"""Construct instance of IncludeListFilterer.

		Arguments:
		includeList -- List of glob patterns which will be used as
		               as a white list to remove files NOT in the list
		               from the final listing.

		Raises TypeError if includeList is not iterable.
		"""
		# FIX: collections.Iterable was removed in Python 3.10; the ABCs
		# live in collections.abc.
		if not isinstance(includeList, collections.abc.Iterable):
			raise TypeError("Inclusion list should be an iterable collection of strings")
		self.includeList = includeList

	def filter(self, fileListing):
		"""Filter file listing based on stored glob patterns.

		Returns NEW list containing the files which passed the filter;
		the input listing is left untouched.

		Arguments:
		fileListing -- A list containing the absolute paths of the
		               files to filter.

		Raises TypeError if fileListing is not iterable.
		"""
		if not isinstance(fileListing, collections.abc.Iterable):
			raise TypeError("List of files to filter should be an iterable collection of strings")
		# Keep any file which matches at least one whitelist pattern.
		return [path for path in fileListing
		        if any(fnmatch.fnmatch(path, pattern)
		               for pattern in self.includeList)]
class ExtensionFilterer(Filterer):
	"""Filterer which keeps only files whose extension is whitelisted."""

	def __init__(self, allowedExtensions):
		"""Construct instance of ExtensionFilterer.

		Arguments:
		allowedExtensions -- List of allowed extensions (e.g. ["txt", "py"]).
		                     Any files which don't have these extensions
		                     will be removed.

		Raises TypeError if allowedExtensions is not iterable.
		"""
		# FIX: collections.Iterable was removed in Python 3.10; the ABCs
		# live in collections.abc.
		if not isinstance(allowedExtensions, collections.abc.Iterable):
			raise TypeError("Allowed extension list should be an iterable collection of strings")
		self.allowedExtensions = allowedExtensions

	def filter(self, fileListing):
		"""Filter file listing based on stored extension whitelist.

		Returns NEW list containing the files which passed the filter;
		the input listing is left untouched.

		Arguments:
		fileListing -- A list containing the absolute paths of the
		               files to filter.

		Raises TypeError if fileListing is not iterable.
		"""
		if not isinstance(fileListing, collections.abc.Iterable):
			raise TypeError("List of files to filter should be an iterable collection of strings")
		newListing = []
		for path in fileListing:
			# splitext yields e.g. ".txt"; drop the dot before comparing.
			extension = os.path.splitext(path)[1][1:]
			if extension in self.allowedExtensions:
				newListing.append(path)
		return newListing
"""Contains the abstract classes for the major components of the library."""
class Searcher:

    """Abstract component: locates files for processing.

    Concrete subclasses decide where and how to look.
    """

    def search(self, rootDirectory):
        """Search for files and return a list of their absolute paths.

        Arguments:
        rootDirectory -- Root directory to start searching in.
        """
        raise NotImplementedError
class Filterer:

    """Abstract component: narrows down a file listing by some criteria."""

    def filter(self, fileListing):
        """Return a NEW list holding only the files that pass the filter.

        NOTE: implementations must not alter the list they are given.

        Arguments:
        fileListing -- A list containing the absolute paths of the
                       files to filter.
        """
        raise NotImplementedError
class Extractor:

    """Abstract component: pulls data out of a single file.

    What is returned depends entirely on the concrete subclass.
    """

    def extract(self, filename):
        """Extract and return data from the file with the given filename.

        Arguments:
        filename -- Name of the file to extract data from
        """
        raise NotImplementedError
"""Contains all built-in Extractor classes."""
from fileprocessor.abstracts import Extractor
class ByteExtractor(Extractor):

    """Extractor which loads a binary file fully before processing it.

    For large files consider ByteStreamExtractor instead, which reads
    incrementally and avoids holding the whole file in memory at once.
    """

    def extract(self, filename):
        """Read the whole binary file and hand its bytes to extractFromBytes().

        The return value is whatever the concrete subclass's
        extractFromBytes() produces.

        Arguments:
        filename -- Name of the file to extract data from.
                    TypeError is raised if this is not a string.
        """
        if not isinstance(filename, str):
            raise TypeError("Filename must be a string")
        # Binary mode; the full contents are read in one go.
        with open(filename, "rb") as handle:
            contents = handle.read()
        return self.extractFromBytes(contents)

    def extractFromBytes(self, data):
        """Extract information from byte data and return it.

        Subclasses must override this; the base implementation raises
        NotImplementedError.

        Arguments:
        data -- a bytes object that contains all of the data to process
        """
        raise NotImplementedError
class ByteStreamExtractor(Extractor):

    """Extractor which streams a binary file instead of slurping it.

    The open stream is handed to extractFromStream(), so subclasses can
    consume the file incrementally and keep memory usage flat.
    """

    def extract(self, filename):
        """Open the file in binary mode and delegate to extractFromStream().

        The return value is whatever the concrete subclass's
        extractFromStream() produces.

        Arguments:
        filename -- Name of the file to extract data from
                    TypeError is raised if this is not a string.
        """
        if not isinstance(filename, str):
            raise TypeError("Filename must be a string")
        # The stream stays open for the duration of the extraction only.
        with open(filename, "rb") as stream:
            return self.extractFromStream(stream)

    def extractFromStream(self, stream):
        """Extract information from a byte stream and return it.

        Subclasses must override this; the base implementation raises
        NotImplementedError.

        Arguments:
        stream -- a binary stream that can be used to access data
                  to process
        """
        raise NotImplementedError
class TextExtractor(Extractor):

    """Extractor which loads a text file fully before processing it.

    For large files consider TextStreamExtractor instead, which reads
    incrementally and avoids holding the whole file in memory at once.
    """

    def extract(self, filename):
        """Read the whole text file and hand its contents to extractFromString().

        The return value is whatever the concrete subclass's
        extractFromString() produces.

        NOTE(review): the file is opened with the platform default
        encoding -- confirm whether an explicit encoding is wanted.

        Arguments:
        filename -- Name of the file to extract data from
                    TypeError is raised if this is not a string.
        """
        if not isinstance(filename, str):
            raise TypeError("Filename must be a string")
        with open(filename, "r") as handle:
            contents = handle.read()
        return self.extractFromString(contents)

    def extractFromString(self, data):
        """Extract information from text data and return it.

        Subclasses must override this; the base implementation raises
        NotImplementedError.

        Arguments:
        data -- a string that contains all of the data to process
        """
        raise NotImplementedError
class TextStreamExtractor(Extractor):

    """Extractor which streams a text file instead of slurping it.

    The open stream is handed to extractFromStream(), so subclasses can
    consume the file incrementally and keep memory usage flat.
    """

    def extract(self, filename):
        """Open the file in text mode and delegate to extractFromStream().

        The return value is whatever the concrete subclass's
        extractFromStream() produces.

        Arguments:
        filename -- Name of the file to extract data from
                    TypeError is raised if this is not a string.
        """
        if not isinstance(filename, str):
            raise TypeError("Filename must be a string")
        # The stream stays open for the duration of the extraction only.
        with open(filename, "r") as stream:
            return self.extractFromStream(stream)

    def extractFromStream(self, stream):
        """Extract information from a text stream and return it.

        Subclasses must override this; the base implementation raises
        NotImplementedError.

        Arguments:
        stream -- a text stream that can be used to access data
                  to process
        """
        raise NotImplementedError
"""Main import for fileprocessing library. Contains main class for
processing files."""
import sys
import os
import collections
# Constant which specifies which version of fileprocessor this is
VERSION = "0.1"
# Done so importing modules from library is easier
def getSubModulesAndPackages():
    """Return list of all modules and packages contained within current package.

    A name ending in ".py" is reported as a module (extension stripped);
    any sub-directory is assumed to be a subpackage.
    """
    modulesToImport = []
    # Bug fix: os.path.dirname(__file__) can be "" when the module was
    # loaded via a bare relative path, and os.listdir("") raises
    # FileNotFoundError.  Normalising with abspath first is robust.
    directoryName = os.path.dirname(os.path.abspath(__file__))
    for filename in os.listdir(directoryName):
        # Ignore filenames in exclude list
        if filename in ["__pycache__"]:
            continue
        # If filename ends with .py, we assume it's a Python module.
        # NOTE(review): "__init__.py" is reported as module "__init__";
        # it should probably be excluded too -- confirm before changing.
        elif filename.endswith(".py"):
            modulesToImport.append(filename[:-3])
        # If filename is actually a directory, we assume it's a subpackage
        else:
            absolutePath = os.path.abspath(os.path.join(directoryName, filename))
            if os.path.isdir(absolutePath):
                modulesToImport.append(filename)
    return modulesToImport
__all__ = getSubModulesAndPackages()
class FileProcessor:

    """Harness for searching, filtering and extracting data from files."""

    def __init__(self, searcher, filterers, extractor):
        """Construct new instance of FileProcessor.

        Arguments:
        searcher  -- Object which searches the file system and returns
                     a list of all the files found. Should be an instance
                     of Searcher.
        filterers -- List of objects which filter the file listing based
                     on some criteria. Objects should be instances of
                     Filterer.
        extractor -- Object which processes a single file and returns the
                     desired data from it. Should be an instance of
                     Extractor.
        """
        self.searcher = searcher
        self.filterers = filterers
        self.extractor = extractor

    def process(self, rootDirectories):
        """Process one or more directories of files in some way.

        Return a dictionary mapping the absolute path of each processed
        file to the data extracted from it.

        Exactly how it searches for files to process and what data
        it extracts from the files is determined by the objects
        given to the FileProcessor instance in the constructor.

        Arguments:
        rootDirectories -- Either a string containing the path to one
                           directory or a list containing multiple
                           directories to process
        """
        # collections.Iterable was removed in Python 3.10; the ABC lives
        # in collections.abc (imported locally so the module-level import
        # block is left untouched).
        from collections.abc import Iterable
        if isinstance(rootDirectories, str):  # wrap single directory in a list
            rootDirectories = [rootDirectories]
        elif not isinstance(rootDirectories, Iterable):
            raise TypeError("Path to root directory must be a string or collection of strings")
        # Now process each directory, keeping a global dictionary of extracted data
        data = {}
        for directory in rootDirectories:
            # If directory doesn't exist, report the issue and skip to the next one
            if not os.path.isdir(directory):
                print("Directory '{}' does not exist".format(directory), file=sys.stderr)
                continue
            # Search for the files in the directory, filter the resultant list
            # and then extract data from the files
            fileListing = self.searcher.search(directory)
            for filterer in self.filterers:
                fileListing = filterer.filter(fileListing)
            for path in fileListing:
                data[path] = self.extractor.extract(path)
        return data
"""Contains all built-in Searcher classes."""
import sys
import os
import collections
from .abstracts import Searcher
class FileSearcher(Searcher):

    """Searches the filesystem for files, either recursively or non-recursively."""

    def __init__(self, recurse=False):
        """Construct instance of FileSearcher.

        Keyword arguments:
        recurse -- If True, the searcher also descends into the given
                   directory's sub-directories; otherwise only its
                   direct children are considered.
        """
        self.recurse = recurse

    def search(self, rootDirectory):
        """Return a list containing the absolute paths of all files found.

        All files found are returned, regardless of name or type.

        Arguments:
        rootDirectory -- Path to directory to start searching from
        """
        if not isinstance(rootDirectory, str):
            raise TypeError("Path to root directory to start search from should be a string")
        if not os.path.isdir(rootDirectory):
            raise IOError("Root directory '{}' does not exist".format(rootDirectory))
        if self.recurse:
            # Walk the whole tree, flattening every directory's files.
            return [os.path.abspath(os.path.join(parent, name))
                    for parent, _, files in os.walk(rootDirectory)
                    for name in files]
        # Non-recursive: direct children only, keeping regular files.
        candidates = (os.path.join(rootDirectory, name)
                      for name in os.listdir(rootDirectory))
        return [os.path.abspath(path) for path in candidates
                if os.path.isfile(path)]
class CompositeSearcher(Searcher):

    """Uses multiple searchers and combines their findings into a single listing of resources."""

    def __init__(self, searchers):
        """Construct instance of CompositeSearcher.

        Arguments:
        searchers -- Iterable of child Searcher objects to delegate to.
        """
        # collections.Iterable was removed in Python 3.10; use the ABC
        # from collections.abc (imported locally so the module-level
        # import block is left untouched).
        from collections.abc import Iterable
        if not isinstance(searchers, Iterable):
            raise TypeError("Collection of searchers to use must be an iterable object")
        self.searchers = searchers

    def search(self, rootDirectory):
        """Pass given directory to child searchers and combine their results.

        If a searcher raises an exception or returns an invalid value,
        then that searcher is simply ignored during the run. The findings
        of the other searchers will still be returned.

        NOTE(review): unlike FileSearcher this returns a set, not a
        list -- callers relying on the abstract search() contract should
        be aware of that.

        Arguments:
        rootDirectory -- Path to directory to start searching from
        """
        # All the resources found by the searchers. This is a set to
        # get rid of duplicate values.
        allFindings = set()
        for searcher in self.searchers:
            try:
                # Get child searcher's findings as a set and merge it
                # with the set containing all the findings.
                findings = set(searcher.search(rootDirectory))
                allFindings = allFindings.union(findings)
            # Bug fix: catch Exception, not BaseException -- swallowing
            # KeyboardInterrupt/SystemExit here made the search loop
            # impossible to interrupt cleanly.
            except Exception as e:
                print("Error searching for resources: {}".format(e), file=sys.stderr)
        return allFindings
#!/usr/bin/python
import os, sys
sys.path.append(os.getcwd() + '/lib')
sys.path.append(os.getcwd() + '/lib/general')
sys.path.append(os.getcwd() + '/lib/gtk')
sys.path.append(os.getcwd() + '/lib/c')
# my libraries
from CFileRunnerGUI import CFileRunnerGUI
def real_main():
    """Create the FileRunner GUI application and enter its main loop."""
    CFileRunnerGUI().run()
def profile_main():
    # Profiling wrapper around real_main(): runs the application under
    # cProfile and prints the top entries (cumulative order, reversed)
    # to stdout when the app exits.
    # NOTE: Python 2 only (StringIO module, print statement below).
    # This is the main function for profiling
    # We've renamed our original main() above to real_main()
    import cProfile, pstats, StringIO, logging
    prof = cProfile.Profile()
    prof = prof.runctx("real_main()", globals(), locals())
    stream = StringIO.StringIO()
    stats = pstats.Stats(prof, stream=stream)
    stats.strip_dirs()
    stats.sort_stats("cumulative") # Or cumulative
    stats.reverse_order()
    stats.print_stats(150) # 100 = how many to print
    # The rest is optional.
    #stats.dump_stats('profile_stats.dat')
    #stats.print_callees()
    #stats.print_callers()
    print stream.getvalue()
    #logging.info("Profile data:\n%s", stream.getvalue())
# Entry point: "-p" as the sole argument runs FileRunner under the
# profiler; anything else starts the GUI normally.
if __name__ == "__main__":
    if len(sys.argv) == 2 and sys.argv[1] == '-p':
        # run FileRunner with statistics [profiler enabled]
        profile_main()
    else:
        real_main()
| Python |
#!/usr/bin/python
import os, sys
sys.path.append(os.getcwd() + '/lib')
sys.path.append(os.getcwd() + '/lib/general')
sys.path.append(os.getcwd() + '/lib/gtk')
sys.path.append(os.getcwd() + '/lib/c')
# my libraries
from CFileRunnerGUI import CFileRunnerGUI
def real_main():
    """Create the FileRunner GUI application and enter its main loop."""
    CFileRunnerGUI().run()
def profile_main():
    # Profiling wrapper around real_main(): runs the application under
    # cProfile and prints the top entries (cumulative order, reversed)
    # to stdout when the app exits.
    # NOTE: Python 2 only (StringIO module, print statement below).
    # This is the main function for profiling
    # We've renamed our original main() above to real_main()
    import cProfile, pstats, StringIO, logging
    prof = cProfile.Profile()
    prof = prof.runctx("real_main()", globals(), locals())
    stream = StringIO.StringIO()
    stats = pstats.Stats(prof, stream=stream)
    stats.strip_dirs()
    stats.sort_stats("cumulative") # Or cumulative
    stats.reverse_order()
    stats.print_stats(150) # 100 = how many to print
    # The rest is optional.
    #stats.dump_stats('profile_stats.dat')
    #stats.print_callees()
    #stats.print_callers()
    print stream.getvalue()
    #logging.info("Profile data:\n%s", stream.getvalue())
# Entry point: "-p" as the sole argument runs FileRunner under the
# profiler; anything else starts the GUI normally.
if __name__ == "__main__":
    if len(sys.argv) == 2 and sys.argv[1] == '-p':
        # run FileRunner with statistics [profiler enabled]
        profile_main()
    else:
        real_main()
| Python |
# Name: myconst.py
#       (header previously said "CFileRunnerGUI.py" -- a copy/paste
#       artifact; the other modules import this file as "myconst")
# Date: Sat Mar 27 09:56:02 CET 2010
# Purpose: Common FileRunner constants
import os
### general const
HOME = os.path.expanduser('~')
#HOME = os.path.expanduser('~/tmp/filerunner_testing/CFG')
VERSION = '<DEVEL-VERSION>'
AUTHOR = 'Filip Husak'
# Per-user configuration directory and the files kept inside it.
CFG_DIR = HOME + '/.filerunner'
CFG_FILE = CFG_DIR + '/00_filerunner.cfg'
HISTORY_FILE = CFG_DIR + '/history'
### GUI const
WIDTH = 600
LABEL = 'F i l e R u n n e r'
TOOLTIP = 'FileRunner'
ICON = 'img/icon.png'
ICON_TRAY = 'img/icon_tray.png'
LICENSE = 'Apache License, Version 2.0'
# Separator rows injected into the entry-completion model between the
# history matches and the full database matches.
ENTRY_COMPLETION_HISTORY_SEP = '--- HISTORY MATCHES --------------------------------------------------'
ENTRY_COMPLETION_ALL_SEP = '--- ALL MATCHES --------------------------------------------------'
ENTRY_COMPLETION_SEPARATOR_CNT = 2 # 2 macros above
# Menu items
MENU_GENERATE_DB_FILE = 'Regenerate database file'
MENU_ABOUT = 'About...'
MENU_EXIT = 'Quit'
# Messages
MSG_GENERATING_DB_FILE = 'The database file is generating. Please wait...'
#MSG_FILEGENERATED = 'The database file was generated successfully.'
MSG_FILEGENERATED_OK = 'The database file was generated successfully.'
MSG_FILEGENERATED_FAILED = 'The database file was not generated. Status code: '
MSG_ENTRY_FILES_LOADED = ' entries were loaded from the database file.'
MSG_PERMISSION_DENIED = 'Cannot access the database file. Permission denied: '
MSG_DB_FILE_NOT_EXISTS = 'The database file does not exist. \n\
You can generate the database file from popup menu. This operation may take a few minutes.'
#MSG_UNKNOWN_ERROR = 'Unknown error: '
MSG_SEARCH_PATH_NOT_EXIST = 'The search path directory does not exist. Check the configuration parameter SEARCH_PATH.'
| Python |
# Name: CDataHnd.py
# Date: Tue Mar 23 23:18:22 CET 2010
# Purpose: main data class of fileRunner application. It loads data from config and data files.
# Def: CDataHnd()
# Inputs:
import os
import sys
import string
import fnmatch
import re
import commands
import errno
import time
# my libraries
from CFile import CFile
from CConfigReader import CConfigReader
from CProcMngr import CProcMngr
from CProfile import CProfile
from CExceptionHnd import CExceptionHnd
import myconst
import mmStrMatch
####################################################################################################
class CDataHnd(CExceptionHnd):
    # Internal state (None until __init__/load()/setters populate it).
    __doExit = None
    __oCfgReader = None                # CConfigReader over myconst.CFG_FILE
    oDBFile = None                     # CFile: generated database file, read mode
    oHistoryFile = None                # CFile: history file, append mode
    oHistoryFileRO = None              # CFile: history file, read-only
    __oProcMngr = None                 # CProcMngr: spawns the configured command
    oDbGen = None
    oExcHnd = None
    __entryBoxText = None # this is set when user inserts the text in the entry box
    __camelCaseExp = None # this is set when user inserts the text in the entry box
    __searchMode = None # 1st bit - wildcard, 2nd bit - regexp
    __entrySepCnt = None               # separator rows still expected in the model
    # Search-mode bit flags, OR-ed together into __searchMode.
    __WILDCARD_SEARCH_MODE = 1
    __REGEXP_SEARCH_MODE = 2
    __CAMELCASE_SEARCH_MODE = 4
    # Config-file section/option names.
    __GENERAL_SECTION = 'GENERAL'
    __DB_FILE_OPTION = 'DB_FILE'
    __SEARCH_PATH_OPTION = 'SEARCH_PATH'
    __SYM_LINK_OPTION = 'SYM_LINK'
    __EXTENSIONS_SECTION = 'EXTENSIONS'
    __SEARCH_EXPRESSION_SECTION = 'SEARCH_EXPRESSION'
    __WILDCARD_OPTION = 'WILDCARD'
    __REGEXP_OPTION = 'REGEXP'
    __CAMELCASE_OPTION = 'CAMELCASE'
    #---------------------------------------------------------------------------------------------------
    def __init__(self):
        # Cheap construction only; config/DB/history I/O happens in load().
        self.__doExit = False
        self.__searchMode = 0
        self.__entrySepCnt = myconst.ENTRY_COMPLETION_SEPARATOR_CNT
        self.__oProfile = CProfile(myconst.CFG_DIR)
        self.__oCfgReader = CConfigReader(myconst.CFG_FILE)
        self.__oProcMngr = CProcMngr()
#---------------------------------------------------------------------------------------------------
    def load(self, loadDbFile = True):
        # Load profile, configuration and (optionally) the database and
        # history files, then derive the search-mode bitmask.
        # loadDbFile is False while the database is being regenerated,
        # so the half-written DB file is not opened.
        self.__oProfile.createProfile()
        self.__oProfile.addFile(os.path.basename(myconst.CFG_FILE))
        self.__oCfgReader.load()
        dbFilename = self.getDbFilename()
        # it is False while regenerating DB
        if loadDbFile is True:
            try:
                self.oDBFile = CFile(dbFilename, 'r')
            except IOError, e:
                #raise CDataHndException(e), IOError(e)#CDataHndException(CDataHndException.DB_FILE_EXC)
                # A missing DB file is an expected state (first run) and
                # gets its own exception type; anything else is re-raised.
                if e.errno == errno.ENOENT:
                    raise CDataHndDBFileNotExistExc(e)
                else:
                    raise IOError(e)
        self.oHistoryFile = CFile(myconst.HISTORY_FILE, 'a+')
        self.oHistoryFileRO = CFile(myconst.HISTORY_FILE, 'r')
        self.setSearchMode()
    #---------------------------------------------------------------------------------------------------
    def getErrno(self):
        # NOTE(review): self.__errno is only created by setErrno();
        # calling getErrno() first raises AttributeError -- confirm
        # whether callers guarantee the ordering.
        return self.__errno
    #---------------------------------------------------------------------------------------------------
    def setErrno(self, val):
        self.__errno = val
#---------------------------------------------------------------------------------------------------
    def getDbFilename(self):
        # Path of the database file: CFG_DIR plus the configured name.
        dbFile = self.__oCfgReader.getValue(self.__GENERAL_SECTION, self.__DB_FILE_OPTION)
        return myconst.CFG_DIR + '/' + dbFile
    #---------------------------------------------------------------------------------------------------
    def getProcMngr(self):
        return self.__oProcMngr
    #---------------------------------------------------------------------------------------------------
    def getSearchMode(self):
        # Bitmask built by setSearchMode() (wildcard/regexp/CamelCase bits).
        return self.__searchMode
#---------------------------------------------------------------------------------------------------
def setSearchMode(self):
wildcard = str(self.__oCfgReader.getValue(self.__SEARCH_EXPRESSION_SECTION, self.__WILDCARD_OPTION))
if wildcard in '1' or wildcard in 'Yes' or wildcard in 'True':
self.__searchMode = 0 | self.__WILDCARD_SEARCH_MODE # set 1st bit
regexp = str(self.__oCfgReader.getValue(self.__SEARCH_EXPRESSION_SECTION, self.__REGEXP_OPTION))
if regexp in '1' or regexp in 'Yes' or regexp in 'True':
self.__searchMode = self.__searchMode | self.__REGEXP_SEARCH_MODE # set 2nd bit
camel = str(self.__oCfgReader.getValue(self.__SEARCH_EXPRESSION_SECTION, self.__CAMELCASE_OPTION))
if camel in '1' or camel in 'Yes' or camel in 'True':
self.__searchMode = self.__searchMode | self.__CAMELCASE_SEARCH_MODE # set 3rd bit
#---------------------------------------------------------------------------------------------------
    def getSearchPath(self):
        # SEARCH_PATH option: newline- or colon-separated directories,
        # normalised to one space-separated string; every directory must
        # exist (see __checkSearchPathsExist).
        path = self.__oCfgReader.getValue(self.__GENERAL_SECTION, self.__SEARCH_PATH_OPTION)
        path = path.replace('\n', ' ')
        path = path.replace(':', ' ')
        self.__checkSearchPathsExist(path)
        return path
    #---------------------------------------------------------------------------------------------------
    def __checkSearchPathsExist(self, paths):
        # Raise IOError if any configured search directory is missing.
        for path in paths.split(' '):
            if os.path.exists(path) is False:
                raise IOError(myconst.MSG_SEARCH_PATH_NOT_EXIST)
    #---------------------------------------------------------------------------------------------------
    def getSymLink(self):
        return self.__oCfgReader.getValue(self.__GENERAL_SECTION, self.__SYM_LINK_OPTION)
    #---------------------------------------------------------------------------------------------------
    def getExtCmd(self):
        # All (extension, command) pairs from the EXTENSIONS section.
        return self.__oCfgReader.getSettings(self.__EXTENSIONS_SECTION)
    #---------------------------------------------------------------------------------------------------
    def getExt(self):
        # Just the extension names from getExtCmd().
        ext = []
        for i in self.getExtCmd():
            ext.append(i[0])
        return ext
    #---------------------------------------------------------------------------------------------------
    # it is called when entry box text is changed
    def setEntryBoxText(self, entryText):
        self.__entryBoxText = entryText
    #---------------------------------------------------------------------------------------------------
    # it is called when entry box text is changed
    def setCamelCaseExpression(self, entryText):
        # Pre-compute the regexp form of a CamelCase query; None when the
        # text is not all-uppercase (see __camelCaseToRegExp).
        self.__camelCaseExp = self.__camelCaseToRegExp(entryText)
    #---------------------------------------------------------------------------------------------------
    # called when the completion model is (re)built
    def setModel(self, model):
        # Cache the model so match_func_C need not fetch it per row.
        self.__model = model
#---------------------------------------------------------------------------------------------------
    def match_func_C(self, completion, key, iter, oEntry):
        # gtk.EntryCompletion match callback using the model cached via
        # setModel() and the mmStrMatch helpers.  Returns True if the row
        # behind 'iter' should be shown for the current entry text.
        # Separator rows are always shown (budgeted by __entrySepCnt);
        # otherwise the result is the OR of all enabled search modes.
        result = False
        text = self.__model.get_value(iter, 0)
        if self.__entrySepCnt > 0:
            if self.__isEntrySeparator(text) is True:
                return True
        # Wildcard
        if self.__searchMode & self.__WILDCARD_SEARCH_MODE:
            if mmStrMatch.wildcard(self.__entryBoxText, text):
                result = True
        # Regexp
        if self.__searchMode & self.__REGEXP_SEARCH_MODE:
            try:
                if mmStrMatch.regExp(self.__entryBoxText, text):
                    result = True
            except re.error:
                # TODO: raise exception
                # Invalid user regexp: keep any match found so far.
                if result is not True:
                    result = False
        # CamelCase
        if self.__searchMode & self.__CAMELCASE_SEARCH_MODE:
            if self.__camelCaseExp is not None:
                if mmStrMatch.camelCase(self.__camelCaseExp, text):
                    result = True
        return result
    #---------------------------------------------------------------------------------------------------
    def match_func_Py(self, completion, key, iter, oEntry):
        # Pure-Python variant of match_func_C (fnmatch/re instead of
        # mmStrMatch); same contract, but reads the model directly from
        # the completion widget on every call.
        result = False
        model = completion.get_model()
        text = model.get_value(iter, 0)
        if self.__isEntrySeparator(text) is True:
            return True
        # Wildcard
        if self.__searchMode & self.__WILDCARD_SEARCH_MODE:
            if fnmatch.fnmatch(text, self.__entryBoxText):
                result = True
        # Regexp
        if self.__searchMode & self.__REGEXP_SEARCH_MODE:
            try:
                if re.search(self.__entryBoxText, text):
                    result = True
            except re.error:
                # TODO: raise exception
                # Invalid user regexp: keep any match found so far.
                if result is not True:
                    result = False
        ## CamelCase
        if self.__searchMode & self.__CAMELCASE_SEARCH_MODE:
            if self.__camelCaseExp is not None:
                if re.search(self.__camelCaseExp, text):
                    result = True
        return result
#---------------------------------------------------------------------------------------------------
    def __isEntrySeparator(self, text):
        # True if 'text' is one of the two separator rows injected into
        # the completion model; each hit also decrements the budget of
        # separators still expected (__entrySepCnt).
        if myconst.ENTRY_COMPLETION_HISTORY_SEP in text:
            self.__entrySepCnt = self.__entrySepCnt - 1
            return True
        if myconst.ENTRY_COMPLETION_ALL_SEP in text:
            self.__entrySepCnt = self.__entrySepCnt - 1
            return True
        return False
    #---------------------------------------------------------------------------------------------------
    def __camelCaseToRegExp(self, camelStr):
        # Convert an all-uppercase query like "FB" into the regexp
        # "F[a-z]*B[a-z]*" (which matches e.g. "FooBar").  Returns None
        # as soon as any character is not uppercase, i.e. the query is
        # not a CamelCase abbreviation.
        # NOTE: string.uppercase is Python 2 only.
        regExpStr = ''
        for index in range(len(camelStr)):
            if camelStr[index] not in string.uppercase:
                return None
            regExpStr += camelStr[index] + '[a-z]*'
        return regExpStr
#---------------------------------------------------------------------------------------------------
def getExtFromFilename(self, filename):
baseFilename = os.path.basename(filename)
filenameParts = baseFilename.split('.')
if len(filenameParts) > 0:
ext = filenameParts[len(filenameParts) - 1]
return ext
else:
return None
#---------------------------------------------------------------------------------------------------
    def runFile(self, file):
        ''' Return True if any file was selected from the entry completion box
        '' Return False if the separator entry was selected
        '''
        # Launch 'file' with the command configured for its extension in
        # the EXTENSIONS section (via CProcMngr) and record it in history.
        cmd = None
        if self.__isEntrySeparator(file) is True:
            return False
        ext = self.getExtFromFilename(file)
        if ext is not None:
            cmd = self.__oCfgReader.getValue(self.__EXTENSIONS_SECTION, ext)
        # TODO: Message - cannot
        # Unknown extension -> cmd stays None and nothing is launched,
        # but the selection is still treated as handled (returns True).
        if cmd is not None:
            self.__oProcMngr.setCmd(cmd + ' ' + file)
            self.__oProcMngr.createProc()
            self.__addFileToHistory(file)
        return True
#---------------------------------------------------------------------------------------------------
    def __addFileToHistory(self, file):
        # Append the launched file to the history file once per distinct
        # file, syncing so the entry survives an abrupt exit.
        if self.__isFileInHistory(file) is False:
            self.oHistoryFile.getFileDesc().write(file + '\n')
            self.oHistoryFile.sync()
#---------------------------------------------------------------------------------------------------
def __isFileInHistory(self, file):
self.oHistoryFile.begin()
result = False
for line in self.oHistoryFile.getFileDesc():
if line.replace('\n', '') == file:
result = True
return result
#---------------------------------------------------------------------------------------------------
    # prepare function before calling match_func
    def prepareBeforeMatch(self, entryText):
        # Refresh the cached query text and CamelCase regexp, and reset
        # the separator budget, before GTK re-runs the match function.
        self.setEntryBoxText(entryText)
        self.setCamelCaseExpression(entryText)
        self.__entrySepCnt = myconst.ENTRY_COMPLETION_SEPARATOR_CNT
####################################################################################################
####################################################################################################
# CLASS EXCEPTIONS
####################################################################################################
class CDataHndDBFileNotExistExc(Exception):
    """Raised by CDataHnd.load() when opening the DB file fails with ENOENT."""
    pass
# TODO: implement CEnum own class
#def __init__(self, e):
#super(CDataHndException, self).__init__(e)
# self.errno = e.errno
# enum for exception type
# CFG_FILE_EXC, DB_FILE_EXC = range(2)
#
# __type = None
#
# def __init__(self, type):
# self.__type= type
#
# def getType(self):
# return self.__type
#
####################################################################################################
| Python |
# Name: CFileRunnerGUI.py
# Date: Tue Mar 23 22:26:28 CET 2010
# Purpose: main GUI class of fileRunner application
# Def: CFileRunnerGUI()
# Inputs:
import os
import sys
import gtk
import gobject
import time
import errno
from threading import Lock
# my libraries
from CDataHnd import CDataHnd
from CTrayIcon import CTrayIcon
from CSimpleMsgDlg import CSimpleMsgDlg
from CAboutDlg import CAboutDlg
from CKeyPressHnd import CKeyPressHnd
from CProgressWndRnd import CProgressWndRnd
from CGenerateDb import CGenerateDb
from CExceptionHnd import CExceptionHnd
from CThread import CThread
import myconst
# my exceptions
from CProcMngr import CProcMngrStatusException
from CDataHnd import CDataHndDBFileNotExistExc
# http://faq.pygtk.org/index.py?file=faq20.006.htp&req=show
gobject.threads_init()
####################################################################################################
class CFileRunnerGUI(gtk.Window, CExceptionHnd):
__height = None
__vbox = None
__entry = None
__completion = None
__listStore = None
__trayIcon = None
__popupMenu = None
__generateDbFileItem = None
__aboutItem = None
__exitItem = None
__oDataHnd = None
__oKeyPressHnd = None
oProgBarWnd = None
oDbGen = None
__totalCntMsg = None
__cleanEntry = None
lock = Lock()
#---------------------------------------------------------------------------------------------------
    def __init__(self):
        # Build the main window: data handler, tray icon, popup menu and
        # the single entry box with completion.  No config/DB I/O here.
        super(CFileRunnerGUI, self).__init__()
        self.__oDataHnd = CDataHnd()
        self.__oDataHnd.setExterExcHndCB(self.handleException)
        self.__oKeyPressHnd = CKeyPressHnd()
        self.__cleanEntry = False
        # set member from CExceptionHnd
        self.setDoExit(False)
        # window settings
        self.set_position(gtk.WIN_POS_CENTER)
        self.set_title(myconst.LABEL)
        gtk.window_set_default_icon_from_file(myconst.ICON)
        self.__createPopupMenu()
        self.__trayIcon = CTrayIcon(myconst.ICON_TRAY, myconst.TOOLTIP, True)
        self.__createInputBox()
        # set window size: fixed width, height taken from the entry widget
        (width, self.__height) = self.__entry.size_request()
        self.set_size_request(myconst.WIDTH, self.__height)
#---------------------------------------------------------------------------------------------------
    def __createInputBox(self):
        # Entry + EntryCompletion backed by a one-string-column ListStore.
        self.__vbox = gtk.VBox()
        self.__entry = gtk.Entry()
        self.__completion = gtk.EntryCompletion()
        self.__listStore = gtk.ListStore(gobject.TYPE_STRING)
        self.__vbox.pack_start(self.__entry)
        self.add(self.__vbox)
        self.__completion.set_text_column(0)
        # match_func_C is the default matcher; the pure-Python variant is
        # kept below for reference.
        self.__completion.set_match_func(self.__oDataHnd.match_func_C, self.__entry)
        #self.__completion.set_match_func(self.__oDataHnd.match_func_Py, self.__entry)
    #---------------------------------------------------------------------------------------------------
    def __createPopupMenu(self):
        # Tray-icon popup menu: regenerate DB / about / quit.
        self.__popupMenu = gtk.Menu()
        self.__generateDbFileItem = gtk.MenuItem(myconst.MENU_GENERATE_DB_FILE)
        self.__aboutItem = gtk.MenuItem(myconst.MENU_ABOUT)
        self.__exitItem = gtk.ImageMenuItem(gtk.STOCK_QUIT)
        self.__popupMenu.append(self.__generateDbFileItem)
        self.__popupMenu.append(self.__aboutItem)
        self.__popupMenu.append(self.__exitItem)
#---------------------------------------------------------------------------------------------------
    def __loadData(self):
        # Load config/DB/history via CDataHnd and populate the completion
        # model; any failure is routed to the shared exception handler.
        try:
            self.__oDataHnd.load()
            self.__loadInputBoxEntries()
        except:
            self.handle()
    #---------------------------------------------------------------------------------------------------
    def __loadInputBoxEntries(self):
        # Completion model layout: history separator, history entries,
        # "all matches" separator, then every entry from the DB file.
        i = 0
        self.__listStore.insert(i, [myconst.ENTRY_COMPLETION_HISTORY_SEP])
        i += 1
        # load all entries [from history file]
        self.__oDataHnd.oHistoryFile.begin()
        for s in self.__oDataHnd.oHistoryFile.getFileDesc():
            self.__listStore.insert(i, [s.replace('\n', '')])
            i += 1
        self.__listStore.insert(i, [myconst.ENTRY_COMPLETION_ALL_SEP])
        i += 1
        # load all entries [from DB file]
        self.__oDataHnd.oDBFile.begin()
        for s in self.__oDataHnd.oDBFile.getFileDesc():
            self.__listStore.insert(i, [s.replace('\n', '')])
            i += 1
        # Entry count excludes the two separator rows.
        entriesCnt = len(self.__listStore) - myconst.ENTRY_COMPLETION_SEPARATOR_CNT
        self.__totalCntMsg = ' ' + str(entriesCnt) + myconst.MSG_ENTRY_FILES_LOADED
        print self.__totalCntMsg
        self.__completion.set_model(self.__listStore)
        self.__entry.set_completion(self.__completion)
        # set model for DataHnd - avoid calling get_model every time when match_func is called
        self.__oDataHnd.setModel(self.__completion.get_model())
    #---------------------------------------------------------------------------------------------------
    def __cleanInputBoxEntries(self):
        # Drop the current completion model (fresh empty ListStore).
        self.__listStore = gtk.ListStore(gobject.TYPE_STRING)
        self.__completion.set_model(None)
        self.__entry.set_completion(self.__completion)
    #---------------------------------------------------------------------------------------------------
    def __cleanEntryBox(self):
        self.__entry.set_text('')
    #---------------------------------------------------------------------------------------------------
    def generateDB(self):
        # Kick off background DB generation: a progress window plus a
        # CGenerateDb worker, chained via callbacks (generation done ->
        # progress window stops -> loadData_cb reloads the data).
        try:
            self.oProgBarWnd = CProgressWndRnd(myconst.MSG_GENERATING_DB_FILE, self)
            # setting callback function
            self.oProgBarWnd.setCbFnc(self.loadData_cb)
            self.oProgBarWnd.start()
            self.oDbGen = CGenerateDb(self.__oDataHnd)
            self.oDbGen.setExterExcHndCB(self.handleException)
            # setting callback function
            self.oDbGen.setCbFnc(self.oProgBarWnd.stop)
            self.oDbGen.start()
        except:
            self.handle()
#---------------------------------------------------------------------------------------------------
    # set the signals [callbacks]
    def __loadSignals(self):
        # NOTE(review): nothing in the visible code calls __loadSignals;
        # presumably run() (not shown here) wires it up -- confirm.
        # main app [window] signals
        self.connect("delete-event", self.__hideWndSignal_cb)
        self.connect("key-press-event", self.__keyPressSignal_cb)
        # entry box signal - while inserting text
        self.__entry.connect('changed', self.__entryBoxChanged_cb)
        # entry completion list box signal
        self.__completion.connect('match-selected', self.__selectCompletionEntrySignal_cb)
        # menu items signals
        self.__generateDbFileItem.connect("activate", self.__regenerateDBFileSignal_cb)
        self.__aboutItem.connect("activate", self.__showAboutDlgSignal_cb)
        self.__exitItem.connect("activate", gtk.main_quit)
        # tray icon signals
        self.__trayIcon.connect("activate", self.__showWndSignal_cb)
        self.__trayIcon.connect("popup-menu", self.__popupMenuSignal_cb)
#---------------------------------------------------------------------------------------------------
# CALLBACKS
#---------------------------------------------------------------------------------------------------
def loadData_cb(self):
# do nothing if APP is going to exit, e.g. if getDoExit returns True
if self.getDoExit() is False:
self.__loadData()
if self.__totalCntMsg is not None:
gobject.idle_add(self.__showMsg, myconst.MSG_FILEGENERATED_OK + str(self.__totalCntMsg))
#---------------------------------------------------------------------------------------------------
    def __hideWndSignal_cb(self, widget, event):
        # Window 'delete-event' handler: hide to the tray instead of closing.
        self.__hideApp()
        # returning True stops the default handler from destroying the window
        return True
#---------------------------------------------------------------------------------------------------
    def __entryBoxChanged_cb(self, editable):
        # Entry 'changed' handler: let the data handler pre-filter its match
        # set for the newly typed expression.
        self.__oDataHnd.prepareBeforeMatch(editable.get_text())
#---------------------------------------------------------------------------------------------------
    def __keyPressSignal_cb(self, widget, event):
        # Forward window key presses to the key-press handler (e.g. Ctrl+Q).
        self.__oKeyPressHnd.setEvent(event)
        self.__oKeyPressHnd.processEvent()
#---------------------------------------------------------------------------------------------------
    def __popupMenuSignal_cb(self, status_icon, button, activate_time):
        # Tray icon right-click handler: show the popup menu.
        self.__popupMenu.show_all()
        self.__popupMenu.popup(None, None, None, button, activate_time, None)
#---------------------------------------------------------------------------------------------------
    def __regenerateDBFileSignal_cb(self, widget):
        # Menu handler: clear the UI entries, reload settings and rebuild the DB.
        self.__cleanInputBoxEntries()
        # NOTE(review): presumably load(False) reloads configuration without
        # reading the DB file -- confirm against CDataHnd.load
        self.__oDataHnd.load(False)
        self.generateDB()
#---------------------------------------------------------------------------------------------------
    def __selectCompletionEntrySignal_cb(self, completion, model, iter):
        # Completion 'match-selected' handler: launch the chosen file.
        if self.__oDataHnd.runFile(model[iter][0]) is False:
            # clean input expression box
            # (deferred: the entry is actually cleared on the next __showApp)
            self.__cleanEntry = True
            self.__hideApp()
#---------------------------------------------------------------------------------------------------
def __showWndSignal_cb(self, icon):
if self.get_property('visible') is False:
self.__showApp()
else:
self.__hideApp()
#---------------------------------------------------------------------------------------------------
    def __showAboutDlgSignal_cb(self, widget):
        # Show the modal About dialog; temporarily reveal the app in the
        # taskbar so the dialog gets normal window-manager treatment.
        self.set_skip_taskbar_hint(False)
        CAboutDlg()
        self.set_skip_taskbar_hint(True)
#---------------------------------------------------------------------------------------------------
# END OF CALLBACKS
#---------------------------------------------------------------------------------------------------
    def __hideApp(self):
        # Hide the main window and, if one exists, the progress window with it.
        if self.oProgBarWnd is not None:
            self.oProgBarWnd.hideAll()
        self.hide_all()
#---------------------------------------------------------------------------------------------------
    def run(self):
        # Application entry point: show the UI, hook up signals, load the data
        # and enter the GTK main loop (blocks until quit).
        self.__showApp()
        self.__loadSignals()
        self.__loadData()
        gtk.main()
#---------------------------------------------------------------------------------------------------
    def __showApp(self):
        # Show the main window (clearing the entry first when a prior launch
        # requested it) together with any active progress window.
        if self.__cleanEntry is True:
            self.__cleanEntryBox()
            self.__cleanEntry = False
        self.set_skip_taskbar_hint(False)
        if self.oProgBarWnd is not None:
            self.oProgBarWnd.showAll()
        self.show_all()
        # keep the app out of the taskbar while it lives in the tray
        self.set_skip_taskbar_hint(True)
#---------------------------------------------------------------------------------------------------
    def __showMsg(self, msg):
        # Display a modal info dialog with *msg*.  Must run on the GTK main
        # loop; callers schedule it with gobject.idle_add.
        self.set_skip_taskbar_hint(False)
        CSimpleMsgDlg(self, msg)
        self.set_skip_taskbar_hint(True)
#---------------------------------------------------------------------------------------------------
    def __doExit(self):
        # Terminate the process with the data handler's recorded errno when an
        # exit was requested.
        if self.__oDataHnd.getDoExit() is True:
            sys.exit(self.__oDataHnd.getErrno())
#---------------------------------------------------------------------------------------------------
def handleException(self, childExcHnd = None):
'''
'' Main exception handling function. The function is thread safe.
'''
if self.lock.acquire():
if self.getExcType() is None:
excHnd = childExcHnd
else:
excHnd = self
#TODO: to be moved to log file
print 'MAIN handleException() is called by:',excHnd
print 'MAIN handleException():',excHnd.getExcType(),excHnd.getExcVal(),excHnd.getMsg()
if excHnd.getExcType() is IOError:
if excHnd.getExcVal().errno == errno.EACCES:
self.setDoExit(True)
elif excHnd.getExcVal().errno == errno.ENOENT:
gobject.idle_add(self.__showMsg, excHnd.getMsg())
self.setDoExit(False)
else:
self.setDoExit(True)
if excHnd.getExcType() is CDataHndDBFileNotExistExc:
gobject.idle_add(self.__showMsg, myconst.MSG_DB_FILE_NOT_EXISTS)
self.setDoExit(False)
#self.generateDB()
#gobject.idle_add(self.generateDB())
#if excHnd. is CDataHndException.DB_FILE_EXC:
#self.setDoExit(True)
if excHnd.getExcType() is CProcMngrStatusException:
self.setDoExit(True)
if self.getDoExit() is True:
exit_text = 'The application will exit due to the following error: ' + excHnd.getMsg()
gobject.idle_add(self.__showMsg, exit_text)
gobject.idle_add(gtk.main_quit)
excHnd.cleanExc()
self.lock.release()
####################################################################################################
| Python |
# Name: CProfile.py
# Date: Thu Apr 8 22:04:26 CEST 2010
# Purpose: Create and update profile. The profile is often created in the $HOME directory.
# Def: CProfile(profileDir)
# Inputs:
import os
import shutil

# my libraries
from CProcMngr import CProcMngr
####################################################################################################
class CProfile():
    '''
    '' Manage an application profile directory (typically under $HOME):
    '' create it, remove it and copy files into it.
    ''
    '' BUG FIX: the original shelled out via string concatenation
    '' ('mkdir -p ' + dir, 'rm -rf ' + dir, 'cp ' + file + ' ' + dir), which
    '' breaks on paths containing spaces/shell metacharacters and is
    '' injection-prone; the equivalent stdlib calls are used instead.
    '''
    # absolute path of the profile directory
    __profileDir = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self, profileDir):
        self.__profileDir = profileDir
    #---------------------------------------------------------------------------------------------------
    def createProfile(self):
        '''Create the profile directory (like 'mkdir -p': parents included, no error if present).'''
        if not os.path.isdir(self.__profileDir):
            os.makedirs(self.__profileDir)
    #---------------------------------------------------------------------------------------------------
    def deleteProfile(self):
        '''Remove the profile directory recursively (like 'rm -rf': silent if missing).'''
        shutil.rmtree(self.__profileDir, True)
    #---------------------------------------------------------------------------------------------------
    def addFile(self, file, overWrite = False):
        '''Copy *file* into the profile dir; keep an existing copy unless overWrite is True.'''
        filePath = os.path.join(self.__profileDir, os.path.basename(file))
        # TODO: raise a specific exception if 'file' does not exist
        if overWrite is True or not os.path.exists(filePath):
            shutil.copy(file, filePath)
####################################################################################################
| Python |
# Name: CProcMngr.py
# Date: Sat Mar 27 10:42:41 CET 2010
# Purpose: Create and show about dialog
# Depends: myconst module
# Def: CProcMngr(widget = None)
# Inputs:
import os
import commands
####################################################################################################
class CProcMngr():
    '''
    '' Runs external commands: captured (status + output) via
    '' getProcStatusOutput(), or fire-and-forget via createProc()/execCmd().
    '''
    # the command line to execute
    __cmd = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self, command = None):
        self.__cmd = command
    #---------------------------------------------------------------------------------------------------
    def getCmd(self):
        '''Return the currently configured command line (or None).'''
        return self.__cmd
    #---------------------------------------------------------------------------------------------------
    def setCmd(self, command):
        '''Set the command line to be executed.'''
        self.__cmd = command
    #---------------------------------------------------------------------------------------------------
    def getProcStatusOutput(self):
        '''
        '' Run the command and return (status, output).
        '' Raises CProcMngrStatusException on a non-zero exit status.
        '''
        (status, output) = commands.getstatusoutput(self.__cmd)
        # BUG FIX: was 'status is not 0' -- an identity test that only works
        # because CPython caches small ints; compare by value instead
        if status != 0:
            raise CProcMngrStatusException(status)
        return (status, output)
    #---------------------------------------------------------------------------------------------------
    def createProc(self, runInBackground = True):
        '''Run the command via os.system; append '&' to detach when requested.'''
        if runInBackground is True:
            os.system(self.__cmd + ' &')
        else:
            os.system(self.__cmd)
    #---------------------------------------------------------------------------------------------------
    def execCmd(self, command, runInBackground = False):
        '''Convenience wrapper: set *command* and run it immediately.'''
        self.setCmd(command)
        self.createProc(runInBackground)
####################################################################################################
####################################################################################################
# CLASS EXCEPTIONS
####################################################################################################
class CProcMngrStatusException(Exception):
    '''Raised when a command executed by CProcMngr exits with a non-zero status.'''
    def __init__(self, status):
        # remember the offending exit status for the caller
        self.__status = status
    def getStatus(self):
        '''Return the non-zero exit status that triggered this exception.'''
        return self.__status
####################################################################################################
| Python |
# Name: CThread.py
# Date: Sat May 1 19:59:54 CEST 2010
# Purpose: The abstract thread class with callback function.
# The base class (which inherits CThread) MUST call run method.
# super(ClassName, self).run()
# Def CThread(name)
# Inputs: name - the name of the thread [string].
import threading
####################################################################################################
class CThread(threading.Thread):
    '''
    '' Abstract thread with an optional completion callback.
    '' A subclass that overrides run() MUST finish with
    '' super(SubClass, self).run() so the callback still fires.
    '''
    # callable invoked at the end of run(); None means "no callback"
    __cbFnc = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self, name):
        super(CThread, self).__init__()
        self.setName(name)
    #---------------------------------------------------------------------------------------------------
    def setCbFnc(self, cbFunction):
        '''Register the callable to invoke when run() completes.'''
        self.__cbFnc = cbFunction
    #---------------------------------------------------------------------------------------------------
    def run(self):
        # fire the registered completion callback, if any
        callback = self.__cbFnc
        if callback is None:
            return
        callback()
####################################################################################################
| Python |
# Name: CExceptionHnd.py
# Date: Sun May 2 15:18:25 CEST 2010
# Purpose: Handles exceptions
# Def CExceptionHnd(extExcHnd_cb)
# Inputs:
import sys
####################################################################################################
class CExceptionHnd():
    '''
    '' Base class for handling exceptions.
    '' Derived classes call handle() from an except block; handle() snapshots
    '' sys.exc_info(), performs generic handling, lets the subclass refine it
    '' via handleException(), and finally forwards itself to the external
    '' handler registered with setExterExcHndCB().
    '''
    # type of the last caught exception (sys.exc_info()[0])
    __excType = None
    # value/instance of the last caught exception (sys.exc_info()[1])
    __excVal = None
    # human-readable message derived from the exception
    __msg = None
    # True when the application should terminate because of the exception
    __doExit = None
    # callable object which should reference to external function 'handleException' in derived class
    __extExcHnd_cb = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self):
        #self.__errno = 0
        self.__doExit = False
    #---------------------------------------------------------------------------------------------------
    # def hasException(self):
    # if self.__errno == 0:
    # return False
    # else:
    # return True
    #---------------------------------------------------------------------------------------------------
    #def handle(self, e = None):
    def handle(self):
        ''' handles known exceptions; must be called from inside an except block '''
        # snapshot the exception currently being handled
        self.__excType, self.__excVal = sys.exc_info()[:2]
        #TODO: to be moved to the log file
        print 'CExceptionHnd:',self.__class__,self.__excType,self.__excVal
        # handling general exceptions
        if self.__excType is IOError:
            # prefer "strerror: 'filename'" when a filename is available
            if self.__excVal.filename is not None:
                self.__msg = self.__excVal.strerror + ": '" + self.__excVal.filename + "'"
            else:
                self.__msg = self.__excVal
            self.__doExit = True
        # give the derived class a chance to refine the handling
        self.handleException()
        if self.__extExcHnd_cb is not None:
            # forward to the external handler, passing self so the callee can
            # inspect and later clean the recorded exception state
            self.__extExcHnd_cb(self)
        else:
            print 'No external exception handler defined.'
    #---------------------------------------------------------------------------------------------------
    def cleanExc(self):
        # reset the recorded exception state once it has been handled
        self.__excType = None
        self.__excVal = None
        self.__msg = None
    #---------------------------------------------------------------------------------------------------
    # override method in the derived class
    def handleException(self):
        pass
    #---------------------------------------------------------------------------------------------------
    def getExcType(self):
        return self.__excType
    #---------------------------------------------------------------------------------------------------
    def setExcType(self, val):
        self.__excType = val
    #---------------------------------------------------------------------------------------------------
    def getExcVal(self):
        return self.__excVal
    #---------------------------------------------------------------------------------------------------
    def setExcVal(self, val):
        self.__excVal = val
    #---------------------------------------------------------------------------------------------------
    def getMsg(self):
        # always stringified: __msg may hold either a str or an exception object
        return str(self.__msg)
    #---------------------------------------------------------------------------------------------------
    def setMsg(self, val):
        self.__msg = val
    #---------------------------------------------------------------------------------------------------
    def getDoExit(self):
        return self.__doExit
    #---------------------------------------------------------------------------------------------------
    def setDoExit(self, val):
        self.__doExit = val
    #---------------------------------------------------------------------------------------------------
    def getExterExcHndCB(self):
        return self.__extExcHnd_cb
    #---------------------------------------------------------------------------------------------------
    def setExterExcHndCB(self, fnc):
        # silently ignores non-callable arguments
        if callable(fnc):
            self.__extExcHnd_cb = fnc
####################################################################################################
| Python |
# Name: CFile.py
# Date: Tue Mar 9 15:30:27 CET 2010
# Purpose: The class handles file descriptor
# Def: CFile(filename, mode)
# Inputs: filename - the name of file to work with
# mode - the mode in which the file is opened [r, w]
import os
import sys
####################################################################################################
class CFile():
    '''
    '' Thin wrapper around a file object: opens the file on construction and
    '' closes it when the wrapper is garbage collected.
    '''
    # path of the wrapped file
    __fileName = None
    # open() mode string, e.g. 'r' or 'w'
    __mode = None
    # the underlying file object, or None if open failed
    __fileDesc = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self, name, mode):
        '''name -- path of the file; mode -- open() mode string.  Raises IOError on failure.'''
        self.__fileName = name
        self.__mode = mode
        self.__load()
    #---------------------------------------------------------------------------------------------------
    def __del__(self):
        # only close if the open() in __load succeeded
        if self.__fileDesc is not None:
            self.__fileDesc.close()
    #---------------------------------------------------------------------------------------------------
    def __load(self):
        try:
            self.__fileDesc = open(self.__fileName, self.__mode)
        except IOError:
            # BUG FIX: was "except IOError, e: ... raise e" -- Python-2-only
            # syntax, and 'raise e' discards the original traceback.  Keep the
            # object consistent for __del__, then re-raise unchanged.
            # TODO: Create Logger class
            self.__fileDesc = None
            raise
    #---------------------------------------------------------------------------------------------------
    def getFileName(self):
        '''Return the wrapped file's path.'''
        return self.__fileName
    #---------------------------------------------------------------------------------------------------
    def getFileDesc(self):
        '''Return the underlying file object (None when open failed).'''
        return self.__fileDesc
    #---------------------------------------------------------------------------------------------------
    def begin(self):
        '''Rewind the file to its first byte.'''
        self.__fileDesc.seek(0, 0)
    #---------------------------------------------------------------------------------------------------
    def sync(self):
        '''Flush Python buffers and force the OS to write the file to disk.'''
        self.__fileDesc.flush()
        os.fsync(self.__fileDesc.fileno())
####################################################################################################
| Python |
# Name: CConfigReader.py
# Date: Sun Mar 21 19:11:46 CET 2010
# Purpose: Load and parse config file and get variables/valus for given section
# Def: ConfigReader(fileName)
# Inputs: filename - the name of config file
import ConfigParser
class CConfigReader():
    '''
    '' Load and parse a config file; expose its sections, options and values.
    '''
    # path of the config file
    __cfgFilename = None
    __oCfgParser = None # raw config parser object
    #---------------------------------------------------------------------------------------------------
    def __init__(self, filename):
        self.__cfgFilename = filename
    #---------------------------------------------------------------------------------------------------
    def getCfgFilename(self):
        '''Return the path of the config file.'''
        return self.__cfgFilename
    #---------------------------------------------------------------------------------------------------
    def setCfgFilename(self, filename):
        '''Point the reader at a different config file (takes effect on next load()).'''
        self.__cfgFilename = filename
    #---------------------------------------------------------------------------------------------------
    def load(self):
        '''Parse the config file; must be called before getSettings()/getValue().'''
        self.__oCfgParser = ConfigParser.RawConfigParser()
        self.__oCfgParser.read(self.getCfgFilename())
    #---------------------------------------------------------------------------------------------------
    def getSettings(self, section = None):
        '''
        '' Return the (option, value) items of *section*, the list of section
        '' names when *section* is None, or None when load() was never called.
        '''
        # TODO: raise exception if oCfgParser is None
        if self.__oCfgParser is None:
            return None
        if section is None:
            return self.__oCfgParser.sections()
        return self.__oCfgParser.items(section)
    #---------------------------------------------------------------------------------------------------
    def getValue(self, section, option):
        '''Return the value of *option* inside *section*.'''
        # TODO: raise exception if section/option does not exist
        return self.__oCfgParser.get(section, option)
####################################################################################################
| Python |
#!/usr/bin/python
# Build script for the 'mmStrMatch' C extension (fast string/pattern matching).
# Usage: python setup.py build
# NOTE(review): distutils is deprecated (removed in Python 3.12); migrate to
# setuptools when the project moves off Python 2.
from distutils.core import setup, Extension
# the extension module is compiled from a single C source file
module1 = Extension('mmStrMatch', sources = ['mmStrMatch.c'])
setup (name = 'mmStrMatch',
version = '1.0',
description = 'My Module for matching string and pattern. Module is written in C.',
ext_modules = [module1])
| Python |
#!/usr/bin/python
# Build script for the 'mmStrMatch' C extension (fast string/pattern matching).
# NOTE(review): this file is byte-identical to the previous setup script --
# likely a duplicated copy in the source tree; consider keeping only one.
from distutils.core import setup, Extension
# the extension module is compiled from a single C source file
module1 = Extension('mmStrMatch', sources = ['mmStrMatch.c'])
setup (name = 'mmStrMatch',
version = '1.0',
description = 'My Module for matching string and pattern. Module is written in C.',
ext_modules = [module1])
| Python |
# Name: CTrayIcon.py
# Date: Sat Mar 27 09:41:29 CET 2010
# Purpose:
# Def: CTrayIcon()
# Inputs:
import gtk
####################################################################################################
class CTrayIcon(gtk.StatusIcon):
    '''System-tray status icon initialised from an image file with a tooltip.'''
    # path of the icon image file
    __file = None
    # tooltip text shown when hovering the icon
    __tooltip = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self, iconFile, iconTooltip, iconVisible = False):
        '''
        '' iconFile    -- path of the image used for the tray icon
        '' iconTooltip -- hover text
        '' iconVisible -- initial visibility (hidden by default)
        '''
        super(CTrayIcon, self).__init__()
        self.__file = iconFile
        self.__tooltip = iconTooltip
        self.set_visible(iconVisible)
        self.set_from_file(self.__file)
        self.set_tooltip(self.__tooltip)
####################################################################################################
| Python |
# Name: CProgressWndRnd.py
# Date: Sat May 1 20:56:56 CEST 2010
# Purpose: Show random status of progress bar. It is used while running long-time operation.
# Depends: CThread
# Def: CProgressWndRnd()
# Inputs:
from threading import Event
import random, time
import gtk
import gobject
# my libraries
from CThread import CThread
####################################################################################################
class CProgressWndRnd(CThread):
    '''
    ''Show random status of progress bar
    '' Runs as a thread: pulses the bar with random fractions every __delay
    '' seconds until stop() sets the event.  All widget calls are marshalled
    '' onto the GTK main loop with gobject.idle_add.
    '''
    __NAME='CProgressWndRnd Thread'
    # window geometry in pixels
    __width = 450
    __height = 40
    # seconds between two progress-bar updates
    __delay = 0.3
    event = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self, text, parentWnd = None):
        '''text -- label shown inside the progress bar; parentWnd -- currently unused.'''
        super(CProgressWndRnd, self).__init__(self.__NAME)
        # set by stop() to terminate the update loop in run()
        self.threadEvent = Event()
        #self.wnd = gtk.Dialog(text, parentWnd, gtk.DIALOG_MODAL | gtk.DIALOG_NO_SEPARATOR)
        self.wnd = gtk.Window(gtk.WINDOW_POPUP)
        self.wnd.set_position(gtk.WIN_POS_CENTER)
        self.wnd.set_size_request(self.__width, self.__height)
        self.progBar = gtk.ProgressBar()
        self.progBar.set_text(text)
        self.wnd.add(self.progBar)
        self.wnd.connect('destroy', self.stop)
        #gtk.gdk.threads_init()
        gobject.threads_init()
    #---------------------------------------------------------------------------------------------------
    def run(self):
        #While the stopthread event isn't setted, the thread keeps going on
        #self.wnd.show_all()
        gobject.idle_add(self.wnd.show_all)
        while not self.threadEvent.isSet():
            # Acquiring the gtk global mutex
            #gtk.gdk.threads_enter()
            #Setting a random value for the fraction
            #self.progBar.set_fraction(random.random())
            gobject.idle_add(self.progBar.set_fraction, random.random())
            # Releasing the gtk global mutex
            #gtk.gdk.threads_leave()
            time.sleep(self.__delay)
        #self.wnd.hide_all()
        gobject.idle_add(self.wnd.hide_all)
        # fires the completion callback registered via CThread.setCbFnc
        super(CProgressWndRnd, self).run()
    #---------------------------------------------------------------------------------------------------
    def stop(self, waitFor = None):
        '''End the progress loop; optionally join *waitFor* (a thread) first.'''
        if waitFor is not None:
            waitFor.join()
        self.threadEvent.set()
    #---------------------------------------------------------------------------------------------------
    def getHeight(self):
        return self.__height
    #---------------------------------------------------------------------------------------------------
    def setHeight(self, height):
        # NOTE(review): only affects windows created after this call
        self.__height = height
    #---------------------------------------------------------------------------------------------------
    def getWidth(self, width):
        # NOTE(review): the 'width' parameter is unused -- looks like a
        # copy-paste slip from setWidth
        return self.__width
    #---------------------------------------------------------------------------------------------------
    def setWidth(self, width):
        self.__width = width
    #---------------------------------------------------------------------------------------------------
    def hideAll(self):
        # only meaningful while the progress loop is still running
        if not self.threadEvent.isSet():
            self.wnd.hide_all()
    #---------------------------------------------------------------------------------------------------
    def showAll(self):
        if not self.threadEvent.isSet():
            self.wnd.show_all()
####################################################################################################
| Python |
# Name: CKeyPressHnd.py
# Date: Sun Mar 28 21:24:42 CEST 2010
# Purpose: Handles key pressing
# Def: CKeyPressHnd()
# Inputs:
import gtk
####################################################################################################
class CKeyPressHnd():
    '''
    '' Translates stored key-press events into application actions.
    '''
    # the gtk.gdk.Event to examine (or None)
    __event = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self, event = None):
        self.__event = event
    #---------------------------------------------------------------------------------------------------
    def getEvent(self):
        '''Return the stored key-press event (or None).'''
        return self.__event
    #---------------------------------------------------------------------------------------------------
    def setEvent(self, event):
        '''Store the event to be examined by processEvent().'''
        self.__event = event
    #---------------------------------------------------------------------------------------------------
    def processEvent(self):
        '''Act on the stored event; currently only Ctrl+Q (quit) is handled.'''
        event = self.__event
        if not (event.state & gtk.gdk.CONTROL_MASK):
            return
        # TODO: use callback function in the main class instead
        if gtk.gdk.keyval_name(event.keyval).upper() == 'Q':
            gtk.main_quit()
####################################################################################################
| Python |
# Name: CAboutDlg.py
# Date: Sat Mar 27 10:42:41 CET 2010
# Purpose: Create and show about dialog
# Depends: myconst module
# Def: CAboutDlg(widget = None)
# Inputs: widget
import gtk
# my libraries
import myconst
####################################################################################################
class CAboutDlg(gtk.AboutDialog):
    '''
    '' Modal About dialog populated from myconst; blocks in __init__ until the
    '' user closes it, then destroys itself.
    '''
    #---------------------------------------------------------------------------------------------------
    def __init__(self, widget = None, parentWnd = None):
        # NOTE(review): 'widget' and 'parentWnd' are accepted but unused here
        super(CAboutDlg, self).__init__()
        self.set_name(myconst.TOOLTIP)
        self.set_version(myconst.VERSION)
        self.set_license(myconst.LICENSE)
        self.set_authors([myconst.AUTHOR])
        # run() blocks until the dialog is dismissed
        self.run()
        self.destroy()
####################################################################################################
| Python |
# Name: CSimpleMsgDlg.py
# Date: Sat Mar 27 10:42:41 CET 2010
# Purpose: Create and show simple message dialog
# Def: CSimpleMsgDlg(msg)
# Inputs: msg - message to be diplayed in the dialog
import gtk
####################################################################################################
class CSimpleMsgDlg(gtk.MessageDialog):
    '''
    '' Modal OK-only info dialog: shows *message*, blocks in __init__ until
    '' dismissed, then destroys itself.
    '''
    # window the dialog is modal for
    __parentWnd = None
    # text displayed inside the dialog
    __msg = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self, parentWindow, message):
        self.__parentWnd = parentWindow
        self.__msg = message
        super(CSimpleMsgDlg, self).__init__(self.__parentWnd, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, self.__msg)
        # run() blocks until the user clicks OK
        self.run()
        self.destroy()
####################################################################################################
| Python |
# Name: CGenerateDb.py
# Date: Sat May 1 20:37:18 CEST 2010
# Purpose: Generate DB file.
# Depends: CThread, CFile
# Def: CGenerateDB(dataHandler)
# Inputs: dataHandler - instance of CDataHnd
import sys
import gtk
# my libraries
from CThread import CThread
from CFile import CFile
from CProcMngr import CProcMngrStatusException
from CExceptionHnd import CExceptionHnd
import myconst
import time
####################################################################################################
class CGenerateDb(CThread, CExceptionHnd):
    '''
    '' Worker thread that (re)generates the DB file: runs 'find' over the
    '' configured search path and writes the matching file list to disk.
    '''
    __NAME='CGenerateDb Thread'
    # the CDataHnd supplying settings and receiving the DB file object
    __oDataHnd = None
    __progBarWnd = None
    #---------------------------------------------------------------------------------------------------
    def __init__(self, dataHandler):
        '''dataHandler -- CDataHnd instance providing config and the process manager.'''
        super(CGenerateDb, self).__init__(self.__NAME)
        self.__oDataHnd = dataHandler
    #---------------------------------------------------------------------------------------------------
    def run(self):
        try:
            findCmd = self.__buildFindCmd()
            self.__oDataHnd.getProcMngr().setCmd(findCmd)
            (status, output) = self.__oDataHnd.getProcMngr().getProcStatusOutput()
            try:
                self.__oDataHnd.oDBFile = CFile(self.__oDataHnd.getDbFilename(), 'w')
                self.__oDataHnd.oDBFile.getFileDesc().write(output + '\n')
            except IOError:
                self.handle()
        except CProcMngrStatusException as e:
            # 'find' exited non-zero: report the status through the handler
            self.setMsg(myconst.MSG_FILEGENERATED_FAILED + str(e.getStatus()))
            self.handle()
        except IOError:
            self.handle()
        finally:
            # always fire the completion callback (CThread.run), even on error
            super(CGenerateDb, self).run()
    #---------------------------------------------------------------------------------------------------
    def __buildFindCmd(self):
        '''Build the 'find' command line from the data handler's settings.'''
        followSymLink = str(self.__oDataHnd.getSymLink())
        searchPath = self.__oDataHnd.getSearchPath()
        # BUG FIX: the original used substring tests ("followSymLink in '1'"),
        # so '' , 'e' or 'Y' all counted as true; compare against the accepted
        # literals instead (case-insensitive)
        if followSymLink.lower() in ('1', 'yes', 'true'):
            cmd = 'find -L '
        else:
            cmd = 'find '
        cmd += searchPath
        # OR-join one case-insensitive name test per configured extension
        prependOr = False
        for ext in self.__oDataHnd.getExt():
            if prependOr:
                cmd += ' -or'
            cmd += ' -iname \'*' + ext + '\''
            prependOr = True
        return cmd
####################################################################################################
| Python |
#!/usr/bin/python
import os, sys
sys.path.append(os.getcwd() + '/lib')
sys.path.append(os.getcwd() + '/lib/general')
sys.path.append(os.getcwd() + '/lib/gtk')
sys.path.append(os.getcwd() + '/lib/c')
# my libraries
from CFileRunnerGUI import CFileRunnerGUI
def real_main():
    '''Create the FileRunner GUI application and enter its main loop.'''
    app = CFileRunnerGUI()
    app.run()
def profile_main():
    '''Run real_main() under cProfile and print the collected statistics.'''
    # This is the main function for profiling
    # We've renamed our original main() above to real_main()
    import cProfile, pstats, StringIO, logging
    prof = cProfile.Profile()
    prof = prof.runctx("real_main()", globals(), locals())
    stream = StringIO.StringIO()
    stats = pstats.Stats(prof, stream=stream)
    stats.strip_dirs()
    stats.sort_stats("cumulative") # Or cumulative
    stats.reverse_order()
    stats.print_stats(150) # 100 = how many to print
    # The rest is optional.
    #stats.dump_stats('profile_stats.dat')
    #stats.print_callees()
    #stats.print_callers()
    print stream.getvalue()
    #logging.info("Profile data:\n%s", stream.getvalue())
if __name__ == "__main__":
if len(sys.argv) == 2:
if sys.argv[1] == '-p':
profile_main()
# run FileRunner with statistics [profiler enabled]
else:
real_main()
else:
real_main()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
This query provides a useful breakdown of the language distribution in the training data, showing which languages have the most samples and helping to identify potential imbalances across the different language groups.